[WIP] ng-9: Driver for nodegroup operations

This adds support for creating and deleting worker nodegroups, using a
different Heat stack per nodegroup. In order to be backwards compatible,
the default nodegroups remain in a single stack.

With this in mind, the cluster status is now calculated by aggregating
the statuses of the underlying stacks.

Change-Id: I97839ab8495ed5d860785dff1f6e3cc59b6a9ff7
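
As a rough illustration of the aggregation described above (a minimal
standalone sketch, not code from this patch; it mirrors the new
aggregate_nodegroup_statuses() logic using plain status strings instead of
the ClusterStatus fields):

    import collections

    NodeGroupStatus = collections.namedtuple('NodeGroupStatus',
                                             'status reason is_default')

    def aggregate(ng_statuses):
        # IN_PROGRESS takes priority over FAILED, which takes priority
        # over COMPLETE. CREATE_*/DELETE_* are only reported when every
        # default nodegroup agrees; anything mixed surfaces as UPDATE_*.
        for state in ('_IN_PROGRESS', '_FAILED', '_COMPLETE'):
            if any(ns.status.endswith(state) for ns in ng_statuses):
                status = 'UPDATE' + state
                for action in ('CREATE', 'DELETE'):
                    if all(ns.status == action + state
                           for ns in ng_statuses if ns.is_default):
                        status = action + state
                return status

    statuses = [NodeGroupStatus('CREATE_COMPLETE', '', True),
                NodeGroupStatus('CREATE_IN_PROGRESS', '', False)]
    print(aggregate(statuses))  # -> UPDATE_IN_PROGRESS
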
Theodoros Tsioutsias 2019-06-24 13:49:42 +02:00
parent c981cc9170
commit de94afde5f
6 changed files with 281 additions and 53 deletions
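
The delete path in the Heat driver diff below removes each non-default
nodegroup's stack first and the stack shared by the default nodegroups
last. A minimal sketch of that ordering, assuming simplified stand-in
objects rather than the real Magnum models:

    class FakeHeatStacks(object):
        def delete(self, stack_id):
            print('deleting stack %s' % stack_id)

    def delete_cluster_stacks(heat_stacks, nodegroups, default_stack_id):
        # Non-default (worker) nodegroups own their stacks: drop them first.
        for ng in nodegroups:
            if ng['is_default']:
                continue
            heat_stacks.delete(ng['stack_id'])
        # The default nodegroups share a single stack: delete it last.
        heat_stacks.delete(default_stack_id)

    nodegroups = [{'is_default': True, 'stack_id': 'cluster-stack'},
                  {'is_default': False, 'stack_id': 'ng1-stack'}]
    delete_cluster_stacks(FakeHeatStacks(), nodegroups, 'cluster-stack')
    # deleting stack ng1-stack
    # deleting stack cluster-stack
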


@@ -11,6 +11,7 @@
 # under the License.

 import abc
+import collections
 import os

 import six
@@ -38,6 +39,10 @@ from magnum.objects import fields
 LOG = logging.getLogger(__name__)

+NodeGroupStatus = collections.namedtuple('NodeGroupStatus',
+                                         'status reason is_default')
+

 @six.add_metaclass(abc.ABCMeta)
 class HeatDriver(driver.Driver):
     """Base Driver class for using Heat
@@ -58,12 +63,14 @@ class HeatDriver(driver.Driver):
                                   scale_manager=scale_manager)

     def _extract_template_definition(self, context, cluster,
-                                     scale_manager=None):
+                                     scale_manager=None,
+                                     nodegroups=None):
         cluster_template = conductor_utils.retrieve_cluster_template(context,
                                                                      cluster)
         definition = self.get_template_definition()

         return definition.extract_definition(context, cluster_template,
                                              cluster,
+                                             nodegroups=nodegroups,
                                              scale_manager=scale_manager)

     def _get_env_files(self, template_path, env_rel_paths):
@@ -135,7 +142,12 @@ class HeatDriver(driver.Driver):
         self.pre_delete_cluster(context, cluster)

         LOG.info("Starting to delete cluster %s", cluster.uuid)
-        self._delete_stack(context, clients.OpenStackClients(context), cluster)
+        osc = clients.OpenStackClients(context)
+        for ng in cluster.nodegroups:
+            if ng.is_default:
+                continue
+            self._delete_stack(context, osc, ng.stack_id)
+        self._delete_stack(context, osc, cluster.default_ng_master.stack_id)

     def resize_cluster(self, context, cluster, resize_manager,
                        node_count, nodes_to_remove, nodegroup=None,
@@ -174,6 +186,8 @@ class HeatDriver(driver.Driver):
             # no cluster_create_timeout value was passed in to the request
             # so falling back on configuration file value
             heat_timeout = cfg.CONF.cluster_heat.create_timeout
+
+        heat_params['is_cluster_stack'] = True
         fields = {
             'stack_name': stack_name,
             'parameters': heat_params,
@@ -222,10 +236,10 @@ class HeatDriver(driver.Driver):

         # Find what changed checking the stack params
         # against the ones in the template_def.
-        stack = osc.heat().stacks.get(cluster.stack_id,
+        stack = osc.heat().stacks.get(nodegroup.stack_id,
                                       resolve_outputs=True)
         stack_params = stack.parameters
-        definition.add_nodegroup_params(cluster)
+        definition.add_nodegroup_params(cluster, nodegroups=[nodegroup])
         heat_params = definition.get_stack_diff(context, stack_params, cluster)

         LOG.debug('Updating stack with these params: %s', heat_params)
@@ -241,10 +255,10 @@ class HeatDriver(driver.Driver):
         }

         osc = clients.OpenStackClients(context)
-        osc.heat().stacks.update(cluster.stack_id, **fields)
+        osc.heat().stacks.update(nodegroup.stack_id, **fields)

-    def _delete_stack(self, context, osc, cluster):
-        osc.heat().stacks.delete(cluster.stack_id)
+    def _delete_stack(self, context, osc, stack_id):
+        osc.heat().stacks.delete(stack_id)


 class HeatPoller(object):
@@ -260,39 +274,79 @@ class HeatPoller(object):
     def poll_and_check(self):
         # TODO(yuanying): temporary implementation to update api_address,
         # node_addresses and cluster status
+        ng_statuses = list()
+        for nodegroup in self.cluster.nodegroups:
+            self.nodegroup = nodegroup
+            status = self.extract_nodegroup_status()
+            ng_statuses.append(status)
+        self.aggregate_nodegroup_statuses(ng_statuses)
+
+    def extract_nodegroup_status(self):
         try:
             # Do not resolve outputs by default. Resolving all
             # node IPs is expensive on heat.
             stack = self.openstack_client.heat().stacks.get(
-                self.cluster.stack_id, resolve_outputs=False)
+                self.nodegroup.stack_id, resolve_outputs=False)
+
+            # poll_and_check is detached and polling long time to check
+            # status, so another user/client can call delete cluster/stack.
+            if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
+                if self.nodegroup.is_default:
+                    if self.nodegroup.role == 'master':
+                        # Delete all cluster related info
+                        self._delete_complete()
+                else:
+                    self.nodegroup.destroy()
+
+            if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
+                                      fields.ClusterStatus.UPDATE_COMPLETE):
+                # Resolve all outputs if the stack is COMPLETE
+                stack = self.openstack_client.heat().stacks.get(
+                    self.nodegroup.stack_id, resolve_outputs=True)
+
+                self._sync_cluster_and_template_status(stack)
+            elif stack.stack_status != self.nodegroup.status:
+                self.template_def.nodegroup_output_mappings = list()
+                self.template_def.update_outputs(
+                    stack, self.cluster_template, self.cluster,
+                    nodegroups=[self.nodegroup])
+                self._sync_cluster_status(stack)
+
+            if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
+                                      fields.ClusterStatus.DELETE_FAILED,
+                                      fields.ClusterStatus.UPDATE_FAILED,
+                                      fields.ClusterStatus.ROLLBACK_COMPLETE,
+                                      fields.ClusterStatus.ROLLBACK_FAILED):
+                self._sync_cluster_and_template_status(stack)
+                self._cluster_failed(stack)
         except heatexc.NotFound:
             self._sync_missing_heat_stack()
-            return
-
-        # poll_and_check is detached and polling long time to check status,
-        # so another user/client can call delete cluster/stack.
-        if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
-            self._delete_complete()
-
-        if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
-                                  fields.ClusterStatus.UPDATE_COMPLETE):
-            # Resolve all outputs if the stack is COMPLETE
-            stack = self.openstack_client.heat().stacks.get(
-                self.cluster.stack_id, resolve_outputs=True)
-
-            self._sync_cluster_and_template_status(stack)
-        elif stack.stack_status != self.cluster.status:
-            self.template_def.update_outputs(stack, self.cluster_template,
-                                             self.cluster)
-            self._sync_cluster_status(stack)
-
-        if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
-                                  fields.ClusterStatus.DELETE_FAILED,
-                                  fields.ClusterStatus.UPDATE_FAILED,
-                                  fields.ClusterStatus.ROLLBACK_COMPLETE,
-                                  fields.ClusterStatus.ROLLBACK_FAILED):
-            self._sync_cluster_and_template_status(stack)
-            self._cluster_failed(stack)
+        return NodeGroupStatus(status=self.nodegroup.status,
+                               is_default=self.nodegroup.is_default,
+                               reason=self.nodegroup.status_reason)
+
+    def aggregate_nodegroup_statuses(self, ng_statuses):
+        # NOTE(ttsiouts): Aggregate the nodegroup statuses and set the
+        # cluster overall status.
+        FAIL = '_FAILED'
+        IN_PROGRESS = '_IN_PROGRESS'
+        COMPLETE = '_COMPLETE'
+
+        CREATE = 'CREATE'
+        DELETE = 'DELETE'
+        UPDATE = 'UPDATE'
+
+        # Keep priority to the states below
+        for state in (IN_PROGRESS, FAIL, COMPLETE):
+            if any(ns.status.endswith(state) for ns in ng_statuses):
+                status = getattr(fields.ClusterStatus, UPDATE+state)
+                self.cluster.status = status
+                for action in (CREATE, DELETE):
+                    if all(ns.status == action+state
+                           for ns in ng_statuses if ns.is_default):
+                        status = getattr(fields.ClusterStatus, action+state)
+                        self.cluster.status = status
+                break
+
+        self.cluster.save()

     def _delete_complete(self):
         LOG.info('Cluster has been deleted, stack_id: %s',
@@ -311,9 +365,9 @@ class HeatPoller(object):
                  self.cluster.uuid)

     def _sync_cluster_status(self, stack):
-        self.cluster.status = stack.stack_status
-        self.cluster.status_reason = stack.stack_status_reason
-        self.cluster.save()
+        self.nodegroup.status = stack.stack_status
+        self.nodegroup.status_reason = stack.stack_status_reason
+        self.nodegroup.save()

     def get_version_info(self, stack):
         stack_param = self.template_def.get_heat_param(
@@ -330,8 +384,10 @@ class HeatPoller(object):
         self.cluster.container_version = container_version

     def _sync_cluster_and_template_status(self, stack):
+        self.template_def.nodegroup_output_mappings = list()
         self.template_def.update_outputs(stack, self.cluster_template,
-                                         self.cluster)
+                                         self.cluster,
+                                         nodegroups=[self.nodegroup])
         self.get_version_info(stack)
         self._sync_cluster_status(stack)
@@ -345,19 +401,20 @@ class HeatPoller(object):
     def _sync_missing_heat_stack(self):
         if self.cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
-            self._delete_complete()
+            if self.nodegroup.is_default and self.nodegroup.role == 'master':
+                self._delete_complete()
         elif self.cluster.status == fields.ClusterStatus.CREATE_IN_PROGRESS:
             self._sync_missing_stack(fields.ClusterStatus.CREATE_FAILED)
         elif self.cluster.status == fields.ClusterStatus.UPDATE_IN_PROGRESS:
             self._sync_missing_stack(fields.ClusterStatus.UPDATE_FAILED)

     def _sync_missing_stack(self, new_status):
-        self.cluster.status = new_status
-        self.cluster.status_reason = _("Stack with id %s not found in "
-                                       "Heat.") % self.cluster.stack_id
-        self.cluster.save()
-        LOG.info("Cluster with id %(id)s has been set to "
+        self.nodegroup.status = new_status
+        self.nodegroup.status_reason = _("Stack with id %s not found in "
+                                         "Heat.") % self.cluster.stack_id
+        self.nodegroup.save()
+        LOG.info("Nodegroup with id %(id)s has been set to "
                  "%(status)s due to stack with id %(sid)s "
                  "not found in Heat.",
-                 {'id': self.cluster.id, 'status': self.cluster.status,
-                  'sid': self.cluster.stack_id})
+                 {'id': self.nodegroup.uuid, 'status': self.nodegroup.status,
+                  'sid': self.nodegroup.stack_id})


@@ -12,13 +12,20 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from oslo_config import cfg
 from oslo_log import log as logging
 from pbr.version import SemanticVersion as SV

+from string import ascii_letters
+from string import digits
+
+from heatclient.common import template_utils
+
 from magnum.common import clients
 from magnum.common import exception
 from magnum.common import keystone
 from magnum.common import octavia
+from magnum.common import short_id
 from magnum.drivers.common import k8s_monitor
 from magnum.drivers.heat import driver
 from magnum.drivers.k8s_fedora_atomic_v1 import template_def
@@ -109,3 +116,71 @@ class Driver(driver.HeatDriver):
             'disable_rollback': not rollback
         }
         osc.heat().stacks.update(cluster.stack_id, **fields)
+
+    def create_nodegroup(self, context, cluster, nodegroup):
+        osc = clients.OpenStackClients(context)
+        template_path, stack_params, env_files = (
+            self._extract_template_definition(context, cluster,
+                                              nodegroups=[nodegroup]))
+
+        tpl_files, template = template_utils.get_template_contents(
+            template_path)
+
+        environment_files, env_map = self._get_env_files(template_path,
+                                                         env_files)
+        tpl_files.update(env_map)
+
+        # Make sure we end up with a valid hostname
+        valid_chars = set(ascii_letters + digits + '-')
+
+        # valid hostnames are 63 chars long, leaving enough room
+        # to add the random id (for uniqueness)
+        stack_name = "%s-%s" % (cluster.name[:20], nodegroup.name[:9])
+        stack_name = stack_name.replace('_', '-')
+        stack_name = stack_name.replace('.', '-')
+        stack_name = ''.join(filter(valid_chars.__contains__, stack_name))
+
+        # Make sure no duplicate stack name
+        stack_name = '%s-%s' % (stack_name, short_id.generate_id())
+        stack_name = stack_name.lower()
+
+        if cluster.create_timeout:
+            heat_timeout = cluster.create_timeout
+        else:
+            heat_timeout = cfg.CONF.cluster_heat.create_timeout
+
+        stack_params['is_cluster_stack'] = False
+
+        first_master = cluster.master_addresses[0]
+        network = osc.heat().resources.get(cluster.stack_id, 'network')
+        secgroup = osc.heat().resources.get(cluster.stack_id,
+                                            'secgroup_kube_minion')
+
+        stack_params['existing_master_private_ip'] = first_master
+        stack_params['existing_security_group'] = secgroup.attributes['id']
+        stack_params['fixed_network'] = network.attributes['fixed_network']
+        stack_params['fixed_subnet'] = network.attributes['fixed_subnet']
+        stack_params['trust_id'] = cluster.trust_id
+        stack_params['trustee_password'] = cluster.trustee_password
+        for label in nodegroup.labels:
+            stack_params[label] = nodegroup.labels[label]
+
+        fields = {
+            'stack_name': stack_name,
+            'parameters': stack_params,
+            'environment_files': environment_files,
+            'template': template,
+            'files': tpl_files,
+            'timeout_mins': heat_timeout
+        }
+
+        created_stack = osc.heat().stacks.create(**fields)
+        nodegroup.stack_id = created_stack['stack']['id']
+        return created_stack
+
+    def delete_nodegroup(self, context, cluster, nodegroup):
+        # Default nodegroups share stack_id so it will be deleted
+        # as soon as the cluster gets destroyed
+        if not nodegroup.stack_id:
+            nodegroup.destroy()
+        else:
+            osc = clients.OpenStackClients(context)
+            self._delete_stack(context, osc, nodegroup.stack_id)


@@ -1,12 +1,53 @@
-heat_template_version: 2014-10-16
+heat_template_version: queens

 description: >
   This template will boot a Kubernetes cluster with one or more
   minions (as specified by the number_of_minions parameter, which
   defaults to 1).

+conditions:
+
+  master_only:
+    or:
+      - equals:
+        - get_param: role
+        - "master"
+      - equals:
+        - get_param: is_cluster_stack
+        - true
+
+  worker_only:
+    or:
+      - equals:
+        - get_param: role
+        - "worker"
+      - equals:
+        - get_param: is_cluster_stack
+        - true
+
+  create_cluster_resources:
+    equals:
+      - get_param: is_cluster_stack
+      - true
+
 parameters:

+  # needs to become a list if we want to join master nodes?
+  existing_master_private_ip:
+    type: string
+    default: ""
+
+  is_cluster_stack:
+    type: boolean
+    default: false
+
+  role:
+    type: string
+    default: ""
+
+  existing_security_group:
+    type: string
+    default: ""
+
   ssh_key_name:
     type: string
     description: name of ssh key to be provisioned on our server
@@ -33,10 +74,16 @@ parameters:
   master_image:
     type: string
     description: glance image used to boot the server
+    # When creating a new minion nodegroup this will not
+    # be provided by magnum. So make it default to ""
+    default: ""

   minion_image:
     type: string
     description: glance image used to boot the server
+    # When creating a new master nodegroup this will not
+    # be provided by magnum. So make it default to ""
+    default: ""

   master_flavor:
     type: string
@@ -660,6 +707,7 @@ resources:
   #

   network:
+    condition: create_cluster_resources
     type: ../../common/templates/network.yaml
     properties:
       existing_network: {get_param: fixed_network}
@@ -670,6 +718,7 @@ resources:
       private_network_name: private

   api_lb:
+    condition: create_cluster_resources
     type: ../../common/templates/lb_api.yaml
     properties:
       fixed_subnet: {get_attr: [network, fixed_subnet]}
@@ -678,6 +727,7 @@ resources:
       port: {get_param: kubernetes_port}

   etcd_lb:
+    condition: create_cluster_resources
     type: ../../common/templates/lb_etcd.yaml
     properties:
       fixed_subnet: {get_attr: [network, fixed_subnet]}
@@ -691,6 +741,7 @@ resources:
   #

   secgroup_kube_master:
+    condition: create_cluster_resources
     type: OS::Neutron::SecurityGroup
     properties:
       rules:
@@ -727,6 +778,7 @@ resources:
           port_range_max: 8472

   secgroup_kube_minion:
+    condition: create_cluster_resources
     type: OS::Neutron::SecurityGroup
     properties:
       rules:
@@ -756,6 +808,7 @@ resources:

   # allow any traffic between worker nodes
   secgroup_rule_tcp_kube_minion:
+    condition: create_cluster_resources
     type: OS::Neutron::SecurityGroupRule
     properties:
       protocol: tcp
@@ -764,6 +817,7 @@ resources:
       security_group: {get_resource: secgroup_kube_minion}
       remote_group: {get_resource: secgroup_kube_minion}

   secgroup_rule_udp_kube_minion:
+    condition: create_cluster_resources
     type: OS::Neutron::SecurityGroupRule
     properties:
       protocol: udp
@@ -779,6 +833,7 @@ resources:
   #

   api_address_lb_switch:
+    condition: create_cluster_resources
     type: Magnum::ApiGatewaySwitcher
     properties:
       pool_public_ip: {get_attr: [api_lb, floating_address]}
master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]}
etcd_address_lb_switch: etcd_address_lb_switch:
condition: create_cluster_resources
type: Magnum::ApiGatewaySwitcher type: Magnum::ApiGatewaySwitcher
properties: properties:
pool_private_ip: {get_attr: [etcd_lb, address]} pool_private_ip: {get_attr: [etcd_lb, address]}
@@ -799,6 +855,7 @@ resources:
   #

   api_address_floating_switch:
+    condition: create_cluster_resources
     type: Magnum::FloatingIPAddressSwitcher
     properties:
       public_ip: {get_attr: [api_address_lb_switch, public_ip]}
@@ -811,11 +868,13 @@ resources:
   #

   master_nodes_server_group:
+    condition: master_only
     type: OS::Nova::ServerGroup
     properties:
       policies: [{get_param: nodes_affinity_policy}]

   worker_nodes_server_group:
+    condition: worker_only
     type: OS::Nova::ServerGroup
     properties:
       policies: [{get_param: nodes_affinity_policy}]
@@ -827,6 +886,7 @@ resources:
   #

   kube_masters:
+    condition: master_only
     type: OS::Heat::ResourceGroup
     depends_on:
       - network
@@ -943,6 +1003,7 @@ resources:
           max_node_count: {get_param: max_node_count}

   kube_cluster_config:
+    condition: create_cluster_resources
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
@@ -985,6 +1046,7 @@ resources:
             - get_file: ../../common/templates/kubernetes/fragments/install-helm-modules.sh

   kube_cluster_deploy:
+    condition: create_cluster_resources
     type: OS::Heat::SoftwareDeployment
     properties:
       actions: ['CREATE']
@@ -1002,6 +1064,7 @@ resources:
   #

   kube_minions:
+    condition: worker_only
     type: OS::Heat::ResourceGroup
     depends_on:
       - network
@@ -1021,12 +1084,32 @@ resources:
           ssh_key_name: {get_param: ssh_key_name}
           server_image: {get_param: minion_image}
           minion_flavor: {get_param: minion_flavor}
-          fixed_network: {get_attr: [network, fixed_network]}
-          fixed_subnet: {get_attr: [network, fixed_subnet]}
+          # fixed_network: {get_param: fixed_network}
+          fixed_network:
+            if:
+              - create_cluster_resources
+              - get_attr: [network, fixed_network]
+              - get_param: fixed_network
+          # fixed_subnet: {get_param: fixed_subnet}
+          fixed_subnet:
+            if:
+              - create_cluster_resources
+              - get_attr: [network, fixed_subnet]
+              - get_param: fixed_subnet
           network_driver: {get_param: network_driver}
           flannel_network_cidr: {get_param: flannel_network_cidr}
-          kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]}
-          etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]}
+          #kube_master_ip: {get_param: existing_master_private_ip}
+          kube_master_ip:
+            if:
+              - create_cluster_resources
+              - get_attr: [api_address_lb_switch, private_ip]
+              - get_param: existing_master_private_ip
+          #etcd_server_ip: {get_param: existing_master_private_ip}
+          etcd_server_ip:
+            if:
+              - create_cluster_resources
+              - get_attr: [etcd_address_lb_switch, private_ip]
+              - get_param: existing_master_private_ip
           external_network: {get_param: external_network}
           kube_allow_priv: {get_param: kube_allow_priv}
           docker_volume_size: {get_param: docker_volume_size}
@@ -1050,7 +1133,12 @@ resources:
           kubernetes_port: {get_param: kubernetes_port}
           tls_disabled: {get_param: tls_disabled}
           verify_ca: {get_param: verify_ca}
-          secgroup_kube_minion_id: {get_resource: secgroup_kube_minion}
+          # secgroup_kube_minion_id: {get_param: existing_security_group}
+          secgroup_kube_minion_id:
+            if:
+              - create_cluster_resources
+              - get_resource: secgroup_kube_minion
+              - get_param: existing_security_group
           http_proxy: {get_param: http_proxy}
           https_proxy: {get_param: https_proxy}
           no_proxy: {get_param: no_proxy}
@@ -1079,6 +1167,7 @@
 outputs:

   api_address:
+    condition: create_cluster_resources
     value:
       str_replace:
         template: api_ip_address
@@ -1089,6 +1178,7 @@ outputs:
       the Kubernetes API.

   registry_address:
+    condition: create_cluster_resources
     value:
       str_replace:
         template: localhost:port
@@ -1099,22 +1189,26 @@ outputs:
       images.

   kube_masters_private:
+    condition: master_only
     value: {get_attr: [kube_masters, kube_master_ip]}
     description: >
       This is a list of the "private" IP addresses of all the Kubernetes masters.

   kube_masters:
+    condition: master_only
     value: {get_attr: [kube_masters, kube_master_external_ip]}
     description: >
       This is a list of the "public" IP addresses of all the Kubernetes masters.
       Use these IP addresses to log in to the Kubernetes masters via ssh.

   kube_minions_private:
+    condition: worker_only
     value: {get_attr: [kube_minions, kube_minion_ip]}
     description: >
       This is a list of the "private" IP addresses of all the Kubernetes minions.

   kube_minions:
+    condition: worker_only
     value: {get_attr: [kube_minions, kube_minion_external_ip]}
     description: >
       This is a list of the "public" IP addresses of all the Kubernetes minions.


@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: queens

 description: >
   This is a nested stack that defines a single Kubernetes master, This stack is


@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: queens

 description: >
   This is a nested stack that defines a single Kubernetes minion, This stack is


@@ -164,7 +164,9 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
                   objects.fields.ClusterStatus.DELETE_IN_PROGRESS,
                   objects.fields.ClusterStatus.ROLLBACK_IN_PROGRESS]
         filters = {'status': status}
-        clusters = objects.Cluster.list(ctx, filters=filters)
+        nodegroups = objects.NodeGroup.list(ctx, filters=filters)
+        cluster_ids = set(ng.cluster_id for ng in nodegroups)
+        clusters = [objects.Cluster.get(ctx, cid) for cid in cluster_ids]

         if not clusters:
             return