[WIP] ng-9: Driver for nodegroup operations

This adds support for creating and deleting worker nodegroups, using a
separate Heat stack per nodegroup. To remain backwards compatible, the
default nodegroups stay in a single stack.

With this in mind, the cluster status is now calculated by aggregating
the statuses of the underlying stacks.

Change-Id: I97839ab8495ed5d860785dff1f6e3cc59b6a9ff7
Theodoros Tsioutsias 2019-06-24 13:49:42 +02:00
parent c05bb74f0b
commit f64131e5bb
6 changed files with 317 additions and 66 deletions
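
To make the aggregation rule described in the commit message concrete,
here is a minimal, self-contained sketch. It is illustrative only: the
namedtuple mirrors the NodeGroupStatus added in the driver below, while
the helper function and the sample statuses are assumptions, not the
actual Magnum code.

import collections

NodeGroupStatus = collections.namedtuple(
    'NodeGroupStatus', 'name status reason is_default')


def aggregate_status(default_status, ng_statuses):
    # The default nodegroups (which share the cluster stack) provide the
    # base status; any non-default nodegroup flips the cluster to the
    # matching UPDATE_* state, with IN_PROGRESS taking priority over
    # FAILED and FAILED over COMPLETE.
    cluster_status = default_status
    for suffix in ('_IN_PROGRESS', '_FAILED', '_COMPLETE'):
        if any(ns.status.endswith(suffix)
               for ns in ng_statuses if not ns.is_default):
            cluster_status = 'UPDATE' + suffix
            break
    return cluster_status


statuses = [
    NodeGroupStatus('default-master', 'CREATE_COMPLETE', None, True),
    NodeGroupStatus('default-worker', 'CREATE_COMPLETE', None, True),
    NodeGroupStatus('gpu-workers', 'CREATE_IN_PROGRESS', None, False),
]
print(aggregate_status('CREATE_COMPLETE', statuses))  # UPDATE_IN_PROGRESS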


@ -11,6 +11,7 @@
# under the License.
import abc
import collections
import os
import six
@ -41,6 +42,10 @@ from magnum.objects import fields
LOG = logging.getLogger(__name__)
NodeGroupStatus = collections.namedtuple('NodeGroupStatus',
'name status reason is_default')
@six.add_metaclass(abc.ABCMeta)
class HeatDriver(driver.Driver):
"""Base Driver class for using Heat
@ -61,12 +66,14 @@ class HeatDriver(driver.Driver):
scale_manager=scale_manager)
def _extract_template_definition(self, context, cluster,
scale_manager=None):
scale_manager=None,
nodegroups=None):
cluster_template = conductor_utils.retrieve_cluster_template(context,
cluster)
definition = self.get_template_definition()
return definition.extract_definition(context, cluster_template,
cluster,
nodegroups=nodegroups,
scale_manager=scale_manager)
def _get_env_files(self, template_path, env_rel_paths):
@ -96,14 +103,18 @@ class HeatDriver(driver.Driver):
def delete_federation(self, context, federation):
return NotImplementedError("Must implement 'delete_federation'")
def create_nodegroup(self, context, cluster, nodegroup):
raise NotImplementedError("Must implement 'create_nodegroup'.")
def update_nodegroup(self, context, cluster, nodegroup):
raise NotImplementedError("Must implement 'update_nodegroup'.")
# we just need to save the nodegroup here.
nodegroup.save()
def delete_nodegroup(self, context, cluster, nodegroup):
raise NotImplementedError("Must implement 'delete_nodegroup'.")
# Default nodegroups share the cluster's stack_id, so their stack
# will be deleted as soon as the cluster gets destroyed
if not nodegroup.stack_id:
nodegroup.destroy()
else:
osc = clients.OpenStackClients(context)
self._delete_stack(context, osc, nodegroup.stack_id)
def update_cluster_status(self, context, cluster):
if cluster.stack_id is None:
@ -128,6 +139,16 @@ class HeatDriver(driver.Driver):
rollback=False):
self._update_stack(context, cluster, scale_manager, rollback)
def create_nodegroup(self, context, cluster, nodegroup):
stack = self._create_stack(context, clients.OpenStackClients(context),
cluster, cluster.create_timeout,
nodegroup=nodegroup)
nodegroup.stack_id = stack['stack']['id']
def get_nodegroup_extra_params(self, cluster, osc):
raise NotImplementedError("Must implement "
"'get_nodegroup_extra_params'")
@abc.abstractmethod
def upgrade_cluster(self, context, cluster, cluster_template,
max_batch_size, nodegroup, scale_manager=None,
@ -138,7 +159,14 @@ class HeatDriver(driver.Driver):
self.pre_delete_cluster(context, cluster)
LOG.info("Starting to delete cluster %s", cluster.uuid)
self._delete_stack(context, clients.OpenStackClients(context), cluster)
osc = clients.OpenStackClients(context)
for ng in cluster.nodegroups:
ng.status = fields.ClusterStatus.DELETE_IN_PROGRESS
ng.save()
if ng.is_default:
continue
self._delete_stack(context, osc, ng.stack_id)
self._delete_stack(context, osc, cluster.default_ng_master.stack_id)
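
A rough sketch of the deletion order implied above (stand-in types and
invented stack ids; this is not driver code): non-default nodegroup
stacks are deleted first, and the shared default stack last.

from collections import namedtuple

NG = namedtuple('NG', 'stack_id is_default')
Cluster = namedtuple('Cluster', 'nodegroups default_ng_master')


def stacks_to_delete(cluster):
    # Non-default nodegroups own their own Heat stacks and go first;
    # the default master/worker nodegroups share the cluster stack,
    # which is deleted last via default_ng_master.stack_id.
    extra = [ng.stack_id for ng in cluster.nodegroups if not ng.is_default]
    return extra + [cluster.default_ng_master.stack_id]


master = NG('stack-cluster', True)
worker = NG('stack-cluster', True)
gpu = NG('stack-gpu', False)
print(stacks_to_delete(Cluster([master, worker, gpu], master)))
# ['stack-gpu', 'stack-cluster']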
def resize_cluster(self, context, cluster, resize_manager,
node_count, nodes_to_remove, nodegroup=None,
@ -147,9 +175,13 @@ class HeatDriver(driver.Driver):
node_count, nodes_to_remove, nodegroup=nodegroup,
rollback=rollback)
def _create_stack(self, context, osc, cluster, cluster_create_timeout):
def _create_stack(self, context, osc, cluster, cluster_create_timeout,
nodegroup=None):
nodegroups = [nodegroup] if nodegroup else None
template_path, heat_params, env_files = (
self._extract_template_definition(context, cluster))
self._extract_template_definition(context, cluster,
nodegroups=nodegroups))
tpl_files, template = template_utils.get_template_contents(
template_path)
@ -163,7 +195,10 @@ class HeatDriver(driver.Driver):
# valid hostnames are 63 chars long, leaving enough room
# to add the random id (for uniqueness)
stack_name = cluster.name[:30]
if nodegroup is None:
stack_name = cluster.name[:30]
else:
stack_name = "%s-%s" % (cluster.name[:20], nodegroup.name[:9])
stack_name = stack_name.replace('_', '-')
stack_name = stack_name.replace('.', '-')
stack_name = ''.join(filter(valid_chars.__contains__, stack_name))
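
A quick worked example of the nodegroup stack naming above. The cluster
and nodegroup names are made up, and the valid_chars set is an assumption
of this sketch; only the truncation and sanitization rules mirror the
diff.

import string

valid_chars = set(string.ascii_letters + string.digits + '-')


def nodegroup_stack_name(cluster_name, nodegroup_name):
    # 20 chars for the cluster plus 9 for the nodegroup keeps the name
    # well inside the 63-char hostname limit, leaving room for the
    # random id Heat appends for uniqueness.
    name = "%s-%s" % (cluster_name[:20], nodegroup_name[:9])
    name = name.replace('_', '-').replace('.', '-')
    return ''.join(filter(valid_chars.__contains__, name))


print(nodegroup_stack_name('production_k8s.cluster', 'gpu_workers_large'))
# production-k8s-clust-gpu-worke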
@ -177,6 +212,14 @@ class HeatDriver(driver.Driver):
# no cluster_create_timeout value was passed in to the request
# so falling back on configuration file value
heat_timeout = cfg.CONF.cluster_heat.create_timeout
heat_params['is_cluster_stack'] = nodegroup is None
if nodegroup:
# When creating a new stack for a new nodegroup, we need to
# extract extra parameters from the existing cluster stack.
heat_params.update(self.get_nodegroup_extra_params(cluster, osc))
fields = {
'stack_name': stack_name,
'parameters': heat_params,
@ -225,10 +268,10 @@ class HeatDriver(driver.Driver):
# Find what changed checking the stack params
# against the ones in the template_def.
stack = osc.heat().stacks.get(cluster.stack_id,
stack = osc.heat().stacks.get(nodegroup.stack_id,
resolve_outputs=True)
stack_params = stack.parameters
definition.add_nodegroup_params(cluster)
definition.add_nodegroup_params(cluster, nodegroups=[nodegroup])
heat_params = definition.get_stack_diff(context, stack_params, cluster)
LOG.debug('Updating stack with these params: %s', heat_params)
@ -244,10 +287,10 @@ class HeatDriver(driver.Driver):
}
osc = clients.OpenStackClients(context)
osc.heat().stacks.update(cluster.stack_id, **fields)
osc.heat().stacks.update(nodegroup.stack_id, **fields)
def _delete_stack(self, context, osc, cluster):
osc.heat().stacks.delete(cluster.stack_id)
def _delete_stack(self, context, osc, stack_id):
osc.heat().stacks.delete(stack_id)
class KubernetesDriver(HeatDriver):
@ -288,39 +331,129 @@ class HeatPoller(object):
def poll_and_check(self):
# TODO(yuanying): temporary implementation to update api_address,
# node_addresses and cluster status
ng_statuses = list()
self.default_ngs = list()
for nodegroup in self.cluster.nodegroups:
self.nodegroup = nodegroup
if self.nodegroup.is_default:
self.default_ngs.append(self.nodegroup)
status = self.extract_nodegroup_status()
# In case a non-default nodegroup was deleted, None is
# returned. We shouldn't add None to the list.
if status is not None:
ng_statuses.append(status)
self.aggregate_nodegroup_statuses(ng_statuses)
def extract_nodegroup_status(self):
if self.nodegroup.stack_id is None:
# There is a slight window for a race condition here: if
# this periodic task runs after a nodegroup is created but
# before its stack_id has been assigned, it would try to look
# up the status of a stack with id = None. At that point the
# nodegroup status is already set to CREATE_IN_PROGRESS by
# the conductor, so keep that status for this loop until the
# stack_id is assigned.
return NodeGroupStatus(name=self.nodegroup.name,
status=self.nodegroup.status,
is_default=self.nodegroup.is_default,
reason=self.nodegroup.status_reason)
try:
# Do not resolve outputs by default. Resolving all
# node IPs is expensive on heat.
stack = self.openstack_client.heat().stacks.get(
self.cluster.stack_id, resolve_outputs=False)
self.nodegroup.stack_id, resolve_outputs=False)
# poll_and_check is detached and polling long time to check
# status, so another user/client can call delete cluster/stack.
if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
if self.nodegroup.is_default:
default_ng_statuses = [
ng.status for ng in self.default_ngs
]
if all(status == fields.ClusterStatus.DELETE_COMPLETE
for status in default_ng_statuses):
# Delete all cluster related info
self._delete_complete()
else:
self.nodegroup.destroy()
return
if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
fields.ClusterStatus.UPDATE_COMPLETE):
# Resolve all outputs if the stack is COMPLETE
stack = self.openstack_client.heat().stacks.get(
self.nodegroup.stack_id, resolve_outputs=True)
self._sync_cluster_and_template_status(stack)
elif stack.stack_status != self.nodegroup.status:
self.template_def.nodegroup_output_mappings = list()
self.template_def.update_outputs(
stack, self.cluster_template, self.cluster,
nodegroups=[self.nodegroup])
self._sync_cluster_status(stack)
if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
fields.ClusterStatus.DELETE_FAILED,
fields.ClusterStatus.UPDATE_FAILED,
fields.ClusterStatus.ROLLBACK_COMPLETE,
fields.ClusterStatus.ROLLBACK_FAILED):
self._sync_cluster_and_template_status(stack)
self._nodegroup_failed(stack)
except heatexc.NotFound:
self._sync_missing_heat_stack()
return NodeGroupStatus(name=self.nodegroup.name,
status=self.nodegroup.status,
is_default=self.nodegroup.is_default,
reason=self.nodegroup.status_reason)
def aggregate_nodegroup_statuses(self, ng_statuses):
# NOTE(ttsiouts): Aggregate the nodegroup statuses and set the
# cluster overall status.
FAILED = '_FAILED'
IN_PROGRESS = '_IN_PROGRESS'
COMPLETE = '_COMPLETE'
UPDATE = 'UPDATE'
previous_state = self.cluster.status
self.cluster.status_reason = None
# Both default nodegroups will have the same status so it's
# enough to check one of them.
self.cluster.status = self.cluster.default_ng_master.status
default_ng = self.cluster.default_ng_master
if (default_ng.status.endswith(IN_PROGRESS) or
default_ng.status == fields.ClusterStatus.DELETE_COMPLETE):
self.cluster.save()
return
# poll_and_check is detached and polling long time to check status,
# so another user/client can call delete cluster/stack.
if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
self._delete_complete()
# Give priority to the states below, in this order
for state in (IN_PROGRESS, FAILED, COMPLETE):
if any(ns.status.endswith(state) for ns in ng_statuses
if not ns.is_default):
status = getattr(fields.ClusterStatus, UPDATE+state)
self.cluster.status = status
if state == FAILED:
reasons = ["%s failed" % (ns.name)
for ns in ng_statuses
if ns.status.endswith(FAILED)]
self.cluster.status_reason = ', '.join(reasons)
break
if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
fields.ClusterStatus.UPDATE_COMPLETE):
# Resolve all outputs if the stack is COMPLETE
stack = self.openstack_client.heat().stacks.get(
self.cluster.stack_id, resolve_outputs=True)
if self.cluster.status == fields.ClusterStatus.CREATE_COMPLETE:
# Consider the scenario where the user:
# - creates the cluster (cluster: create_complete)
# - adds a nodegroup (cluster: update_complete)
# - deletes the nodegroup
# The cluster should go to CREATE_COMPLETE only if the previous
# state was CREATE_COMPLETE or CREATE_IN_PROGRESS. In all other
# cases, just go to UPDATE_COMPLETE.
if previous_state not in (fields.ClusterStatus.CREATE_COMPLETE,
fields.ClusterStatus.CREATE_IN_PROGRESS):
self.cluster.status = fields.ClusterStatus.UPDATE_COMPLETE
self._sync_cluster_and_template_status(stack)
elif stack.stack_status != self.cluster.status:
self.template_def.update_outputs(stack, self.cluster_template,
self.cluster)
self._sync_cluster_status(stack)
if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
fields.ClusterStatus.DELETE_FAILED,
fields.ClusterStatus.UPDATE_FAILED,
fields.ClusterStatus.ROLLBACK_COMPLETE,
fields.ClusterStatus.ROLLBACK_FAILED):
self._sync_cluster_and_template_status(stack)
self._cluster_failed(stack)
self.cluster.save()
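
The previous-state rule in the comment a few lines above can be
summarized with a tiny sketch (assumed status strings, not the driver
code):

def final_complete_status(previous_state, aggregated_status):
    # A cluster only reports CREATE_COMPLETE if it never left the create
    # path; once it has been updated (e.g. a nodegroup was added and
    # later deleted), it settles on UPDATE_COMPLETE instead.
    if aggregated_status == 'CREATE_COMPLETE' and previous_state not in (
            'CREATE_COMPLETE', 'CREATE_IN_PROGRESS'):
        return 'UPDATE_COMPLETE'
    return aggregated_status


print(final_complete_status('UPDATE_IN_PROGRESS', 'CREATE_COMPLETE'))
# UPDATE_COMPLETE: the cluster had already been updated before this poll
print(final_complete_status('CREATE_IN_PROGRESS', 'CREATE_COMPLETE'))
# CREATE_COMPLETE: the initial creation just finished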
def _delete_complete(self):
LOG.info('Cluster has been deleted, stack_id: %s',
@ -339,9 +472,9 @@ class HeatPoller(object):
self.cluster.uuid)
def _sync_cluster_status(self, stack):
self.cluster.status = stack.stack_status
self.cluster.status_reason = stack.stack_status_reason
self.cluster.save()
self.nodegroup.status = stack.stack_status
self.nodegroup.status_reason = stack.stack_status_reason
self.nodegroup.save()
def get_version_info(self, stack):
stack_param = self.template_def.get_heat_param(
@ -358,34 +491,39 @@ class HeatPoller(object):
self.cluster.container_version = container_version
def _sync_cluster_and_template_status(self, stack):
self.template_def.nodegroup_output_mappings = list()
self.template_def.update_outputs(stack, self.cluster_template,
self.cluster)
self.cluster,
nodegroups=[self.nodegroup])
self.get_version_info(stack)
self._sync_cluster_status(stack)
def _cluster_failed(self, stack):
LOG.error('Cluster error, stack status: %(cluster_status)s, '
def _nodegroup_failed(self, stack):
LOG.error('Nodegroup error, stack status: %(ng_status)s, '
'stack_id: %(stack_id)s, '
'reason: %(reason)s',
{'cluster_status': stack.stack_status,
'stack_id': self.cluster.stack_id,
'reason': self.cluster.status_reason})
{'ng_status': stack.stack_status,
'stack_id': self.nodegroup.stack_id,
'reason': self.nodegroup.status_reason})
def _sync_missing_heat_stack(self):
if self.cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
self._delete_complete()
elif self.cluster.status == fields.ClusterStatus.CREATE_IN_PROGRESS:
if self.nodegroup.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
default_ng_statuses = [ng.status for ng in self.default_ngs]
if all(status == fields.ClusterStatus.DELETE_COMPLETE
for status in default_ng_statuses):
self._delete_complete()
elif self.nodegroup.status == fields.ClusterStatus.CREATE_IN_PROGRESS:
self._sync_missing_stack(fields.ClusterStatus.CREATE_FAILED)
elif self.cluster.status == fields.ClusterStatus.UPDATE_IN_PROGRESS:
elif self.nodegroup.status == fields.ClusterStatus.UPDATE_IN_PROGRESS:
self._sync_missing_stack(fields.ClusterStatus.UPDATE_FAILED)
def _sync_missing_stack(self, new_status):
self.cluster.status = new_status
self.cluster.status_reason = _("Stack with id %s not found in "
"Heat.") % self.cluster.stack_id
self.cluster.save()
LOG.info("Cluster with id %(id)s has been set to "
self.nodegroup.status = new_status
self.nodegroup.status_reason = _("Stack with id %s not found in "
"Heat.") % self.cluster.stack_id
self.nodegroup.save()
LOG.info("Nodegroup with id %(id)s has been set to "
"%(status)s due to stack with id %(sid)s "
"not found in Heat.",
{'id': self.cluster.id, 'status': self.cluster.status,
'sid': self.cluster.stack_id})
{'id': self.nodegroup.uuid, 'status': self.nodegroup.status,
'sid': self.nodegroup.stack_id})


@ -95,3 +95,19 @@ class Driver(driver.KubernetesDriver):
'disable_rollback': not rollback
}
osc.heat().stacks.update(cluster.stack_id, **fields)
def get_nodegroup_extra_params(self, cluster, osc):
network = osc.heat().resources.get(cluster.stack_id, 'network')
secgroup = osc.heat().resources.get(cluster.stack_id,
'secgroup_kube_minion')
for output in osc.heat().stacks.get(cluster.stack_id).outputs:
if output['output_key'] == 'api_address':
api_address = output['output_value']
break
extra_params = {
'existing_master_private_ip': api_address,
'existing_security_group': secgroup.attributes['id'],
'fixed_network': network.attributes['fixed_network'],
'fixed_subnet': network.attributes['fixed_subnet'],
}
return extra_params
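
Roughly, the parameters handed to Heat for a new worker-nodegroup stack
end up looking like the dict below. The values are invented; only the
keys come from get_nodegroup_extra_params above and from the
is_cluster_stack flag set in _create_stack.

extra_params = {
    'existing_master_private_ip': '10.0.0.5',
    'existing_security_group': 'secgroup-uuid',
    'fixed_network': 'private-net-uuid',
    'fixed_subnet': 'private-subnet-uuid',
}
heat_params = {'is_cluster_stack': False}  # nodegroup stack, not a cluster stack
heat_params.update(extra_params)
print(sorted(heat_params))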


@ -1,12 +1,53 @@
heat_template_version: 2014-10-16
heat_template_version: queens
description: >
This template will boot a Kubernetes cluster with one or more
minions (as specified by the number_of_minions parameter, which
defaults to 1).
conditions:
master_only:
or:
- equals:
- get_param: role
- "master"
- equals:
- get_param: is_cluster_stack
- true
worker_only:
or:
- equals:
- get_param: role
- "worker"
- equals:
- get_param: is_cluster_stack
- true
create_cluster_resources:
equals:
- get_param: is_cluster_stack
- true
parameters:
# needs to become a list if we want to join master nodes?
existing_master_private_ip:
type: string
default: ""
is_cluster_stack:
type: boolean
default: false
role:
type: string
default: ""
existing_security_group:
type: string
default: ""
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
@ -34,10 +75,16 @@ parameters:
master_image:
type: string
description: glance image used to boot the server
# When creating a new minion nodegroup this will not be
# provided by magnum, so default it to ""
default: ""
minion_image:
type: string
description: glance image used to boot the server
# When creating a new master nodegroup this will not be
# provided by magnum, so default it to ""
default: ""
master_flavor:
type: string
@ -684,6 +731,7 @@ resources:
#
network:
condition: create_cluster_resources
type: ../../common/templates/network.yaml
properties:
existing_network: {get_param: fixed_network}
@ -694,6 +742,7 @@ resources:
private_network_name: {get_param: fixed_network_name}
api_lb:
condition: create_cluster_resources
type: ../../common/templates/lb_api.yaml
properties:
fixed_subnet: {get_attr: [network, fixed_subnet]}
@ -702,6 +751,7 @@ resources:
port: {get_param: kubernetes_port}
etcd_lb:
condition: create_cluster_resources
type: ../../common/templates/lb_etcd.yaml
properties:
fixed_subnet: {get_attr: [network, fixed_subnet]}
@ -715,6 +765,7 @@ resources:
#
secgroup_kube_master:
condition: create_cluster_resources
type: OS::Neutron::SecurityGroup
properties:
rules:
@ -751,6 +802,7 @@ resources:
port_range_max: 8472
secgroup_kube_minion:
condition: create_cluster_resources
type: OS::Neutron::SecurityGroup
properties:
rules:
@ -780,6 +832,7 @@ resources:
# allow any traffic between worker nodes
secgroup_rule_tcp_kube_minion:
condition: create_cluster_resources
type: OS::Neutron::SecurityGroupRule
properties:
protocol: tcp
@ -788,6 +841,7 @@ resources:
security_group: {get_resource: secgroup_kube_minion}
remote_group: {get_resource: secgroup_kube_minion}
secgroup_rule_udp_kube_minion:
condition: create_cluster_resources
type: OS::Neutron::SecurityGroupRule
properties:
protocol: udp
@ -803,6 +857,7 @@ resources:
#
api_address_lb_switch:
condition: create_cluster_resources
type: Magnum::ApiGatewaySwitcher
properties:
pool_public_ip: {get_attr: [api_lb, floating_address]}
@ -811,6 +866,7 @@ resources:
master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]}
etcd_address_lb_switch:
condition: create_cluster_resources
type: Magnum::ApiGatewaySwitcher
properties:
pool_private_ip: {get_attr: [etcd_lb, address]}
@ -823,6 +879,7 @@ resources:
#
api_address_floating_switch:
condition: create_cluster_resources
type: Magnum::FloatingIPAddressSwitcher
properties:
public_ip: {get_attr: [api_address_lb_switch, public_ip]}
@ -835,11 +892,13 @@ resources:
#
master_nodes_server_group:
condition: master_only
type: OS::Nova::ServerGroup
properties:
policies: [{get_param: nodes_affinity_policy}]
worker_nodes_server_group:
condition: worker_only
type: OS::Nova::ServerGroup
properties:
policies: [{get_param: nodes_affinity_policy}]
@ -851,6 +910,7 @@ resources:
#
kube_masters:
condition: master_only
type: OS::Heat::ResourceGroup
depends_on:
- network
@ -972,6 +1032,7 @@ resources:
npd_enabled: {get_param: npd_enabled}
kube_cluster_config:
condition: create_cluster_resources
type: OS::Heat::SoftwareConfig
properties:
group: script
@ -1014,6 +1075,7 @@ resources:
- get_file: ../../common/templates/kubernetes/fragments/install-helm-modules.sh
kube_cluster_deploy:
condition: create_cluster_resources
type: OS::Heat::SoftwareDeployment
properties:
actions: ['CREATE']
@ -1031,6 +1093,7 @@ resources:
#
kube_minions:
condition: worker_only
type: OS::Heat::ResourceGroup
depends_on:
- network
@ -1050,12 +1113,32 @@ resources:
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: minion_image}
minion_flavor: {get_param: minion_flavor}
fixed_network: {get_attr: [network, fixed_network]}
fixed_subnet: {get_attr: [network, fixed_subnet]}
# fixed_network: {get_param: fixed_network}
fixed_network:
if:
- create_cluster_resources
- get_attr: [network, fixed_network]
- get_param: fixed_network
# fixed_subnet: {get_param: fixed_subnet}
fixed_subnet:
if:
- create_cluster_resources
- get_attr: [network, fixed_subnet]
- get_param: fixed_subnet
network_driver: {get_param: network_driver}
flannel_network_cidr: {get_param: flannel_network_cidr}
kube_master_ip: {get_attr: [api_address_lb_switch, private_ip]}
etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]}
#kube_master_ip: {get_param: existing_master_private_ip}
kube_master_ip:
if:
- create_cluster_resources
- get_attr: [api_address_lb_switch, private_ip]
- get_param: existing_master_private_ip
#etcd_server_ip: {get_param: existing_master_private_ip}
etcd_server_ip:
if:
- create_cluster_resources
- get_attr: [etcd_address_lb_switch, private_ip]
- get_param: existing_master_private_ip
external_network: {get_param: external_network}
kube_allow_priv: {get_param: kube_allow_priv}
docker_volume_size: {get_param: docker_volume_size}
@ -1079,7 +1162,12 @@ resources:
kubernetes_port: {get_param: kubernetes_port}
tls_disabled: {get_param: tls_disabled}
verify_ca: {get_param: verify_ca}
secgroup_kube_minion_id: {get_resource: secgroup_kube_minion}
# secgroup_kube_minion_id: {get_param: existing_security_group}
secgroup_kube_minion_id:
if:
- create_cluster_resources
- get_resource: secgroup_kube_minion
- get_param: existing_security_group
http_proxy: {get_param: http_proxy}
https_proxy: {get_param: https_proxy}
no_proxy: {get_param: no_proxy}
@ -1110,6 +1198,7 @@ resources:
outputs:
api_address:
condition: create_cluster_resources
value:
str_replace:
template: api_ip_address
@ -1120,6 +1209,7 @@ outputs:
the Kubernetes API.
registry_address:
condition: create_cluster_resources
value:
str_replace:
template: localhost:port
@ -1130,22 +1220,26 @@ outputs:
images.
kube_masters_private:
condition: master_only
value: {get_attr: [kube_masters, kube_master_ip]}
description: >
This is a list of the "private" IP addresses of all the Kubernetes masters.
kube_masters:
condition: master_only
value: {get_attr: [kube_masters, kube_master_external_ip]}
description: >
This is a list of the "public" IP addresses of all the Kubernetes masters.
Use these IP addresses to log in to the Kubernetes masters via ssh.
kube_minions_private:
condition: worker_only
value: {get_attr: [kube_minions, kube_minion_ip]}
description: >
This is a list of the "private" IP addresses of all the Kubernetes minions.
kube_minions:
condition: worker_only
value: {get_attr: [kube_minions, kube_minion_external_ip]}
description: >
This is a list of the "public" IP addresses of all the Kubernetes minions.
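
A small sketch of how the three conditions declared at the top of this
template resolve for the two kinds of stacks. The parameter names match
the template; the helper itself is only an illustration, not Heat.

def resolve_conditions(is_cluster_stack, role):
    # Cluster-wide resources (network, load balancers, security groups)
    # only exist in the cluster stack, while the master/worker resource
    # groups are additionally gated by the role parameter.
    return {
        'create_cluster_resources': is_cluster_stack,
        'master_only': is_cluster_stack or role == 'master',
        'worker_only': is_cluster_stack or role == 'worker',
    }


print(resolve_conditions(True, ''))
# cluster stack: all three conditions are true
print(resolve_conditions(False, 'worker'))
# worker nodegroup stack: only worker_only holds, so just kube_minions
# and its server group are created, wired to the existing_* parameters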


@ -1,4 +1,4 @@
heat_template_version: 2014-10-16
heat_template_version: queens
description: >
This is a nested stack that defines a single Kubernetes master, This stack is


@ -1,4 +1,4 @@
heat_template_version: 2014-10-16
heat_template_version: queens
description: >
This is a nested stack that defines a single Kubernetes minion, This stack is


@ -164,6 +164,9 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
objects.fields.ClusterStatus.DELETE_IN_PROGRESS,
objects.fields.ClusterStatus.ROLLBACK_IN_PROGRESS]
filters = {'status': status}
# nodegroups = objects.NodeGroup.list(ctx, filters=filters)
# cluster_ids = set(ng.cluster_id for ng in nodegroups)
# clusters = [objects.Cluster.get(ctx, cid) for cid in cluster_ids]
clusters = objects.Cluster.list(ctx, filters=filters)
if not clusters:
return