Fix deletion of non-default node groups
Fixes a bug introduced in
https://review.opendev.org/c/openstack/magnum-capi-helm/+/915031/5..6
which caused the incorrect list of node groups to be processed when the
_update_helm_release method is invoked from the driver's
delete_node_group method.

Closes-bug: #2095539
Change-Id: I98e81a107162272ddbe63e30eba9db188893b8ac
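To make the failure mode concrete, here is a minimal sketch of the pre-fix flow (the `remaining` filter and the `nodegroups=` keyword argument are assumptions; the method names come from the commit message and diff):

    def delete_node_group(self, context, cluster, nodegroup):
        # Assumed: build the list of node groups that should survive the
        # delete, then ask for the Helm release to be re-rendered.
        remaining = [ng for ng in cluster.nodegroups
                     if ng.name != nodegroup.name]
        self._update_helm_release(context, cluster, nodegroups=remaining)

    def _process_node_groups(self, cluster):
        # Old signature: re-reads the full list from the cluster, so any
        # filtered list passed to _update_helm_release was silently
        # ignored and the deleted group was re-asserted in the release.
        nodegroups = cluster.nodegroups
        ...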
@@ -812,8 +812,7 @@ class Driver(driver.Driver):
             additionalStorageClasses=additional_storage_classes,
         )

-    def _process_node_groups(self, cluster):
-        nodegroups = cluster.nodegroups
+    def _process_node_groups(self, cluster, nodegroups):
         nodegroup_set = []
         for ng in nodegroups:
             if ng.role != NODE_GROUP_ROLE_CONTROLLER:
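Reconstructed from the hunk above as standalone code: the helper now renders whatever list the caller supplies. Only the signature, the controller-role filter, and `nodegroup_set` appear in the diff; the per-group conversion via `_process_node_group` is a hypothetical placeholder:

    def _process_node_groups(self, cluster, nodegroups):
        # The caller now decides which node groups are in scope; during a
        # node group delete this is the remaining set rather than the
        # full cluster.nodegroups list.
        nodegroup_set = []
        for ng in nodegroups:
            if ng.role != NODE_GROUP_ROLE_CONTROLLER:
                # Hypothetical helper: convert one worker group to its
                # Helm values.
                nodegroup_set.append(self._process_node_group(cluster, ng))
        return nodegroup_set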
@@ -878,7 +877,7 @@ class Driver(driver.Driver):
                 "enabled": self._get_autoheal_enabled(cluster),
             },
         },
-        "nodeGroups": self._process_node_groups(cluster),
+        "nodeGroups": self._process_node_groups(cluster, nodegroups),
        "addons": {
             "openstack": {
                 "csiCinder": self._storageclass_definitions(
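With the call site updated, the caller-supplied list can flow end to end. A sketch of the fixed plumbing (the default-to-cluster.nodegroups fallback and the shape of `values` are assumptions):

    def _update_helm_release(self, context, cluster, nodegroups=None):
        # Assumed fallback: ordinary updates still act on the full list.
        if nodegroups is None:
            nodegroups = cluster.nodegroups
        values = {
            # The filtered list now reaches the helper, so a deleted node
            # group drops out of the rendered release and Cluster API can
            # tear down its machines.
            "nodeGroups": self._process_node_groups(cluster, nodegroups),
        }
        # ... merge the remaining values and apply the Helm release ...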
@@ -0,0 +1,8 @@
+---
+fixes:
+  - |
+    Fixed an issue where non-default node groups could not be
+    individually deleted. The node groups would get stuck in
+    the DELETE_IN_PROGRESS state and the underlying VMs would
+    keep running. Node groups were only cleaned up when the
+    entire cluster was deleted.