[fix] Sync nodegroup status before delete_complete
Magnum cluster deletion is not behaving as expected. While it appears to
delete successfully, _delete_complete routine in
magnum/drivers/heat/driver.py is never called because the status of
nodegroups has not had the chance to sync with Heat before
_check_delete_complete is called. As a result, for example, trustee user
accounts are left orphaned. This PS changes the order of activities so
that _delete_complete is invoked successfully.
Story: 2007965
Task: 40459
Change-Id: Ibadd5b57fe175bb0b100266e2dbcc2e1ea4efcf9
(cherry picked from commit 1cdc0628a2
)
This commit is contained in:
parent
f25640c0c2
commit
e0a4683c08
|
@ -493,15 +493,6 @@ class HeatPoller(object):
|
||||||
stack = self.openstack_client.heat().stacks.get(
|
stack = self.openstack_client.heat().stacks.get(
|
||||||
self.nodegroup.stack_id, resolve_outputs=False)
|
self.nodegroup.stack_id, resolve_outputs=False)
|
||||||
|
|
||||||
# poll_and_check is detached and polling long time to check
|
|
||||||
# status, so another user/client can call delete cluster/stack.
|
|
||||||
if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
|
|
||||||
if self.nodegroup.is_default:
|
|
||||||
self._check_delete_complete()
|
|
||||||
else:
|
|
||||||
self.nodegroup.destroy()
|
|
||||||
return
|
|
||||||
|
|
||||||
if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
|
if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
|
||||||
fields.ClusterStatus.UPDATE_COMPLETE):
|
fields.ClusterStatus.UPDATE_COMPLETE):
|
||||||
# Resolve all outputs if the stack is COMPLETE
|
# Resolve all outputs if the stack is COMPLETE
|
||||||
|
@ -516,6 +507,15 @@ class HeatPoller(object):
|
||||||
nodegroups=[self.nodegroup])
|
nodegroups=[self.nodegroup])
|
||||||
self._sync_cluster_status(stack)
|
self._sync_cluster_status(stack)
|
||||||
|
|
||||||
|
# poll_and_check is detached and polling long time to check
|
||||||
|
# status, so another user/client can call delete cluster/stack.
|
||||||
|
if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
|
||||||
|
if self.nodegroup.is_default:
|
||||||
|
self._check_delete_complete()
|
||||||
|
else:
|
||||||
|
self.nodegroup.destroy()
|
||||||
|
return
|
||||||
|
|
||||||
if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
|
if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
|
||||||
fields.ClusterStatus.DELETE_FAILED,
|
fields.ClusterStatus.DELETE_FAILED,
|
||||||
fields.ClusterStatus.UPDATE_FAILED,
|
fields.ClusterStatus.UPDATE_FAILED,
|
||||||
|
|
|
@ -0,0 +1,5 @@
|
||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
Fixes a regression which left behind trustee user accounts and certificates
|
||||||
|
when a cluster is deleted.
|
Loading…
Reference in New Issue