Add wait function to the node delete workflow

This wait function mimics what we used to have with the old mistral workflow,
by pausing and streaming heat status information while a node is being deleted.

Change-Id: I3d85bd85bffb8e1e6bc9d48b8d51fbab5302580d
Signed-off-by: Kevin Carter <kecarter@redhat.com>
This commit is contained in:
Kevin Carter 2020-02-07 08:31:00 -06:00
parent 045bd823a8
commit c0d45a826d
No known key found for this signature in database
GPG Key ID: CE94BD890A47B20A
2 changed files with 15 additions and 5 deletions

View File

@@ -69,7 +69,9 @@ class TestDeleteNode(fakes.TestDeleteNode):
     # TODO(someone): This test does not pass with autospec=True, it should
     # probably be fixed so that it can pass with that.
-    def test_node_delete(self):
+    @mock.patch("heatclient.common.event_utils.poll_for_events")
+    def test_node_delete(self, mock_poll):
+        mock_poll.return_value = ("CREATE_IN_PROGRESS", "MESSAGE")
         argslist = ['instance1', 'instance2', '--templates',
                     '--stack', 'overcast', '--timeout', '90', '--yes']
         verifylist = [
@@ -77,7 +79,6 @@ class TestDeleteNode(fakes.TestDeleteNode):
             ('nodes', ['instance1', 'instance2'])
         ]
         parsed_args = self.check_parser(self.cmd, argslist, verifylist)
-
         self.websocket.wait_for_messages.return_value = iter([{
             "execution_id": "IDID",
             "status": "SUCCESS",
@@ -121,8 +122,9 @@ class TestDeleteNode(fakes.TestDeleteNode):
         # Verify
         self.workflow.executions.create.assert_not_called()

-    def test_node_delete_without_stack(self):
+    @mock.patch("heatclient.common.event_utils.poll_for_events")
+    def test_node_delete_without_stack(self, mock_poll):
+        mock_poll.return_value = ("CREATE_IN_PROGRESS", "MESSAGE")
         arglist = ['instance1', '--yes']
         verifylist = [
@@ -160,12 +162,14 @@ class TestDeleteNode(fakes.TestDeleteNode):
         self.assertRaises(exceptions.DeploymentError,
                           self.cmd.take_action, parsed_args)

+    @mock.patch("heatclient.common.event_utils.poll_for_events")
     @mock.patch('tripleoclient.workflows.baremetal.expand_roles',
                 autospec=True)
     @mock.patch('tripleoclient.workflows.baremetal.undeploy_roles',
                 autospec=True)
     def test_node_delete_baremetal_deployment(self, mock_undeploy_roles,
-                                              mock_expand_roles):
+                                              mock_expand_roles, mock_poll):
+        mock_poll.return_value = ("CREATE_IN_PROGRESS", "MESSAGE")
         self.websocket.wait_for_messages.return_value = iter([{
             "execution_id": "IDID",
             "status": "SUCCESS",

View File

@@ -17,6 +17,7 @@ from __future__ import print_function
 from tripleo_common.actions import scale
 from tripleoclient import exceptions
+from tripleoclient import utils
 from tripleoclient.workflows import base
@@ -72,3 +73,8 @@ def scale_down(clients, plan_name, nodes, timeout=None):
     context = clients.tripleoclient.create_mistral_context()
     scale_down_action = scale.ScaleDownAction(nodes=nodes, timeout=timeout)
     scale_down_action.run(context=context)
+    utils.wait_for_stack_ready(
+        orchestration_client=clients.orchestration,
+        stack_name=plan_name,
+        action='UPDATE'
+    )