Remove shenanigans for stack event verbosity

It seems we want to output stack events irrespective of verbosity,
as per https://review.opendev.org/#/c/724856/. There is no point in
having all the useless logic for verbosity in wait_for_stack_ready().

Also fixes stack update for scale-down to list events.

Change-Id: I96a2a2255253aa2feac62b67ad5d5813e3126a20
changes/05/729105/1
Rabi Mishra 2 years ago
parent 4e5328b1b3
commit 2f9215b253
  1. 18
      tripleoclient/tests/v1/overcloud_node/test_overcloud_node.py
  2. 12
      tripleoclient/utils.py
  3. 2
      tripleoclient/v1/tripleo_deploy.py
  4. 5
      tripleoclient/workflows/deployment.py
  5. 10
      tripleoclient/workflows/package_update.py
  6. 9
      tripleoclient/workflows/scale.py

@@ -45,10 +45,6 @@ class TestDeleteNode(fakes.TestDeleteNode):
self.app.client_manager.workflow_engine = mock.Mock()
self.tripleoclient = mock.Mock()
self.websocket = mock.Mock()
self.websocket.__enter__ = lambda s: self.websocket
self.websocket.__exit__ = lambda s, *exc: None
self.tripleoclient.messaging_websocket.return_value = self.websocket
self.workflow = self.app.client_manager.workflow_engine
self.stack_name = self.app.client_manager.orchestration.stacks.get
stack = self.stack_name.return_value = mock.Mock(
@@ -76,9 +72,11 @@ class TestDeleteNode(fakes.TestDeleteNode):
self.addCleanup(wait_stack.stop)
self.app.client_manager.compute.servers.get.return_value = None
@mock.patch('heatclient.common.event_utils.get_events',
autospec=True)
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_node_delete(self, mock_playbook):
def test_node_delete(self, mock_playbook, mock_get_events):
argslist = ['instance1', 'instance2', '--templates',
'--stack', 'overcast', '--timeout', '90', '--yes']
verifylist = [
@@ -121,9 +119,12 @@ class TestDeleteNode(fakes.TestDeleteNode):
self.cmd.take_action,
parsed_args)
@mock.patch('heatclient.common.event_utils.get_events',
autospec=True)
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_node_delete_without_stack(self, mock_playbook):
def test_node_delete_without_stack(self, mock_playbook,
mock_get_events):
arglist = ['instance1', '--yes']
verifylist = [
@@ -133,12 +134,15 @@ class TestDeleteNode(fakes.TestDeleteNode):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
@mock.patch('heatclient.common.event_utils.get_events',
autospec=True)
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
@mock.patch('tripleoclient.utils.tempfile')
def test_node_delete_baremetal_deployment(self,
mock_tempfile,
mock_playbook):
mock_playbook,
mock_get_events):
bm_yaml = [{
'name': 'Compute',

@@ -885,7 +885,7 @@ def create_tempest_deployer_input(config_name='tempest-deployer-input.conf'):
def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
action='CREATE', verbose=False, nested_depth=2,
action='CREATE', nested_depth=2,
max_retries=10):
"""Check the status of an orchestration stack
@@ -919,16 +919,13 @@ def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
return False
stack_name = "%s/%s" % (stack.stack_name, stack.id)
if verbose:
out = sys.stdout
else:
out = open(os.devnull, "w")
retries = 0
while retries <= max_retries:
try:
stack_status, msg = event_utils.poll_for_events(
orchestration_client, stack_name, action=action,
poll_period=5, marker=marker, out=out,
poll_period=5, marker=marker, out=sys.stdout,
nested_depth=nested_depth)
print(msg)
return stack_status == '%s_COMPLETE' % action
@@ -942,9 +939,6 @@ def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
continue
log.error("Error occured while waiting for stack to be ready.")
raise e
finally:
if not verbose:
out.close()
raise RuntimeError(
"wait_for_stack_ready: Max retries {} reached".format(max_retries))

@@ -1244,7 +1244,7 @@ class Deploy(command.Command):
# Wait for complete..
status = utils.wait_for_stack_ready(orchestration_client, stack_id,
verbose=True, nested_depth=6)
nested_depth=6)
if not status:
message = _("Stack create failed")
self.log.error(message)

@@ -110,13 +110,10 @@ def deploy_and_wait(log, clients, stack, plan_name, verbose_level,
status='DEPLOY_FAILED')
raise
# we always want the heat stack output while it's going.
verbose_events = True
# TODO(rabi) Simplify call to get events as we don't need to wait
# for stack to be ready anymore i.e just get the events.
create_result = utils.wait_for_stack_ready(
orchestration_client, plan_name, marker, action, verbose_events)
orchestration_client, plan_name, marker, action)
if not create_result:
shell.OpenStackShell().run(["stack", "failures", "list", plan_name])
set_deployment_status(

@@ -11,8 +11,6 @@
# under the License.
from __future__ import print_function
import time
from heatclient.common import event_utils
from tripleo_common.utils import plan
from tripleo_common.utils import stack
@@ -44,21 +42,21 @@ def update(clients, container):
object_client = tripleoclients.object_store
plan.update_plan_environment_with_image_parameters(
object_client, container)
stack.stack_update(object_client, orchestration_client,
_STACK_TIMEOUT, container)
events = event_utils.get_events(orchestration_client,
stack_id=container,
event_args={'sort_dir': 'desc',
'limit': 1})
marker = events[0].id if events else None
time.sleep(10)
stack.stack_update(object_client, orchestration_client,
_STACK_TIMEOUT, container)
create_result = utils.wait_for_stack_ready(
clients.orchestration,
container,
marker,
'UPDATE',
verbose=True
)
if not create_result:
raise exceptions.DeploymentError(

@@ -14,6 +14,7 @@
# under the License.
from __future__ import print_function
from heatclient.common import event_utils
from tripleo_common.actions import scale
from tripleoclient import utils
@@ -75,6 +76,11 @@ def scale_down(log, clients, stack, nodes, timeout=None, verbosity=0,
verbosity=verbosity,
deployment_timeout=timeout
)
events = event_utils.get_events(clients.orchestration,
stack_id=stack.stack_name,
event_args={'sort_dir': 'desc',
'limit': 1})
marker = events[0].id if events else None
print('Running scale down')
context = clients.tripleoclient.create_mistral_context()
@@ -84,5 +90,6 @@ def scale_down(log, clients, stack, nodes, timeout=None, verbosity=0,
utils.wait_for_stack_ready(
orchestration_client=clients.orchestration,
stack_name=stack.stack_name,
action='UPDATE'
action='UPDATE',
marker=marker
)

Loading…
Cancel
Save