Add asynchronous option to validation libs
In order to run ansible on its own and get validation
progress and results with the CLI, this review adds an
option to run ansible asynchronously
Change-Id: Id884c9fd606ba03cb7d7fb02491582f1f88c3857
(cherry picked from commit 6e1ab6f893)
This commit is contained in:
parent
e3475a2830
commit
497c367dd1
|
@ -21,6 +21,7 @@ import os
|
|||
import six
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import yaml
|
||||
|
||||
from six.moves import configparser
|
||||
|
@ -277,7 +278,7 @@ class Ansible(object):
|
|||
gathering_policy='smart',
|
||||
extra_env_variables=None, parallel_run=False,
|
||||
callback_whitelist=None, ansible_cfg=None,
|
||||
ansible_timeout=30, ansible_artifact_path=None):
|
||||
ansible_timeout=30, ansible_artifact_path=None, run_async=False):
|
||||
|
||||
if not playbook_dir:
|
||||
playbook_dir = workdir
|
||||
|
@ -350,20 +351,17 @@ class Ansible(object):
|
|||
|
||||
if parallel_run:
|
||||
r_opts['directory_isolation_base_path'] = ansible_artifact_path
|
||||
|
||||
runner_config = ansible_runner.runner_config.RunnerConfig(**r_opts)
|
||||
runner_config.prepare()
|
||||
# NOTE(cloudnull): overload the output callback after prepare
|
||||
# to define the specific format we want.
|
||||
# This is only required until PR
|
||||
# https://github.com/ansible/ansible-runner/pull/387
|
||||
# is merged and released. After this PR has been
|
||||
# made available to us, this line should be removed.
|
||||
runner_config.env['ANSIBLE_STDOUT_CALLBACK'] = \
|
||||
envvars['ANSIBLE_STDOUT_CALLBACK']
|
||||
if backward_compat:
|
||||
runner_config.env.update(envvars)
|
||||
runner = ansible_runner.Runner(config=runner_config)
|
||||
|
||||
runner = ansible_runner.Runner(config=runner_config)
|
||||
if run_async:
|
||||
thr = threading.Thread(target=runner.run)
|
||||
thr.start()
|
||||
return playbook, runner.rc, runner.status
|
||||
status, rc = runner.run()
|
||||
return playbook, rc, status
|
||||
|
|
|
@ -129,3 +129,25 @@ class TestAnsible(TestCase):
|
|||
)
|
||||
self.assertEquals((_playbook, _rc, _status),
|
||||
('existing.yaml', 0, 'successful'))
|
||||
|
||||
@mock.patch('os.path.exists', return_value=True)
|
||||
@mock.patch('os.makedirs')
|
||||
@mock.patch.object(Runner, 'run',
|
||||
return_value=fakes.fake_ansible_runner_run_return(rc=0))
|
||||
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
|
||||
return_value="/foo/inventory.yaml")
|
||||
@mock.patch('six.moves.builtins.open')
|
||||
@mock.patch('ansible_runner.runner_config.RunnerConfig')
|
||||
def test_run_success_run_async(self, mock_config, mock_open,
|
||||
mock_dump_artifact, mock_run,
|
||||
mock_mkdirs, mock_exists
|
||||
):
|
||||
_playbook, _rc, _status = self.run.run(
|
||||
playbook='existing.yaml',
|
||||
inventory='localhost,',
|
||||
workdir='/tmp',
|
||||
connection='local',
|
||||
run_async=True
|
||||
)
|
||||
self.assertEquals((_playbook, _rc, _status),
|
||||
('existing.yaml', None, 'unstarted'))
|
||||
|
|
|
@ -62,7 +62,7 @@ class ValidationActions(object):
|
|||
group=None, extra_vars=None, validations_dir=None,
|
||||
validation_name=None, extra_env_vars=None,
|
||||
ansible_cfg=None, quiet=True, workdir=None,
|
||||
limit_hosts=None):
|
||||
limit_hosts=None, run_async=False):
|
||||
self.log = logging.getLogger(__name__ + ".run_validations")
|
||||
playbooks = []
|
||||
validations_dir = (validations_dir if validations_dir
|
||||
|
@ -110,13 +110,16 @@ class ValidationActions(object):
|
|||
extra_env_variables=extra_env_vars,
|
||||
ansible_cfg=ansible_cfg,
|
||||
gathering_policy='explicit',
|
||||
ansible_artifact_path=artifacts_dir)
|
||||
results.append({'validation': {
|
||||
'playbook': _playbook,
|
||||
ansible_artifact_path=artifacts_dir,
|
||||
run_async=run_async)
|
||||
results.append({'playbook': _playbook,
|
||||
'rc_code': _rc,
|
||||
'status': _status,
|
||||
'validation_id': _playbook.split('.')[0]
|
||||
}})
|
||||
'validations': _playbook.split('.')[0],
|
||||
'UUID': validation_uuid,
|
||||
})
|
||||
if run_async:
|
||||
return results
|
||||
# Return log results
|
||||
vlog = ValidationLogs()
|
||||
return vlog.get_results(validation_uuid)
|
||||
|
|
|
@ -157,7 +157,7 @@ class ValidationLogs(object):
|
|||
"""Return logfiles content by validation_id"""
|
||||
log_files = glob.glob("{}/*_{}_*".format(self.logs_path,
|
||||
validation_id))
|
||||
return [self._get_content(l) for l in log_files]
|
||||
return [self._get_content(log) for log in log_files]
|
||||
|
||||
def get_logfile_by_uuid(self, uuid):
|
||||
"""Return logfiles by uuid"""
|
||||
|
@ -166,7 +166,7 @@ class ValidationLogs(object):
|
|||
def get_logfile_content_by_uuid(self, uuid):
|
||||
"""Return logfiles content by uuid"""
|
||||
log_files = glob.glob("{}/{}_*".format(self.logs_path, uuid))
|
||||
return [self._get_content(l) for l in log_files]
|
||||
return [self._get_content(log) for log in log_files]
|
||||
|
||||
def get_logfile_by_uuid_validation_id(self, uuid, validation_id):
|
||||
"""Return logfiles by uuid"""
|
||||
|
@ -177,7 +177,7 @@ class ValidationLogs(object):
|
|||
"""Return logfiles content filter by uuid and content"""
|
||||
log_files = glob.glob("{}/{}_{}_*".format(self.logs_path, uuid,
|
||||
validation_id))
|
||||
return [self._get_content(l) for l in log_files]
|
||||
return [self._get_content(log) for log in log_files]
|
||||
|
||||
def get_all_logfiles(self):
|
||||
"""Return logfiles from logs_path"""
|
||||
|
@ -203,13 +203,13 @@ class ValidationLogs(object):
|
|||
passed_number = 0
|
||||
last_execution = None
|
||||
dates = []
|
||||
for l in logs:
|
||||
if l.get('validation_output'):
|
||||
for log in logs:
|
||||
if log.get('validation_output'):
|
||||
failed_number += 1
|
||||
else:
|
||||
passed_number += 1
|
||||
date_time = \
|
||||
l['plays'][0]['play']['duration'].get('start').split('T')
|
||||
log['plays'][0]['play']['duration'].get('start').split('T')
|
||||
date_start = date_time[0]
|
||||
time_start = date_time[1].split('Z')[0]
|
||||
newdate = \
|
||||
|
|
Loading…
Reference in New Issue