Provide a way to run non-Ansible validations

This patch allows the user to provide non-Ansible validations
to run on the hosts.
The Python or bash scripts given by the user are still run with Ansible
in order to benefit from the features Ansible offers (running tasks via
SSH on remote hosts).

The Python or bash script provided via the CLI is dumped into a generic
playbook and run with Ansible via the builtin script module.

The CLI looks like:
validation run --validation py-val.py \
    --validation-dir /home/stack/validations \
    --type python --limit compute --inventory /home/stack/myhost.yaml
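
For the command above, the generated playbook would look roughly like the
populated GENERIC_PLAYBOOK template added by this patch (an illustrative
sketch only; the temporary file name and the resolved script path are only
known at runtime):

# Illustrative only: py-val.py is assumed to be resolved from
# --validation-dir, and 'compute' comes from --limit.
[{'gather_facts': False,
  'hosts': 'compute',
  'name': 'Generic playbook',
  'tasks': [{'name': 'py-val.py',
             'script': '/home/stack/validations/py-val.py'}]}]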

Change-Id: I5eb4b62d30aab823de0a361128d0bff767be69aa
matbu 2021-05-19 21:52:36 +02:00
parent 4d1df7b0e7
commit a675e6bad9
5 changed files with 169 additions and 17 deletions


@ -72,6 +72,14 @@ class Run(BaseCommand):
default=None,
help=("Path where the run result in JUnitXML "
"format will be stored."))
parser.add_argument('--type', dest='validation_type',
default='ansible',
choices=['python', 'bash', 'ansible'],
help=("Type of the Validation to be run. "
"The default is Ansible playbook format. "
"Python and bash script are also supported. "
"For Python Make sure your python file is "
"executable."))
parser.add_argument(
'--python-interpreter',
@ -163,6 +171,12 @@ class Run(BaseCommand):
extra_vars = common.read_extra_vars_file(parsed_args.extra_vars_file)
if parsed_args.validation_type != 'ansible' \
and parsed_args.limit is None:
error_msg = (
"If you provide a non Ansible validation type, you must "
"specify a target host with --limit option.")
raise RuntimeError(error_msg)
try:
results = v_actions.run_validations(
inventory=parsed_args.inventory,
@ -176,8 +190,8 @@ class Run(BaseCommand):
python_interpreter=parsed_args.python_interpreter,
quiet=quiet_mode,
ssh_user=parsed_args.ssh_user,
log_path=parsed_args.validation_log_dir
)
log_path=parsed_args.validation_log_dir,
validation_type=parsed_args.validation_type)
except RuntimeError as e:
raise RuntimeError(e)


@ -27,3 +27,9 @@ VALIDATIONS_LOG_BASEDIR = os.path.join(os.environ.get('HOME'), 'validations')
VALIDATION_ANSIBLE_ARTIFACT_PATH = os.path.join(
VALIDATIONS_LOG_BASEDIR,
'artifacts')
GENERIC_PLAYBOOK = [
{'gather_facts': False,
'hosts': '',
'name': 'Generic playbook',
'tasks': [{'name': '', 'script': ''}]}]
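
As a rough sketch, run_validations() (further down in this patch) is expected
to fill this template and dump it to a temporary playbook along these lines;
the host and script values below are placeholders, not part of the patch:

import os
import tempfile
import yaml

# Placeholder values: the real code takes the host from --limit and the
# script from the validation passed on the CLI.
script_path = '/home/stack/validations/py-val.py'
for play in GENERIC_PLAYBOOK:
    play.update({'hosts': 'compute'})
    for task in play['tasks']:
        task.update({'name': os.path.basename(script_path),
                     'script': script_path})

# Write the populated template to a temporary playbook under /tmp.
_, filename = tempfile.mkstemp(prefix='py-val.py-', dir='/tmp')
with open(filename, 'w') as tmp_play:
    tmp_play.write(yaml.safe_dump(GENERIC_PLAYBOOK, default_flow_style=False))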


@ -79,7 +79,8 @@ class TestRun(BaseCommand):
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'log_path': mock_log_dir}
'log_path': mock_log_dir,
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value']
@ -111,7 +112,8 @@ class TestRun(BaseCommand):
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'log_path': mock_log_dir}
'log_path': mock_log_dir,
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value1',
@ -156,7 +158,8 @@ class TestRun(BaseCommand):
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'log_path': mock_log_dir}
'log_path': mock_log_dir,
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-vars-file', '/foo/vars.yaml']
@ -186,7 +189,8 @@ class TestRun(BaseCommand):
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'log_path': mock_log_dir}
'log_path': mock_log_dir,
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-env-vars', 'key=value']
@ -220,7 +224,8 @@ class TestRun(BaseCommand):
'extra_env_vars': {'ANSIBLE_STDOUT_CALLBACK': 'default'},
'python_interpreter': sys.executable,
'quiet': False,
'ssh_user': 'doe'}
'ssh_user': 'doe',
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-env-vars', 'ANSIBLE_STDOUT_CALLBACK=default']
@ -250,7 +255,8 @@ class TestRun(BaseCommand):
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'log_path': mock_log_dir}
'log_path': mock_log_dir,
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-env-vars', 'key=value1',
@ -284,7 +290,8 @@ class TestRun(BaseCommand):
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe',
'log_path': mock_log_dir}
'log_path': mock_log_dir,
'validation_type': 'ansible'}
arglist = ['--validation', 'foo',
'--extra-vars', 'key=value',
@ -347,10 +354,61 @@ class TestRun(BaseCommand):
'extra_env_vars': {'key2': 'value2'},
'python_interpreter': sys.executable,
'quiet': True,
'ssh_user': 'doe'}
'ssh_user': 'doe',
'validation_type': 'ansible'}
arglist = ['--validation', 'foo']
verifylist = [('validation_name', ['foo'])]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(RuntimeError, self.cmd.take_action, parsed_args)
@mock.patch('getpass.getuser', return_value='doe')
def test_run_non_ansible_validation_no_host(self, mock_user):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'extra_vars': {'key': 'value'},
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible/',
'validation_name': ['foo'],
'extra_env_vars': {'key2': 'value2'},
'quiet': True,
'ssh_user': 'doe',
'validation_type': 'python'}
arglist = ['--validation', 'foo', '--type', 'python']
verifylist = [('validation_name', ['foo']),
('validation_type', 'python')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(RuntimeError, self.cmd.take_action, parsed_args)
@mock.patch('getpass.getuser',
return_value='doe')
@mock.patch('validations_libs.validation_actions.ValidationActions.'
'run_validations',
return_value=fakes.FAKE_SUCCESS_RUN)
def test_run_non_ansible_validation(self, mock_run, mock_user):
run_called_args = {
'inventory': 'localhost',
'limit_hosts': None,
'group': [],
'extra_vars': {'key': 'value'},
'validations_dir': '/usr/share/ansible/validation-playbooks',
'base_dir': '/usr/share/ansible/',
'validation_name': ['foo'],
'extra_env_vars': {'key2': 'value2'},
'quiet': True,
'ssh_user': 'doe',
'validation_type': 'python'}
arglist = ['--validation', 'foo', '--type', 'python',
'--limit', 'localhost']
verifylist = [('validation_name', ['foo']),
('validation_type', 'python'),
('limit', 'localhost')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)


@ -88,7 +88,7 @@ class TestValidationActions(TestCase):
mock_ansible_run.return_value = ('fake.yaml', 0, 'successful')
run_called_args = {
'workdir': '/var/log/validations/artifacts/123_fake.yaml_time',
'workdir': '/var/log/validations/artifacts/123_fake_time',
'playbook': '/tmp/foo/fake.yaml',
'base_dir': '/usr/share/ansible',
'playbook_dir': '/tmp/foo',
@ -102,7 +102,7 @@ class TestValidationActions(TestCase):
'extra_env_variables': None,
'ansible_cfg': None,
'gathering_policy': 'explicit',
'ansible_artifact_path': '/var/log/validations/artifacts/123_fake.yaml_time',
'ansible_artifact_path': '/var/log/validations/artifacts/123_fake_time',
'log_path': '/var/log/validations',
'run_async': False,
'python_interpreter': None,
@ -148,7 +148,7 @@ class TestValidationActions(TestCase):
mock_ansible_run.return_value = ('fake.yaml', 0, 'successful')
run_called_args = {
'workdir': '/var/log/validations/artifacts/123_fake.yaml_time',
'workdir': '/var/log/validations/artifacts/123_fake_time',
'playbook': '/tmp/foo/fake.yaml',
'base_dir': '/usr/share/ansible',
'playbook_dir': '/tmp/foo',
@ -162,7 +162,7 @@ class TestValidationActions(TestCase):
'extra_env_variables': None,
'ansible_cfg': None,
'gathering_policy': 'explicit',
'ansible_artifact_path': '/var/log/validations/artifacts/123_fake.yaml_time',
'ansible_artifact_path': '/var/log/validations/artifacts/123_fake_time',
'log_path': '/var/log/validations',
'run_async': False,
'python_interpreter': None,
@ -454,3 +454,45 @@ class TestValidationActions(TestCase):
def test_get_status_no_param(self):
v_actions = ValidationActions()
self.assertRaises(RuntimeError, v_actions.get_status)
@mock.patch('validations_libs.validation_logs.ValidationLogs.get_results')
@mock.patch('validations_libs.utils.parse_all_validations_on_disk')
@mock.patch('validations_libs.ansible.Ansible.run')
@mock.patch('validations_libs.utils.create_artifacts_dir',
return_value=('1234', '/tmp/'))
def test_validation_run_non_ansible_type(self, mock_tmp, mock_ansible_run,
mock_validation_dir,
mock_results):
mock_validation_dir.return_value = [{
'description': 'My Validation One Description',
'groups': ['prep', 'pre-deployment'],
'id': 'foo',
'name': 'My Validation One Name',
'parameters': {}}]
mock_ansible_run.return_value = ('foo.yaml', 0, 'successful')
mock_results.return_value = [{'Duration': '0:00:01.761',
'Host_Group': 'overcloud',
'Status': 'PASSED',
'Status_by_Host': 'subnode-1,PASSED',
'UUID': 'foo',
'Unreachable_Hosts': '',
'Validations': 'ntp'}]
expected_run_return = [{'Duration': '0:00:01.761',
'Host_Group': 'overcloud',
'Status': 'PASSED',
'Status_by_Host': 'subnode-1,PASSED',
'UUID': 'foo',
'Unreachable_Hosts': '',
'Validations': 'ntp'}]
playbook = ['fake.yaml']
inventory = 'tmp/inventory.yaml'
run = ValidationActions()
run_return = run.run_validations(playbook, inventory,
group=fakes.GROUPS_LIST,
validations_dir='/tmp/foo',
limit_hosts='compute',
validation_type='python')
self.assertEqual(run_return, expected_run_return)


@ -15,6 +15,7 @@
import logging
import os
import json
import tempfile
import yaml
from validations_libs.ansible import Ansible as v_ansible
@ -238,7 +239,7 @@ class ValidationActions(object):
skip_list=None,
callback_whitelist=None,
output_callback='validation_stdout',
ssh_user=None):
ssh_user=None, validation_type='ansible'):
"""Run one or multiple validations by name(s) or by group(s)
:param validation_name: A list of validation names
@ -302,6 +303,9 @@ class ValidationActions(object):
:rtype: ``list``
:param ssh_user: Ssh user for Ansible remote connection
:type ssh_user: ``string``
:param validation_type: Type of Validation to run, it could be
Ansible playbook, Python or bash script.
:type validation_type: ``string``
:Example:
@ -371,8 +375,33 @@ class ValidationActions(object):
play_name,
limit_hosts)
if _play:
# Handle non-ansible Validation type:
filename = None
if validation_type != 'ansible':
if limit_hosts:
for play in constants.GENERIC_PLAYBOOK:
play.update({'hosts': _hosts})
for tasks in play['tasks']:
tasks.update({'name': os.path.basename(_play)})
tasks.update({'script': playbook})
_, filename = tempfile.mkstemp(
prefix='{}-'.format(_play),
dir='/tmp')
try:
with open(filename, 'w') as tmp_play:
tmp_play.write(
yaml.safe_dump(constants.GENERIC_PLAYBOOK,
default_flow_style=False))
except OSError:
msg = "Unable to write temporary playbook in /tmp."
raise OSError(msg)
playbook = filename
else:
msg = "No valid hosts provided"
raise RuntimeError(msg)
validation_uuid, artifacts_dir = v_utils.create_artifacts_dir(
log_path=log_path, prefix=os.path.basename(playbook))
log_path=log_path, prefix=os.path.basename(_play))
run_ansible = v_ansible(validation_uuid)
_playbook, _rc, _status = run_ansible.run(
workdir=artifacts_dir,
@ -394,12 +423,15 @@ class ValidationActions(object):
run_async=run_async,
python_interpreter=python_interpreter,
ssh_user=ssh_user)
results.append({'playbook': _playbook,
results.append({'playbook': _play,
'rc_code': _rc,
'status': _status,
'validations': _playbook.split('.')[0],
'UUID': validation_uuid,
})
# clean /tmp if non Ansible validation
if filename:
os.remove(filename)
else:
self.log.debug('Skipping Validations: {}'.format(playbook))