diff --git a/lower-constraints.txt b/lower-constraints.txt
index 4c57ad733..69e57d84d 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -151,6 +151,7 @@ traceback2==1.4.0
 tripleo-common==11.4.0
 ujson==1.35
 unittest2==1.1.0
+validations-libs==1.0.0
 vine==1.1.4
 voluptuous==0.8.9
 waitress==1.1.0
diff --git a/requirements.txt b/requirements.txt
index d90b430a0..e7167d1eb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,3 +20,4 @@ tenacity>=5.0.1 # Apache-2.0
 tripleo-common>=11.4.0 # Apache-2.0
 cryptography>=2.1 # BSD/Apache-2.0
 futures>=3.0.0;python_version=='2.7' or python_version=='2.6' # BSD
+validations-libs>=1.0.0
diff --git a/tripleoclient/tests/test_utils.py b/tripleoclient/tests/test_utils.py
index e41b1493e..70b9293b7 100644
--- a/tripleoclient/tests/test_utils.py
+++ b/tripleoclient/tests/test_utils.py
@@ -1758,51 +1758,6 @@ class TestAnsibleSymlink(TestCase):
         mock_cmd.assert_not_called()
 
 
-class TestGetParamFieldName(TestCase):
-    def test_with_empty_val_data(self):
-        input_parameter = {}
-        expected = "parameters"
-
-        result = utils.get_param_field_name(input_parameter)
-        self.assertEqual(result, expected)
-
-    def test_with_val_data_and_returns_parameters(self):
-        input_parameter = {'validations': [
-            {'description': 'validation number one',
-             'groups': ['prep', 'pre-deployment'],
-             'id': 'Validation_Number_One',
-             'name': 'Validation Number One',
-             'parameters': {}},
-            {'description': 'validation number two',
-             'groups': ['post-deployment'],
-             'id': 'Validation_Number_Two',
-             'name': 'Validation Number Two',
-             'parameters': {'config_file': "/etc/config.conf"}},
-        ]}
-        expected = "parameters"
-
-        result = utils.get_param_field_name(input_parameter)
-        self.assertEqual(result, expected)
-
-    def test_with_val_data_and_returns_metadata(self):
-        input_parameter = {'validations': [
-            {'description': 'validation number one',
-             'groups': ['prep', 'pre-deployment'],
-             'id': 'Validation_Number_One',
-             'name': 'Validation Number One',
-             'metadata': {}},
-            {'description': 'validation number two',
-             'groups': ['post-deployment'],
-             'id': 'Validation_Number_Two',
-             'name': 'Validation Number Two',
-             'metadata': {'config_file': "/etc/config.conf"}},
-        ]}
-        expected = "metadata"
-
-        result = utils.get_param_field_name(input_parameter)
-        self.assertEqual(result, expected)
-
-
 class TestParseExtraVars(TestCase):
     def test_simple_case_text_format(self):
         input_parameter = ['key1=val1', 'key2=val2 key3=val3']
diff --git a/tripleoclient/tests/v1/tripleo/test_tripleo_validator.py b/tripleoclient/tests/v1/tripleo/test_tripleo_validator.py
index f9ccd55d0..69e09014f 100644
--- a/tripleoclient/tests/v1/tripleo/test_tripleo_validator.py
+++ b/tripleoclient/tests/v1/tripleo/test_tripleo_validator.py
@@ -151,8 +151,8 @@ class TestValidatorGroupInfo(utils.TestCommand):
         # Get the command object to test
         self.cmd = tripleo_validator.TripleOValidatorGroupInfo(self.app, None)
 
-    @mock.patch('tripleoclient.utils.parse_all_validation_groups_on_disk',
-                return_value=GROUPS_LIST)
+    @mock.patch('validations_libs.validation_actions.ValidationActions.'
+                'group_information', return_value=GROUPS_LIST)
     def test_show_group_info(self, mock_validations):
         arglist = []
         verifylist = []
@@ -170,7 +170,8 @@ class TestValidatorList(utils.TestCommand):
         # Get the command object to test
         self.cmd = tripleo_validator.TripleOValidatorList(self.app, None)
 
-    @mock.patch('tripleoclient.utils.parse_all_validations_on_disk',
+    @mock.patch('validations_libs.validation_actions.ValidationActions.'
+                'list_validations',
                 return_value=VALIDATIONS_LIST)
     def test_validation_list_noargs(self, mock_validations):
         arglist = []
@@ -189,8 +190,9 @@ class TestValidatorShow(utils.TestCommand):
         # Get the command object to test
         self.cmd = tripleo_validator.TripleOValidatorShow(self.app, None)
 
-    @mock.patch('tripleoclient.utils.parse_all_validations_on_disk',
-                return_value=VALIDATIONS_LIST)
+    @mock.patch('validations_libs.validation_actions.ValidationActions.'
+                'show_validations',
+                return_value=VALIDATIONS_LIST[0])
     def test_validation_show(self, mock_validations):
         arglist = ['my_val1']
         verifylist = [('validation_id', 'my_val1')]
@@ -209,8 +211,9 @@ class TestValidatorShowParameter(utils.TestCommand):
         self.cmd = tripleo_validator.TripleOValidatorShowParameter(self.app,
                                                                    None)
 
-    @mock.patch('tripleoclient.utils.parse_all_validations_on_disk',
-                return_value=VALIDATIONS_LIST)
+    @mock.patch('validations_libs.validation_actions.ValidationActions.'
+                'show_validations_parameters',
+                return_value=VALIDATIONS_LIST[1])
     def test_validation_show_parameter(self, mock_validations):
         arglist = ['--validation', 'my_val2']
         verifylist = [('validation_name', ['my_val2'])]
@@ -229,7 +232,8 @@ class TestValidatorShowRun(utils.TestCommand):
         self.cmd = tripleo_validator.TripleOValidatorShowRun(self.app, None)
 
-    @mock.patch('tripleoclient.utils.parse_all_validations_logs_on_disk',
+    @mock.patch('validations_libs.validation_actions.ValidationLogs.'
+                'get_logfile_content_by_uuid',
                 return_value=VALIDATIONS_LOGS_CONTENTS_LIST)
     def test_validation_show_run(self, mock_validations):
         arglist = ['008886df-d297-1eaa-2a74-000000000008']
@@ -249,7 +253,8 @@ class TestValidatorShowHistory(utils.TestCommand):
         self.cmd = tripleo_validator.TripleOValidatorShowHistory(self.app,
                                                                  None)
 
-    @mock.patch('tripleoclient.utils.parse_all_validations_logs_on_disk',
+    @mock.patch('validations_libs.validation_actions.ValidationActions.'
+                'show_history',
                 return_value=VALIDATIONS_LOGS_CONTENTS_LIST)
     def test_validation_show_history(self, mock_validations):
         arglist = []
@@ -259,7 +264,8 @@ class TestValidatorShowHistory(utils.TestCommand):
         self.cmd.take_action(parsed_args)
 
-    @mock.patch('tripleoclient.utils.parse_all_validations_logs_on_disk',
+    @mock.patch('validations_libs.validation_actions.ValidationActions.'
+                'show_history',
                 return_value=VALIDATIONS_LOGS_CONTENTS_LIST)
     def test_validation_show_history_for_a_validation(self, mock_validations):
         arglist = [
diff --git a/tripleoclient/utils.py b/tripleoclient/utils.py
index 76c2e4bcd..935ba08a3 100644
--- a/tripleoclient/utils.py
+++ b/tripleoclient/utils.py
@@ -31,7 +31,6 @@ import logging
 import shutil
 
 from six.moves.configparser import ConfigParser
-import json
 import netaddr
 import os
 import os.path
@@ -1909,181 +1908,6 @@ def _get_from_cfg(cfg, accessor, param, section):
     return val
 
 
-def get_validation_metadata(validation, key):
-    default_metadata = {
-        'name': 'Unnamed',
-        'description': 'No description',
-        'stage': 'No stage',
-        'groups': [],
-    }
-
-    try:
-        return validation[0]['vars']['metadata'].get(key,
-                                                     default_metadata[key])
-    except KeyError:
-        LOG.exception(_("Key '{key}' not even found in "
-                        "default metadata").format(key=key))
-    except TypeError:
-        LOG.exception(_("Failed to get validation metadata."))
-
-
-def get_validation_parameters(validation):
-    try:
-        return {
-            k: v
-            for k, v in validation[0]['vars'].items()
-            if k != 'metadata'
-        }
-    except KeyError:
-        LOG.debug(_("No parameters found for this validation"))
-        return dict()
-
-
-def parse_all_validation_groups_on_disk(groups_file_path=None):
-    results = []
-
-    if not groups_file_path:
-        groups_file_path = constants.VALIDATION_GROUPS_INFO
-
-    if not os.path.exists(groups_file_path):
-        return results
-
-    with open(groups_file_path, 'r') as grps:
-        contents = yaml.safe_load(grps)
-
-    for grp_name, grp_desc in sorted(contents.items()):
-        results.append((grp_name, grp_desc[0].get('description')))
-
-    return results
-
-
-def parse_all_validations_on_disk(path, groups=None):
-    results = []
-    validations_abspath = glob.glob("{path}/*.yaml".format(path=path))
-
-    if isinstance(groups, six.string_types):
-        group_list = []
-        group_list.append(groups)
-        groups = group_list
-
-    for pl in validations_abspath:
-        validation_id, ext = os.path.splitext(os.path.basename(pl))
-
-        with open(pl, 'r') as val_playbook:
-            contents = yaml.safe_load(val_playbook)
-
-        validation_groups = get_validation_metadata(contents, 'groups') or []
-        if not groups or set.intersection(set(groups),
-                                          set(validation_groups)):
-            results.append({
-                'id': validation_id,
-                'name': get_validation_metadata(contents, 'name'),
-                'groups': get_validation_metadata(contents, 'groups'),
-                'description': get_validation_metadata(contents,
-                                                       'description'),
-                'parameters': get_validation_parameters(contents)
-            })
-
-    return results
-
-
-def get_param_field_name(validations_data=None):
-    """Get the current parameters field name in a Dict
-
-    Returns either 'parameters' or 'metadata'.
-    By Default, it returns 'parameters'.
-    """
-    # TODO(gchamoul): Added for backwards compatibility and will be
-    # removed for Train release.
-    if validations_data is None:
-        validations_data = {}
-
-    if 'metadata' in validations_data.get('validations', [[]])[0]:
-        return 'metadata'
-    return 'parameters'
-
-
-def get_validations_parameters(validations_data,
-                               validation_name=None,
-                               groups=None):
-    if validation_name is None:
-        validation_name = []
-
-    if groups is None:
-        groups = []
-
-    params = {}
-    param_field_name = get_param_field_name(validations_data)
-
-    for val in validations_data['validations']:
-        wanted_validation = False
-        wanted_group = False
-        if val.get('id') in validation_name:
-            wanted_validation = True
-
-        for grp in groups:
-            if grp in val.get('groups'):
-                wanted_group = True
-
-        if wanted_validation or wanted_group:
-            params[val.get('id')] = {
-                'parameters': val.get(param_field_name)
-            }
-
-    return params
-
-
-def get_validations_json(validations_data):
-    """Return the validations information as a pretty printed json """
-    return json.dumps(validations_data, indent=4, sort_keys=True)
-
-
-def get_validations_yaml(validations_data):
-    """Return the validations information as a pretty printed yaml """
-    return yaml.safe_dump(validations_data,
-                          allow_unicode=True,
-                          default_flow_style=False,
-                          indent=2)
-
-
-def get_new_validations_logs_on_disk():
-    """Return a list of new log execution filenames """
-    files = []
-
-    for root, dirs, filenames in os.walk(constants.VALIDATIONS_LOG_BASEDIR):
-        files = [
-            f for f in filenames if not f.startswith('processed')
-            and os.path.splitext(f)[1] == '.json'
-        ]
-
-    return files
-
-
-def parse_all_validations_logs_on_disk(uuid_run=None, validation_id=None):
-    results = []
-    path = constants.VALIDATIONS_LOG_BASEDIR
-    logfile = "{}/*.json".format(path)
-
-    if validation_id:
-        logfile = "{}/*_{}_*.json".format(path, validation_id)
-
-    if uuid_run:
-        logfile = "{}/*_{}_*.json".format(path, uuid_run)
-
-    logfiles_path = glob.glob(logfile)
-
-    for logfile_path in logfiles_path:
-        with open(logfile_path, 'r') as log:
-            contents = json.load(log)
-        results.append(contents)
-
-    return results
-
-
-def indent(text):
-    '''Indent the given text by four spaces.'''
-    return ''.join('    {}\n'.format(line) for line in text.splitlines())
-
-
 def get_local_timezone():
     info = run_command(['timedatectl'], name='timedatectl')
     timezoneline = [tz for tz in info.split('\n') if 'Time zone:' in tz]
diff --git a/tripleoclient/v1/tripleo_validator.py b/tripleoclient/v1/tripleo_validator.py
index 15d537129..d10617f2d 100644
--- a/tripleoclient/v1/tripleo_validator.py
+++ b/tripleoclient/v1/tripleo_validator.py
@@ -16,14 +16,7 @@
 import argparse
 import json
 import logging
-import os
-import pwd
-import six
-import sys
-import textwrap
-import time
 
-from concurrent.futures import ThreadPoolExecutor
 from osc_lib import exceptions
 from osc_lib.i18n import _
 from prettytable import PrettyTable
@@ -32,19 +25,27 @@ from tripleoclient import command
 from tripleoclient import constants
 from tripleoclient import utils as oooutils
 
+from validations_libs import constants as v_consts
+from validations_libs import utils as v_utils
+from validations_libs.validation_actions import ValidationActions
+from validations_libs.validation_logs import ValidationLogs
+
 LOG = logging.getLogger(__name__ + ".TripleoValidator")
 
 RED = "\033[1;31m"
 GREEN = "\033[0;32m"
+CYAN = "\033[36m"
 RESET = "\033[0;0m"
 
 FAILED_VALIDATION = "{}FAILED{}".format(RED, RESET)
 PASSED_VALIDATION = "{}PASSED{}".format(GREEN, RESET)
 
+GROUP_FILE = constants.VALIDATION_GROUPS_INFO
+
 
 class _CommaListGroupAction(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
-        opts = constants.VALIDATION_GROUPS
+        opts = v_utils.get_validation_group_name_list(GROUP_FILE)
         for value in values.split(','):
             if value not in opts:
                 message = ("Invalid choice: {value} (choose from {choice})"
@@ -67,21 +68,8 @@ class TripleOValidatorGroupInfo(command.Lister):
         return parser
 
     def take_action(self, parsed_args):
-        group_file = constants.VALIDATION_GROUPS_INFO
-        group = oooutils.parse_all_validation_groups_on_disk(group_file)
-
-        if not group:
-            raise exceptions.CommandError(
-                "Could not find groups information file %s" % group_file)
-
-        group_info = []
-        for gp in group:
-            validations = oooutils.parse_all_validations_on_disk(
-                constants.ANSIBLE_VALIDATION_DIR, gp[0])
-            group_info.append((gp[0], gp[1], len(validations)))
-
-        column_name = ("Groups", "Description", "Number of Validations")
-        return (column_name, group_info)
+        actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR)
+        return actions.group_information(GROUP_FILE)
 
 
 class TripleOValidatorShow(command.ShowOne):
@@ -98,83 +86,14 @@ class TripleOValidatorShow(command.ShowOne):
         return parser
 
     def take_action(self, parsed_args):
-        validation = self.get_validations_details(parsed_args.validation_id)
-        logfile_contents = oooutils.parse_all_validations_logs_on_disk(
-            validation_id=parsed_args.validation_id)
-
-        if not validation:
-            raise exceptions.CommandError(
-                "Could not find validation %s" % parsed_args.validation_id)
-
-        return self.format_validation(validation, logfile_contents)
-
-    def get_validations_details(self, validation):
-        results = oooutils.parse_all_validations_on_disk(
-            constants.ANSIBLE_VALIDATION_DIR)
-
-        for r in results:
-            if r['id'] == validation:
-                return r
-        return []
-
-    def format_validation(self, validation, logfile):
-        column_names = ["ID"]
-        data = [validation.pop('id')]
-
-        if 'name' in validation:
-            column_names.append("Name")
-            data.append(validation.pop('name'))
-
-        if 'description' in validation:
-            column_names.append("Description")
-            data.append(textwrap.fill(validation.pop('description')))
-
-        if 'groups' in validation:
-            column_names.append("Groups")
-            data.append(", ".join(validation.pop('groups')))
-
-        other_fields = list(validation.keys())
-        other_fields.sort()
-        for field in other_fields:
-            column_names.append(field.capitalize())
-            data.append(validation[field])
-
-        # history, stats ...
-        total_number = 0
-        failed_number = 0
-        passed_number = 0
-        last_execution = None
-        dates = []
-
-        if logfile:
-            total_number = len(logfile)
-
-        for run in logfile:
-            if 'validation_output' in run and run.get('validation_output'):
-                failed_number += 1
-            else:
-                passed_number += 1
-
-            date_time = \
-                run['plays'][0]['play']['duration'].get('start').split('T')
-            date_start = date_time[0]
-            time_start = date_time[1].split('Z')[0]
-            newdate = \
-                time.strptime(date_start + time_start, '%Y-%m-%d%H:%M:%S.%f')
-            dates.append(newdate)
-
-        if dates:
-            last_execution = time.strftime('%Y-%m-%d %H:%M:%S', max(dates))
-
-        column_names.append("Number of execution")
-        data.append("Total: {}, Passed: {}, Failed: {}".format(total_number,
                                                                passed_number,
-                                                               failed_number))
-
-        column_names.append("Last execution date")
-        data.append(last_execution)
-
-        return column_names, data
+        LOG.debug(_('Show validation result'))
+        try:
+            actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR)
+            data = actions.show_validations(parsed_args.validation_id)
+            return data.keys(), data.values()
+        except Exception as e:
+            raise RuntimeError(_("Validations show finished with errors\n"
+                                 "Output: {}").format(e))
 
 
 class TripleOValidatorShowParameter(command.Command):
@@ -217,14 +136,12 @@ class TripleOValidatorShowParameter(command.Command):
 
         parser.add_argument(
             '--download',
-            metavar=('[json|yaml]', '/tmp/myvars'),
             action='store',
-            default=[],
-            nargs=2,
+            default=None,
             help=_("Create a json or a yaml file "
                    "containing all the variables "
                    "available for the validations: "
-                   "[yaml|json] /tmp/myvars")
+                   "/tmp/myvars")
         )
 
         parser.add_argument(
@@ -239,65 +156,18 @@ class TripleOValidatorShowParameter(command.Command):
 
         return parser
 
-    def _create_variables_file(self, data, varsfile):
-        msg = (_("The file %s already exists on the filesystem, "
-                 "do you still want to continue [y/N] "))
-
-        if varsfile[0] not in ['json', 'yaml']:
-            raise RuntimeError(_('Wrong file type: %s') % varsfile[0])
-        else:
-            LOG.debug(_('Launch variables file creation'))
-            try:
-                if os.path.exists(varsfile[-1]):
-                    confirm = oooutils.prompt_user_for_confirmation(
-                        message=msg % varsfile[-1], logger=LOG)
-                    if not confirm:
-                        raise RuntimeError(_("Action not confirmed, exiting"))
-
-                with open(varsfile[-1], 'w') as f:
-                    params = {}
-                    for val_name in list(data.keys()):
-                        for k, v in data[val_name].get('parameters').items():
-                            params[k] = v
-
-                    if varsfile[0] == 'json':
-                        f.write(oooutils.get_validations_json(params))
-                    elif varsfile[0] == 'yaml':
-                        f.write(oooutils.get_validations_yaml(params))
-                print(
-                    _('The file %s has been created successfully') %
-                    varsfile[-1])
-            except Exception as e:
-                print(_("Creating variables file finished with errors"))
-                print('Output: {}'.format(e))
-
-    def _run_validator_show_parameter(self, parsed_args):
-        LOG.debug(_('Launch showing parameters for the validations'))
-        try:
-            validations = oooutils.parse_all_validations_on_disk(
-                constants.ANSIBLE_VALIDATION_DIR)
-
-            out = oooutils.get_validations_parameters(
-                {'validations': validations},
-                parsed_args.validation_name,
-                parsed_args.group
-            )
-
-            if parsed_args.download:
-                self._create_variables_file(out,
-                                            parsed_args.download)
-            else:
-                if parsed_args.format == 'yaml':
-                    print(oooutils.get_validations_yaml(out))
-                else:
-                    print(oooutils.get_validations_json(out))
-        except Exception as e:
-            raise RuntimeError(_("Validations Show Parameters "
-                                 "finished with errors\n"
-                                 "Output: {}").format(e))
-
     def take_action(self, parsed_args):
-        self._run_validator_show_parameter(parsed_args)
+        actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR)
+        params = actions.show_validations_parameters(
+            parsed_args.validation_name,
+            parsed_args.group,
+            parsed_args.format,
+            parsed_args.download)
+        if parsed_args.download:
+            print("The file {} has been created successfully".format(
+                parsed_args.download))
+        else:
+            print(params)
 
 
 class TripleOValidatorList(command.Lister):
@@ -323,16 +193,11 @@ class TripleOValidatorList(command.Lister):
     def take_action(self, parsed_args):
         LOG.debug(_('Launch listing the validations'))
         try:
-            validations = oooutils.parse_all_validations_on_disk(
-                constants.ANSIBLE_VALIDATION_DIR, parsed_args.group)
-
-            return_values = []
-            column_name = ('ID', 'Name', 'Groups')
-
-            for val in validations:
-                return_values.append((val.get('id'), val.get('name'),
-                                      ", ".join(val.get('groups'))))
-            return (column_name, return_values)
+            v_consts.DEFAULT_VALIDATIONS_BASEDIR = constants.\
+                DEFAULT_VALIDATIONS_BASEDIR
+            actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR,
+                                        parsed_args.group)
+            return actions.list_validations()
         except Exception as e:
             raise RuntimeError(_("Validations listing finished with errors\n"
                                  "Output: {}").format(e))
@@ -357,15 +222,29 @@ class TripleOValidatorRun(command.Command):
         )
 
         parser.add_argument(
-            '--workers', '-w',
-            metavar='N',
-            dest='workers',
-            default=1,
-            type=int,
-            help=_("The maximum number of threads that can "
-                   "be used to execute the given validations")
+            '--quiet',
+            action='store_true',
+            default=False,
+            help=_(
+                "Run Ansible in silent mode."
+            )
         )
 
+        parser.add_argument(
+            '--limit', action='store', required=False, help=_(
+                "A string that identifies a single node or a comma-separated"
+                " list of nodes to be validated in this run invocation."
+                " For example: --limit \"compute-0,"
+                " compute-1, compute-5\".")
+        )
+
+        parser.add_argument('--playbook',
+                            nargs="*",
+                            default=None,
+                            help=_("List of Ansible playbooks to use "
+                                   "for the validations. It can be a "
+                                   "playbook path or a list of playbooks.")
+                            )
+
         extra_vars_group = parser.add_mutually_exclusive_group(required=False)
 
         extra_vars_group.add_argument(
@@ -391,6 +270,17 @@
             )
         )
 
+        extra_vars_group.add_argument(
+            '--extra-env-vars',
+            action='store',
+            default={},
+            type=json.loads,
+            help=_(
+                "A dictionary as extra environment variables you may need "
+                "to provide to your Ansible execution, for example: "
+                "ANSIBLE_STDOUT_CALLBACK=default")
+        )
+
         ex_group = parser.add_mutually_exclusive_group(required=True)
 
         ex_group.add_argument(
@@ -420,58 +310,10 @@
 
         return parser
 
-    def _run_ansible(self, logger, plan, workdir, log_path_dir, playbook,
-                     inventory, retries, output_callback, extra_vars,
-                     python_interpreter, gathering_policy):
-        oooutils.run_ansible_playbook(
-            logger=logger,
-            plan=plan,
-            workdir=workdir,
-            log_path_dir=log_path_dir,
-            playbook=playbook,
-            inventory=inventory,
-            retries=retries,
-            output_callback=output_callback,
-            extra_vars=extra_vars,
-            python_interpreter=python_interpreter,
-            gathering_policy=gathering_policy)
-
     def _run_validator_run(self, parsed_args):
         LOG = logging.getLogger(__name__ + ".ValidationsRunAnsible")
-        playbooks = []
-        extra_vars_input = {}
-
-        if parsed_args.extra_vars:
-            extra_vars_input = parsed_args.extra_vars
-
-        if parsed_args.extra_vars_file:
-            extra_vars_input = parsed_args.extra_vars_file
-
-        if parsed_args.group:
-            LOG.debug(_('Getting the validations list by group'))
-            try:
-                output = oooutils.parse_all_validations_on_disk(
-                    constants.ANSIBLE_VALIDATION_DIR, parsed_args.group)
-                for val in output:
-                    playbooks.append(val.get('id') + '.yaml')
-            except Exception as e:
-                print(
-                    _("Getting Validations list by group name"
-                      "finished with errors"))
-                print('Output: {}'.format(e))
-
-        else:
-            for pb in parsed_args.validation_name:
-                if pb not in constants.VALIDATION_GROUPS:
-                    playbooks.append(pb + '.yaml')
-                else:
-                    raise exceptions.CommandError(
-                        "Please, use '--group' argument instead of "
-                        "'--validation' to run validation(s) by their name(s)."
-                    )
-
-        python_interpreter = \
-            "/usr/bin/python{}".format(sys.version_info[0])
+        limit = parsed_args.limit
+        playbook = parsed_args.playbook
 
         static_inventory = oooutils.get_tripleo_ansible_inventory(
             ssh_user='heat-admin',
@@ -479,133 +321,45 @@
             undercloud_connection='local',
             return_inventory_file_path=True)
 
-        failed_val = False
+        v_consts.DEFAULT_VALIDATIONS_BASEDIR = constants.\
+            DEFAULT_VALIDATIONS_BASEDIR
+        actions = ValidationActions()
+        results = actions.run_validations(
+            playbook=(playbook if playbook else []),
+            inventory=static_inventory,
+            limit_hosts=limit,
+            group=parsed_args.group,
+            extra_vars=parsed_args.extra_vars,
+            validations_dir=constants.ANSIBLE_VALIDATION_DIR,
+            validation_name=parsed_args.validation_name,
+            extra_env_vars=parsed_args.extra_env_vars,
+            quiet=parsed_args.quiet)
 
-        with ThreadPoolExecutor(max_workers=parsed_args.workers) as executor:
-            LOG.debug(_('Running the validations with Ansible'))
-            tasks_exec = {
-                executor.submit(
-                    self._run_ansible,
-                    logger=LOG,
-                    plan=parsed_args.plan,
-                    workdir=constants.ANSIBLE_VALIDATION_DIR,
-                    log_path_dir=pwd.getpwuid(os.getuid()).pw_dir,
-                    playbook=playbook,
-                    inventory=static_inventory,
-                    retries=False,
-                    output_callback='validation_json',
-                    extra_vars=extra_vars_input,
-                    python_interpreter=python_interpreter,
-                    gathering_policy='explicit'): playbook
-                for playbook in playbooks
-            }
-
-        results = []
-
-        for tk, pl in six.iteritems(tasks_exec):
-            try:
-                tk.result()
-                results.append({
-                    'validation': {
-                        'validation_id': pl,
-                        'logfile': None,
-                        'status': 'PASSED',
-                        'output': 'Ansible playbook execution complete.'
-                    }})
-            except Exception as e:
-                failed_val = True
-                results.append({
-                    'validation': {
-                        'validation_id': pl,
-                        'logfile': None,
-                        'status': 'FAILED',
-                        'output': str(e)
-                    }})
-
-        if results:
-            new_log_files = oooutils.get_new_validations_logs_on_disk()
-
-            for i in new_log_files:
-                val_id = "{}.yaml".format(i.split('_')[1])
-                for res in results:
-                    if res['validation'].get('validation_id') == val_id:
-                        res['validation']['logfile'] = \
-                            os.path.join(constants.VALIDATIONS_LOG_BASEDIR, i)
-
-            t = PrettyTable(border=True, header=True, padding_width=1)
-            t.field_names = [
-                "UUID", "Validations", "Status", "Host Group(s)",
-                "Status by Host", "Unreachable Host(s)", "Duration"]
-
-            for validation in results:
-                r = []
-                logfile = validation['validation'].get('logfile', None)
-                if logfile and os.path.exists(logfile):
-                    with open(logfile, 'r') as val:
-                        contents = json.load(val)
-
-                    for i in contents['plays']:
-                        host = [
-                            x for x in i['play'].get('host').split(', ')
-                        ]
-                        val_id = i['play'].get('validation_id')
-                        time_elapsed = \
-                            i['play']['duration'].get('time_elapsed', None)
-
-                    r.append(contents['plays'][0]['play'].get('id'))
-                    r.append(val_id)
-                    if validation['validation'].get('status') == "PASSED":
-                        r.append(PASSED_VALIDATION)
-                    else:
-                        r.append(FAILED_VALIDATION)
-
-                    unreachable_hosts = []
-                    hosts_result = []
-                    for ht in list(contents['stats'].keys()):
-                        if contents['stats'][ht]['unreachable'] != 0:
-                            unreachable_hosts.append(ht)
-                        elif contents['stats'][ht]['failures'] != 0:
-                            hosts_result.append("{}{}{}".format(
-                                RED, ht, RESET))
-                        else:
-                            hosts_result.append("{}{}{}".format(
-                                GREEN, ht, RESET))
-
-                    r.append(", ".join(host))
-                    r.append(", ".join(hosts_result))
-                    r.append("{}{}{}".format(RED,
-                                             ", ".join(unreachable_hosts),
-                                             RESET))
-                    r.append(time_elapsed)
-                    t.add_row(r)
-
-            t.sortby = "UUID"
-            for field in t.field_names:
-                if field == "Status":
-                    t.align['Status'] = "l"
-                else:
-                    t.align[field] = "l"
-
-            print(t)
-
-            if len(new_log_files) > len(results):
-                LOG.warning(_('Looks like we have more log files than '
-                              'executed validations'))
-
-            for i in new_log_files:
-                os.rename(
-                    "{}/{}".format(constants.VALIDATIONS_LOG_BASEDIR,
-                                   i), "{}/processed_{}".format(
-                        constants.VALIDATIONS_LOG_BASEDIR, i))
+        # Build the output table
+        t = PrettyTable(border=True, header=True, padding_width=1)
+        # Set the field names from the keys of the result dicts
+        t.field_names = results[0].keys()
+        for r in results:
+            if r.get('Status_by_Host'):
+                h = []
+                for host in r['Status_by_Host'].split(', '):
+                    _name, _status = host.split(',')
+                    color = (GREEN if _status == 'PASSED' else RED)
+                    _name = '{}{}{}'.format(color, _name, RESET)
+                    h.append(_name)
+                r['Status_by_Host'] = ', '.join(h)
+            if r.get('status'):
+                status = r.get('status')
+                color = (CYAN if status in ['starting', 'running']
+                         else GREEN if status == 'PASSED' else RED)
+                r['status'] = '{}{}{}'.format(color, status, RESET)
+            t.add_row(r.values())
+        print(t)
 
         LOG.debug(_('Removing static tripleo ansible inventory file'))
         oooutils.cleanup_tripleo_ansible_inventory_file(
             static_inventory)
 
-        if failed_val:
-            raise exceptions.CommandError(
-                _('One or more validations have failed!'))
-
     def take_action(self, parsed_args):
         self._run_validator_run(parsed_args)
@@ -628,25 +382,22 @@
 
         parser.add_argument('--full',
                             action='store_true',
+                            default=True,
                             help='Show Full Details for the run')
 
         return parser
 
     def take_action(self, parsed_args):
-        logfile_contents = oooutils.parse_all_validations_logs_on_disk(
-            uuid_run=parsed_args.uuid)
-
-        if len(logfile_contents) > 1:
-            raise exceptions.CommandError(
-                "Multiple log files found for UUID: %s" % parsed_args.uuid)
-
-        if logfile_contents:
+        vlogs = ValidationLogs()
+        data = vlogs.get_logfile_content_by_uuid(parsed_args.uuid)
+        if data:
             if parsed_args.full:
-                print(oooutils.get_validations_json(logfile_contents[0]))
+                for d in data:
+                    print(json.dumps(d, indent=4, sort_keys=True))
             else:
-                for data in logfile_contents:
-                    for tasks in data['validation_output']:
-                        print(oooutils.get_validations_json(tasks))
+                for d in data:
+                    for p in d['plays']:
+                        print(json.dumps(p['tasks'], indent=4, sort_keys=True))
         else:
             raise exceptions.CommandError(
                 "Could not find the log file linked to this UUID: %s" %
@@ -667,41 +418,5 @@ class TripleOValidatorShowHistory(command.Lister):
         return parser
 
     def take_action(self, parsed_args):
-        logfile_contents = oooutils.parse_all_validations_logs_on_disk(
-            validation_id=parsed_args.validation)
-
-        if not logfile_contents:
-            msg = "No History Found"
-            if parsed_args.validation:
-                raise exceptions.CommandError(
-                    "{} for {}.".format(
-                        msg, parsed_args.validation))
-            else:
-                raise exceptions.CommandError(
-                    "{}.".format(msg, parsed_args.validation))
-
-        return_values = []
-        column_name = ('UUID', 'Validations',
-                       'Status', 'Execution at',
-                       'Duration')
-
-        for run in logfile_contents:
-            status = PASSED_VALIDATION
-            if 'plays' in run and run.get('plays'):
-                date_time = \
-                    run['plays'][0]['play']['duration'].get('start').split('T')
-                time_elapsed = \
-                    run['plays'][0]['play']['duration'].get('time_elapsed')
-                date_start = date_time[0]
-                time_start = date_time[1].split('Z')[0]
-
-                for k, v in six.iteritems(run['stats']):
-                    if v.get('failures') != 0:
-                        status = FAILED_VALIDATION
-
-                return_values.append(
-                    (run['plays'][0]['play'].get('id'),
-                     run['plays'][0]['play'].get('validation_id'), status,
-                     "{} {}".format(date_start, time_start), time_elapsed))
-
-        return (column_name, return_values)
+        actions = ValidationActions(constants.ANSIBLE_VALIDATION_DIR)
+        return actions.show_history(parsed_args.validation)
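
For reviewers unfamiliar with validations-libs, here is a minimal sketch of the ValidationActions API this patch switches to. It only exercises the constructor and the list_validations()/run_validations() calls visible in the diff above; the playbook directory, the inventory path, and the 'check-ram' validation name are illustrative assumptions, not values taken from this change.

# Minimal usage sketch of the validations-libs API adopted by this patch.
# All paths and the validation name below are illustrative assumptions.
from validations_libs.validation_actions import ValidationActions

# Same positional constructor argument as in tripleo_validator.py above.
actions = ValidationActions('/usr/share/ansible/validation-playbooks')

# list_validations() is returned directly from the Lister commands'
# take_action() above, so it yields a (column_names, rows) pair.
columns, rows = actions.list_validations()
print(columns)

# run_validations() returns a list of result dicts; the run command above
# colorizes their 'status' and 'Status_by_Host' keys before printing.
results = actions.run_validations(
    playbook=[],
    inventory='/tmp/tripleo-ansible-inventory.yaml',  # assumed path
    limit_hosts=None,
    group=None,
    extra_vars=None,
    validations_dir='/usr/share/ansible/validation-playbooks',
    validation_name=['check-ram'],  # hypothetical validation id
    extra_env_vars={},
    quiet=True)
for res in results:
    print(res.get('status'))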