Browse Source

Add validations show functions

This review adds functionality to parse and show
validation outputs as a dict in the required format

Change-Id: I57ca3d18397c7fbdb084b06224d7cab3d4592d30
changes/81/713181/4
Mathieu Bultel 2 years ago
committed by Gael Chamoulaud
parent
commit
d2c91c8080
  1. 35
      validations_libs/show.py
  2. 9
      validations_libs/tests/fakes.py
  3. 40
      validations_libs/tests/test_utils.py
  4. 44
      validations_libs/tests/test_validations_show.py
  5. 99
      validations_libs/utils.py

35
validations_libs/show.py

@ -0,0 +1,35 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from validations_libs import utils as v_utils
LOG = logging.getLogger(__name__ + ".show")
class Show(object):
    """Collect and present detailed information about validations.

    Combines the static validation definition found on disk with the
    aggregated execution statistics parsed from the validation logs.
    """

    def __init__(self):
        self.log = logging.getLogger(__name__ + ".Show")

    def show_validations(self, validation):
        """Display detailed information about a Validation"""
        # Get validation data:
        details = v_utils.get_validations_details(validation)
        data = v_utils.get_validations_data(details)
        # Merge in the execution statistics parsed from the run logs.
        logs = v_utils.parse_all_validations_logs_on_disk()
        data.update(v_utils.get_validations_stats(logs))
        return data

9
validations_libs/tests/fakes.py

@ -137,6 +137,15 @@ VALIDATIONS_LOGS_CONTENTS_LIST = [{
'validation_output': []
}]
# Expected result of utils.get_validations_data() for the fake
# validation 'my_val1' (display-column keys plus pass-through keys).
VALIDATIONS_DATA = {'Description': 'My Validation One Description',
                    'Groups': ['prep', 'pre-deployment'],
                    'ID': 'my_val1',
                    'Name': 'My Validition One Name',
                    'parameters': {}}

# Expected result of utils.get_validations_stats() for
# VALIDATIONS_LOGS_CONTENTS_LIST above.
VALIDATIONS_STATS = {'Last execution date': '2019-11-25 13:40:14',
                     'Number of execution': 'Total: 1, Passed: 1, Failed: 0'}
def fake_ansible_runner_run_return(status='successful', rc=0):
    """Stand in for AnsibleRunner.run() by echoing a (status, rc) pair."""
    result = (status, rc)
    return result

40
validations_libs/tests/test_utils.py

@ -0,0 +1,40 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from unittest import TestCase
from validations_libs import utils
from validations_libs.tests import fakes
class TestUtils(TestCase):
    """Unit tests for the helper functions in validations_libs.utils."""

    def setUp(self):
        super(TestUtils, self).setUp()

    def test_get_validations_data(self):
        # The raw on-disk entry should be re-keyed to display columns.
        expected = fakes.VALIDATIONS_DATA
        result = utils.get_validations_data(fakes.VALIDATIONS_LIST[0])
        self.assertEqual(result, expected)

    def test_get_validations_stats(self):
        # Stats are aggregated from the parsed log contents.
        result = utils.get_validations_stats(
            fakes.VALIDATIONS_LOGS_CONTENTS_LIST)
        self.assertEqual(result, fakes.VALIDATIONS_STATS)

    @mock.patch('validations_libs.utils.parse_all_validations_on_disk',
                return_value=fakes.VALIDATIONS_LIST)
    def test_get_validations_details(self, mock_parse):
        # Lookup by id should return the matching entry untouched.
        result = utils.get_validations_details('my_val1')
        self.assertEqual(result, fakes.VALIDATIONS_LIST[0])

44
validations_libs/tests/test_validations_show.py

@ -0,0 +1,44 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from unittest import TestCase
from validations_libs.tests import fakes
from validations_libs.show import Show
class TestValidatorShow(TestCase):
    """Unit tests for the Show class."""

    def setUp(self):
        super(TestValidatorShow, self).setUp()

    @mock.patch('validations_libs.utils.parse_all_validations_on_disk',
                return_value=fakes.VALIDATIONS_LIST)
    @mock.patch('validations_libs.utils.get_validations_details',
                return_value=fakes.VALIDATIONS_DATA)
    @mock.patch('validations_libs.utils.parse_all_validations_logs_on_disk',
                return_value=fakes.VALIDATIONS_LOGS_CONTENTS_LIST)
    def test_validation_show(self, mock_parse_validation, mock_data, mock_log):
        # Expected output: the validation data merged with its stats.
        expected = {'Description': 'My Validation One Description',
                    'Groups': ['prep', 'pre-deployment'],
                    'ID': 'my_val1',
                    'Name': 'My Validition One Name',
                    'parameters': {},
                    'Last execution date': '2019-11-25 13:40:14',
                    'Number of execution': 'Total: 1, Passed: 1, Failed: 0'}
        result = Show().show_validations('foo')
        self.assertEqual(result, expected)

99
validations_libs/utils.py

@ -20,6 +20,7 @@ import os
import six
import shutil
import tempfile
import time
import yaml
from validations_libs import constants
@ -192,27 +193,6 @@ def parse_all_validation_groups_on_disk(groups_file_path=None):
return results
def parse_all_validations_logs_on_disk(uuid_run=None, validation_id=None):
    """Return the parsed contents of validation run log files on disk.

    :param uuid_run: if given, only read logs whose filename contains
                     this run UUID.
    :param validation_id: if given, only read logs whose filename
                          contains this validation id.
    :return: a list of dicts, one per matching JSON log file.
    """
    results = []
    path = constants.VALIDATIONS_LOG_BASEDIR
    logfile = "{}/*.json".format(path)
    if validation_id:
        logfile = "{}/*_{}_*.json".format(path, validation_id)
    # NOTE: when both filters are supplied, uuid_run wins because its
    # pattern is assigned last.
    if uuid_run:
        logfile = "{}/*_{}_*.json".format(path, uuid_run)
    logfiles_path = glob.glob(logfile)
    for logfile_path in logfiles_path:
        with open(logfile_path, 'r') as log:
            contents = json.load(log)
            results.append(contents)
    return results
def get_validation_metadata(validation, key):
default_metadata = {
'name': 'Unnamed',
@ -279,5 +259,80 @@ def get_new_validations_logs_on_disk(validations_logs_dir):
f for f in filenames if not f.startswith('processed')
and os.path.splitext(f)[1] == '.json'
]
return files
def parse_all_validations_logs_on_disk(uuid_run=None, validation_id=None):
    """Return the parsed contents of validation run log files on disk.

    :param uuid_run: if given, only read logs whose filename contains
                     this run UUID (takes precedence over
                     validation_id).
    :param validation_id: if given, only read logs whose filename
                          contains this validation id.
    :return: a list of dicts, one per matching JSON log file.
    """
    log_dir = constants.VALIDATIONS_LOG_BASEDIR
    if uuid_run:
        pattern = "{}/*_{}_*.json".format(log_dir, uuid_run)
    elif validation_id:
        pattern = "{}/*_{}_*.json".format(log_dir, validation_id)
    else:
        pattern = "{}/*.json".format(log_dir)
    parsed = []
    for file_path in glob.glob(pattern):
        with open(file_path, 'r') as log:
            parsed.append(json.load(log))
    return parsed
def get_validations_details(validation):
    """Return the on-disk definition of the named validation.

    :param validation: the validation id to look up.
    :return: the first matching validation dict, or {} when no
             validation with that id exists on disk.
    """
    known = parse_all_validations_on_disk(constants.ANSIBLE_VALIDATION_DIR)
    matches = [entry for entry in known if entry['id'] == validation]
    return matches[0] if matches else {}
def get_validations_data(validation):
    """Return validation content re-keyed for display.

    Keys of *validation* that match a display column (id, name,
    description, groups — compared in lowercase) are renamed to the
    capitalized column name; every other key is copied through
    unchanged.

    :param validation: parsed validation content as a dict. Any
                       non-dict input yields an empty dict.
    :return: a dict mapping display keys to the validation's values.
    """
    if not isinstance(validation, dict):
        return {}
    col_keys = ['ID', 'Name', 'Description', 'Groups']
    # Lowercase key -> display key, e.g. 'id' -> 'ID'. Replaces the
    # original O(keys * columns) nested scan with one dict lookup.
    display = {k.lower(): k for k in col_keys}
    return {display.get(key, key): value
            for key, value in validation.items()}
def get_validations_stats(log):
    """Aggregate execution statistics from parsed validation logs.

    An entry with a non-empty 'validation_output' counts as failed,
    otherwise as passed.

    :param log: list of parsed validation log contents (dicts), each
                carrying plays[0]['play']['duration']['start'] as an
                ISO-like timestamp, e.g. '2019-11-25T13:40:14.404623Z'.
    :return: dict with 'Last execution date' (most recent start time
             formatted '%Y-%m-%d %H:%M:%S', or None for an empty log
             list) and a 'Number of execution' summary string.
    """
    total_number = len(log)
    failed_number = 0
    passed_number = 0
    last_execution = None
    dates = []
    # NOTE: renamed the ambiguous loop variable 'l' (flake8 E741).
    for entry in log:
        if entry.get('validation_output'):
            failed_number += 1
        else:
            passed_number += 1
        start = entry['plays'][0]['play']['duration'].get('start')
        # Parse the timestamp directly instead of splitting on 'T'/'Z'
        # and re-concatenating; the trailing 'Z' is still stripped.
        dates.append(time.strptime(start.split('Z')[0],
                                   '%Y-%m-%dT%H:%M:%S.%f'))
    if dates:
        last_execution = time.strftime('%Y-%m-%d %H:%M:%S', max(dates))
    return {"Last execution date": last_execution,
            "Number of execution": "Total: {}, Passed: {}, "
                                   "Failed: {}".format(total_number,
                                                       passed_number,
                                                       failed_number)}
Loading…
Cancel
Save