Browse Source

TripleO Validations Logging CLI Introduction

This patch adds logging support for validations. It introduces two new
Tripleo Validator commands that allow the user to retrieve the execution
history and the details of each run.

Change-Id: Ie80318a7fa684adb7c3bf7c99d526b0de64b0904
Depends-On: I502c38d3f27db3c6f62a47190136dd03627956bc
Depends-On: I0cb2743d1d4d118320a799c6820d48f9b917498f
Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
(cherry picked from commit 4fbd5ffe4f)
changes/77/710977/5
Gael Chamoulaud 7 months ago
committed by Cédric Jeanneret (Tengu)
parent
commit
6f877f6bbf
6 changed files with 468 additions and 58 deletions
  1. +7
    -0
      releasenotes/notes/validation_logging_features-a7c096868197c42a.yaml
  2. +3
    -0
      setup.cfg
  3. +7
    -0
      tripleoclient/constants.py
  4. +145
    -42
      tripleoclient/tests/v1/tripleo/test_tripleo_validator.py
  5. +39
    -0
      tripleoclient/utils.py
  6. +267
    -16
      tripleoclient/v1/tripleo_validator.py

+ 7
- 0
releasenotes/notes/validation_logging_features-a7c096868197c42a.yaml View File

@@ -0,0 +1,7 @@
---
features:
- |
The TripleO Validator CLI now has a logging feature which keeps every
validation execution log on the Undercloud (/var/log/validations/). The CLI
can display the execution history and allows the user to get the full
execution details.

+ 3
- 0
setup.cfg View File

@@ -121,7 +121,10 @@ openstack.tripleoclient.v1 =
tripleo_validator_list = tripleoclient.v1.tripleo_validator:TripleOValidatorList
tripleo_validator_run = tripleoclient.v1.tripleo_validator:TripleOValidatorRun
tripleo_validator_show = tripleoclient.v1.tripleo_validator:TripleOValidatorShow
tripleo_validator_show_history = tripleoclient.v1.tripleo_validator:TripleOValidatorShowHistory
tripleo_validator_show_parameter = tripleoclient.v1.tripleo_validator:TripleOValidatorShowParameter
tripleo_validator_show_run = tripleoclient.v1.tripleo_validator:TripleOValidatorShowRun

oslo.config.opts =
undercloud_config = tripleoclient.config.undercloud:list_opts
standalone_config = tripleoclient.config.standalone:list_opts


+ 7
- 0
tripleoclient/constants.py View File

@@ -92,6 +92,13 @@ ADDITIONAL_ARCHITECTURES = ['ppc64le']

DEFAULT_VALIDATIONS_BASEDIR = '/usr/share/openstack-tripleo-validations'

VALIDATIONS_LOG_BASEDIR = '/var/log/validations'

DEFAULT_WORK_DIR = '/var/lib/mistral'

ANSIBLE_INVENTORY = \
'/var/lib/mistral/overcloud/tripleo-ansible-inventory.yaml'

ANSIBLE_VALIDATION_DIR = \
'/usr/share/openstack-tripleo-validations/playbooks'



+ 145
- 42
tripleoclient/tests/v1/tripleo/test_tripleo_validator.py View File

@@ -14,7 +14,6 @@
#

import mock
import sys

from osc_lib.tests import utils
from tripleoclient.v1 import tripleo_validator
@@ -39,6 +38,110 @@ GROUPS_LIST = [
('group3', 'Group3 description'),
]

VALIDATIONS_LOGS_CONTENTS_LIST = [{
'plays': [{
'play': {
'duration': {
'end': '2019-11-25T13:40:17.538611Z',
'start': '2019-11-25T13:40:14.404623Z',
'time_elapsed': '0:00:03.753'
},
'host': 'undercloud',
'id': '008886df-d297-1eaa-2a74-000000000008',
'validation_id': '512e',
'validation_path':
'/usr/share/openstack-tripleo-validations/playbooks'
},
'tasks': [
{
'hosts': {
'undercloud': {
'_ansible_no_log': False,
'action': 'command',
'changed': False,
'cmd': [u'ls', '/sys/class/block/'],
'delta': '0:00:00.018913',
'end': '2019-11-25 13:40:17.120368',
'invocation': {
'module_args': {
'_raw_params': 'ls /sys/class/block/',
'_uses_shell': False,
'argv': None,
'chdir': None,
'creates': None,
'executable': None,
'removes': None,
'stdin': None,
'stdin_add_newline': True,
'strip_empty_ends': True,
'warn': True
}
},
'rc': 0,
'start': '2019-11-25 13:40:17.101455',
'stderr': '',
'stderr_lines': [],
'stdout': 'vda',
'stdout_lines': [u'vda']
}
},
'task': {
'duration': {
'end': '2019-11-25T13:40:17.336687Z',
'start': '2019-11-25T13:40:14.529880Z'
},
'id':
'008886df-d297-1eaa-2a74-00000000000d',
'name':
'advanced-format-512e-support : List the available drives'
}
},
{
'hosts': {
'undercloud': {
'action':
'advanced_format',
'changed': False,
'msg':
'All items completed',
'results': [{
'_ansible_item_label': 'vda',
'_ansible_no_log': False,
'ansible_loop_var': 'item',
'changed': False,
'item': 'vda',
'skip_reason': 'Conditional result was False',
'skipped': True
}],
'skipped': True
}
},
'task': {
'duration': {
'end': '2019-11-25T13:40:17.538611Z',
'start': '2019-11-25T13:40:17.341704Z'
},
'id': '008886df-d297-1eaa-2a74-00000000000e',
'name':
'advanced-format-512e-support: Detect the drive'
}
}
]
}],
'stats': {
'undercloud': {
'changed': 0,
'failures': 0,
'ignored': 0,
'ok': 1,
'rescued': 0,
'skipped': 1,
'unreachable': 0
}
},
'validation_output': []
}]


class TestValidatorGroupInfo(utils.TestCommand):

@@ -117,54 +220,54 @@ class TestValidatorShowParameter(utils.TestCommand):
self.cmd.take_action(parsed_args)


class TestValidatorRun(utils.TestCommand):
class TestValidatorShowRun(utils.TestCommand):

def setUp(self):
super(TestValidatorShowRun, self).setUp()

# Get the command object to test
self.cmd = tripleo_validator.TripleOValidatorShowRun(self.app,
None)

@mock.patch('tripleoclient.utils.parse_all_validations_logs_on_disk',
return_value=VALIDATIONS_LOGS_CONTENTS_LIST)
def test_validation_show_run(self, mock_validations):
arglist = ['008886df-d297-1eaa-2a74-000000000008']
verifylist = [('uuid', '008886df-d297-1eaa-2a74-000000000008')]

parsed_args = self.check_parser(self.cmd, arglist, verifylist)

self.cmd.take_action(parsed_args)


class TestValidatorShowHistory(utils.TestCommand):

def setUp(self):
super(TestValidatorRun, self).setUp()
super(TestValidatorShowHistory, self).setUp()

# Get the command object to test
self.cmd = tripleo_validator.TripleOValidatorRun(self.app, None)

@mock.patch('sys.exit')
@mock.patch('logging.getLogger')
@mock.patch('pwd.getpwuid')
@mock.patch('os.getuid')
@mock.patch('tripleoclient.utils.get_tripleo_ansible_inventory',
return_value='/home/stack/inventory.yaml')
@mock.patch('tripleoclient.utils.run_ansible_playbook',
autospec=True)
def test_validation_run_with_ansible(self, plan_mock, mock_inventory,
mock_getuid, mock_getpwuid,
mock_logger, mock_sysexit):
mock_pwuid = mock.Mock()
mock_pwuid.pw_dir = '/home/stack'
mock_getpwuid.return_value = mock_pwuid

mock_log = mock.Mock()
mock_logger.return_value = mock_log

playbooks_dir = '/usr/share/openstack-tripleo-validations/playbooks'
self.cmd = tripleo_validator.TripleOValidatorShowHistory(self.app,
None)

@mock.patch('tripleoclient.utils.parse_all_validations_logs_on_disk',
return_value=VALIDATIONS_LOGS_CONTENTS_LIST)
def test_validation_show_history(self, mock_validations):
arglist = []
verifylist = []

parsed_args = self.check_parser(self.cmd, arglist, verifylist)

self.cmd.take_action(parsed_args)

@mock.patch('tripleoclient.utils.parse_all_validations_logs_on_disk',
return_value=VALIDATIONS_LOGS_CONTENTS_LIST)
def test_validation_show_history_for_a_validation(self, mock_validations):
arglist = [
'--validation',
'check-ftype'
'512e'
]
verifylist = [('validation_name', ['check-ftype'])]
verifylist = [('validation', '512e')]

parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)

plan_mock.assert_called_once_with(
logger=mock_log,
plan='overcloud',
inventory='/home/stack/inventory.yaml',
workdir=playbooks_dir,
log_path_dir='/home/stack',
playbook='check-ftype.yaml',
retries=False,
output_callback='validation_output',
extra_vars={},
python_interpreter='/usr/bin/python{}'.format(sys.version_info[0]),
gathering_policy='explicit'
)

assert mock_sysexit.called
self.cmd.take_action(parsed_args)

+ 39
- 0
tripleoclient/utils.py View File

@@ -1870,6 +1870,11 @@ def parse_all_validations_on_disk(path, groups=None):
results = []
validations_abspath = glob.glob("{path}/*.yaml".format(path=path))

if isinstance(groups, six.string_types):
group_list = []
group_list.append(groups)
groups = group_list

for pl in validations_abspath:
validation_id, ext = os.path.splitext(os.path.basename(pl))

@@ -1949,6 +1954,40 @@ def get_validations_yaml(validations_data):
indent=2)


def get_new_validations_logs_on_disk():
    """Return a list of new log execution filenames.

    Walks ``constants.VALIDATIONS_LOG_BASEDIR`` and collects every
    ``*.json`` file whose name does not start with ``processed`` (the
    prefix given to logs already consumed by the CLI). Only base file
    names are returned, not full paths.
    """
    files = []

    for root, dirs, filenames in os.walk(constants.VALIDATIONS_LOG_BASEDIR):
        # Accumulate matches from every directory level. The original
        # code *assigned* to ``files`` on each iteration, silently
        # discarding results from all but the last directory visited.
        files.extend(
            f for f in filenames
            if not f.startswith('processed')
            and os.path.splitext(f)[1] == '.json'
        )

    return files


def parse_all_validations_logs_on_disk(uuid_run=None, validation_id=None):
    """Load validation run log files from disk and return their contents.

    :param uuid_run: restrict the lookup to a single run UUID
    :param validation_id: restrict the lookup to one validation's runs
    :returns: a list of parsed JSON log documents (one per log file)
    """
    base_dir = constants.VALIDATIONS_LOG_BASEDIR

    # Narrow the glob pattern when a filter is given. As in the original
    # lookup order, a uuid_run filter takes precedence over validation_id
    # when both are supplied.
    if uuid_run:
        pattern = "{}/*_{}_*.json".format(base_dir, uuid_run)
    elif validation_id:
        pattern = "{}/*_{}_*.json".format(base_dir, validation_id)
    else:
        pattern = "{}/*.json".format(base_dir)

    parsed_logs = []
    for log_path in glob.glob(pattern):
        with open(log_path, 'r') as handle:
            parsed_logs.append(json.load(handle))

    return parsed_logs


def indent(text):
    '''Indent the given text by four spaces.

    Each line of *text* is prefixed with four spaces and terminated with
    a newline, so the result of a non-empty input always ends in a
    newline. An empty input yields an empty string.
    '''
    # Use a four-space prefix so the behavior matches the documented
    # contract; the displayed format string indented by a single space.
    return ''.join('    {}\n'.format(line) for line in text.splitlines())


+ 267
- 16
tripleoclient/v1/tripleo_validator.py View File

@@ -21,10 +21,12 @@ import pwd
import six
import sys
import textwrap
import time

from concurrent.futures import ThreadPoolExecutor
from osc_lib import exceptions
from osc_lib.i18n import _
from prettytable import PrettyTable

from tripleoclient import command
from tripleoclient import constants
@@ -32,6 +34,13 @@ from tripleoclient import utils as oooutils

LOG = logging.getLogger(__name__ + ".TripleoValidator")

RED = "\033[1;31m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"

FAILED_VALIDATION = "{}FAILED{}".format(RED, RESET)
PASSED_VALIDATION = "{}PASSED{}".format(GREEN, RESET)


class _CommaListGroupAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
@@ -65,8 +74,14 @@ class TripleOValidatorGroupInfo(command.Lister):
raise exceptions.CommandError(
"Could not find groups information file %s" % group_file)

column_name = ("Groups", "Description")
return (column_name, group)
group_info = []
for gp in group:
validations = oooutils.parse_all_validations_on_disk(
constants.ANSIBLE_VALIDATION_DIR, gp[0])
group_info.append((gp[0], gp[1], len(validations)))

column_name = ("Groups", "Description", "Number of Validations")
return (column_name, group_info)


class TripleOValidatorShow(command.ShowOne):
@@ -84,11 +99,14 @@ class TripleOValidatorShow(command.ShowOne):

def take_action(self, parsed_args):
validation = self.get_validations_details(parsed_args.validation_id)
logfile_contents = oooutils.parse_all_validations_logs_on_disk(
validation_id=parsed_args.validation_id)

if not validation:
raise exceptions.CommandError(
"Could not find validation %s" % parsed_args.validation_id)

return self.format_validation(validation)
return self.format_validation(validation, logfile_contents)

def get_validations_details(self, validation):
results = oooutils.parse_all_validations_on_disk(
@@ -99,7 +117,7 @@ class TripleOValidatorShow(command.ShowOne):
return r
return []

def format_validation(self, validation):
def format_validation(self, validation, logfile):
column_names = ["ID"]
data = [validation.pop('id')]

@@ -111,12 +129,51 @@ class TripleOValidatorShow(command.ShowOne):
column_names.append("Description")
data.append(textwrap.fill(validation.pop('description')))

if 'groups' in validation:
column_names.append("Groups")
data.append(", ".join(validation.pop('groups')))

other_fields = list(validation.keys())
other_fields.sort()
for field in other_fields:
column_names.append(field.capitalize())
data.append(validation[field])

# history, stats ...
total_number = 0
failed_number = 0
passed_number = 0
last_execution = None
dates = []

if logfile:
total_number = len(logfile)

for run in logfile:
if 'validation_output' in run and run.get('validation_output'):
failed_number += 1
else:
passed_number += 1

date_time = \
run['plays'][0]['play']['duration'].get('start').split('T')
date_start = date_time[0]
time_start = date_time[1].split('Z')[0]
newdate = \
time.strptime(date_start + time_start, '%Y-%m-%d%H:%M:%S.%f')
dates.append(newdate)

if dates:
last_execution = time.strftime('%Y-%m-%d %H:%M:%S', max(dates))

column_names.append("Number of execution")
data.append("Total: {}, Passed: {}, Failed: {}".format(total_number,
passed_number,
failed_number))

column_names.append("Last execution date")
data.append(last_execution)

return column_names, data


@@ -199,7 +256,7 @@ class TripleOValidatorShowParameter(command.Command):

with open(varsfile[-1], 'w') as f:
params = {}
for val_name in data.keys():
for val_name in list(data.keys()):
for k, v in data[val_name].get('parameters').items():
params[k] = v

@@ -274,7 +331,7 @@ class TripleOValidatorList(command.Lister):

for val in validations:
return_values.append((val.get('id'), val.get('name'),
val.get('groups')))
", ".join(val.get('groups'))))
return (column_name, return_values)
except Exception as e:
raise RuntimeError(_("Validations listing finished with errors\n"
@@ -400,12 +457,19 @@ class TripleOValidatorRun(command.Command):
playbooks.append(val.get('id') + '.yaml')
except Exception as e:
print(
_("Validations listing by group finished with errors"))
_("Getting Validations list by group name"
"finished with errors"))
print('Output: {}'.format(e))

else:
for pb in parsed_args.validation_name:
playbooks.append(pb + '.yaml')
if pb not in constants.VALIDATION_GROUPS:
playbooks.append(pb + '.yaml')
else:
raise exceptions.CommandError(
"Please, use '--group' argument instead of "
"'--validation' to run validation(s) by their name(s)."
)

python_interpreter = \
"/usr/bin/python{}".format(sys.version_info[0])
@@ -430,30 +494,217 @@ class TripleOValidatorRun(command.Command):
playbook=playbook,
inventory=static_inventory,
retries=False,
output_callback='validation_output',
output_callback='validation_json',
extra_vars=extra_vars_input,
python_interpreter=python_interpreter,
gathering_policy='explicit'): playbook
for playbook in playbooks
}

results = []

for tk, pl in six.iteritems(tasks_exec):
try:
rc, output = tk.result()
print('[SUCCESS] - {}\n{}'.format(pl, oooutils.indent(output)))
_rc, output = tk.result()
results.append({
'validation': {
'validation_id': pl,
'logfile': None,
'status': 'PASSED',
'output': output
}})
except Exception as e:
failed_val = True
LOG.error('[FAILED] - {}\n{}'.format(
pl, oooutils.indent(e.args[0])))
results.append({
'validation': {
'validation_id': pl,
'logfile': None,
'status': 'FAILED',
'output': str(e)
}})

if results:
new_log_files = oooutils.get_new_validations_logs_on_disk()

for i in new_log_files:
val_id = "{}.yaml".format(i.split('_')[1])
for res in results:
if res['validation'].get('validation_id') == val_id:
res['validation']['logfile'] = \
os.path.join(constants.VALIDATIONS_LOG_BASEDIR, i)

t = PrettyTable(border=True, header=True, padding_width=1)
t.field_names = [
"UUID", "Validations", "Status", "Host Group(s)",
"Status by Host", "Unreachable Host(s)", "Duration"]

for validation in results:
r = []
logfile = validation['validation'].get('logfile', None)
if logfile and os.path.exists(logfile):
with open(logfile, 'r') as val:
contents = json.load(val)

for i in contents['plays']:
host = [
x.encode('utf-8')
for x in i['play'].get('host').split(', ')
]
val_id = i['play'].get('validation_id')
time_elapsed = \
i['play']['duration'].get('time_elapsed', None)

r.append(contents['plays'][0]['play'].get('id'))
r.append(val_id)
if validation['validation'].get('status') == "PASSED":
r.append(PASSED_VALIDATION)
else:
r.append(FAILED_VALIDATION)

unreachable_hosts = []
hosts_result = []
for h in list(contents['stats'].keys()):
ht = h.encode('utf-8')
if contents['stats'][ht]['unreachable'] != 0:
unreachable_hosts.append(ht)
elif contents['stats'][ht]['failures'] != 0:
hosts_result.append("{}{}{}".format(
RED, ht, RESET))
else:
hosts_result.append("{}{}{}".format(
GREEN, ht, RESET))

r.append(", ".join(host))
r.append(", ".join(hosts_result))
r.append("{}{}{}".format(RED,
", ".join(unreachable_hosts),
RESET))
r.append(time_elapsed)
t.add_row(r)

t.sortby = "UUID"
for field in t.field_names:
if field == "Status":
t.align['Status'] = "l"
else:
t.align[field] = "l"

print(t)

if len(new_log_files) > len(results):
LOG.warn(_('Looks like we have more log files than '
'executed validations'))

for i in new_log_files:
os.rename(
"{}/{}".format(constants.VALIDATIONS_LOG_BASEDIR,
i), "{}/processed_{}".format(
constants.VALIDATIONS_LOG_BASEDIR, i))

LOG.debug(_('Removing static tripleo ansible inventory file'))
oooutils.cleanup_tripleo_ansible_inventory_file(
static_inventory)

if failed_val:
LOG.error(_('One or more validations have failed!'))
sys.exit(1)
sys.exit(0)
raise exceptions.CommandError(
_('One or more validations have failed!'))

def take_action(self, parsed_args):
self._run_validator_run(parsed_args)


class TripleOValidatorShowRun(command.Command):
    """Display details about a Validation execution"""

    def get_parser(self, prog_name):
        # NOTE(review): a raw ArgumentParser is built here instead of
        # extending the osc-lib parent parser; add_help=False avoids a
        # conflict with the outer CLI's own --help handling.
        parser = argparse.ArgumentParser(
            description=self.get_description(),
            prog=prog_name,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=False
        )

        parser.add_argument('uuid',
                            metavar="<uuid>",
                            type=str,
                            help='Validation UUID Run')

        parser.add_argument('--full',
                            action='store_true',
                            help='Show Full Details for the run')

        return parser

    def take_action(self, parsed_args):
        # Look up the on-disk JSON logs matching this run UUID.
        logfile_contents = oooutils.parse_all_validations_logs_on_disk(
            uuid_run=parsed_args.uuid)

        # A UUID is expected to identify exactly one run; more than one
        # match means the logs on disk are ambiguous.
        if len(logfile_contents) > 1:
            raise exceptions.CommandError(
                "Multiple log files found for UUID: %s" % parsed_args.uuid)

        if logfile_contents:
            if parsed_args.full:
                # --full: dump the entire log document.
                print(oooutils.get_validations_json(logfile_contents[0]))
            else:
                # Default: print only the validation_output entries.
                for data in logfile_contents:
                    for tasks in data['validation_output']:
                        print(oooutils.get_validations_json(tasks))
        else:
            raise exceptions.CommandError(
                "Could not find the log file linked to this UUID: %s" %
                parsed_args.uuid)


class TripleOValidatorShowHistory(command.Lister):
    """Display Validations execution history"""

    def get_parser(self, prog_name):
        parser = super(TripleOValidatorShowHistory, self).get_parser(prog_name)

        parser.add_argument('--validation',
                            metavar="<validation>",
                            type=str,
                            help='Display execution history for a validation')

        return parser

    def take_action(self, parsed_args):
        """Return the (columns, rows) listing of past validation runs.

        :raises exceptions.CommandError: when no log files are found on
            disk (optionally filtered by --validation).
        """
        logfile_contents = oooutils.parse_all_validations_logs_on_disk(
            validation_id=parsed_args.validation)

        if not logfile_contents:
            msg = "No History Found"
            if parsed_args.validation:
                raise exceptions.CommandError(
                    "{} for {}.".format(msg, parsed_args.validation))
            # Fixed: the original passed parsed_args.validation as an
            # unused extra argument to format() in this branch.
            raise exceptions.CommandError("{}.".format(msg))

        return_values = []
        column_name = ('UUID', 'Validations',
                       'Status', 'Execution at',
                       'Duration')

        for run in logfile_contents:
            # A run is considered PASSED unless any host reported
            # failures in its stats.
            status = PASSED_VALIDATION
            if 'plays' in run and run.get('plays'):
                # Start timestamps look like
                # '2019-11-25T13:40:14.404623Z'; split into a date and a
                # time component for display.
                date_time = \
                    run['plays'][0]['play']['duration'].get('start').split('T')
                time_elapsed = \
                    run['plays'][0]['play']['duration'].get('time_elapsed')
                date_start = date_time[0]
                time_start = date_time[1].split('Z')[0]

                for k, v in six.iteritems(run['stats']):
                    if v.get('failures') != 0:
                        status = FAILED_VALIDATION

                return_values.append(
                    (run['plays'][0]['play'].get('id'),
                     run['plays'][0]['play'].get('validation_id'), status,
                     "{} {}".format(date_start, time_start), time_elapsed))

        return (column_name, return_values)

Loading…
Cancel
Save