Add unit tests for ansible runner library
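Add a first test suite for the Ansible runner wrapper in validations_libs:
cover the missing-playbook error path and the default, gathering-policy and
local-connection success paths, add a fake ansible_runner return helper to
the test fakes, pull ansible-runner into requirements, and move the existing
list/run tests from the external mock package to unittest.mock. The
PrettyTable-based get_results helper in utils is commented out along the way.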
parent 0fb4205479
commit 32d15b6a1b
@@ -4,3 +4,5 @@

pbr>=3.1.1 # Apache-2.0
six>=1.11.0 # MIT

ansible-runner
@@ -4,7 +4,7 @@

openstackdocstheme>=1.20.0 # Apache-2.0
hacking<0.12,>=0.11.0 # Apache-2.0

mock
coverage!=4.4,>=4.0 # Apache-2.0
python-subunit>=1.0.0 # Apache-2.0/BSD
sphinx>=1.8.0,<2.0.0;python_version=='2.7' # BSD
@@ -13,12 +13,12 @@
# under the License.
#

import constants
import logging
import os
import six

from validations_libs.ansible import Ansible as v_ansible
from validations_libs import constants
from validations_libs import utils as v_utils

LOG = logging.getLogger(__name__ + ".run")
@@ -13,7 +13,7 @@
# under the License.
#

import mock
from unittest import mock


VALIDATIONS_LIST = [{
@@ -139,3 +139,7 @@ VALIDATIONS_LOGS_CONTENTS_LIST = [{
    },
    'validation_output': []
}]


def fake_ansible_runner_run_return(status='successful', rc=0):
    return status, rc
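For context, a minimal sketch of how this fake is meant to be consumed,
mirroring the patching done in the new test module below: Runner.run is
replaced so no playbook is actually executed and the caller just sees a
canned (status, rc) pair.

from unittest import mock

from ansible_runner import Runner

from validations_libs.tests import fakes

# Stand in for ansible_runner's Runner.run; code driving ansible-runner
# now receives ('failed', 1) instead of triggering a real playbook run.
with mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(
                           rc=1, status='failed')):
    ...  # exercise the code under test here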
@@ -0,0 +1,141 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from unittest import mock
from unittest import TestCase

from ansible_runner import Runner
from validations_libs.ansible import Ansible
from validations_libs.tests import fakes
from validations_libs import utils


class TestAnsible(TestCase):

    def setUp(self):
        super(TestAnsible, self).setUp()
        self.unlink_patch = mock.patch('os.unlink')
        self.addCleanup(self.unlink_patch.stop)
        self.unlink_patch.start()
        self.run = Ansible()

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    def test_check_no_playbook(self, mock_dump_artifact, mock_exists):
        self.assertRaises(
            RuntimeError,
            self.run.run,
            'non-existing.yaml',
            'localhost,',
            '/tmp'
        )
        mock_exists.assert_called_with('/tmp/non-existing.yaml')


    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(
        Runner,
        'run',
        return_value=fakes.fake_ansible_runner_run_return(rc=1,
                                                          status='failed')
    )
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_ansible_runner_error(self, mock_stdout, mock_dump_artifact,
                                  mock_run, mock_mkdirs, mock_exists,
                                  mock_mkdtemp):

        stdout_file, _playbook, _rc, _status = self.run.run('existing.yaml',
                                                            'localhost,',
                                                            '/tmp')
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 1, 'failed'))


    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(rc=0)
                       )
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_run_success_default(self, mock_stdout, mock_dump_artifact,
                                 mock_run, mock_mkdirs, mock_exists,
                                 mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run(
            playbook='existing.yaml',
            inventory='localhost,',
            workdir='/tmp'
        )
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 0, 'successful'))


    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(rc=0)
                       )
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_run_success_gathering_policy(self, mock_stdout,
                                          mock_dump_artifact, mock_run,
                                          mock_mkdirs, mock_exists,
                                          mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run(
            playbook='existing.yaml',
            inventory='localhost,',
            workdir='/tmp',
            connection='local',
            gathering_policy='smart'
        )
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 0, 'successful'))


    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(rc=0)
                       )
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_run_success_local(self, mock_stdout,
                               mock_dump_artifact, mock_run,
                               mock_mkdirs, mock_exists,
                               mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run(
            playbook='existing.yaml',
            inventory='localhost,',
            workdir='/tmp',
            connection='local'
        )
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 0, 'successful'))
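Taken together, the four tests pin down the interface they expect from the
wrapper. A hedged summary, inferred purely from the calls above rather than
from separate API documentation:

# Assumed interface, as exercised by the tests above (not authoritative):
#
#     stdout_file, playbook, rc, status = Ansible().run(
#         playbook='existing.yaml',    # resolved under workdir; a missing
#                                      # file raises RuntimeError
#         inventory='localhost,',
#         workdir='/tmp',
#         connection='local',          # optional
#         gathering_policy='smart',    # optional
#     )
#
# rc and status come from ansible_runner.Runner.run, e.g. 0/'successful'
# or 1/'failed'.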
@@ -1,4 +1,4 @@
# Copyright 2018 Red Hat, Inc.
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,14 +13,14 @@
# under the License.
#

import mock
import unittest
from unittest import mock
from unittest import TestCase

from validations_libs.tests import fakes
from validations_libs.list import List


class TestValidatorList(unittest.TestCase):
class TestValidatorList(TestCase):

    def setUp(self):
        super(TestValidatorList, self).setUp()
@@ -1,4 +1,4 @@
# Copyright 2018 Red Hat, Inc.
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,14 +13,14 @@
# under the License.
#

import mock
import unittest
from unittest import mock
from unittest import TestCase

from validations_libs.tests import fakes
from validations_libs.run import Run


class TestValidatorRun(unittest.TestCase):
class TestValidatorRun(TestCase):

    def setUp(self):
        super(TestValidatorRun, self).setUp()
@@ -21,7 +21,7 @@ import shutil
import tempfile
import yaml

from prettytable import PrettyTable
#from prettytable import PrettyTable
from validations_libs import constants

RED = "\033[1;31m"
@@ -238,81 +238,81 @@ def get_new_validations_logs_on_disk():
    return files


def get_results(results):
    """Get validations results and return as PrettytTable format"""
    new_log_files = get_new_validations_logs_on_disk()
#def get_results(results):
#    """Get validations results and return as PrettytTable format"""
#    new_log_files = get_new_validations_logs_on_disk()

    for i in new_log_files:
        val_id = "{}.yaml".format(i.split('_')[1])
        for res in results:
            if res['validation'].get('validation_id') == val_id:
                res['validation']['logfile'] = \
                    os.path.join(constants.VALIDATIONS_LOG_BASEDIR, i)
#    for i in new_log_files:
#        val_id = "{}.yaml".format(i.split('_')[1])
#        for res in results:
#            if res['validation'].get('validation_id') == val_id:
#                res['validation']['logfile'] = \
#                    os.path.join(constants.VALIDATIONS_LOG_BASEDIR, i)

    t = PrettyTable(border=True, header=True, padding_width=1)
    t.field_names = [
        "UUID", "Validations", "Status", "Host Group(s)",
        "Status by Host", "Unreachable Host(s)", "Duration"]
#    t = PrettyTable(border=True, header=True, padding_width=1)
#    t.field_names = [
#        "UUID", "Validations", "Status", "Host Group(s)",
#        "Status by Host", "Unreachable Host(s)", "Duration"]

    for validation in results:
        r = []
        logfile = validation['validation'].get('logfile', None)
        if logfile and os.path.exists(logfile):
            with open(logfile, 'r') as val:
                contents = json.load(val)
#    for validation in results:
#        r = []
#        logfile = validation['validation'].get('logfile', None)
#        if logfile and os.path.exists(logfile):
#            with open(logfile, 'r') as val:
#                contents = json.load(val)

            for i in contents['plays']:
                host = [
                    x.encode('utf-8')
                    for x in i['play'].get('host').split(', ')
                ]
                val_id = i['play'].get('validation_id')
                time_elapsed = \
                    i['play']['duration'].get('time_elapsed', None)
#            for i in contents['plays']:
#                host = [
#                    x.encode('utf-8')
#                    for x in i['play'].get('host').split(', ')
#                ]
#                val_id = i['play'].get('validation_id')
#                time_elapsed = \
#                    i['play']['duration'].get('time_elapsed', None)

        r.append(contents['plays'][0]['play'].get('id'))
        r.append(val_id)
        if validation['validation'].get('status') == "PASSED":
            r.append(PASSED_VALIDATION)
        else:
            r.append(FAILED_VALIDATION)
#        r.append(contents['plays'][0]['play'].get('id'))
#        r.append(val_id)
#        if validation['validation'].get('status') == "PASSED":
#            r.append(PASSED_VALIDATION)
#        else:
#            r.append(FAILED_VALIDATION)

        unreachable_hosts = []
        hosts_result = []
        for h in list(contents['stats'].keys()):
            ht = h.encode('utf-8')
            if contents['stats'][ht]['unreachable'] != 0:
                unreachable_hosts.append(ht)
            elif contents['stats'][ht]['failures'] != 0:
                hosts_result.append("{}{}{}".format(
                    RED, ht, RESET))
            else:
                hosts_result.append("{}{}{}".format(
                    GREEN, ht, RESET))
#        unreachable_hosts = []
#        hosts_result = []
#        for h in list(contents['stats'].keys()):
#            ht = h.encode('utf-8')
#            if contents['stats'][ht]['unreachable'] != 0:
#                unreachable_hosts.append(ht)
#            elif contents['stats'][ht]['failures'] != 0:
#                hosts_result.append("{}{}{}".format(
#                    RED, ht, RESET))
#            else:
#                hosts_result.append("{}{}{}".format(
#                    GREEN, ht, RESET))

        r.append(", ".join(host))
        r.append(", ".join(hosts_result))
        r.append("{}{}{}".format(RED,
                                 ", ".join(unreachable_hosts),
                                 RESET))
        r.append(time_elapsed)
        t.add_row(r)
#        r.append(", ".join(host))
#        r.append(", ".join(hosts_result))
#        r.append("{}{}{}".format(RED,
#                                 ", ".join(unreachable_hosts),
#                                 RESET))
#        r.append(time_elapsed)
#        t.add_row(r)

    t.sortby = "UUID"
    for field in t.field_names:
        if field == "Status":
            t.align['Status'] = "l"
        else:
            t.align[field] = "l"
#    t.sortby = "UUID"
#    for field in t.field_names:
#        if field == "Status":
#            t.align['Status'] = "l"
#        else:
#            t.align[field] = "l"

    print(t)
#    print(t)

    if len(new_log_files) > len(results):
        LOG.warn('Looks like we have more log files than '
                 'executed validations')
#    if len(new_log_files) > len(results):
#        LOG.warn('Looks like we have more log files than '
#                 'executed validations')

    for i in new_log_files:
        os.rename(
            "{}/{}".format(constants.VALIDATIONS_LOG_BASEDIR,
                           i), "{}/processed_{}".format(
                constants.VALIDATIONS_LOG_BASEDIR, i))
#    for i in new_log_files:
#        os.rename(
#            "{}/{}".format(constants.VALIDATIONS_LOG_BASEDIR,
#                           i), "{}/processed_{}".format(
#                constants.VALIDATIONS_LOG_BASEDIR, i))