Add unit tests for ansible runner library

Mathieu Bultel 2020-03-09 14:21:03 +01:00
parent 0fb4205479
commit 32d15b6a1b
8 changed files with 226 additions and 79 deletions

View File

@@ -4,3 +4,5 @@
 pbr>=3.1.1 # Apache-2.0
 six>=1.11.0 # MIT
+ansible-runner
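
For context, ansible-runner is the library the new unit tests exercise. A minimal sketch of its Python entry point (the private_data_dir and playbook paths below are hypothetical, for illustration only):

import ansible_runner

# run() executes the playbook and returns a Runner object exposing
# .status (e.g. 'successful' or 'failed') and .rc (the exit code).
result = ansible_runner.run(private_data_dir='/tmp/demo',
                            playbook='site.yaml',
                            inventory='localhost,')
print(result.status, result.rc)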

View File

@@ -4,7 +4,7 @@
 openstackdocstheme>=1.20.0 # Apache-2.0
 hacking<0.12,>=0.11.0 # Apache-2.0
-mock
 coverage!=4.4,>=4.0 # Apache-2.0
 python-subunit>=1.0.0 # Apache-2.0/BSD
 sphinx>=1.8.0,<2.0.0;python_version=='2.7' # BSD
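
The external mock package can be dropped because unittest.mock has been part of the standard library since Python 3.3. A project that still had to support Python 2 would need a guarded import, roughly:

try:
    from unittest import mock  # Python 3: shipped in the standard library
except ImportError:
    import mock  # Python 2 fallback; requires the external 'mock' package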

View File

@@ -13,12 +13,12 @@
 # under the License.
 #
-import constants
 import logging
 import os
 import six

 from validations_libs.ansible import Ansible as v_ansible
+from validations_libs import constants
 from validations_libs import utils as v_utils

 LOG = logging.getLogger(__name__ + ".run")

View File

@@ -13,7 +13,7 @@
 # under the License.
 #
-import mock
+from unittest import mock

 VALIDATIONS_LIST = [{
@@ -139,3 +139,7 @@ VALIDATIONS_LOGS_CONTENTS_LIST = [{
     },
     'validation_output': []
 }]
+
+
+def fake_ansible_runner_run_return(status='successful', rc=0):
+    return status, rc
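
The helper mirrors what ansible-runner's Runner.run() returns, a (status, rc) tuple, so the tests can patch the real method without spawning an Ansible process. A minimal sketch of how it is meant to be consumed:

from unittest import mock

from ansible_runner import Runner
from validations_libs.tests import fakes

# Replace the real Runner.run with the fake's (status, rc) tuple;
# no Ansible process is started.
with mock.patch.object(
        Runner, 'run',
        return_value=fakes.fake_ansible_runner_run_return(rc=0)):
    status, rc = Runner.run()  # ('successful', 0)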

View File

@@ -0,0 +1,141 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from unittest import mock
from unittest import TestCase

from ansible_runner import Runner

from validations_libs.ansible import Ansible
from validations_libs.tests import fakes
from validations_libs import utils


class TestAnsible(TestCase):

    def setUp(self):
        super(TestAnsible, self).setUp()
        self.unlink_patch = mock.patch('os.unlink')
        self.addCleanup(self.unlink_patch.stop)
        self.unlink_patch.start()
        self.run = Ansible()

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    def test_check_no_playbook(self, mock_dump_artifact, mock_exists):
        self.assertRaises(
            RuntimeError,
            self.run.run,
            'non-existing.yaml',
            'localhost,',
            '/tmp'
        )
        mock_exists.assert_called_with('/tmp/non-existing.yaml')

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(
        Runner,
        'run',
        return_value=fakes.fake_ansible_runner_run_return(rc=1,
                                                          status='failed')
    )
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_ansible_runner_error(self, mock_stdout, mock_dump_artifact,
                                  mock_run, mock_mkdirs, mock_exists,
                                  mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run('existing.yaml',
                                                            'localhost,',
                                                            '/tmp')
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 1, 'failed'))

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(rc=0))
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_run_success_default(self, mock_stdout, mock_dump_artifact,
                                 mock_run, mock_mkdirs, mock_exists,
                                 mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run(
            playbook='existing.yaml',
            inventory='localhost,',
            workdir='/tmp'
        )
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 0, 'successful'))

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(rc=0))
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_run_success_gathering_policy(self, mock_stdout,
                                          mock_dump_artifact, mock_run,
                                          mock_mkdirs, mock_exists,
                                          mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run(
            playbook='existing.yaml',
            inventory='localhost,',
            workdir='/tmp',
            connection='local',
            gathering_policy='smart'
        )
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 0, 'successful'))

    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.makedirs')
    @mock.patch.object(Runner, 'run',
                       return_value=fakes.fake_ansible_runner_run_return(rc=0))
    @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                return_value="/foo/inventory.yaml")
    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
                return_value="/tmp/foo.yaml")
    def test_run_success_local(self, mock_stdout,
                               mock_dump_artifact, mock_run,
                               mock_mkdirs, mock_exists,
                               mock_mkdtemp):
        stdout_file, _playbook, _rc, _status = self.run.run(
            playbook='existing.yaml',
            inventory='localhost,',
            workdir='/tmp',
            connection='local'
        )
        self.assertEqual((_playbook, _rc, _status),
                         ('existing.yaml', 0, 'successful'))
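
One detail worth noting in these tests: stacked @mock.patch decorators are applied bottom-up, so the mock for the decorator closest to the function definition arrives as the first test argument. In miniature (the patch targets here are arbitrary examples):

from unittest import mock

@mock.patch('os.path.exists')    # outermost decorator: injected last
@mock.patch('tempfile.mkdtemp')  # innermost decorator: injected first
def demo(mock_mkdtemp, mock_exists):
    # Each argument is the MagicMock standing in for the patched target.
    print(mock_mkdtemp, mock_exists)

demo()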

View File

@@ -1,4 +1,4 @@
-# Copyright 2018 Red Hat, Inc.
+# Copyright 2020 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -13,14 +13,14 @@
 # under the License.
 #
-import mock
-import unittest
+from unittest import mock
+from unittest import TestCase

 from validations_libs.tests import fakes
 from validations_libs.list import List

-class TestValidatorList(unittest.TestCase):
+class TestValidatorList(TestCase):

     def setUp(self):
         super(TestValidatorList, self).setUp()

View File

@@ -1,4 +1,4 @@
-# Copyright 2018 Red Hat, Inc.
+# Copyright 2020 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -13,14 +13,14 @@
 # under the License.
 #
-import mock
-import unittest
+from unittest import mock
+from unittest import TestCase

 from validations_libs.tests import fakes
 from validations_libs.run import Run

-class TestValidatorRun(unittest.TestCase):
+class TestValidatorRun(TestCase):

     def setUp(self):
         super(TestValidatorRun, self).setUp()

View File

@@ -21,7 +21,7 @@ import shutil
 import tempfile
 import yaml

-from prettytable import PrettyTable
+#from prettytable import PrettyTable
 from validations_libs import constants

 RED = "\033[1;31m"
@@ -238,81 +238,81 @@ def get_new_validations_logs_on_disk():
     return files


-def get_results(results):
-    """Get validations results and return as PrettytTable format"""
-    new_log_files = get_new_validations_logs_on_disk()
-    for i in new_log_files:
-        val_id = "{}.yaml".format(i.split('_')[1])
-        for res in results:
-            if res['validation'].get('validation_id') == val_id:
-                res['validation']['logfile'] = \
-                    os.path.join(constants.VALIDATIONS_LOG_BASEDIR, i)
-    t = PrettyTable(border=True, header=True, padding_width=1)
-    t.field_names = [
-        "UUID", "Validations", "Status", "Host Group(s)",
-        "Status by Host", "Unreachable Host(s)", "Duration"]
-    for validation in results:
-        r = []
-        logfile = validation['validation'].get('logfile', None)
-        if logfile and os.path.exists(logfile):
-            with open(logfile, 'r') as val:
-                contents = json.load(val)
-            for i in contents['plays']:
-                host = [
-                    x.encode('utf-8')
-                    for x in i['play'].get('host').split(', ')
-                ]
-                val_id = i['play'].get('validation_id')
-                time_elapsed = \
-                    i['play']['duration'].get('time_elapsed', None)
-            r.append(contents['plays'][0]['play'].get('id'))
-            r.append(val_id)
-            if validation['validation'].get('status') == "PASSED":
-                r.append(PASSED_VALIDATION)
-            else:
-                r.append(FAILED_VALIDATION)
-            unreachable_hosts = []
-            hosts_result = []
-            for h in list(contents['stats'].keys()):
-                ht = h.encode('utf-8')
-                if contents['stats'][ht]['unreachable'] != 0:
-                    unreachable_hosts.append(ht)
-                elif contents['stats'][ht]['failures'] != 0:
-                    hosts_result.append("{}{}{}".format(
-                        RED, ht, RESET))
-                else:
-                    hosts_result.append("{}{}{}".format(
-                        GREEN, ht, RESET))
-            r.append(", ".join(host))
-            r.append(", ".join(hosts_result))
-            r.append("{}{}{}".format(RED,
-                                     ", ".join(unreachable_hosts),
-                                     RESET))
-            r.append(time_elapsed)
-            t.add_row(r)
-    t.sortby = "UUID"
-    for field in t.field_names:
-        if field == "Status":
-            t.align['Status'] = "l"
-        else:
-            t.align[field] = "l"
-    print(t)
-    if len(new_log_files) > len(results):
-        LOG.warn('Looks like we have more log files than '
-                 'executed validations')
-    for i in new_log_files:
-        os.rename(
-            "{}/{}".format(constants.VALIDATIONS_LOG_BASEDIR,
-                           i), "{}/processed_{}".format(
-                constants.VALIDATIONS_LOG_BASEDIR, i))
+#def get_results(results):
+#    """Get validations results and return as PrettytTable format"""
+#    new_log_files = get_new_validations_logs_on_disk()
+#    for i in new_log_files:
+#        val_id = "{}.yaml".format(i.split('_')[1])
+#        for res in results:
+#            if res['validation'].get('validation_id') == val_id:
+#                res['validation']['logfile'] = \
+#                    os.path.join(constants.VALIDATIONS_LOG_BASEDIR, i)
+#    t = PrettyTable(border=True, header=True, padding_width=1)
+#    t.field_names = [
+#        "UUID", "Validations", "Status", "Host Group(s)",
+#        "Status by Host", "Unreachable Host(s)", "Duration"]
+#    for validation in results:
+#        r = []
+#        logfile = validation['validation'].get('logfile', None)
+#        if logfile and os.path.exists(logfile):
+#            with open(logfile, 'r') as val:
+#                contents = json.load(val)
+#            for i in contents['plays']:
+#                host = [
+#                    x.encode('utf-8')
+#                    for x in i['play'].get('host').split(', ')
+#                ]
+#                val_id = i['play'].get('validation_id')
+#                time_elapsed = \
+#                    i['play']['duration'].get('time_elapsed', None)
+#            r.append(contents['plays'][0]['play'].get('id'))
+#            r.append(val_id)
+#            if validation['validation'].get('status') == "PASSED":
+#                r.append(PASSED_VALIDATION)
+#            else:
+#                r.append(FAILED_VALIDATION)
+#            unreachable_hosts = []
+#            hosts_result = []
+#            for h in list(contents['stats'].keys()):
+#                ht = h.encode('utf-8')
+#                if contents['stats'][ht]['unreachable'] != 0:
+#                    unreachable_hosts.append(ht)
+#                elif contents['stats'][ht]['failures'] != 0:
+#                    hosts_result.append("{}{}{}".format(
+#                        RED, ht, RESET))
+#                else:
+#                    hosts_result.append("{}{}{}".format(
+#                        GREEN, ht, RESET))
+#            r.append(", ".join(host))
+#            r.append(", ".join(hosts_result))
+#            r.append("{}{}{}".format(RED,
+#                                     ", ".join(unreachable_hosts),
+#                                     RESET))
+#            r.append(time_elapsed)
+#            t.add_row(r)
+#    t.sortby = "UUID"
+#    for field in t.field_names:
+#        if field == "Status":
+#            t.align['Status'] = "l"
+#        else:
+#            t.align[field] = "l"
+#    print(t)
+#    if len(new_log_files) > len(results):
+#        LOG.warn('Looks like we have more log files than '
+#                 'executed validations')
+#    for i in new_log_files:
+#        os.rename(
+#            "{}/{}".format(constants.VALIDATIONS_LOG_BASEDIR,
+#                           i), "{}/processed_{}".format(
+#                constants.VALIDATIONS_LOG_BASEDIR, i))
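
Commenting out get_results disables the PrettyTable rendering of validation results. For reference, the table shape the disabled helper printed reduces to a few PrettyTable calls (the row values below are invented sample data):

from prettytable import PrettyTable

# Invented sample row, to show the left-aligned table the helper produced.
t = PrettyTable(border=True, header=True, padding_width=1)
t.field_names = ["UUID", "Validations", "Status"]
t.add_row(["5afb1597", "check-ram.yaml", "PASSED"])
t.sortby = "UUID"
for field in t.field_names:
    t.align[field] = "l"
print(t)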