From 84aa2b68465bc146cf2cb0135ddd6d4fe34b5bda Mon Sep 17 00:00:00 2001
From: "Gael Chamoulaud (Strider)"
Date: Wed, 11 Mar 2020 17:07:56 +0100
Subject: [PATCH] Improve the way we log on the file system

Improve the way we log on the file system and share the same UUID
between the callback logging and the ansible-runner artifact logging.

This patch also:
* Adds the zuul.d/layout.yaml file
* Adds the .gitreview file
* Fixes the ansible-runner requirements issue

Co-Authored-By: Gael Chamoulaud
Change-Id: I4da8c1974362029754aa5b09cd8e6c7d6a27dd0d
Signed-off-by: Gael Chamoulaud (Strider)
---
 .gitreview                             |  4 ++
 lower-constraints.txt                  |  1 +
 requirements.txt                       |  2 +-
 test-requirements.txt                  |  4 +-
 validations_libs/ansible.py            | 71 +++++++++----------
 validations_libs/constants.py          |  2 +
 validations_libs/run.py                | 51 ++++++-------
 validations_libs/tests/fakes.py        |  2 -
 validations_libs/tests/test_ansible.py | 60 +++++++---------
 .../tests/test_validations_run.py      | 26 ++++---
 validations_libs/utils.py              | 52 +++++++++++++-
 zuul.d/layout.yaml                     | 12 ++++
 12 files changed, 176 insertions(+), 111 deletions(-)
 create mode 100644 .gitreview
 create mode 100644 zuul.d/layout.yaml

diff --git a/.gitreview b/.gitreview
new file mode 100644
index 00000000..f7b61cb1
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.opendev.org
+port=29418
+project=openstack/validations-libs.git
diff --git a/lower-constraints.txt b/lower-constraints.txt
index 7dcfd836..2b0205e9 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -1,6 +1,7 @@
 alabaster==0.7.10
 ansible-lint==3.4.21
 ansible==2.4.3.0
+ansible-runner==1.4.4
 anyjson==0.3.3
 appdirs==1.4.3
 asn1crypto==0.24.0
diff --git a/requirements.txt b/requirements.txt
index 8542bab3..9712080f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,4 @@
 pbr>=3.1.1 # Apache-2.0
 six>=1.11.0 # MIT
 
-ansible-runner
+ansible-runner>=1.4.4 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index b4f0f9ec..d6a99f42 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,7 +4,8 @@
 
 openstackdocstheme>=1.20.0 # Apache-2.0
 hacking<0.12,>=0.11.0 # Apache-2.0
-mock
+
+mock>=2.0.0 # BSD
 coverage!=4.4,>=4.0 # Apache-2.0
 python-subunit>=1.0.0 # Apache-2.0/BSD
 sphinx>=1.8.0,<2.0.0;python_version=='2.7' # BSD
@@ -12,5 +13,4 @@ sphinx>=1.8.0,!=2.1.0;python_version>='3.4' # BSD
 testrepository>=0.0.18 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
 testtools>=2.2.0 # MIT
-reno>=2.5.0 # Apache-2.0
 pre-commit # MIT
diff --git a/validations_libs/ansible.py b/validations_libs/ansible.py
index 38b81fc8..4b678c9d 100644
--- a/validations_libs/ansible.py
+++ b/validations_libs/ansible.py
@@ -20,12 +20,10 @@ import os
 import six
 import sys
 import tempfile
-import uuid
 import yaml
 
 from six.moves import configparser
 from validations_libs import constants
-from validations_libs import utils
 
 LOG = logging.getLogger(__name__ + ".ansible")
 
@@ -40,8 +38,9 @@ except NameError:
 
 class Ansible(object):
 
-    def __init__(self):
+    def __init__(self, uuid=None):
         self.log = logging.getLogger(__name__ + ".Ansible")
+        self.uuid = uuid
 
     def _playbook_check(self, play, playbook_dir=None):
         """Check if playbook exist"""
@@ -216,6 +215,9 @@ class Ansible(object):
         env['ANSIBLE_TRANSPORT'] = connection
         env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200
 
+        if self.uuid:
+            env['ANSIBLE_UUID'] = self.uuid
+
         if connection == 'local':
             env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable
 
@@ -268,7 +270,7 @@ class Ansible(object):
             gathering_policy='smart', extra_env_variables=None,
             parallel_run=False, callback_whitelist=None, ansible_cfg=None,
-            ansible_timeout=30):
+            ansible_timeout=30, ansible_artifact_path=None):
 
         if not playbook_dir:
             playbook_dir = workdir
 
@@ -284,9 +286,8 @@ class Ansible(object):
                 )
             )
 
-        ansible_fact_path = self._creates_ansible_fact_dir()
+        # ansible_fact_path = self._creates_ansible_fact_dir()
         extravars = self._get_extra_vars(extra_vars)
-
         callback_whitelist = self._callback_whitelist(callback_whitelist,
                                                       output_callback)
 
@@ -295,36 +296,34 @@ class Ansible(object):
             connection, gathering_policy, module_path, key,
             extra_env_variables, ansible_timeout, callback_whitelist)
 
+        if not ansible_artifact_path:
+            ansible_artifact_path = constants.VALIDATION_ANSIBLE_ARTIFACT_PATH
+        if 'ANSIBLE_CONFIG' not in env and not ansible_cfg:
+            ansible_cfg = os.path.join(ansible_artifact_path, 'ansible.cfg')
+            config = configparser.ConfigParser()
+            config.add_section('defaults')
+            config.set('defaults', 'internal_poll_interval', '0.05')
+            with open(ansible_cfg, 'w') as f:
+                config.write(f)
+            env['ANSIBLE_CONFIG'] = ansible_cfg
+        elif 'ANSIBLE_CONFIG' not in env and ansible_cfg:
+            env['ANSIBLE_CONFIG'] = ansible_cfg
 
-        command_path = None
-
-        with utils.TempDirs(dir_path=constants.VALIDATION_RUN_LOG_PATH,
-                            chdir=False,) as ansible_artifact_path:
-            if 'ANSIBLE_CONFIG' not in env and not ansible_cfg:
-                ansible_cfg = os.path.join(ansible_artifact_path, 'ansible.cfg')
-                config = configparser.ConfigParser()
-                config.add_section('defaults')
-                config.set('defaults', 'internal_poll_interval', '0.05')
-                with open(ansible_cfg, 'w') as f:
-                    config.write(f)
-                env['ANSIBLE_CONFIG'] = ansible_cfg
-            elif 'ANSIBLE_CONFIG' not in env and ansible_cfg:
-                env['ANSIBLE_CONFIG'] = ansible_cfg
-
-            r_opts = {
-                'private_data_dir': workdir,
-                'project_dir': playbook_dir,
-                'inventory': self._inventory(inventory, ansible_artifact_path),
-                'envvars': self._encode_envvars(env=env),
-                'playbook': playbook,
-                'verbosity': verbosity,
-                'quiet': quiet,
-                'extravars': extravars,
-                'fact_cache': ansible_fact_path,
-                'fact_cache_type': 'jsonfile',
-                'artifact_dir': ansible_artifact_path,
-                'rotate_artifacts': 256
-            }
+        r_opts = {
+            'private_data_dir': workdir,
+            'project_dir': playbook_dir,
+            'inventory': self._inventory(inventory, ansible_artifact_path),
+            'envvars': self._encode_envvars(env=env),
+            'playbook': playbook,
+            'verbosity': verbosity,
+            'quiet': quiet,
+            'extravars': extravars,
+            'fact_cache': ansible_artifact_path,
+            'fact_cache_type': 'jsonfile',
+            'artifact_dir': workdir,
+            'rotate_artifacts': 256,
+            'ident': ''
+        }
 
         if skip_tags:
             r_opts['skip_tags'] = skip_tags
@@ -351,4 +350,4 @@ class Ansible(object):
         runner = ansible_runner.Runner(config=runner_config)
         status, rc = runner.run()
 
-        return runner.stdout.name, playbook, rc, status
+        return playbook, rc, status
diff --git a/validations_libs/constants.py b/validations_libs/constants.py
index be27233e..765f61d7 100644
--- a/validations_libs/constants.py
+++ b/validations_libs/constants.py
@@ -23,3 +23,5 @@ VALIDATION_GROUPS = ['no-op', 'post']
 
 VALIDATION_RUN_LOG_PATH = '/var/lib/validations/logs'
 
+VALIDATIONS_LOG_BASEDIR = '/var/logs/validations/'
+VALIDATION_ANSIBLE_ARTIFACT_PATH = '/var/lib/validations/artifacts/'
diff --git a/validations_libs/run.py b/validations_libs/run.py
index e7391f87..c175e1d0 100644
--- a/validations_libs/run.py
+++ b/validations_libs/run.py
@@ -15,7 +15,6 @@
 import logging
 import os
-import six
 
 from validations_libs.ansible import Ansible as v_ansible
 from validations_libs import constants
 
@@ -32,7 +31,7 @@ class Run(object):
     def run_validations(self, playbook=[], inventory='localhost',
                         group=None, extra_vars=None, validations_dir=None,
                         validation_name=None, extra_env_vars=None,
-                        ansible_cfg=None, quiet=True):
+                        ansible_cfg=None, quiet=True, workdir=None):
 
         self.log = logging.getLogger(__name__ + ".run_validations")
 
@@ -63,33 +62,35 @@ class Run(object):
                 raise("Please, use '--group' argument instead of "
                       "'--validation' to run validation(s) by their "
                       "name(s)."
-                     )
+                      )
             else:
                 raise RuntimeError("No validations found")
 
-        run_ansible = v_ansible()
         self.log.debug('Running the validations with Ansible')
         results = []
-        with v_utils.TempDirs(chdir=False) as tmp:
-            for playbook in playbooks:
-                stdout_file, _playbook, _rc, _status = run_ansible.run(
-                    workdir=tmp,
-                    playbook=playbook,
-                    playbook_dir=(validations_dir if
-                                  validations_dir else
-                                  constants.ANSIBLE_VALIDATION_DIR),
-                    parallel_run=True,
-                    inventory=inventory,
-                    output_callback='validation_json',
-                    quiet=quiet,
-                    extra_vars=extra_vars,
-                    extra_env_variables=extra_env_vars,
-                    ansible_cfg=ansible_cfg,
-                    gathering_policy='explicit')
+        for playbook in playbooks:
+            validation_uuid, artifacts_dir = v_utils.create_artifacts_dir(
+                prefix=os.path.basename(playbook))
+            run_ansible = v_ansible(validation_uuid)
+            _playbook, _rc, _status = run_ansible.run(
+                workdir=artifacts_dir,
+                playbook=playbook,
+                playbook_dir=(validations_dir if
+                              validations_dir else
+                              constants.ANSIBLE_VALIDATION_DIR),
+                parallel_run=True,
+                inventory=inventory,
+                output_callback='validation_json',
+                quiet=quiet,
+                extra_vars=extra_vars,
+                extra_env_variables=extra_env_vars,
+                ansible_cfg=ansible_cfg,
+                gathering_policy='explicit',
+                ansible_artifact_path=artifacts_dir)
             results.append({'validation': {
-                                'playbook': _playbook,
-                                'rc_code': _rc,
-                                'status': _status,
-                                'stdout_file': stdout_file
-                                }})
+                'playbook': _playbook,
+                'rc_code': _rc,
+                'status': _status,
+                'validation_id': validation_uuid
+                }})
         return results
diff --git a/validations_libs/tests/fakes.py b/validations_libs/tests/fakes.py
index d9708f93..481306c3 100644
--- a/validations_libs/tests/fakes.py
+++ b/validations_libs/tests/fakes.py
@@ -13,8 +13,6 @@
 # under the License.
 #
 
-from unittest import mock
-
 VALIDATIONS_LIST = [{
     'description': 'My Validation One Description',
     'groups': ['prep', 'pre-deployment'],
diff --git a/validations_libs/tests/test_ansible.py b/validations_libs/tests/test_ansible.py
index 02a4009b..89225f4c 100644
--- a/validations_libs/tests/test_ansible.py
+++ b/validations_libs/tests/test_ansible.py
@@ -19,7 +19,6 @@ from unittest import TestCase
 from ansible_runner import Runner
 from validations_libs.ansible import Ansible
 from validations_libs.tests import fakes
-from validations_libs import utils
 
 
 class TestAnsible(TestCase):
@@ -44,7 +43,7 @@ class TestAnsible(TestCase):
         )
         mock_exists.assert_called_with('/tmp/non-existing.yaml')
 
-    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
+    @mock.patch('six.moves.builtins.open')
     @mock.patch('os.path.exists', return_value=True)
     @mock.patch('os.makedirs')
     @mock.patch.object(
@@ -55,32 +54,29 @@ class TestAnsible(TestCase):
     )
     @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                 return_value="/foo/inventory.yaml")
-    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
-                return_value="/tmp/foo.yaml")
-    def test_ansible_runner_error(self, mock_stdout, mock_dump_artifact,
+    @mock.patch('ansible_runner.runner_config.RunnerConfig')
+    def test_ansible_runner_error(self, mock_config, mock_dump_artifact,
                                   mock_run, mock_mkdirs, mock_exists,
-                                  mock_mkdtemp):
+                                  mock_open):
 
-        stdout_file, _playbook, _rc, _status = self.run.run('existing.yaml',
-                                                            'localhost,',
-                                                            '/tmp')
+        _playbook, _rc, _status = self.run.run('existing.yaml',
+                                               'localhost,',
+                                               '/tmp')
         self.assertEquals((_playbook, _rc, _status),
                           ('existing.yaml', 1, 'failed'))
 
-    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
+    @mock.patch('six.moves.builtins.open')
    @mock.patch('os.path.exists', return_value=True)
     @mock.patch('os.makedirs')
     @mock.patch.object(Runner, 'run',
-                       return_value=fakes.fake_ansible_runner_run_return(rc=0)
-                       )
+                       return_value=fakes.fake_ansible_runner_run_return(rc=0))
     @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                 return_value="/foo/inventory.yaml")
-    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
-                return_value="/tmp/foo.yaml")
-    def test_run_success_default(self, mock_stdout, mock_dump_artifact,
+    @mock.patch('ansible_runner.runner_config.RunnerConfig')
+    def test_run_success_default(self, mock_config, mock_dump_artifact,
                                  mock_run, mock_mkdirs, mock_exists,
-                                 mock_mkstemp):
-        stdout_file, _playbook, _rc, _status = self.run.run(
+                                 mock_open):
+        _playbook, _rc, _status = self.run.run(
             playbook='existing.yaml',
             inventory='localhost,',
             workdir='/tmp'
@@ -88,21 +84,19 @@ class TestAnsible(TestCase):
         self.assertEquals((_playbook, _rc, _status),
                           ('existing.yaml', 0, 'successful'))
 
-    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
+    @mock.patch('six.moves.builtins.open')
     @mock.patch('os.path.exists', return_value=True)
     @mock.patch('os.makedirs')
     @mock.patch.object(Runner, 'run',
-                       return_value=fakes.fake_ansible_runner_run_return(rc=0)
-                       )
+                       return_value=fakes.fake_ansible_runner_run_return(rc=0))
     @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                 return_value="/foo/inventory.yaml")
-    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
-                return_value="/tmp/foo.yaml")
-    def test_run_success_gathering_policy(self, mock_stdout,
+    @mock.patch('ansible_runner.runner_config.RunnerConfig')
+    def test_run_success_gathering_policy(self, mock_config,
                                           mock_dump_artifact, mock_run,
                                           mock_mkdirs, mock_exists,
-                                          mock_mkstemp):
-        stdout_file, _playbook, _rc, _status = self.run.run(
+                                          mock_open):
+        _playbook, _rc, _status = self.run.run(
             playbook='existing.yaml',
             inventory='localhost,',
             workdir='/tmp',
@@ -112,21 +106,19 @@ class TestAnsible(TestCase):
         self.assertEquals((_playbook, _rc, _status),
                           ('existing.yaml', 0, 'successful'))
 
-    @mock.patch('tempfile.mkdtemp', return_value='/tmp/')
     @mock.patch('os.path.exists', return_value=True)
     @mock.patch('os.makedirs')
     @mock.patch.object(Runner, 'run',
-                       return_value=fakes.fake_ansible_runner_run_return(rc=0)
-                       )
+                       return_value=fakes.fake_ansible_runner_run_return(rc=0))
     @mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
                 return_value="/foo/inventory.yaml")
-    @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
-                return_value="/tmp/foo.yaml")
-    def test_run_success_local(self, mock_stdout,
+    @mock.patch('six.moves.builtins.open')
+    @mock.patch('ansible_runner.runner_config.RunnerConfig')
+    def test_run_success_local(self, mock_config, mock_open,
                                mock_dump_artifact, mock_run,
-                               mock_mkdirs, mock_exists,
-                               mock_mkstemp):
-        stdout_file, _playbook, _rc, _status = self.run.run(
+                               mock_mkdirs, mock_exists
+                               ):
+        _playbook, _rc, _status = self.run.run(
             playbook='existing.yaml',
             inventory='localhost,',
             workdir='/tmp',
diff --git a/validations_libs/tests/test_validations_run.py b/validations_libs/tests/test_validations_run.py
index 847d24f3..02ef9bc2 100644
--- a/validations_libs/tests/test_validations_run.py
+++ b/validations_libs/tests/test_validations_run.py
@@ -27,7 +27,9 @@ class TestValidatorRun(TestCase):
 
     @mock.patch('validations_libs.utils.parse_all_validations_on_disk')
     @mock.patch('validations_libs.ansible.Ansible.run')
-    def test_validation_run_success(self, mock_ansible_run,
+    @mock.patch('validations_libs.utils.create_artifacts_dir',
+                return_value=('1234', '/tmp/'))
+    def test_validation_run_success(self, mock_tmp, mock_ansible_run,
                                     mock_validation_dir):
         mock_validation_dir.return_value = [{
             'description': 'My Validation One Description',
@@ -35,14 +37,17 @@ class TestValidatorRun(TestCase):
             'id': 'foo',
             'name': 'My Validition One Name',
             'parameters': {}}]
-        mock_ansible_run.return_value = ('/tmp/validation/stdout.log',
-                                         'foo.yaml', 0, 'successful')
+        mock_ansible_run.return_value = ('foo.yaml', 0, 'successful')
 
         expected_run_return = [
             {'validation': {'playbook': 'foo.yaml',
                             'rc_code': 0,
                             'status': 'successful',
-                            'stdout_file': '/tmp/validation/stdout.log'}}]
+                            'validation_id': '1234'}},
+            {'validation': {'playbook': 'foo.yaml',
+                            'rc_code': 0,
+                            'status': 'successful',
+                            'validation_id': '1234'}}]
 
         playbook = ['fake.yaml']
         inventory = 'tmp/inventory.yaml'
@@ -55,7 +60,9 @@ class TestValidatorRun(TestCase):
 
     @mock.patch('validations_libs.utils.parse_all_validations_on_disk')
     @mock.patch('validations_libs.ansible.Ansible.run')
-    def test_validation_run_failed(self, mock_ansible_run,
+    @mock.patch('validations_libs.utils.create_artifacts_dir',
+                return_value=('1234', '/tmp/'))
+    def test_validation_run_failed(self, mock_tmp, mock_ansible_run,
                                    mock_validation_dir):
         mock_validation_dir.return_value = [{
             'description': 'My Validation One Description',
@@ -63,14 +70,17 @@ class TestValidatorRun(TestCase):
             'id': 'foo',
             'name': 'My Validition One Name',
             'parameters': {}}]
-        mock_ansible_run.return_value = ('/tmp/validation/stdout.log',
-                                         'foo.yaml', 0, 'failed')
+        mock_ansible_run.return_value = ('foo.yaml', 0, 'failed')
 
         expected_run_return = [
             {'validation': {'playbook': 'foo.yaml',
                             'rc_code': 0,
                             'status': 'failed',
-                            'stdout_file': '/tmp/validation/stdout.log'}}]
+                            'validation_id': '1234'}},
+            {'validation': {'playbook': 'foo.yaml',
+                            'rc_code': 0,
+                            'status': 'failed',
+                            'validation_id': '1234'}}]
 
         playbook = ['fake.yaml']
         inventory = 'tmp/inventory.yaml'
diff --git a/validations_libs/utils.py b/validations_libs/utils.py
index 9b39dc03..6bdb7dba 100644
--- a/validations_libs/utils.py
+++ b/validations_libs/utils.py
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
+import datetime
 import glob
 import json
 import logging
@@ -22,6 +23,7 @@ import tempfile
 import yaml
 
 from validations_libs import constants
+from uuid import uuid4
 
 RED = "\033[1;31m"
 GREEN = "\033[0;32m"
@@ -125,12 +127,35 @@ class TempDirs(object):
         LOG.info("Temporary directory [ %s ] cleaned up" % self.dir)
 
 
+def current_time():
+    return '%sZ' % datetime.datetime.utcnow().isoformat()
+
+
+def create_artifacts_dir(dir_path=None, prefix=None):
+    dir_path = (dir_path if dir_path else
+                constants.VALIDATION_ANSIBLE_ARTIFACT_PATH)
+    validation_uuid = str(uuid4())
+    log_dir = "{}/{}_{}_{}".format(dir_path, validation_uuid,
+                                   (prefix if prefix else ''), current_time())
+    try:
+        os.makedirs(log_dir)
+        return validation_uuid, log_dir
+    except OSError:
+        LOG.exception("Error while creating Ansible artifacts log file. "
+                      "Please check the access rights for {}".format(log_dir))
+
+
 def parse_all_validations_on_disk(path, groups=None):
     results = []
     validations_abspath = glob.glob("{path}/*.yaml".format(path=path))
 
+    if isinstance(groups, six.string_types):
+        group_list = []
+        group_list.append(groups)
+        groups = group_list
+
     for pl in validations_abspath:
-        validation_id, ext = os.path.splitext(os.path.basename(pl))
+        validation_id, _ext = os.path.splitext(os.path.basename(pl))
         with open(pl, 'r') as val_playbook:
             contents = yaml.safe_load(val_playbook)
 
@@ -167,6 +192,27 @@ def parse_all_validation_groups_on_disk(groups_file_path=None):
     return results
 
 
+def parse_all_validations_logs_on_disk(uuid_run=None, validation_id=None):
+    results = []
+    path = constants.VALIDATIONS_LOG_BASEDIR
+    logfile = "{}/*.json".format(path)
+
+    if validation_id:
+        logfile = "{}/*_{}_*.json".format(path, validation_id)
+
+    if uuid_run:
+        logfile = "{}/*_{}_*.json".format(path, uuid_run)
+
+    logfiles_path = glob.glob(logfile)
+
+    for logfile_path in logfiles_path:
+        with open(logfile_path, 'r') as log:
+            contents = json.load(log)
+            results.append(contents)
+
+    return results
+
+
 def get_validation_metadata(validation, key):
     default_metadata = {
         'name': 'Unnamed',
@@ -224,11 +270,11 @@ def get_validation_group_name_list():
     return results
 
 
-def get_new_validations_logs_on_disk():
+def get_new_validations_logs_on_disk(validations_logs_dir):
     """Return a list of new log execution filenames
     """
     files = []
-    for root, dirs, filenames in os.walk(constants.VALIDATIONS_LOG_BASEDIR):
+    for root, dirs, filenames in os.walk(validations_logs_dir):
         files = [
             f for f in filenames if not f.startswith('processed')
            and os.path.splitext(f)[1] == '.json'
diff --git a/zuul.d/layout.yaml b/zuul.d/layout.yaml
new file mode 100644
index 00000000..0d62a806
--- /dev/null
+++ b/zuul.d/layout.yaml
@@ -0,0 +1,12 @@
+- project:
+    templates:
+      - openstack-python3-ussuri-jobs
+      - check-requirements
+    check:
+      jobs:
+        - openstack-tox-linters
+        - openstack-tox-lower-constraints
+    gate:
+      jobs:
+        - openstack-tox-linters
+        - openstack-tox-lower-constraints
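
Usage sketch (editor's addition, not part of the diff above): the change pairs a
per-run artifacts directory with a shared UUID. validations_libs.utils.create_artifacts_dir()
generates the UUID and the directory, and Ansible(uuid=...) exports that UUID as
ANSIBLE_UUID so the callback logging and the ansible-runner artifacts can be correlated.
A minimal example under those assumptions; the playbook name and inventory below are
illustrative only:

    from validations_libs.ansible import Ansible
    from validations_libs import utils as v_utils

    # One UUID and one artifacts directory per validation run, created under
    # constants.VALIDATION_ANSIBLE_ARTIFACT_PATH ('/var/lib/validations/artifacts/').
    validation_uuid, artifacts_dir = v_utils.create_artifacts_dir(
        prefix='check-ram.yaml')

    # The same UUID is handed to the Ansible wrapper, which exports it through
    # the ANSIBLE_UUID environment variable and writes the runner artifacts
    # into the same per-run directory.
    run_ansible = Ansible(validation_uuid)
    playbook, rc, status = run_ansible.run(
        workdir=artifacts_dir,
        playbook='check-ram.yaml',
        inventory='localhost,',
        output_callback='validation_json',
        quiet=True,
        ansible_artifact_path=artifacts_dir)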