Improve the way we log on the file system

Improve the way we log on the file system, and share
the same UUID between callback logging, the ansible-runner
execution, and artifact logging

This patch also adds zuul.d directory:
* Adds zuul.d/layout.yaml
* Adds .gitreview file
* Fixes ansible-runner requirements issue

Co-Authored-By: Gael Chamoulaud <gchamoul@redhat.com>
Change-Id: I4da8c1974362029754aa5b09cd8e6c7d6a27dd0d
Signed-off-by: Gael Chamoulaud (Strider) <gchamoul@redhat.com>
This commit is contained in:
Gael Chamoulaud (Strider) 2020-03-11 17:07:56 +01:00
parent 06892d8c65
commit 84aa2b6846
No known key found for this signature in database
GPG Key ID: 4119D0305C651D66
12 changed files with 176 additions and 111 deletions

4
.gitreview Normal file
View File

@ -0,0 +1,4 @@
[gerrit]
host=review.opendev.org
port=29418
project=openstack/validations-libs.git

View File

@ -1,6 +1,7 @@
alabaster==0.7.10
ansible-lint==3.4.21
ansible==2.4.3.0
ansible-runner==1.4.4
anyjson==0.3.3
appdirs==1.4.3
asn1crypto==0.24.0

View File

@ -5,4 +5,4 @@
pbr>=3.1.1 # Apache-2.0
six>=1.11.0 # MIT
ansible-runner
ansible-runner>=1.4.4 # Apache-2.0

View File

@ -4,7 +4,8 @@
openstackdocstheme>=1.20.0 # Apache-2.0
hacking<0.12,>=0.11.0 # Apache-2.0
mock
mock>=2.0.0 # BSD
coverage!=4.4,>=4.0 # Apache-2.0
python-subunit>=1.0.0 # Apache-2.0/BSD
sphinx>=1.8.0,<2.0.0;python_version=='2.7' # BSD
@ -12,5 +13,4 @@ sphinx>=1.8.0,!=2.1.0;python_version>='3.4' # BSD
testrepository>=0.0.18 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
reno>=2.5.0 # Apache-2.0
pre-commit # MIT

View File

@ -20,12 +20,10 @@ import os
import six
import sys
import tempfile
import uuid
import yaml
from six.moves import configparser
from validations_libs import constants
from validations_libs import utils
LOG = logging.getLogger(__name__ + ".ansible")
@ -40,8 +38,9 @@ except NameError:
class Ansible(object):
def __init__(self):
def __init__(self, uuid=None):
self.log = logging.getLogger(__name__ + ".Ansible")
self.uuid = uuid
def _playbook_check(self, play, playbook_dir=None):
"""Check if playbook exist"""
@ -216,6 +215,9 @@ class Ansible(object):
env['ANSIBLE_TRANSPORT'] = connection
env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200
if self.uuid:
env['ANSIBLE_UUID'] = self.uuid
if connection == 'local':
env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable
@ -268,7 +270,7 @@ class Ansible(object):
gathering_policy='smart',
extra_env_variables=None, parallel_run=False,
callback_whitelist=None, ansible_cfg=None,
ansible_timeout=30):
ansible_timeout=30, ansible_artifact_path=None):
if not playbook_dir:
playbook_dir = workdir
@ -284,9 +286,8 @@ class Ansible(object):
)
)
ansible_fact_path = self._creates_ansible_fact_dir()
# ansible_fact_path = self._creates_ansible_fact_dir()
extravars = self._get_extra_vars(extra_vars)
callback_whitelist = self._callback_whitelist(callback_whitelist,
output_callback)
@ -295,11 +296,8 @@ class Ansible(object):
connection, gathering_policy, module_path,
key, extra_env_variables, ansible_timeout,
callback_whitelist)
command_path = None
with utils.TempDirs(dir_path=constants.VALIDATION_RUN_LOG_PATH,
chdir=False,) as ansible_artifact_path:
if not ansible_artifact_path:
ansible_artifact_path = constants.VALIDATION_ANSIBLE_ARTIFACT_PATH
if 'ANSIBLE_CONFIG' not in env and not ansible_cfg:
ansible_cfg = os.path.join(ansible_artifact_path, 'ansible.cfg')
config = configparser.ConfigParser()
@ -320,10 +318,11 @@ class Ansible(object):
'verbosity': verbosity,
'quiet': quiet,
'extravars': extravars,
'fact_cache': ansible_fact_path,
'fact_cache': ansible_artifact_path,
'fact_cache_type': 'jsonfile',
'artifact_dir': ansible_artifact_path,
'rotate_artifacts': 256
'artifact_dir': workdir,
'rotate_artifacts': 256,
'ident': ''
}
if skip_tags:
@ -351,4 +350,4 @@ class Ansible(object):
runner = ansible_runner.Runner(config=runner_config)
status, rc = runner.run()
return runner.stdout.name, playbook, rc, status
return playbook, rc, status

View File

@ -23,3 +23,5 @@ VALIDATION_GROUPS = ['no-op',
'post']
VALIDATION_RUN_LOG_PATH = '/var/lib/validations/logs'
VALIDATIONS_LOG_BASEDIR = '/var/logs/validations/'
VALIDATION_ANSIBLE_ARTIFACT_PATH = '/var/lib/validations/artifacts/'

View File

@ -15,7 +15,6 @@
import logging
import os
import six
from validations_libs.ansible import Ansible as v_ansible
from validations_libs import constants
@ -32,7 +31,7 @@ class Run(object):
def run_validations(self, playbook=[], inventory='localhost',
group=None, extra_vars=None, validations_dir=None,
validation_name=None, extra_env_vars=None,
ansible_cfg=None, quiet=True):
ansible_cfg=None, quiet=True, workdir=None):
self.log = logging.getLogger(__name__ + ".run_validations")
@ -67,13 +66,14 @@ class Run(object):
else:
raise RuntimeError("No validations found")
run_ansible = v_ansible()
self.log.debug('Running the validations with Ansible')
results = []
with v_utils.TempDirs(chdir=False) as tmp:
for playbook in playbooks:
stdout_file, _playbook, _rc, _status = run_ansible.run(
workdir=tmp,
validation_uuid, artifacts_dir = v_utils.create_artifacts_dir(
prefix=os.path.basename(playbook))
run_ansible = v_ansible(validation_uuid)
_playbook, _rc, _status = run_ansible.run(
workdir=artifacts_dir,
playbook=playbook,
playbook_dir=(validations_dir if
validations_dir else
@ -85,11 +85,12 @@ class Run(object):
extra_vars=extra_vars,
extra_env_variables=extra_env_vars,
ansible_cfg=ansible_cfg,
gathering_policy='explicit')
gathering_policy='explicit',
ansible_artifact_path=artifacts_dir)
results.append({'validation': {
'playbook': _playbook,
'rc_code': _rc,
'status': _status,
'stdout_file': stdout_file
'validation_id': validation_uuid
}})
return results

View File

@ -13,8 +13,6 @@
# under the License.
#
from unittest import mock
VALIDATIONS_LIST = [{
'description': 'My Validation One Description',
'groups': ['prep', 'pre-deployment'],

View File

@ -19,7 +19,6 @@ from unittest import TestCase
from ansible_runner import Runner
from validations_libs.ansible import Ansible
from validations_libs.tests import fakes
from validations_libs import utils
class TestAnsible(TestCase):
@ -44,7 +43,7 @@ class TestAnsible(TestCase):
)
mock_exists.assert_called_with('/tmp/non-existing.yaml')
@mock.patch('tempfile.mkdtemp', return_value='/tmp/')
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.makedirs')
@mock.patch.object(
@ -55,32 +54,29 @@ class TestAnsible(TestCase):
)
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
return_value="/foo/inventory.yaml")
@mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
return_value="/tmp/foo.yaml")
def test_ansible_runner_error(self, mock_stdout, mock_dump_artifact,
@mock.patch('ansible_runner.runner_config.RunnerConfig')
def test_ansible_runner_error(self, mock_config, mock_dump_artifact,
mock_run, mock_mkdirs, mock_exists,
mock_mkdtemp):
mock_open):
stdout_file, _playbook, _rc, _status = self.run.run('existing.yaml',
_playbook, _rc, _status = self.run.run('existing.yaml',
'localhost,',
'/tmp')
self.assertEquals((_playbook, _rc, _status),
('existing.yaml', 1, 'failed'))
@mock.patch('tempfile.mkdtemp', return_value='/tmp/')
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.makedirs')
@mock.patch.object(Runner, 'run',
return_value=fakes.fake_ansible_runner_run_return(rc=0)
)
return_value=fakes.fake_ansible_runner_run_return(rc=0))
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
return_value="/foo/inventory.yaml")
@mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
return_value="/tmp/foo.yaml")
def test_run_success_default(self, mock_stdout, mock_dump_artifact,
@mock.patch('ansible_runner.runner_config.RunnerConfig')
def test_run_success_default(self, mock_config, mock_dump_artifact,
mock_run, mock_mkdirs, mock_exists,
mock_mkstemp):
stdout_file, _playbook, _rc, _status = self.run.run(
mock_open):
_playbook, _rc, _status = self.run.run(
playbook='existing.yaml',
inventory='localhost,',
workdir='/tmp'
@ -88,21 +84,19 @@ class TestAnsible(TestCase):
self.assertEquals((_playbook, _rc, _status),
('existing.yaml', 0, 'successful'))
@mock.patch('tempfile.mkdtemp', return_value='/tmp/')
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.makedirs')
@mock.patch.object(Runner, 'run',
return_value=fakes.fake_ansible_runner_run_return(rc=0)
)
return_value=fakes.fake_ansible_runner_run_return(rc=0))
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
return_value="/foo/inventory.yaml")
@mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
return_value="/tmp/foo.yaml")
def test_run_success_gathering_policy(self, mock_stdout,
@mock.patch('ansible_runner.runner_config.RunnerConfig')
def test_run_success_gathering_policy(self, mock_config,
mock_dump_artifact, mock_run,
mock_mkdirs, mock_exists,
mock_mkstemp):
stdout_file, _playbook, _rc, _status = self.run.run(
mock_open):
_playbook, _rc, _status = self.run.run(
playbook='existing.yaml',
inventory='localhost,',
workdir='/tmp',
@ -112,21 +106,19 @@ class TestAnsible(TestCase):
self.assertEquals((_playbook, _rc, _status),
('existing.yaml', 0, 'successful'))
@mock.patch('tempfile.mkdtemp', return_value='/tmp/')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.makedirs')
@mock.patch.object(Runner, 'run',
return_value=fakes.fake_ansible_runner_run_return(rc=0)
)
return_value=fakes.fake_ansible_runner_run_return(rc=0))
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
return_value="/foo/inventory.yaml")
@mock.patch('ansible_runner.runner.Runner.stdout', autospec=True,
return_value="/tmp/foo.yaml")
def test_run_success_local(self, mock_stdout,
@mock.patch('six.moves.builtins.open')
@mock.patch('ansible_runner.runner_config.RunnerConfig')
def test_run_success_local(self, mock_config, mock_open,
mock_dump_artifact, mock_run,
mock_mkdirs, mock_exists,
mock_mkstemp):
stdout_file, _playbook, _rc, _status = self.run.run(
mock_mkdirs, mock_exists
):
_playbook, _rc, _status = self.run.run(
playbook='existing.yaml',
inventory='localhost,',
workdir='/tmp',

View File

@ -27,7 +27,9 @@ class TestValidatorRun(TestCase):
@mock.patch('validations_libs.utils.parse_all_validations_on_disk')
@mock.patch('validations_libs.ansible.Ansible.run')
def test_validation_run_success(self, mock_ansible_run,
@mock.patch('validations_libs.utils.create_artifacts_dir',
return_value=('1234', '/tmp/'))
def test_validation_run_success(self, mock_tmp, mock_ansible_run,
mock_validation_dir):
mock_validation_dir.return_value = [{
'description': 'My Validation One Description',
@ -35,14 +37,17 @@ class TestValidatorRun(TestCase):
'id': 'foo',
'name': 'My Validition One Name',
'parameters': {}}]
mock_ansible_run.return_value = ('/tmp/validation/stdout.log',
'foo.yaml', 0, 'successful')
mock_ansible_run.return_value = ('foo.yaml', 0, 'successful')
expected_run_return = [
{'validation': {'playbook': 'foo.yaml',
'rc_code': 0,
'status': 'successful',
'stdout_file': '/tmp/validation/stdout.log'}}]
'validation_id': '1234'}},
{'validation': {'playbook': 'foo.yaml',
'rc_code': 0,
'status': 'successful',
'validation_id': '1234'}}]
playbook = ['fake.yaml']
inventory = 'tmp/inventory.yaml'
@ -55,7 +60,9 @@ class TestValidatorRun(TestCase):
@mock.patch('validations_libs.utils.parse_all_validations_on_disk')
@mock.patch('validations_libs.ansible.Ansible.run')
def test_validation_run_failed(self, mock_ansible_run,
@mock.patch('validations_libs.utils.create_artifacts_dir',
return_value=('1234', '/tmp/'))
def test_validation_run_failed(self, mock_tmp, mock_ansible_run,
mock_validation_dir):
mock_validation_dir.return_value = [{
'description': 'My Validation One Description',
@ -63,14 +70,17 @@ class TestValidatorRun(TestCase):
'id': 'foo',
'name': 'My Validition One Name',
'parameters': {}}]
mock_ansible_run.return_value = ('/tmp/validation/stdout.log',
'foo.yaml', 0, 'failed')
mock_ansible_run.return_value = ('foo.yaml', 0, 'failed')
expected_run_return = [
{'validation': {'playbook': 'foo.yaml',
'rc_code': 0,
'status': 'failed',
'stdout_file': '/tmp/validation/stdout.log'}}]
'validation_id': '1234'}},
{'validation': {'playbook': 'foo.yaml',
'rc_code': 0,
'status': 'failed',
'validation_id': '1234'}}]
playbook = ['fake.yaml']
inventory = 'tmp/inventory.yaml'

View File

@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import glob
import json
import logging
@ -22,6 +23,7 @@ import tempfile
import yaml
from validations_libs import constants
from uuid import uuid4
RED = "\033[1;31m"
GREEN = "\033[0;32m"
@ -125,12 +127,35 @@ class TempDirs(object):
LOG.info("Temporary directory [ %s ] cleaned up" % self.dir)
def current_time():
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    return datetime.datetime.utcnow().isoformat() + 'Z'
def create_artifacts_dir(dir_path=None, prefix=None):
    """Create a per-run directory for Ansible artifacts and logs.

    :param dir_path: Base directory for the artifacts; defaults to
                     ``constants.VALIDATION_ANSIBLE_ARTIFACT_PATH``.
    :param prefix: Optional name (e.g. the playbook basename) embedded
                   in the directory name for easier identification.
    :return: Tuple ``(validation_uuid, log_dir)`` where ``log_dir`` is
             the newly created directory path.
    :raises OSError: If the directory cannot be created.
    """
    dir_path = (dir_path if dir_path else
                constants.VALIDATION_ANSIBLE_ARTIFACT_PATH)
    # One UUID ties together callback logging, the ansible-runner output
    # and the artifact directory for a single validation run.
    validation_uuid = str(uuid4())
    log_dir = "{}/{}_{}_{}".format(dir_path, validation_uuid,
                                   (prefix if prefix else ''), current_time())
    try:
        os.makedirs(log_dir)
        return validation_uuid, log_dir
    except OSError:
        # BUG FIX: the original called ``.format(log_dir)`` on the return
        # value of ``LOG.exception(...)`` (None), raising AttributeError and
        # losing the message; it then implicitly returned None, which made
        # the caller fail later with an opaque tuple-unpacking TypeError.
        # Log with lazy %-args and re-raise so the failure is explicit here.
        LOG.exception("Error while creating Ansible artifacts log file. "
                      "Please check the access rights for %s", log_dir)
        raise
def parse_all_validations_on_disk(path, groups=None):
results = []
validations_abspath = glob.glob("{path}/*.yaml".format(path=path))
if isinstance(groups, six.string_types):
group_list = []
group_list.append(groups)
groups = group_list
for pl in validations_abspath:
validation_id, ext = os.path.splitext(os.path.basename(pl))
validation_id, _ext = os.path.splitext(os.path.basename(pl))
with open(pl, 'r') as val_playbook:
contents = yaml.safe_load(val_playbook)
@ -167,6 +192,27 @@ def parse_all_validation_groups_on_disk(groups_file_path=None):
return results
def parse_all_validations_logs_on_disk(uuid_run=None, validation_id=None):
    """Load validation run logs found under the logs base directory.

    :param uuid_run: Restrict the search to a single run UUID; takes
                     precedence over ``validation_id`` when both are given.
    :param validation_id: Restrict the search to one validation id.
    :return: List of the parsed JSON contents of every matching log file.
    """
    base_dir = constants.VALIDATIONS_LOG_BASEDIR
    # Narrow the glob pattern depending on the filters supplied; the run
    # UUID wins when both filters are passed, matching the original logic.
    if uuid_run:
        pattern = "{}/*_{}_*.json".format(base_dir, uuid_run)
    elif validation_id:
        pattern = "{}/*_{}_*.json".format(base_dir, validation_id)
    else:
        pattern = "{}/*.json".format(base_dir)

    parsed_logs = []
    for log_path in glob.glob(pattern):
        with open(log_path, 'r') as log:
            parsed_logs.append(json.load(log))
    return parsed_logs
def get_validation_metadata(validation, key):
default_metadata = {
'name': 'Unnamed',
@ -224,11 +270,11 @@ def get_validation_group_name_list():
return results
def get_new_validations_logs_on_disk():
def get_new_validations_logs_on_disk(validations_logs_dir):
"""Return a list of new log execution filenames """
files = []
for root, dirs, filenames in os.walk(constants.VALIDATIONS_LOG_BASEDIR):
for root, dirs, filenames in os.walk(validations_logs_dir):
files = [
f for f in filenames if not f.startswith('processed')
and os.path.splitext(f)[1] == '.json'

12
zuul.d/layout.yaml Normal file
View File

@ -0,0 +1,12 @@
- project:
templates:
- openstack-python3-ussuri-jobs
- check-requirements
check:
jobs:
- openstack-tox-linters
- openstack-tox-lower-constraints
gate:
jobs:
- openstack-tox-linters
- openstack-tox-lower-constraints