Add validation config file mechanism
Introduce a validation config file. The default location of the config file will be /etc/validation.cfg. The variables' precedence will be the following: * user's CLI args * config file * default interval values Change-Id: I05c54a43bc0a03878793cca3f51e23f4a8b63a23
This commit is contained in:
parent
442b7071c9
commit
530c6a88ae
|
@ -20,3 +20,4 @@ Command Options
|
|||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoprogram-cliff:: validation.cli
|
||||
:application: validation
|
||||
|
|
|
@ -22,6 +22,10 @@ classifier =
|
|||
[files]
|
||||
packages = validations_libs
|
||||
|
||||
data_files =
|
||||
etc =
|
||||
validation.cfg
|
||||
|
||||
[compile_catalog]
|
||||
directory = validations-libs/locale
|
||||
domain = validations-lib
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
[default]
|
||||
# Default configuration for the Validation Framework
|
||||
# These are mainly CLI parameters which can be set here in order to avoid
|
||||
# providing the same parameters on each run.
|
||||
|
||||
# Location where the Validation playbooks are stored.
|
||||
validation_dir = /usr/share/ansible/validation-playbooks
|
||||
|
||||
# Path where the framework is supposed to write logs and results.
|
||||
# Note: this should not be a relative path.
|
||||
# Uncomment this line according to your preferred location:
|
||||
# validation_log_dir = /usr/share/validations
|
||||
|
||||
# Location where the Ansible Validation Callback, Libraries and Modules are
|
||||
# stored.
|
||||
ansible_base_dir = /usr/share/ansible/
|
||||
|
||||
# SSH user for the remote access
|
||||
#ssh_user = stack
|
||||
|
||||
# Output log for the Validation results.
|
||||
output_log = output.log
|
||||
|
||||
# Limitation of the number of results to return to the console.
|
||||
history_limit = 15
|
||||
|
||||
[ansible_runner]
|
||||
# Ansible Runner configuration parameters.
|
||||
# Here you can set the Runner parameters which will be used by the framework.
|
||||
# Note that only those parameters are supported, any other custom parameters
|
||||
# will be ignored.
|
||||
|
||||
# Verbosity for Ansible
|
||||
verbosity = 5
|
||||
|
||||
# Fact cache directory location and type
|
||||
# fact_cache = /var/log/validations/artifacts/
|
||||
fact_cache_type = jsonfile
|
||||
|
||||
# Inventory for Ansible
|
||||
#inventory = hosts.yaml
|
||||
|
||||
quiet = True
|
||||
rotate_artifacts = 256
|
||||
|
||||
[ansible_environment]
|
||||
# Ansible Environment variables.
|
||||
# You can provide here, all the Ansible configuration variables documented here:
|
||||
# https://docs.ansible.com/ansible/latest/reference_appendices/config.html
|
||||
|
||||
# Here is a set of parameters used by the Validation Framework as example:
|
||||
#ANSIBLE_LOG_PATH = /home/stack/ansible.log
|
||||
#ANSIBLE_REMOTE_USER = stack
|
||||
ANSIBLE_CALLBACK_WHITELIST = validation_stdout,validation_json,profile_tasks
|
||||
ANSIBLE_STDOUT_CALLBACK = validation_stdout
|
||||
|
||||
# Callback settings which are part of Ansible environment variables.
|
||||
# Configuration for HTTP Server callback
|
||||
HTTP_JSON_SERVER = http://localhost
|
||||
HTTP_JSON_PORT = 8080
|
|
@ -26,6 +26,7 @@ import yaml
|
|||
|
||||
from six.moves import configparser
|
||||
from validations_libs import constants
|
||||
from validations_libs import utils
|
||||
|
||||
LOG = logging.getLogger(__name__ + ".ansible")
|
||||
|
||||
|
@ -275,6 +276,17 @@ class Ansible(object):
|
|||
else:
|
||||
return env
|
||||
|
||||
def _dump_validation_config(self, config, path, filename='validation.cfg'):
    """Dump Validation config in artifact directory

    :param config: Mapping of section names to mappings of option
                   names and values
    :type config: ``dict``
    :param path: Directory in which the file is written
    :type path: ``string``
    :param filename: Name of the generated file
                     (Defaults to ``validation.cfg``)
    :type filename: ``string``
    """
    parser = configparser.ConfigParser()
    for section, options in config.items():
        parser.add_section(section)
        for option, value in options.items():
            # ConfigParser only accepts string values.
            parser.set(section, option, str(value))
    with open('{}/{}'.format(path, filename), 'w') as conf:
        parser.write(conf)
|
||||
|
||||
def run(self, playbook, inventory, workdir, playbook_dir=None,
|
||||
connection='smart', output_callback=None,
|
||||
base_dir=constants.DEFAULT_VALIDATIONS_BASEDIR,
|
||||
|
@ -283,9 +295,10 @@ class Ansible(object):
|
|||
verbosity=0, quiet=False, extra_vars=None,
|
||||
gathering_policy='smart',
|
||||
extra_env_variables=None, parallel_run=False,
|
||||
callback_whitelist=None, ansible_cfg=None,
|
||||
callback_whitelist=None, ansible_cfg_file=None,
|
||||
ansible_timeout=30, ansible_artifact_path=None,
|
||||
log_path=None, run_async=False, python_interpreter=None):
|
||||
log_path=None, run_async=False, python_interpreter=None,
|
||||
validation_cfg_file=None):
|
||||
"""Execute one or multiple Ansible playbooks
|
||||
|
||||
:param playbook: The Absolute path of the Ansible playbook
|
||||
|
@ -341,9 +354,10 @@ class Ansible(object):
|
|||
Custom output_callback is also whitelisted.
|
||||
(Defaults to ``None``)
|
||||
:type callback_whitelist: ``list`` or ``string``
|
||||
:param ansible_cfg: Path to an ansible configuration file. One will be
|
||||
generated in the artifact path if this option is None.
|
||||
:type ansible_cfg: ``string``
|
||||
:param ansible_cfg_file: Path to an ansible configuration file. One
|
||||
will be generated in the artifact path if
|
||||
this option is None.
|
||||
:type ansible_cfg_file: ``string``
|
||||
:param ansible_timeout: Timeout for ansible connections.
|
||||
(Defaults to ``30 minutes``)
|
||||
:type ansible_timeout: ``integer``
|
||||
|
@ -360,6 +374,10 @@ class Ansible(object):
|
|||
``auto_silent`` or the default one
|
||||
``auto_legacy``)
|
||||
:type python_interpreter: ``string``
|
||||
:param validation_cfg_file: A dictionary of configuration for
|
||||
Validation loaded from an validation.cfg
|
||||
file.
|
||||
:type validation_cfg_file: ``dict``
|
||||
|
||||
:return: A ``tuple`` containing the the absolute path of the executed
|
||||
playbook, the return code and the status of the run
|
||||
|
@ -405,16 +423,17 @@ class Ansible(object):
|
|||
ansible_timeout, callback_whitelist,
|
||||
base_dir, python_interpreter))
|
||||
|
||||
if 'ANSIBLE_CONFIG' not in env and not ansible_cfg:
|
||||
ansible_cfg = os.path.join(ansible_artifact_path, 'ansible.cfg')
|
||||
config = configparser.ConfigParser()
|
||||
config.add_section('defaults')
|
||||
config.set('defaults', 'internal_poll_interval', '0.05')
|
||||
with open(ansible_cfg, 'w') as f:
|
||||
config.write(f)
|
||||
env['ANSIBLE_CONFIG'] = ansible_cfg
|
||||
elif 'ANSIBLE_CONFIG' not in env and ansible_cfg:
|
||||
env['ANSIBLE_CONFIG'] = ansible_cfg
|
||||
if 'ANSIBLE_CONFIG' not in env and not ansible_cfg_file:
|
||||
ansible_cfg_file = os.path.join(ansible_artifact_path,
|
||||
'ansible.cfg')
|
||||
ansible_config = configparser.ConfigParser()
|
||||
ansible_config.add_section('defaults')
|
||||
ansible_config.set('defaults', 'internal_poll_interval', '0.05')
|
||||
with open(ansible_cfg_file, 'w') as f:
|
||||
ansible_config.write(f)
|
||||
env['ANSIBLE_CONFIG'] = ansible_cfg_file
|
||||
elif 'ANSIBLE_CONFIG' not in env and ansible_cfg_file:
|
||||
env['ANSIBLE_CONFIG'] = ansible_cfg_file
|
||||
|
||||
if log_path:
|
||||
env['VALIDATIONS_LOG_DIR'] = log_path
|
||||
|
@ -434,7 +453,6 @@ class Ansible(object):
|
|||
|
||||
if not BACKWARD_COMPAT:
|
||||
r_opts.update({
|
||||
'envvars': envvars,
|
||||
'project_dir': playbook_dir,
|
||||
'fact_cache': ansible_artifact_path,
|
||||
'fact_cache_type': 'jsonfile'
|
||||
|
@ -453,6 +471,17 @@ class Ansible(object):
|
|||
|
||||
if parallel_run:
|
||||
r_opts['directory_isolation_base_path'] = ansible_artifact_path
|
||||
|
||||
if validation_cfg_file:
|
||||
if 'ansible_runner' in validation_cfg_file.keys():
|
||||
r_opts.update(validation_cfg_file['ansible_runner'])
|
||||
if 'ansible_environment' in validation_cfg_file.keys():
|
||||
envvars.update(validation_cfg_file['ansible_environment'])
|
||||
self._dump_validation_config(validation_cfg_file,
|
||||
ansible_artifact_path)
|
||||
if not BACKWARD_COMPAT:
|
||||
r_opts.update({'envvars': envvars})
|
||||
|
||||
runner_config = ansible_runner.runner_config.RunnerConfig(**r_opts)
|
||||
runner_config.prepare()
|
||||
runner_config.env['ANSIBLE_STDOUT_CALLBACK'] = \
|
||||
|
|
|
@ -14,11 +14,14 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from cliff.app import App
|
||||
from cliff.commandmanager import CommandManager
|
||||
|
||||
from validations_libs import utils
|
||||
|
||||
|
||||
class ValidationCliApp(App):
|
||||
"""Cliff application for the `ValidationCli` tool.
|
||||
|
@ -50,6 +53,38 @@ class ValidationCliApp(App):
|
|||
if err:
|
||||
self.LOG.debug('got an error: {}'.format(err))
|
||||
|
||||
def _format_arg(self):
    """Return the unparsed CLI tokens normalized to config-key form.

    Leading option prefix characters are stripped and dashes are
    converted to underscores (e.g. ``--foo-bar`` becomes ``foo_bar``),
    so the tokens can be matched against config-file option names.
    """
    _, unknown = self.parser.parse_known_args()
    prefix = self.parser.prefix_chars
    return [token.lstrip(prefix).replace('-', '_') for token in unknown]
|
||||
|
||||
def set_argument_parser(self, parser, section='default'):
    """ Set Arguments parser depending of the precedence ordering:
        * User CLI arguments
        * Configuration file
        * Default CLI values

    :param parser: The argument parser whose defaults are updated
    :param section: Config file section to read the values from
                    (Defaults to ``default``)
    :return: The parser with its defaults merged from the config file
    """
    cli_args = self._format_arg()
    args, _ = parser.parse_known_args()
    config = utils.load_config(os.path.abspath(args.config))
    config_args = config.get(section, {})
    for key, value in args._get_kwargs():
        # matbu: manage the race when user's cli arg is the same than
        # the parser default value. The user's cli arg will *always*
        # takes precedence on others.
        if parser.get_default(key) == value and key in cli_args:
            try:
                cli_value = cli_args[cli_args.index(key) + 1]
                config_args.update({key: cli_value})
            except IndexError:
                # The flag is the last CLI token, so no value follows it.
                # (Indexing a list past its end raises IndexError, not
                # KeyError; logging.warning takes the formatted string.)
                self.LOG.warning('Key not found in cli: {}'.format(key))
        elif parser.get_default(key) != value:
            # User explicitly passed a non-default value on the CLI.
            config_args.update({key: value})
        elif key not in config_args.keys():
            # Neither on the CLI nor in the config file: keep default.
            config_args.update({key: value})
    parser.set_defaults(**config_args)
    return parser
|
||||
|
||||
|
||||
def main(argv=sys.argv[1:]):
|
||||
v_cli = ValidationCliApp()
|
||||
|
|
|
@ -14,10 +14,15 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
from cliff import _argparse
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from validations_libs import constants
|
||||
from validations_libs import utils
|
||||
|
||||
# Handle backward compatibility for Cliff 2.16.0 in stable/train:
|
||||
if hasattr(_argparse, 'SmartHelpFormatter'):
|
||||
|
@ -40,6 +45,14 @@ class BaseCommand(Command):
|
|||
)
|
||||
for hook in self._hooks:
|
||||
hook.obj.get_parser(parser)
|
||||
|
||||
parser.add_argument(
|
||||
'--config',
|
||||
dest='config',
|
||||
default=utils.find_config_file(),
|
||||
help=("Config file path for Validation.")
|
||||
)
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
|
@ -60,4 +73,27 @@ class BaseLister(Lister):
|
|||
for action in parser._actions:
|
||||
vf_parser._add_action(action)
|
||||
|
||||
vf_parser.add_argument(
|
||||
'--config',
|
||||
dest='config',
|
||||
default=utils.find_config_file(),
|
||||
help=("Config file path for Validation.")
|
||||
)
|
||||
|
||||
return vf_parser
|
||||
|
||||
|
||||
class BaseShow(ShowOne):
    """Base Show client implementation class"""

    def get_parser(self, parser):
        """Argument parser for base show

        Extends the cliff ``ShowOne`` parser with the ``--config``
        option so every Show-style command accepts a Validation
        configuration file.
        """
        parser = super(BaseShow, self).get_parser(parser)
        default_config = utils.find_config_file()
        parser.add_argument('--config',
                            dest='config',
                            default=default_config,
                            help=("Config file path for Validation."))
        return parser
|
||||
|
|
|
@ -46,28 +46,29 @@ class ListHistory(BaseLister):
|
|||
default=constants.VALIDATIONS_LOG_BASEDIR,
|
||||
help=("Path where the validation log files "
|
||||
"is located."))
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
validation_log_dir = parsed_args.validation_log_dir
|
||||
history_limit = parsed_args.history_limit
|
||||
|
||||
if parsed_args.history_limit < 1:
|
||||
raise ValueError(
|
||||
(
|
||||
"Number <n> of the most recent runs must be > 0. "
|
||||
"You have provided {}").format(
|
||||
parsed_args.history_limit))
|
||||
if history_limit < 1:
|
||||
msg = ("Number <n> of the most recent runs must be > 0. "
|
||||
"You have provided {}").format(history_limit)
|
||||
raise ValueError(msg)
|
||||
self.app.LOG.info(
|
||||
(
|
||||
"Limiting output to the maximum of "
|
||||
"{} last validations.").format(
|
||||
parsed_args.history_limit))
|
||||
("Limiting output to the maximum of "
|
||||
"{} last validations.").format(history_limit))
|
||||
|
||||
actions = ValidationActions()
|
||||
|
||||
return actions.show_history(
|
||||
validation_ids=parsed_args.validation,
|
||||
log_path=parsed_args.validation_log_dir,
|
||||
history_limit=parsed_args.history_limit)
|
||||
history_limit=history_limit)
|
||||
|
||||
|
||||
class GetHistory(BaseCommand):
|
||||
|
@ -88,10 +89,12 @@ class GetHistory(BaseCommand):
|
|||
default=constants.VALIDATIONS_LOG_BASEDIR,
|
||||
help=("Path where the validation log files "
|
||||
"is located."))
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
|
||||
self.app.LOG.debug(
|
||||
(
|
||||
"Obtaining information about the validation run {}\n"
|
||||
|
|
|
@ -14,14 +14,16 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from cliff.lister import Lister
|
||||
import json
|
||||
import sys
|
||||
|
||||
from validations_libs.validation_actions import ValidationActions
|
||||
from validations_libs import constants
|
||||
from validations_libs.cli.base import BaseLister
|
||||
from validations_libs.cli.parseractions import CommaListAction
|
||||
|
||||
|
||||
class ValidationList(Lister):
|
||||
class ValidationList(BaseLister):
|
||||
"""List the Validations Catalog"""
|
||||
|
||||
def get_parser(self, parser):
|
||||
|
@ -52,6 +54,9 @@ class ValidationList(Lister):
|
|||
default=constants.ANSIBLE_VALIDATION_DIR,
|
||||
help=("Path where the validation playbooks "
|
||||
"are located."))
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
|
@ -61,6 +66,7 @@ class ValidationList(Lister):
|
|||
category = parsed_args.category
|
||||
product = parsed_args.product
|
||||
validation_dir = parsed_args.validation_dir
|
||||
group = parsed_args.group
|
||||
|
||||
v_actions = ValidationActions(validation_path=validation_dir)
|
||||
return (v_actions.list_validations(groups=group,
|
||||
|
|
|
@ -15,9 +15,11 @@
|
|||
# under the License.
|
||||
|
||||
import getpass
|
||||
import os
|
||||
import sys
|
||||
|
||||
from validations_libs import constants
|
||||
from validations_libs import utils
|
||||
from validations_libs.validation_actions import ValidationActions
|
||||
from validations_libs.cli import common
|
||||
from validations_libs.cli.base import BaseCommand
|
||||
|
@ -156,13 +158,17 @@ class Run(BaseCommand):
|
|||
"if more than one product is required "
|
||||
"separate the product names with commas."))
|
||||
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
"""Take validation action"""
|
||||
v_actions = ValidationActions(
|
||||
validation_path=parsed_args.validation_dir)
|
||||
# Load config:
|
||||
config = utils.load_config(os.path.abspath(parsed_args.config))
|
||||
|
||||
v_actions = ValidationActions(parsed_args.validation_dir)
|
||||
# Ansible execution should be quiet while using the validations_json
|
||||
# default callback and be verbose while passing ANSIBLE_STDOUT_CALLBACK
|
||||
# environment variable to Ansible through the --extra-env-vars argument
|
||||
|
@ -195,8 +201,8 @@ class Run(BaseCommand):
|
|||
python_interpreter=parsed_args.python_interpreter,
|
||||
quiet=quiet_mode,
|
||||
ssh_user=parsed_args.ssh_user,
|
||||
log_path=parsed_args.validation_log_dir
|
||||
)
|
||||
log_path=parsed_args.validation_log_dir,
|
||||
validation_config=config)
|
||||
except RuntimeError as e:
|
||||
raise RuntimeError(e)
|
||||
|
||||
|
|
|
@ -14,15 +14,17 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from cliff.show import ShowOne
|
||||
from cliff.lister import Lister
|
||||
import json
|
||||
import sys
|
||||
|
||||
from validations_libs.validation_actions import ValidationActions
|
||||
from validations_libs import constants
|
||||
from validations_libs.cli.parseractions import CommaListAction
|
||||
from validations_libs.cli.base import BaseShow
|
||||
from validations_libs.cli.base import BaseLister
|
||||
|
||||
|
||||
class Show(ShowOne):
|
||||
class Show(BaseShow):
|
||||
"""Show detailed informations about a Validation"""
|
||||
|
||||
def get_parser(self, parser):
|
||||
|
@ -36,6 +38,9 @@ class Show(ShowOne):
|
|||
metavar="<validation>",
|
||||
type=str,
|
||||
help="Show a specific validation.")
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
|
@ -51,7 +56,7 @@ class Show(ShowOne):
|
|||
return data.keys(), data.values()
|
||||
|
||||
|
||||
class ShowGroup(Lister):
|
||||
class ShowGroup(BaseLister):
|
||||
"""Show detailed informations about Validation Groups"""
|
||||
|
||||
def get_parser(self, parser):
|
||||
|
@ -63,6 +68,9 @@ class ShowGroup(Lister):
|
|||
help=("Path where the validation playbooks "
|
||||
"are located."))
|
||||
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
|
@ -72,7 +80,7 @@ class ShowGroup(Lister):
|
|||
return v_actions.group_information(constants.VALIDATION_GROUPS_INFO)
|
||||
|
||||
|
||||
class ShowParameter(ShowOne):
|
||||
class ShowParameter(BaseShow):
|
||||
"""Show Validation(s) parameter(s)
|
||||
|
||||
Display Validation(s) Parameter(s) which could be overriden during an
|
||||
|
@ -147,11 +155,15 @@ class ShowParameter(ShowOne):
|
|||
help=("Print representation of the validation. "
|
||||
"The choices of the output format is json,yaml. ")
|
||||
)
|
||||
|
||||
# Merge config and CLI args:
|
||||
if hasattr(self.app, 'set_argument_parser'):
|
||||
return self.app.set_argument_parser(parser)
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
v_actions = ValidationActions(parsed_args.validation_dir)
|
||||
|
||||
validation_dir = parsed_args.validation_dir
|
||||
v_actions = ValidationActions(validation_dir)
|
||||
params = v_actions.show_validations_parameters(
|
||||
validations=parsed_args.validation_name,
|
||||
groups=parsed_args.group,
|
||||
|
|
|
@ -38,3 +38,7 @@ VALIDATIONS_LOG_BASEDIR = os.path.expanduser('~/validations')
|
|||
VALIDATION_ANSIBLE_ARTIFACT_PATH = os.path.join(
|
||||
VALIDATIONS_LOG_BASEDIR,
|
||||
'artifacts')
|
||||
|
||||
# Ansible Runner option names accepted from the [ansible_runner] section of
# validation.cfg; per the shipped config file's commentary, other custom
# parameters in that section are ignored.
# NOTE(review): the filtering itself happens elsewhere -- confirm against
# the consumer of this constant.
ANSIBLE_RUNNER_CONFIG_PARAMETERS = ['verbosity', 'extravars', 'fact_cache',
                                    'fact_cache_type', 'inventory', 'playbook',
                                    'project_dir', 'quiet', 'rotate_artifacts']
|
||||
|
|
|
@ -18,6 +18,12 @@ from unittest import TestCase
|
|||
from validations_libs.cli import app
|
||||
|
||||
|
||||
class FakeValidationApp(app.ValidationCliApp):
    """Test double for ValidationCliApp.

    Overrides ``set_argument_parser`` with a pass-through so unit tests
    do not trigger the config-file/CLI argument merging logic.
    """

    def set_argument_parser(self, parser):
        # No-op override: hand the parser back untouched.
        return parser
|
||||
|
||||
|
||||
class BaseCommand(TestCase):
|
||||
|
||||
def check_parser(self, cmd, args, verify_args):
|
||||
|
@ -35,7 +41,8 @@ class BaseCommand(TestCase):
|
|||
|
||||
def setUp(self):
|
||||
super(BaseCommand, self).setUp()
|
||||
self.app = app.ValidationCliApp()
|
||||
self.app = FakeValidationApp()
|
||||
|
||||
|
||||
KEYVALUEACTION_VALUES = {
|
||||
'valid': 'foo=bar',
|
||||
|
|
|
@ -0,0 +1,113 @@
|
|||
# Copyright 2021 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
try:
|
||||
from unittest import mock
|
||||
except ImportError:
|
||||
import mock
|
||||
from unittest import TestCase
|
||||
|
||||
from validations_libs.cli import app
|
||||
from validations_libs.cli import lister
|
||||
from validations_libs.cli import history
|
||||
|
||||
|
||||
class TestApp(TestCase):
    """Unit tests for ValidationCliApp CLI-token normalization."""

    def setUp(self):
        super(TestApp, self).setUp()
        self.app = app.ValidationCliApp()

    @mock.patch('argparse.ArgumentParser.parse_known_args',
                return_value=['foo', ['--foo-bar']])
    def test_config_args(self, mock_parse_known_args):
        """Unknown flags are normalized: prefix stripped, dashes mapped."""
        result = self.app._format_arg()
        self.assertEqual(['foo_bar'], result)
|
||||
|
||||
|
||||
class TestArgApp(TestCase):
    """Exercise the CLI-args > config-file > defaults precedence.

    NOTE(review): several cases mock ``find_config_file`` to the relative
    path 'validation.cfg'; the expected values presumably come from the
    validation.cfg shipped in the working tree -- TODO confirm.
    """

    def setUp(self):
        super(TestArgApp, self).setUp()
        self.app = app.ValidationCliApp()

    def test_validation_dir_config_cli(self):
        """A --validation-dir given on the CLI is used as-is."""
        args = ['--validation-dir', 'foo']
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('foo', parsed_args.validation_dir)

    @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR', 'bar')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_validation_dir_config_no_cli(self, mock_config):
        """Without a CLI arg, the config-file value beats the constant."""
        args = []
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/usr/share/ansible/validation-playbooks',
                         parsed_args.validation_dir)

    @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR', 'bar')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='/etc/validation.cfg')
    def test_validation_dir_config_no_cli_no_config(self, mock_config):
        """No CLI arg and no readable config file: the default is kept."""
        args = []
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('bar', parsed_args.validation_dir)

    @mock.patch('validations_libs.constants.ANSIBLE_VALIDATION_DIR',
                '/usr/share/ansible/validation-playbooks')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_validation_dir_config_no_cli_same_consts(self, mock_config):
        """Config value equal to the default still parses to that value."""
        args = []
        cmd = lister.ValidationList(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/usr/share/ansible/validation-playbooks',
                         parsed_args.validation_dir)

    def test_get_history_cli_arg(self):
        """A --validation-log-dir given on the CLI is used as-is."""
        args = ['123', '--validation-log-dir', '/foo/log/dir']
        cmd = history.GetHistory(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/foo/log/dir',
                         parsed_args.validation_log_dir)

    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_get_history_cli_arg_and_config_file(self, mock_config):
        """The CLI arg takes precedence over the config-file value."""
        args = ['123', '--validation-log-dir', '/foo/log/dir']
        cmd = history.GetHistory(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/foo/log/dir',
                         parsed_args.validation_log_dir)

    @mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR',
                '/home/foo/validations')
    @mock.patch('validations_libs.utils.find_config_file',
                return_value='validation.cfg')
    def test_get_history_no_cli_arg_and_config_file(self, mock_config):
        """Without a CLI arg the (mocked) default log basedir is used."""
        args = ['123']
        cmd = history.GetHistory(self.app, None)
        parser = cmd.get_parser('fake')
        parsed_args = parser.parse_args(args)
        self.assertEqual('/home/foo/validations',
                         parsed_args.validation_log_dir)
|
|
@ -64,8 +64,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
def test_run_command_extra_vars(self, mock_run, mock_user, mock_print,
|
||||
mock_log_dir):
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_vars(self, mock_config, mock_run, mock_user,
|
||||
mock_print, mock_log_dir):
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
|
@ -80,7 +81,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-vars', 'key=value']
|
||||
|
@ -98,8 +101,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
def test_run_command_extra_vars_twice(self, mock_run, mock_user,
|
||||
mock_print, mock_log_dir):
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_vars_twice(self, mock_config, mock_run,
|
||||
mock_user, mock_print, mock_log_dir):
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
|
@ -114,7 +118,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-vars', 'key=value1',
|
||||
|
@ -144,7 +150,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
def test_run_command_extra_vars_file(self, mock_run, mock_user, mock_open,
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_vars_file(self, mock_config, mock_run,
|
||||
mock_user, mock_open,
|
||||
mock_yaml, mock_log_dir):
|
||||
|
||||
run_called_args = {
|
||||
|
@ -161,7 +169,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-vars-file', '/foo/vars.yaml']
|
||||
|
@ -178,7 +188,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
def test_run_command_extra_env_vars(self, mock_run, mock_user, mock_log_dir):
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_env_vars(self, mock_config, mock_run, mock_user,
|
||||
mock_log_dir):
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
|
@ -193,7 +205,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-env-vars', 'key=value']
|
||||
|
@ -210,7 +224,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_env_vars_with_custom_callback(self,
|
||||
mock_config,
|
||||
mock_run,
|
||||
mock_user,
|
||||
mock_log_dir):
|
||||
|
@ -229,7 +245,9 @@ class TestRun(BaseCommand):
|
|||
'extra_env_vars': {'ANSIBLE_STDOUT_CALLBACK': 'default'},
|
||||
'python_interpreter': sys.executable,
|
||||
'quiet': False,
|
||||
'ssh_user': 'doe'}
|
||||
'ssh_user': 'doe',
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-env-vars', 'ANSIBLE_STDOUT_CALLBACK=default']
|
||||
|
@ -246,7 +264,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
def test_run_command_extra_env_vars_twice(self, mock_run, mock_user, mock_log_dir):
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_env_vars_twice(self, mock_config, mock_run,
|
||||
mock_user, mock_log_dir):
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
|
@ -261,7 +281,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-env-vars', 'key=value1',
|
||||
|
@ -279,7 +301,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_SUCCESS_RUN))
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_extra_env_vars_and_extra_vars(self,
|
||||
mock_config,
|
||||
mock_run,
|
||||
mock_user,
|
||||
mock_log_dir):
|
||||
|
@ -297,7 +321,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo',
|
||||
'--extra-vars', 'key=value',
|
||||
|
@ -325,7 +351,9 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=copy.deepcopy(fakes.FAKE_FAILED_RUN))
|
||||
def test_run_command_failed_validation(self, mock_run, mock_user, mock_log_dir):
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_failed_validation(self, mock_config, mock_run,
|
||||
mock_user, mock_log_dir):
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
|
@ -340,7 +368,9 @@ class TestRun(BaseCommand):
|
|||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir}
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo']
|
||||
verifylist = [('validation_name', ['foo'])]
|
||||
|
@ -353,7 +383,8 @@ class TestRun(BaseCommand):
|
|||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=[])
|
||||
def test_run_command_no_validation(self, mock_run, mock_user):
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_command_no_validation(self, mock_config, mock_run, mock_user):
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
|
@ -367,10 +398,84 @@ class TestRun(BaseCommand):
|
|||
'extra_env_vars': {'key2': 'value2'},
|
||||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe'}
|
||||
'ssh_user': 'doe',
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
arglist = ['--validation', 'foo']
|
||||
verifylist = [('validation_name', ['foo'])]
|
||||
|
||||
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
|
||||
self.assertRaises(RuntimeError, self.cmd.take_action, parsed_args)
|
||||
|
||||
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
|
||||
@mock.patch('getpass.getuser',
|
||||
return_value='doe')
|
||||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=fakes.FAKE_SUCCESS_RUN)
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_with_wrong_config(self, mock_config, mock_run, mock_user,
|
||||
mock_log_dir):
|
||||
arglist = ['--validation', 'foo', '--config', 'wrong.cfg']
|
||||
verifylist = [('validation_name', ['foo']),
|
||||
('config', 'wrong.cfg')]
|
||||
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
'group': [],
|
||||
'category': [],
|
||||
'product': [],
|
||||
'extra_vars': None,
|
||||
'validations_dir': '/usr/share/ansible/validation-playbooks',
|
||||
'base_dir': '/usr/share/ansible',
|
||||
'validation_name': ['foo'],
|
||||
'extra_env_vars': None,
|
||||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
|
||||
self.cmd.take_action(parsed_args)
|
||||
mock_run.assert_called_with(**run_called_args)
|
||||
|
||||
@mock.patch('validations_libs.constants.VALIDATIONS_LOG_BASEDIR')
|
||||
@mock.patch('getpass.getuser',
|
||||
return_value='doe')
|
||||
@mock.patch('validations_libs.validation_actions.ValidationActions.'
|
||||
'run_validations',
|
||||
return_value=fakes.FAKE_SUCCESS_RUN)
|
||||
@mock.patch('os.path.exists', return_value=True)
|
||||
@mock.patch('validations_libs.utils.load_config', return_value={})
|
||||
def test_run_with_config(self, mock_config, mock_exists,
|
||||
mock_run, mock_user,
|
||||
mock_log_dir):
|
||||
arglist = ['--validation', 'foo', '--config', 'config.cfg']
|
||||
verifylist = [('validation_name', ['foo']),
|
||||
('config', 'config.cfg')]
|
||||
|
||||
run_called_args = {
|
||||
'inventory': 'localhost',
|
||||
'limit_hosts': None,
|
||||
'group': [],
|
||||
'category': [],
|
||||
'product': [],
|
||||
'extra_vars': None,
|
||||
'validations_dir': '/usr/share/ansible/validation-playbooks',
|
||||
'base_dir': '/usr/share/ansible',
|
||||
'validation_name': ['foo'],
|
||||
'extra_env_vars': None,
|
||||
'python_interpreter': sys.executable,
|
||||
'quiet': True,
|
||||
'ssh_user': 'doe',
|
||||
'log_path': mock_log_dir,
|
||||
'validation_config': {}
|
||||
}
|
||||
|
||||
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
|
||||
self.cmd.take_action(parsed_args)
|
||||
mock_run.assert_called_with(**run_called_args)
|
||||
|
|
|
@ -318,6 +318,20 @@ FAKE_FAILED_RUN = [{'Duration': '0:00:01.761',
|
|||
|
||||
FAKE_VALIDATIONS_PATH = '/usr/share/ansible/validation-playbooks'
|
||||
|
||||
DEFAULT_CONFIG = {'validation_dir': '/usr/share/ansible/validation-playbooks',
|
||||
'ansible_base_dir': '/usr/share/ansible/',
|
||||
'output_log': 'output.log',
|
||||
'history_limit': 15}
|
||||
|
||||
ANSIBLE_RUNNER_CONFIG = {'verbosity': 5,
|
||||
'fact_cache_type': 'jsonfile',
|
||||
'quiet': True, 'rotate_artifacts': 256}
|
||||
|
||||
ANSIBLE_ENVIRONNMENT_CONFIG = {'ANSIBLE_CALLBACK_WHITELIST':
|
||||
'validation_stdout,validation_json,'
|
||||
'profile_tasks',
|
||||
'ANSIBLE_STDOUT_CALLBACK': 'validation_stdout'}
|
||||
|
||||
|
||||
def fake_ansible_runner_run_return(status='successful', rc=0):
|
||||
return status, rc
|
||||
|
|
|
@ -348,3 +348,83 @@ class TestAnsible(TestCase):
|
|||
})
|
||||
|
||||
mock_config.assert_called_once_with(**opt)
|
||||
|
||||
@mock.patch('os.path.exists', return_value=True)
|
||||
@mock.patch('os.makedirs')
|
||||
@mock.patch.object(Runner, 'run',
|
||||
return_value=fakes.fake_ansible_runner_run_return(rc=0))
|
||||
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
|
||||
return_value="/foo/inventory.yaml")
|
||||
@mock.patch('six.moves.builtins.open')
|
||||
@mock.patch('ansible_runner.runner_config.RunnerConfig')
|
||||
def test_run_success_with_config(self, mock_config, mock_open,
|
||||
mock_dump_artifact, mock_run,
|
||||
mock_mkdirs, mock_exists
|
||||
):
|
||||
fake_config = {'default': fakes.DEFAULT_CONFIG,
|
||||
'ansible_environment':
|
||||
fakes.ANSIBLE_ENVIRONNMENT_CONFIG,
|
||||
'ansible_runner': fakes.ANSIBLE_RUNNER_CONFIG
|
||||
}
|
||||
_playbook, _rc, _status = self.run.run(
|
||||
playbook='existing.yaml',
|
||||
inventory='localhost,',
|
||||
workdir='/tmp',
|
||||
connection='local',
|
||||
ansible_artifact_path='/tmp',
|
||||
validation_cfg_file=fake_config
|
||||
)
|
||||
self.assertEqual((_playbook, _rc, _status),
|
||||
('existing.yaml', 0, 'successful'))
|
||||
mock_open.assert_called_with('/tmp/validation.cfg', 'w')
|
||||
|
||||
@mock.patch('os.path.exists', return_value=True)
|
||||
@mock.patch('os.makedirs')
|
||||
@mock.patch.object(Runner, 'run',
|
||||
return_value=fakes.fake_ansible_runner_run_return(rc=0))
|
||||
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
|
||||
return_value="/foo/inventory.yaml")
|
||||
@mock.patch('six.moves.builtins.open')
|
||||
@mock.patch('ansible_runner.runner_config.RunnerConfig')
|
||||
def test_run_success_with_empty_config(self, mock_config, mock_open,
|
||||
mock_dump_artifact, mock_run,
|
||||
mock_mkdirs, mock_exists
|
||||
):
|
||||
fake_config = {}
|
||||
_playbook, _rc, _status = self.run.run(
|
||||
playbook='existing.yaml',
|
||||
inventory='localhost,',
|
||||
workdir='/tmp',
|
||||
connection='local',
|
||||
ansible_cfg_file='/foo.cfg',
|
||||
ansible_artifact_path='/tmp',
|
||||
validation_cfg_file=fake_config
|
||||
)
|
||||
self.assertEqual((_playbook, _rc, _status),
|
||||
('existing.yaml', 0, 'successful'))
|
||||
mock_open.assert_not_called()
|
||||
|
||||
@mock.patch('os.path.exists', return_value=True)
|
||||
@mock.patch('os.makedirs')
|
||||
@mock.patch.object(Runner, 'run',
|
||||
return_value=fakes.fake_ansible_runner_run_return(rc=0))
|
||||
@mock.patch('ansible_runner.utils.dump_artifact', autospec=True,
|
||||
return_value="/foo/inventory.yaml")
|
||||
@mock.patch('six.moves.builtins.open')
|
||||
@mock.patch('ansible_runner.runner_config.RunnerConfig')
|
||||
def test_run_success_with_ansible_config(self, mock_config, mock_open,
|
||||
mock_dump_artifact, mock_run,
|
||||
mock_mkdirs, mock_exists
|
||||
):
|
||||
fake_config = {}
|
||||
_playbook, _rc, _status = self.run.run(
|
||||
playbook='existing.yaml',
|
||||
inventory='localhost,',
|
||||
workdir='/tmp',
|
||||
connection='local',
|
||||
ansible_artifact_path='/tmp',
|
||||
validation_cfg_file=fake_config
|
||||
)
|
||||
self.assertEqual((_playbook, _rc, _status),
|
||||
('existing.yaml', 0, 'successful'))
|
||||
mock_open.assert_called_with('/tmp/ansible.cfg', 'w')
|
||||
|
|
|
@ -389,3 +389,40 @@ class TestUtils(TestCase):
|
|||
"""Test if failure to create artifacts dir raises 'RuntimeError'.
|
||||
"""
|
||||
self.assertRaises(RuntimeError, utils.create_artifacts_dir, "/foo/bar")
|
||||
|
||||
def test_eval_types_str(self):
|
||||
self.assertIsInstance(utils._eval_types('/usr'), str)
|
||||
|
||||
def test_eval_types_bool(self):
|
||||
self.assertIsInstance(utils._eval_types('True'), bool)
|
||||
|
||||
def test_eval_types_int(self):
|
||||
self.assertIsInstance(utils._eval_types('15'), int)
|
||||
|
||||
def test_eval_types_dict(self):
|
||||
self.assertIsInstance(utils._eval_types('{}'), dict)
|
||||
|
||||
@mock.patch('os.path.exists', return_value=True)
|
||||
@mock.patch('configparser.ConfigParser.sections',
|
||||
return_value=['default'])
|
||||
def test_load_config(self, mock_config, mock_exists):
|
||||
results = utils.load_config('foo.cfg')
|
||||
self.assertEqual(results, {})
|
||||
|
||||
def test_default_load_config(self):
|
||||
results = utils.load_config('validation.cfg')
|
||||
self.assertEqual(results['default'], fakes.DEFAULT_CONFIG)
|
||||
|
||||
def test_ansible_runner_load_config(self):
|
||||
results = utils.load_config('validation.cfg')
|
||||
self.assertEqual(results['ansible_runner'],
|
||||
fakes.ANSIBLE_RUNNER_CONFIG)
|
||||
|
||||
def test_ansible_environment_config_load_config(self):
|
||||
results = utils.load_config('validation.cfg')
|
||||
self.assertEqual(
|
||||
results['ansible_environment']['ANSIBLE_CALLBACK_WHITELIST'],
|
||||
fakes.ANSIBLE_ENVIRONNMENT_CONFIG['ANSIBLE_CALLBACK_WHITELIST'])
|
||||
self.assertEqual(
|
||||
results['ansible_environment']['ANSIBLE_STDOUT_CALLBACK'],
|
||||
fakes.ANSIBLE_ENVIRONNMENT_CONFIG['ANSIBLE_STDOUT_CALLBACK'])
|
||||
|
|
|
@ -104,13 +104,14 @@ class TestValidationActions(TestCase):
|
|||
'extra_vars': None,
|
||||
'limit_hosts': '!cloud1',
|
||||
'extra_env_variables': None,
|
||||
'ansible_cfg': None,
|
||||
'ansible_cfg_file': None,
|
||||
'gathering_policy': 'explicit',
|
||||
'ansible_artifact_path': '/var/log/validations/artifacts/123_fake.yaml_time',
|
||||
'log_path': '/var/log/validations',
|
||||
'run_async': False,
|
||||
'python_interpreter': None,
|
||||
'ssh_user': None
|
||||
'ssh_user': None,
|
||||
'validation_cfg_file': None
|
||||
}
|
||||
|
||||
playbook = ['fake.yaml']
|
||||
|
@ -164,13 +165,14 @@ class TestValidationActions(TestCase):
|
|||
'extra_vars': None,
|
||||
'limit_hosts': '!cloud1,cloud,!cloud2',
|
||||
'extra_env_variables': None,
|
||||
'ansible_cfg': None,
|
||||
'ansible_cfg_file': None,
|
||||
'gathering_policy': 'explicit',
|
||||
'ansible_artifact_path': '/var/log/validations/artifacts/123_fake.yaml_time',
|
||||
'log_path': '/var/log/validations',
|
||||
'run_async': False,
|
||||
'python_interpreter': None,
|
||||
'ssh_user': None
|
||||
'ssh_user': None,
|
||||
'validation_cfg_file': None
|
||||
}
|
||||
|
||||
playbook = ['fake.yaml']
|
||||
|
|
|
@ -12,11 +12,15 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import ast
|
||||
import configparser
|
||||
import datetime
|
||||
import glob
|
||||
import logging
|
||||
import os
|
||||
import site
|
||||
import six
|
||||
import sys
|
||||
import uuid
|
||||
|
||||
from os.path import join
|
||||
|
@ -488,3 +492,132 @@ def get_validations_parameters(validations_data,
|
|||
}
|
||||
|
||||
return params
|
||||
|
||||
|
||||
def convert_data(data=''):
|
||||
"""Transform a string containing comma-separated validation or group name
|
||||
into a list. If `data` is already a list, it will simply return `data`.
|
||||
|
||||
:param data: A string or a list
|
||||
:type data: `string` or `list`
|
||||
:return: A list of data
|
||||
:rtype: `list`
|
||||
:raises: a `TypeError` exception if `data` is not a list or a string
|
||||
|
||||
:Example:
|
||||
|
||||
>>> data = "check-cpu,check-ram,check-disk-space"
|
||||
>>> convert_data(data)
|
||||
['check-cpu', 'check-ram', 'check-disk-space']
|
||||
...
|
||||
>>> data = "check-cpu , check-ram , check-disk-space"
|
||||
>>> convert_data(data)
|
||||
['check-cpu', 'check-ram', 'check-disk-space']
|
||||
...
|
||||
>>> data = "check-cpu,"
|
||||
>>> convert_data(data)
|
||||
['check-cpu']
|
||||
...
|
||||
>>> data = ['check-cpu', 'check-ram', 'check-disk-space']
|
||||
>>> convert_data(data)
|
||||
['check-cpu', 'check-ram', 'check-disk-space']
|
||||
"""
|
||||
if isinstance(data, six.string_types):
|
||||
return [
|
||||
conv_data.strip() for conv_data in data.split(',') if conv_data
|
||||
]
|
||||
elif not isinstance(data, list):
|
||||
raise TypeError("The input data should be either a List or a String")
|
||||
else:
|
||||
return data
|
||||
|
||||
|
||||
def _eval_types(value):
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
return ast.literal_eval(value)
|
||||
except (SyntaxError, NameError, ValueError):
|
||||
pass
|
||||
try:
|
||||
return str(value)
|
||||
except ValueError:
|
||||
msg = ("Can not eval or type not supported for value: {},").format(
|
||||
value)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def load_config(config):
|
||||
"""Load Config File from CLI"""
|
||||
if not os.path.exists(config):
|
||||
msg = ("Config file {} could not be found, ignoring...").format(config)
|
||||
LOG.warning(msg)
|
||||
return {}
|
||||
else:
|
||||
msg = "Validation config file found: {}".format(config)
|
||||
LOG.info(msg)
|
||||
parser = configparser.ConfigParser()
|
||||
parser.optionxform = str
|
||||
parser.read(config)
|
||||
data = {}
|
||||
try:
|
||||
for section in parser.sections():
|
||||
for keys, values in parser.items(section):
|
||||
if section not in data:
|
||||
# Init section in dictionary
|
||||
data[section] = {}
|
||||
if section == 'ansible_environment':
|
||||
# for Ansible environment variables we dont want to cast
|
||||
# types, each values should a type String.
|
||||
data[section][keys] = values
|
||||
elif section == 'ansible_runner' and \
|
||||
keys not in constants.ANSIBLE_RUNNER_CONFIG_PARAMETERS:
|
||||
# for Ansible runner parameters, we select only a set
|
||||
# of parameters which will be passed as **kwargs in the
|
||||
# runner, so we have to ignore all the others.
|
||||
msg = ("Incompatible key found for ansible_runner section {}, "
|
||||
"ignoring {} ...").format(section, keys)
|
||||
LOG.warning(msg)
|
||||
continue
|
||||
else:
|
||||
data[section][keys] = _eval_types(values)
|
||||
except configparser.NoSectionError:
|
||||
msg = ("Wrong format for the config file {}, "
|
||||
"section {} can not be found, ignoring...").format(config,
|
||||
section)
|
||||
LOG.warning(msg)
|
||||
return {}
|
||||
return data
|
||||
|
||||
|
||||
def find_config_file(config_file_name='validation.cfg'):
|
||||
""" Find the config file for Validation in the following order:
|
||||
* environment validation VALIDATION_CONFIG
|
||||
* current user directory
|
||||
* user home directory
|
||||
* Python prefix path which has been used for the installation
|
||||
* /etc/validation.cfg
|
||||
"""
|
||||
def _check_path(path):
|
||||
if os.path.exists(path):
|
||||
if os.path.isfile(path) and os.access(path,
|
||||
os.R_OK):
|
||||
return path
|
||||
# Build a list of potential paths with the correct order:
|
||||
paths = []
|
||||
env_config = os.getenv("VALIDATION_CONFIG", "")
|
||||
if _check_path(env_config):
|
||||
return env_config
|
||||
paths.append(os.getcwd())
|
||||
paths.append(os.path.expanduser('~'))
|
||||
for prefix in site.PREFIXES:
|
||||
paths.append(os.path.join(prefix, 'etc'))
|
||||
paths.append('/etc')
|
||||
|
||||
for path in paths:
|
||||
current_path = os.path.join(path, config_file_name)
|
||||
if _check_path(current_path):
|
||||
return current_path
|
||||
return current_path
|
||||
|
|
|
@ -267,7 +267,8 @@ class ValidationActions(object):
|
|||
log_path=constants.VALIDATIONS_LOG_BASEDIR,
|
||||
python_interpreter=None, skip_list=None,
|
||||
callback_whitelist=None,
|
||||
output_callback='validation_stdout', ssh_user=None):
|
||||
output_callback='validation_stdout', ssh_user=None,
|
||||
validation_config=None):
|
||||
"""Run one or multiple validations by name(s), by group(s) or by
|
||||
product(s)
|
||||
|
||||
|
@ -336,6 +337,9 @@ class ValidationActions(object):
|
|||
:rtype: ``list``
|
||||
:param ssh_user: Ssh user for Ansible remote connection
|
||||
:type ssh_user: ``string``
|
||||
:param config: A dictionary of configuration for Validation loaded from
|
||||
an validation.cfg file.
|
||||
:type config: ``dict``
|
||||
|
||||
:Example:
|
||||
|
||||
|
@ -424,13 +428,14 @@ class ValidationActions(object):
|
|||
extra_vars=extra_vars,
|
||||
limit_hosts=_hosts,
|
||||
extra_env_variables=extra_env_vars,
|
||||
ansible_cfg=ansible_cfg,
|
||||
ansible_cfg_file=ansible_cfg,
|
||||
gathering_policy='explicit',
|
||||
ansible_artifact_path=artifacts_dir,
|
||||
log_path=log_path,
|
||||
run_async=run_async,
|
||||
python_interpreter=python_interpreter,
|
||||
ssh_user=ssh_user)
|
||||
ssh_user=ssh_user,
|
||||
validation_cfg_file=validation_config)
|
||||
else:
|
||||
_playbook, _rc, _status = run_ansible.run(
|
||||
workdir=artifacts_dir,
|
||||
|
@ -445,13 +450,14 @@ class ValidationActions(object):
|
|||
extra_vars=extra_vars,
|
||||
limit_hosts=_hosts,
|
||||
extra_env_variables=extra_env_vars,
|
||||
ansible_cfg=ansible_cfg,
|
||||
ansible_cfg_file=ansible_cfg,
|
||||
gathering_policy='explicit',
|
||||
ansible_artifact_path=artifacts_dir,
|
||||
log_path=log_path,
|
||||
run_async=run_async,
|
||||
python_interpreter=python_interpreter,
|
||||
ssh_user=ssh_user)
|
||||
ssh_user=ssh_user,
|
||||
validation_cfg_file=validation_config)
|
||||
results.append({'playbook': _playbook,
|
||||
'rc_code': _rc,
|
||||
'status': _status,
|
||||
|
|
Loading…
Reference in New Issue