Move tests to pytest

Pytest makes each scenario an individually selectable test.
Being able to run each scenario separately is very useful during development.
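
For example, a single scenario can now be selected by its pytest node ID
(the test path and scenario name here are illustrative):

    pytest "tests/builders/test_builders.py::test_yaml_snippet[some-scenario]"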

Change-Id: I4b1c990a1fd839ce327cd7faa27159a9b9632fed
Vsevolod Fedorov 2022-06-15 11:12:23 +03:00
parent 248a2bddb7
commit f4d64f9f66
49 changed files with 2042 additions and 1953 deletions


@@ -90,6 +90,10 @@ execute the command::
tox -e py38
Unit tests can be run in parallel using the pytest-parallel plugin::
tox -e py38 -- --workers=auto
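A single test or scenario can be selected with pytest's ``-k`` keyword
filter (``scenario-name`` here is illustrative)::
tox -e py38 -- -k scenario-name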
* Note: View ``tox.ini`` to run tests on other versions of Python,
generating the documentation and additionally for any special notes
on running the tests to validate documentation external URLs from behind
proxies.


@@ -3,13 +3,11 @@
# process, which may cause wedges in the gate later.
coverage>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
sphinx>=4.4.0
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
stestr>=2.0.0,!=3.0.0 # Apache-2.0/BSD
tox>=2.9.1 # MIT
mock>=2.0; python_version < '3.0' # BSD
sphinxcontrib-programoutput
pluggy<1.0.0 # the last version that supports Python 2
pytest==7.1.2
pytest-mock==3.7.0
pytest-parallel==0.1.1


@@ -1,409 +0,0 @@
#!/usr/bin/env python
#
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import doctest
import configparser
import io
import json
import logging
import os
import pkg_resources
import re
import xml.etree.ElementTree as XML
import fixtures
import six
from six.moves import StringIO
import testtools
from testtools.content import text_content
import testscenarios
from yaml import safe_dump
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.errors import InvalidAttributeError
import jenkins_jobs.local_yaml as yaml
from jenkins_jobs.alphanum import AlphanumSort
from jenkins_jobs.modules import project_externaljob
from jenkins_jobs.modules import project_flow
from jenkins_jobs.modules import project_githuborg
from jenkins_jobs.modules import project_matrix
from jenkins_jobs.modules import project_maven
from jenkins_jobs.modules import project_multibranch
from jenkins_jobs.modules import project_multijob
from jenkins_jobs.modules import view_all
from jenkins_jobs.modules import view_delivery_pipeline
from jenkins_jobs.modules import view_list
from jenkins_jobs.modules import view_nested
from jenkins_jobs.modules import view_pipeline
from jenkins_jobs.modules import view_sectioned
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.registry import ModuleRegistry
from jenkins_jobs.xml_config import XmlJob
from jenkins_jobs.xml_config import XmlJobGenerator
# This dance deals with the fact that we want unittest.mock if
# we're on Python 3.4 and later, and non-stdlib mock otherwise.
try:
from unittest import mock # noqa
except ImportError:
import mock # noqa
def get_scenarios(
fixtures_path,
in_ext="yaml",
out_ext="xml",
plugins_info_ext="plugins_info.yaml",
filter_func=None,
):
"""Returns a list of scenarios, each scenario being described
by two parameters (yaml and xml filenames by default).
- content of the fixture output file (aka expected)
"""
scenarios = []
files = {}
for dirpath, _, fs in os.walk(fixtures_path):
for fn in fs:
if fn in files:
files[fn].append(os.path.join(dirpath, fn))
else:
files[fn] = [os.path.join(dirpath, fn)]
input_files = [
files[f][0] for f in files if re.match(r".*\.{0}$".format(in_ext), f)
]
for input_filename in input_files:
if input_filename.endswith(plugins_info_ext):
continue
if callable(filter_func) and filter_func(input_filename):
continue
output_candidate = re.sub(
r"\.{0}$".format(in_ext), ".{0}".format(out_ext), input_filename
)
# assume empty file if no output candidate found
if os.path.basename(output_candidate) in files:
out_filenames = files[os.path.basename(output_candidate)]
else:
out_filenames = None
plugins_info_candidate = re.sub(
r"\.{0}$".format(in_ext), ".{0}".format(plugins_info_ext), input_filename
)
if os.path.basename(plugins_info_candidate) not in files:
plugins_info_candidate = None
conf_candidate = re.sub(r"\.yaml$|\.json$", ".conf", input_filename)
conf_filename = files.get(os.path.basename(conf_candidate), None)
if conf_filename:
conf_filename = conf_filename[0]
else:
# for testing purposes we want to avoid using user config files
conf_filename = os.devnull
scenarios.append(
(
input_filename,
{
"in_filename": input_filename,
"out_filenames": out_filenames,
"conf_filename": conf_filename,
"plugins_info_filename": plugins_info_candidate,
},
)
)
return scenarios
class BaseTestCase(testtools.TestCase):
# TestCase settings:
maxDiff = None # always dump text difference
longMessage = True # keep normal error message when providing our
def setUp(self):
super(BaseTestCase, self).setUp()
self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
def _read_utf8_content(self):
# if None assume empty file
if not self.out_filenames:
return ""
# Read XML content, assuming it is unicode encoded
xml_content = ""
for f in sorted(self.out_filenames):
with io.open(f, "r", encoding="utf-8") as xml_file:
xml_content += "%s" % xml_file.read()
return xml_content
def _read_yaml_content(self, filename):
with io.open(filename, "r", encoding="utf-8") as yaml_file:
yaml_content = yaml.load(yaml_file)
return yaml_content
def _get_config(self):
jjb_config = JJBConfig(self.conf_filename)
jjb_config.validate()
return jjb_config
class BaseScenariosTestCase(testscenarios.TestWithScenarios, BaseTestCase):
scenarios = []
fixtures_path = None
@mock.patch("pkg_resources.iter_entry_points")
def test_yaml_snippet(self, mock):
if not self.in_filename:
return
jjb_config = self._get_config()
expected_xml = self._read_utf8_content()
yaml_content = self._read_yaml_content(self.in_filename)
plugins_info = None
if self.plugins_info_filename:
plugins_info = self._read_yaml_content(self.plugins_info_filename)
self.addDetail(
"plugins-info-filename", text_content(self.plugins_info_filename)
)
self.addDetail("plugins-info", text_content(str(plugins_info)))
parser = YamlParser(jjb_config)
e = pkg_resources.EntryPoint.parse
d = pkg_resources.Distribution()
config = configparser.ConfigParser()
config.read(os.path.dirname(__file__) + "/../setup.cfg")
groups = {}
for key in config["entry_points"]:
groups[key] = list()
for line in config["entry_points"][key].split("\n"):
if "" == line.strip():
continue
groups[key].append(e(line, dist=d))
def mock_iter_entry_points(group, name=None):
return (
entry for entry in groups[group] if name is None or name == entry.name
)
mock.side_effect = mock_iter_entry_points
registry = ModuleRegistry(jjb_config, plugins_info)
registry.set_parser_data(parser.data)
pub = self.klass(registry)
project = None
if "project-type" in yaml_content:
if yaml_content["project-type"] == "maven":
project = project_maven.Maven(registry)
elif yaml_content["project-type"] == "matrix":
project = project_matrix.Matrix(registry)
elif yaml_content["project-type"] == "flow":
project = project_flow.Flow(registry)
elif yaml_content["project-type"] == "githuborg":
project = project_githuborg.GithubOrganization(registry)
elif yaml_content["project-type"] == "multijob":
project = project_multijob.MultiJob(registry)
elif yaml_content["project-type"] == "multibranch":
project = project_multibranch.WorkflowMultiBranch(registry)
elif yaml_content["project-type"] == "multibranch-defaults":
project = project_multibranch.WorkflowMultiBranchDefaults(
registry
) # noqa
elif yaml_content["project-type"] == "externaljob":
project = project_externaljob.ExternalJob(registry)
if "view-type" in yaml_content:
if yaml_content["view-type"] == "all":
project = view_all.All(registry)
elif yaml_content["view-type"] == "delivery_pipeline":
project = view_delivery_pipeline.DeliveryPipeline(registry)
elif yaml_content["view-type"] == "list":
project = view_list.List(registry)
elif yaml_content["view-type"] == "nested":
project = view_nested.Nested(registry)
elif yaml_content["view-type"] == "pipeline":
project = view_pipeline.Pipeline(registry)
elif yaml_content["view-type"] == "sectioned":
project = view_sectioned.Sectioned(registry)
else:
raise InvalidAttributeError("view-type", yaml_content["view-type"])
if project:
xml_project = project.root_xml(yaml_content)
else:
xml_project = XML.Element("project")
# Generate the XML tree directly with modules/general
pub.gen_xml(xml_project, yaml_content)
# check output file is under correct path
if "name" in yaml_content:
prefix = os.path.dirname(self.in_filename)
# split using '/' since fullname uses URL path separator
expected_folders = [
os.path.normpath(
os.path.join(
prefix,
"/".join(parser._getfullname(yaml_content).split("/")[:-1]),
)
)
]
actual_folders = [os.path.dirname(f) for f in self.out_filenames]
self.assertEquals(
expected_folders,
actual_folders,
"Output file under wrong path, was '%s', should be '%s'"
% (
self.out_filenames[0],
os.path.join(
expected_folders[0], os.path.basename(self.out_filenames[0])
),
),
)
# Prettify generated XML
pretty_xml = XmlJob(xml_project, "fixturejob").output().decode("utf-8")
self.assertThat(
pretty_xml,
testtools.matchers.DocTestMatches(
expected_xml, doctest.ELLIPSIS | doctest.REPORT_NDIFF
),
)
class SingleJobTestCase(BaseScenariosTestCase):
def test_yaml_snippet(self):
config = self._get_config()
expected_xml = (
self._read_utf8_content()
.strip()
.replace("<BLANKLINE>", "")
.replace("\n\n", "\n")
)
parser = YamlParser(config)
parser.parse(self.in_filename)
plugins_info = None
if self.plugins_info_filename:
plugins_info = self._read_yaml_content(self.plugins_info_filename)
self.addDetail(
"plugins-info-filename", text_content(self.plugins_info_filename)
)
self.addDetail("plugins-info", text_content(str(plugins_info)))
registry = ModuleRegistry(config, plugins_info)
registry.set_parser_data(parser.data)
job_data_list, view_data_list = parser.expandYaml(registry)
# Generate the XML tree
xml_generator = XmlJobGenerator(registry)
xml_jobs = xml_generator.generateXML(job_data_list)
xml_jobs.sort(key=AlphanumSort)
# check reference files are under correct path for folders
prefix = os.path.dirname(self.in_filename)
# split using '/' since fullname uses URL path separator
expected_folders = list(
set(
[
os.path.normpath(
os.path.join(prefix, "/".join(job_data["name"].split("/")[:-1]))
)
for job_data in job_data_list
]
)
)
actual_folders = [os.path.dirname(f) for f in self.out_filenames]
six.assertCountEqual(
self,
expected_folders,
actual_folders,
"Output file under wrong path, was '%s', should be '%s'"
% (
self.out_filenames[0],
os.path.join(
expected_folders[0], os.path.basename(self.out_filenames[0])
),
),
)
# Prettify generated XML
pretty_xml = (
"\n".join(job.output().decode("utf-8") for job in xml_jobs)
.strip()
.replace("\n\n", "\n")
)
self.assertThat(
pretty_xml,
testtools.matchers.DocTestMatches(
expected_xml, doctest.ELLIPSIS | doctest.REPORT_NDIFF
),
)
class JsonTestCase(BaseScenariosTestCase):
def test_yaml_snippet(self):
expected_json = self._read_utf8_content()
yaml_content = self._read_yaml_content(self.in_filename)
pretty_json = json.dumps(yaml_content, indent=4, separators=(",", ": "))
self.assertThat(
pretty_json,
testtools.matchers.DocTestMatches(
expected_json, doctest.ELLIPSIS | doctest.REPORT_NDIFF
),
)
class YamlTestCase(BaseScenariosTestCase):
def test_yaml_snippet(self):
expected_yaml = self._read_utf8_content()
yaml_content = self._read_yaml_content(self.in_filename)
# using json forces expansion of yaml anchors and aliases in the
# outputted yaml, otherwise it would simply appear exactly as
# entered which doesn't show that the net effect of the yaml
data = StringIO(json.dumps(yaml_content))
pretty_yaml = safe_dump(json.load(data), default_flow_style=False)
self.assertThat(
pretty_yaml,
testtools.matchers.DocTestMatches(
expected_yaml, doctest.ELLIPSIS | doctest.REPORT_NDIFF
),
)


@@ -1,3 +1,5 @@
#!/usr/bin/env python
#
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
@@ -15,13 +17,25 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import builders
from tests import base
class TestCaseModuleBuilders(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
klass = builders.Builders
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(builders.Builders)
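
The fixture above is pytest's parametrized-fixture pattern: each param becomes
a separate, individually selectable test item whose ID is taken from the
scenario name. A minimal self-contained sketch of the same pattern (all names
here are illustrative, not part of this commit):

from collections import namedtuple
from operator import attrgetter

import pytest

Scenario = namedtuple("Scenario", "name value")

# Each param produces its own test item; ids=attrgetter("name") yields
# readable node IDs such as test_uses_scenario[first].
@pytest.fixture(
    params=[Scenario("first", 1), Scenario("second", 2)],
    ids=attrgetter("name"),
)
def scenario(request):
    return request.param

def test_uses_scenario(scenario):
    # Runs once per scenario; a single one can be selected with: pytest -k first
    assert scenario.value > 0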


@@ -13,33 +13,40 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import os.path
import pytest
import jenkins_jobs
from tests import base
from tests.base import mock
class TestCaseJobCache(base.BaseTestCase):
@mock.patch("jenkins_jobs.builder.JobCache.get_cache_dir", lambda x: "/bad/file")
def test_save_on_exit(self):
"""
Test that the cache is saved on normal object deletion
"""
# Override fixture - do not use this mock.
@pytest.fixture(autouse=True)
def job_cache_mocked(mocker):
pass
with mock.patch("jenkins_jobs.builder.JobCache.save") as save_mock:
with mock.patch("os.path.isfile", return_value=False):
with mock.patch("jenkins_jobs.builder.JobCache._lock"):
jenkins_jobs.builder.JobCache("dummy")
save_mock.assert_called_with()
@mock.patch("jenkins_jobs.builder.JobCache.get_cache_dir", lambda x: "/bad/file")
def test_cache_file(self):
"""
Test providing a cachefile.
"""
test_file = os.path.abspath(__file__)
with mock.patch("os.path.join", return_value=test_file):
with mock.patch("yaml.safe_load"):
with mock.patch("jenkins_jobs.builder.JobCache._lock"):
jenkins_jobs.builder.JobCache("dummy").data = None
def test_save_on_exit(mocker):
"""
Test that the cache is saved on normal object deletion
"""
mocker.patch("jenkins_jobs.builder.JobCache.get_cache_dir", lambda x: "/bad/file")
save_mock = mocker.patch("jenkins_jobs.builder.JobCache.save")
mocker.patch("os.path.isfile", return_value=False)
mocker.patch("jenkins_jobs.builder.JobCache._lock")
jenkins_jobs.builder.JobCache("dummy")
save_mock.assert_called_with()
def test_cache_file(mocker):
"""
Test providing a cachefile.
"""
mocker.patch("jenkins_jobs.builder.JobCache.get_cache_dir", lambda x: "/bad/file")
test_file = os.path.abspath(__file__)
mocker.patch("os.path.join", return_value=test_file)
mocker.patch("yaml.safe_load")
mocker.patch("jenkins_jobs.builder.JobCache._lock")
jenkins_jobs.builder.JobCache("dummy").data = None


tests/cmd/conftest.py (new file)

@@ -0,0 +1,24 @@
from pathlib import Path
import pytest
from jenkins_jobs.cli import entry
@pytest.fixture
def fixtures_dir():
return Path(__file__).parent / "fixtures"
@pytest.fixture
def default_config_file(fixtures_dir):
return str(fixtures_dir / "empty_builder.ini")
@pytest.fixture
def execute_jenkins_jobs():
def execute(args):
jenkins_jobs = entry.JenkinsJobs(args)
jenkins_jobs.execute()
return execute
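
pytest injects fixtures defined in a conftest.py by name into any test beneath
the same directory; a hypothetical test consuming the fixtures above:

def test_example(default_config_file, execute_jenkins_jobs):
    # Both arguments are resolved from tests/cmd/conftest.py at collection time.
    execute_jenkins_jobs(["--conf", default_config_file, "test"])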


@@ -0,0 +1,3 @@
[jenkins]
url=http://test-jenkins.with.non.default.url:8080/
query_plugins_info=True


@@ -18,55 +18,57 @@
# of actions by the JJB library, usually through interaction with the
# python-jenkins library.
import os
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
from unittest import mock
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class DeleteTests(CmdTestsBase):
@mock.patch("jenkins_jobs.cli.subcommand.update." "JenkinsManager.delete_jobs")
@mock.patch("jenkins_jobs.cli.subcommand.update." "JenkinsManager.delete_views")
def test_delete_single_job(self, delete_job_mock, delete_view_mock):
"""
Test handling the deletion of a single Jenkins job.
"""
def test_delete_single_job(mocker, default_config_file, execute_jenkins_jobs):
"""
Test handling the deletion of a single Jenkins job.
"""
args = ["--conf", self.default_config_file, "delete", "test_job"]
self.execute_jenkins_jobs_with_args(args)
mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager.delete_jobs")
mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager.delete_views")
@mock.patch("jenkins_jobs.cli.subcommand.update." "JenkinsManager.delete_jobs")
@mock.patch("jenkins_jobs.cli.subcommand.update." "JenkinsManager.delete_views")
def test_delete_multiple_jobs(self, delete_job_mock, delete_view_mock):
"""
Test handling the deletion of multiple Jenkins jobs.
"""
args = ["--conf", default_config_file, "delete", "test_job"]
execute_jenkins_jobs(args)
args = ["--conf", self.default_config_file, "delete", "test_job1", "test_job2"]
self.execute_jenkins_jobs_with_args(args)
@mock.patch("jenkins_jobs.builder.JenkinsManager.delete_job")
def test_delete_using_glob_params(self, delete_job_mock):
"""
Test handling the deletion of multiple Jenkins jobs using the glob
parameters feature.
"""
def test_delete_multiple_jobs(mocker, default_config_file, execute_jenkins_jobs):
"""
Test handling the deletion of multiple Jenkins jobs.
"""
args = [
"--conf",
self.default_config_file,
"delete",
"--path",
os.path.join(self.fixtures_path, "cmd-002.yaml"),
"*bar*",
]
self.execute_jenkins_jobs_with_args(args)
calls = [mock.call("bar001"), mock.call("bar002")]
delete_job_mock.assert_has_calls(calls, any_order=True)
self.assertEqual(
delete_job_mock.call_count,
len(calls),
"Jenkins.delete_job() was called '%s' times when "
"expected '%s'" % (delete_job_mock.call_count, len(calls)),
)
mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager.delete_jobs")
mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager.delete_views")
args = ["--conf", default_config_file, "delete", "test_job1", "test_job2"]
execute_jenkins_jobs(args)
def test_delete_using_glob_params(
mocker, fixtures_dir, default_config_file, execute_jenkins_jobs
):
"""
Test handling the deletion of multiple Jenkins jobs using the glob
parameters feature.
"""
delete_job_mock = mocker.patch("jenkins_jobs.builder.JenkinsManager.delete_job")
args = [
"--conf",
default_config_file,
"delete",
"--path",
str(fixtures_dir / "cmd-002.yaml"),
"*bar*",
]
execute_jenkins_jobs(args)
calls = [mock.call("bar001"), mock.call("bar002")]
delete_job_mock.assert_has_calls(calls, any_order=True)
assert delete_job_mock.call_count == len(
calls
), "Jenkins.delete_job() was called '%s' times when " "expected '%s'" % (
delete_job_mock.call_count,
len(calls),
)


@@ -17,31 +17,30 @@
# of actions by the JJB library, usually through interaction with the
# python-jenkins library.
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
import pytest
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class DeleteAllTests(CmdTestsBase):
@mock.patch("jenkins_jobs.cli.subcommand.update." "JenkinsManager.delete_all_jobs")
def test_delete_all_accept(self, delete_job_mock):
"""
Test accepting the deletion of all Jenkins jobs.
"""
def test_delete_all_accept(mocker, default_config_file, execute_jenkins_jobs):
"""
Test accepting the deletion of all Jenkins jobs.
"""
args = ["--conf", self.default_config_file, "delete-all"]
with mock.patch(
"jenkins_jobs.builder.JenkinsManager.get_views", return_value=[None]
):
with mock.patch("jenkins_jobs.utils.input", return_value="y"):
self.execute_jenkins_jobs_with_args(args)
mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager.delete_all_jobs")
mocker.patch("jenkins_jobs.builder.JenkinsManager.get_views", return_value=[None])
mocker.patch("jenkins_jobs.utils.input", return_value="y")
@mock.patch("jenkins_jobs.cli.subcommand.update." "JenkinsManager.delete_all_jobs")
def test_delete_all_abort(self, delete_job_mock):
"""
Test aborting the deletion of all Jenkins jobs.
"""
args = ["--conf", default_config_file, "delete-all"]
execute_jenkins_jobs(args)
args = ["--conf", self.default_config_file, "delete-all"]
with mock.patch("jenkins_jobs.utils.input", return_value="n"):
self.assertRaises(SystemExit, self.execute_jenkins_jobs_with_args, args)
def test_delete_all_abort(mocker, default_config_file, execute_jenkins_jobs):
"""
Test aborting the deletion of all Jenkins jobs.
"""
mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager.delete_all_jobs")
mocker.patch("jenkins_jobs.utils.input", return_value="n")
args = ["--conf", default_config_file, "delete-all"]
with pytest.raises(SystemExit):
execute_jenkins_jobs(args)


@@ -12,87 +12,83 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os
from testscenarios.testcase import TestWithScenarios
from collections import namedtuple
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
import pytest
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class ListFromJenkinsTests(TestWithScenarios, CmdTestsBase):
JobsScenario = namedtuple("JobsScenario", "name jobs globs found")
scenarios = [
("single", dict(jobs=["job1"], globs=[], found=["job1"])),
("multiple", dict(jobs=["job1", "job2"], globs=[], found=["job1", "job2"])),
(
"multiple_with_folder",
dict(
jobs=["folder1", "folder1/job1", "folder1/job2"],
globs=[],
found=["folder1", "folder1/job1", "folder1/job2"],
),
),
(
"multiple_with_glob",
dict(
jobs=["job1", "job2", "job3"],
globs=["job[1-2]"],
found=["job1", "job2"],
),
),
(
"multiple_with_multi_glob",
dict(
jobs=["job1", "job2", "job3", "job4"],
globs=["job1", "job[24]"],
found=["job1", "job2", "job4"],
),
),
]
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_jobs")
def test_list(self, get_jobs_mock):
def _get_jobs():
return [{"fullname": fullname} for fullname in self.jobs]
get_jobs_mock.side_effect = _get_jobs
console_out = io.BytesIO()
args = ["--conf", self.default_config_file, "list"] + self.globs
with mock.patch("sys.stdout", console_out):
self.execute_jenkins_jobs_with_args(args)
self.assertEqual(
console_out.getvalue().decode("utf-8").rstrip(), ("\n".join(self.found))
)
jobs_scenarios = [
JobsScenario("single", jobs=["job1"], globs=[], found=["job1"]),
JobsScenario("multiple", jobs=["job1", "job2"], globs=[], found=["job1", "job2"]),
JobsScenario(
"multiple_with_folder",
jobs=["folder1", "folder1/job1", "folder1/job2"],
globs=[],
found=["folder1", "folder1/job1", "folder1/job2"],
),
JobsScenario(
"multiple_with_glob",
jobs=["job1", "job2", "job3"],
globs=["job[1-2]"],
found=["job1", "job2"],
),
JobsScenario(
"multiple_with_multi_glob",
jobs=["job1", "job2", "job3", "job4"],
globs=["job1", "job[24]"],
found=["job1", "job2", "job4"],
),
]
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class ListFromYamlTests(TestWithScenarios, CmdTestsBase):
@pytest.mark.parametrize(
"scenario",
[pytest.param(s, id=s.name) for s in jobs_scenarios],
)
def test_from_jenkins_tests(
capsys, mocker, default_config_file, execute_jenkins_jobs, scenario
):
def get_jobs():
return [{"fullname": fullname} for fullname in scenario.jobs]
scenarios = [
("all", dict(globs=[], found=["bam001", "bar001", "bar002", "baz001"])),
(
"some",
dict(
globs=["*am*", "*002", "bar001"], found=["bam001", "bar001", "bar002"]
),
),
]
mocker.patch("jenkins_jobs.builder.JenkinsManager.get_jobs", side_effect=get_jobs)
def test_list(self):
path = os.path.join(self.fixtures_path, "cmd-002.yaml")
args = ["--conf", default_config_file, "list"] + scenario.globs
execute_jenkins_jobs(args)
console_out = io.BytesIO()
with mock.patch("sys.stdout", console_out):
self.execute_jenkins_jobs_with_args(
["--conf", self.default_config_file, "list", "-p", path] + self.globs
)
expected_out = "\n".join(scenario.found)
captured = capsys.readouterr()
assert captured.out.rstrip() == expected_out
self.assertEqual(
console_out.getvalue().decode("utf-8").rstrip(), ("\n".join(self.found))
)
YamlScenario = namedtuple("YamlScenario", "name globs found")
yaml_scenarios = [
YamlScenario("all", globs=[], found=["bam001", "bar001", "bar002", "baz001"]),
YamlScenario(
"some",
globs=["*am*", "*002", "bar001"],
found=["bam001", "bar001", "bar002"],
),
]
@pytest.mark.parametrize(
"scenario",
[pytest.param(s, id=s.name) for s in yaml_scenarios],
)
def test_from_yaml_tests(
capsys, fixtures_dir, default_config_file, execute_jenkins_jobs, scenario
):
path = fixtures_dir / "cmd-002.yaml"
execute_jenkins_jobs(
["--conf", default_config_file, "list", "-p", str(path)] + scenario.globs
)
expected_out = "\n".join(scenario.found)
captured = capsys.readouterr()
assert captured.out.rstrip() == expected_out


@@ -18,295 +18,294 @@
# of actions by the JJB library, usually through interaction with the
# python-jenkins library.
import difflib
import filecmp
import io
import difflib
import os
import shutil
import tempfile
import yaml
from unittest import mock
import jenkins
from six.moves import StringIO
import testtools
import pytest
from testtools.assertions import assert_that
from jenkins_jobs.cli import entry
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class TestTests(CmdTestsBase):
def test_non_existing_job(self):
"""
Run test mode and pass a non-existing job name
(probably better to fail here)
"""
args = [
"--conf",
self.default_config_file,
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
"invalid",
]
self.execute_jenkins_jobs_with_args(args)
def test_valid_job(self):
"""
Run test mode and pass a valid job name
"""
args = [
"--conf",
self.default_config_file,
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
"foo-job",
]
console_out = io.BytesIO()
with mock.patch("sys.stdout", console_out):
self.execute_jenkins_jobs_with_args(args)
def test_console_output(self):
"""
Run test mode and verify that resulting XML gets sent to the console.
"""
console_out = io.BytesIO()
with mock.patch("sys.stdout", console_out):
args = [
"--conf",
self.default_config_file,
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
]
self.execute_jenkins_jobs_with_args(args)
xml_content = io.open(
os.path.join(self.fixtures_path, "cmd-001.xml"), "r", encoding="utf-8"
).read()
self.assertEqual(console_out.getvalue().decode("utf-8"), xml_content)
def test_output_dir(self):
"""
Run test mode with output to directory and verify that output files are
generated.
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
args = ["test", os.path.join(self.fixtures_path, "cmd-001.yaml"), "-o", tmpdir]
self.execute_jenkins_jobs_with_args(args)
self.expectThat(
os.path.join(tmpdir, "foo-job"), testtools.matchers.FileExists()
)
def test_output_dir_config_xml(self):
"""
Run test mode with output to directory in "config.xml" mode and verify
that output files are generated.
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
args = [
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
"-o",
tmpdir,
"--config-xml",
]
self.execute_jenkins_jobs_with_args(args)
self.expectThat(
os.path.join(tmpdir, "foo-job", "config.xml"),
testtools.matchers.FileExists(),
)
def test_stream_input_output_no_encoding_exceed_recursion(self):
"""
Test that we don't have issues processing a large number of jobs and
outputting the result if the encoding is not set.
"""
console_out = io.BytesIO()
input_file = os.path.join(self.fixtures_path, "large-number-of-jobs-001.yaml")
with io.open(input_file, "r") as f:
with mock.patch("sys.stdout", console_out):
console_out.encoding = None
with mock.patch("sys.stdin", f):
args = ["test"]
self.execute_jenkins_jobs_with_args(args)
def test_stream_input_output_utf8_encoding(self):
"""
Run test mode simulating using pipes for input and output using
utf-8 encoding
"""
console_out = io.BytesIO()
input_file = os.path.join(self.fixtures_path, "cmd-001.yaml")
with io.open(input_file, "r") as f:
with mock.patch("sys.stdout", console_out):
with mock.patch("sys.stdin", f):
args = ["--conf", self.default_config_file, "test"]
self.execute_jenkins_jobs_with_args(args)
xml_content = io.open(
os.path.join(self.fixtures_path, "cmd-001.xml"), "r", encoding="utf-8"
).read()
value = console_out.getvalue().decode("utf-8")
self.assertEqual(value, xml_content)
def test_stream_input_output_ascii_encoding(self):
"""
Run test mode simulating using pipes for input and output using
ascii encoding with unicode input
"""
console_out = io.BytesIO()
console_out.encoding = "ascii"
input_file = os.path.join(self.fixtures_path, "cmd-001.yaml")
with io.open(input_file, "r") as f:
with mock.patch("sys.stdout", console_out):
with mock.patch("sys.stdin", f):
args = ["--conf", self.default_config_file, "test"]
self.execute_jenkins_jobs_with_args(args)
xml_content = io.open(
os.path.join(self.fixtures_path, "cmd-001.xml"), "r", encoding="utf-8"
).read()
value = console_out.getvalue().decode("ascii")
self.assertEqual(value, xml_content)
def test_stream_output_ascii_encoding_invalid_char(self):
"""
Run test mode simulating using pipes for input and output using
ascii encoding for output with include containing a character
that cannot be converted.
"""
console_out = io.BytesIO()
console_out.encoding = "ascii"
input_file = os.path.join(self.fixtures_path, "unicode001.yaml")
with io.open(input_file, "r", encoding="utf-8") as f:
with mock.patch("sys.stdout", console_out):
with mock.patch("sys.stdin", f):
args = ["--conf", self.default_config_file, "test"]
jenkins_jobs = entry.JenkinsJobs(args)
e = self.assertRaises(UnicodeError, jenkins_jobs.execute)
self.assertIn("'ascii' codec can't encode character", str(e))
@mock.patch("jenkins_jobs.cli.subcommand.update.XmlJobGenerator.generateXML")
@mock.patch("jenkins_jobs.cli.subcommand.update.ModuleRegistry")
def test_plugins_info_stub_option(self, registry_mock, generateXML_mock):
"""
Test handling of plugins_info stub option.
"""
plugins_info_stub_yaml_file = os.path.join(
self.fixtures_path, "plugins-info.yaml"
)
args = [
"--conf",
os.path.join(self.fixtures_path, "cmd-001.conf"),
"test",
"-p",
plugins_info_stub_yaml_file,
os.path.join(self.fixtures_path, "cmd-001.yaml"),
]
self.execute_jenkins_jobs_with_args(args)
with io.open(plugins_info_stub_yaml_file, "r", encoding="utf-8") as yaml_file:
plugins_info_list = yaml.safe_load(yaml_file)
registry_mock.assert_called_with(mock.ANY, plugins_info_list)
@mock.patch("jenkins_jobs.cli.subcommand.update.XmlJobGenerator.generateXML")
@mock.patch("jenkins_jobs.cli.subcommand.update.ModuleRegistry")
def test_bogus_plugins_info_stub_option(self, registry_mock, generateXML_mock):
"""
Verify that a JenkinsJobException is raised if the plugins_info stub
file does not yield a list as its top-level object.
"""
plugins_info_stub_yaml_file = os.path.join(
self.fixtures_path, "bogus-plugins-info.yaml"
)
args = [
"--conf",
os.path.join(self.fixtures_path, "cmd-001.conf"),
"test",
"-p",
plugins_info_stub_yaml_file,
os.path.join(self.fixtures_path, "cmd-001.yaml"),
]
stderr = StringIO()
with mock.patch("sys.stderr", stderr):
self.assertRaises(SystemExit, entry.JenkinsJobs, args)
self.assertIn("must contain a Yaml list", stderr.getvalue())
def test_non_existing_job(fixtures_dir, default_config_file, execute_jenkins_jobs):
"""
Run test mode and pass a non-existing job name
(probably better to fail here)
"""
args = [
"--conf",
default_config_file,
"test",
str(fixtures_dir / "cmd-001.yaml"),
"invalid",
]
execute_jenkins_jobs(args)
class TestJenkinsGetPluginInfoError(CmdTestsBase):
"""Test without mocking get_plugins_info.
def test_valid_job(fixtures_dir, default_config_file, execute_jenkins_jobs):
"""
Run test mode and pass a valid job name
"""
args = [
"--conf",
default_config_file,
"test",
str(fixtures_dir / "cmd-001.yaml"),
"foo-job",
]
execute_jenkins_jobs(args)
This test class is used for testing the 'test' subcommand when we want
to validate its behavior without mocking
jenkins_jobs.builder.JenkinsManager.get_plugins_info
def test_console_output(
capsys, fixtures_dir, default_config_file, execute_jenkins_jobs
):
"""
Run test mode and verify that resulting XML gets sent to the console.
"""
@mock.patch("jenkins.Jenkins.get_plugins")
def test_console_output_jenkins_connection_failure_warning(self, get_plugins_mock):
"""
Run test mode and verify that failed Jenkins connection attempt
exception does not bubble out of cmd.main. Ideally, we would also test
that an appropriate message is logged to stderr but it's somewhat
difficult to figure out how to actually enable stderr in this test
suite.
"""
args = [
"--conf",
default_config_file,
"test",
str(fixtures_dir / "cmd-001.yaml"),
]
execute_jenkins_jobs(args)
get_plugins_mock.side_effect = jenkins.JenkinsException("Connection refused")
with mock.patch("sys.stdout"):
try:
args = [
"--conf",
self.default_config_file,
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
]
self.execute_jenkins_jobs_with_args(args)
except jenkins.JenkinsException:
self.fail("jenkins.JenkinsException propagated to main")
except Exception:
pass # only care about jenkins.JenkinsException for now
expected_output = fixtures_dir.joinpath("cmd-001.xml").read_text()
captured = capsys.readouterr()
assert captured.out == expected_output
@mock.patch("jenkins.Jenkins.get_plugins")
def test_skip_plugin_retrieval_if_no_config_provided(self, get_plugins_mock):
"""
Verify that plugin information retrieval from the Jenkins instance is
skipped when no config file is provided.
"""
with mock.patch("sys.stdout", new_callable=io.BytesIO):
args = [
"--conf",
self.default_config_file,
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
]
entry.JenkinsJobs(args)
self.assertFalse(get_plugins_mock.called)
@mock.patch("jenkins.Jenkins.get_plugins_info")
def test_skip_plugin_retrieval_if_disabled(self, get_plugins_mock):
"""
Verify that plugin information retrieval from the Jenkins instance is
skipped when the provided config file disables querying through a
config option.
"""
with mock.patch("sys.stdout", new_callable=io.BytesIO):
args = [
"--conf",
os.path.join(self.fixtures_path, "disable-query-plugins.conf"),
"test",
os.path.join(self.fixtures_path, "cmd-001.yaml"),
]
entry.JenkinsJobs(args)
self.assertFalse(get_plugins_mock.called)
def test_output_dir(tmp_path, fixtures_dir, default_config_file, execute_jenkins_jobs):
"""
Run test mode with output to directory and verify that output files are
generated.
"""
args = ["test", str(fixtures_dir / "cmd-001.yaml"), "-o", str(tmp_path)]
execute_jenkins_jobs(args)
assert tmp_path.joinpath("foo-job").exists()
def test_output_dir_config_xml(tmp_path, fixtures_dir, execute_jenkins_jobs):
"""
Run test mode with output to directory in "config.xml" mode and verify
that output files are generated.
"""
args = [
"test",
str(fixtures_dir / "cmd-001.yaml"),
"-o",
str(tmp_path),
"--config-xml",
]
execute_jenkins_jobs(args)
assert tmp_path.joinpath("foo-job", "config.xml").exists()
def test_stream_input_output_no_encoding_exceed_recursion(
mocker, fixtures_dir, execute_jenkins_jobs
):
"""
Test that we don't have issues processing a large number of jobs and
outputting the result if the encoding is not set.
"""
console_out = io.BytesIO()
console_out.encoding = None
mocker.patch("sys.stdout", console_out)
input = fixtures_dir.joinpath("large-number-of-jobs-001.yaml").read_bytes()
mocker.patch("sys.stdin", io.BytesIO(input))
args = ["test"]
execute_jenkins_jobs(args)
def test_stream_input_output_utf8_encoding(
capsys, mocker, fixtures_dir, default_config_file, execute_jenkins_jobs
):
"""
Run test mode simulating using pipes for input and output using
utf-8 encoding
"""
input = fixtures_dir.joinpath("cmd-001.yaml").read_bytes()
mocker.patch("sys.stdin", io.BytesIO(input))
args = ["--conf", default_config_file, "test"]
execute_jenkins_jobs(args)
expected_output = fixtures_dir.joinpath("cmd-001.xml").read_text()
captured = capsys.readouterr()
assert captured.out == expected_output
def test_stream_input_output_ascii_encoding(
mocker, fixtures_dir, default_config_file, execute_jenkins_jobs
):
"""
Run test mode simulating using pipes for input and output using
ascii encoding with unicode input
"""
console_out = io.BytesIO()
console_out.encoding = "ascii"
mocker.patch("sys.stdout", console_out)
input = fixtures_dir.joinpath("cmd-001.yaml").read_bytes()
mocker.patch("sys.stdin", io.BytesIO(input))
args = ["--conf", default_config_file, "test"]
execute_jenkins_jobs(args)
expected_output = fixtures_dir.joinpath("cmd-001.xml").read_text()
output = console_out.getvalue().decode("ascii")
assert output == expected_output
def test_stream_output_ascii_encoding_invalid_char(
mocker, fixtures_dir, default_config_file
):
"""
Run test mode simulating using pipes for input and output using
ascii encoding for output with include containing a character
that cannot be converted.
"""
console_out = io.BytesIO()
console_out.encoding = "ascii"
mocker.patch("sys.stdout", console_out)
input = fixtures_dir.joinpath("unicode001.yaml").read_bytes()
mocker.patch("sys.stdin", io.BytesIO(input))
args = ["--conf", default_config_file, "test"]
jenkins_jobs = entry.JenkinsJobs(args)
with pytest.raises(UnicodeError) as excinfo:
jenkins_jobs.execute()
assert "'ascii' codec can't encode character" in str(excinfo.value)
def test_plugins_info_stub_option(mocker, fixtures_dir, execute_jenkins_jobs):
"""
Test handling of plugins_info stub option.
"""
mocker.patch("jenkins_jobs.cli.subcommand.update.XmlJobGenerator.generateXML")
registry_mock = mocker.patch("jenkins_jobs.cli.subcommand.update.ModuleRegistry")
plugins_info_stub_yaml_file = fixtures_dir / "plugins-info.yaml"
args = [
"--conf",
str(fixtures_dir / "cmd-001.conf"),
"test",
"-p",
str(plugins_info_stub_yaml_file),
str(fixtures_dir / "cmd-001.yaml"),
]
execute_jenkins_jobs(args)
plugins_info_list = yaml.safe_load(plugins_info_stub_yaml_file.read_text())
registry_mock.assert_called_with(mock.ANY, plugins_info_list)
def test_bogus_plugins_info_stub_option(
capsys, mocker, fixtures_dir, default_config_file
):
"""
Verify that a JenkinsJobException is raised if the plugins_info stub
file does not yield a list as its top-level object.
"""
mocker.patch("jenkins_jobs.cli.subcommand.update.XmlJobGenerator.generateXML")
mocker.patch("jenkins_jobs.cli.subcommand.update.ModuleRegistry")
plugins_info_stub_yaml_file = fixtures_dir / "bogus-plugins-info.yaml"
args = [
"--conf",
str(fixtures_dir / "cmd-001.conf"),
"test",
"-p",
str(plugins_info_stub_yaml_file),
str(fixtures_dir / "cmd-001.yaml"),
]
with pytest.raises(SystemExit):
entry.JenkinsJobs(args)
captured = capsys.readouterr()
assert "must contain a Yaml list" in captured.err
# Test without mocking get_plugins_info.
#
# This test class is used for testing the 'test' subcommand when we want
# to validate its behavior without mocking
# jenkins_jobs.builder.JenkinsManager.get_plugins_info
def test_console_output_jenkins_connection_failure_warning(
caplog, mocker, fixtures_dir, execute_jenkins_jobs
):
"""
Run test mode and verify that failed Jenkins connection attempt
exception does not bubble out of cmd.main.
"""
mocker.patch(
"jenkins.Jenkins.get_plugins",
side_effect=jenkins.JenkinsException("Connection refused"),
)
try:
args = [
"--conf",
str(fixtures_dir / "enable-query-plugins.conf"),
"test",
str(fixtures_dir / "cmd-001.yaml"),
]
execute_jenkins_jobs(args)
except jenkins.JenkinsException:
pytest.fail("jenkins.JenkinsException propagated to main")
except Exception:
pass # only care about jenkins.JenkinsException for now
assert "Unable to retrieve Jenkins Plugin Info" in caplog.text
def test_skip_plugin_retrieval_if_no_config_provided(
mocker, fixtures_dir, default_config_file
):
"""
Verify that plugin information retrieval from the Jenkins instance is
skipped when no config file is provided.
"""
get_plugins_mock = mocker.patch("jenkins.Jenkins.get_plugins")
args = [
"--conf",
default_config_file,
"test",
str(fixtures_dir / "cmd-001.yaml"),
]
entry.JenkinsJobs(args)
assert not get_plugins_mock.called
@mock.patch("jenkins.Jenkins.get_plugins_info")
def test_skip_plugin_retrieval_if_disabled(mocker, fixtures_dir):
"""
Verify that plugin information retrieval from the Jenkins instance is
skipped when the provided config file disables querying through a
config option.
"""
get_plugins_mock = mocker.patch("jenkins.Jenkins.get_plugins")
args = [
"--conf",
str(fixtures_dir / "disable-query-plugins.conf"),
"test",
str(fixtures_dir / "cmd-001.yaml"),
]
entry.JenkinsJobs(args)
assert not get_plugins_mock.called
class MatchesDirMissingFilesMismatch(object):
@@ -377,98 +376,97 @@ class MatchesDir(object):
return None
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class TestTestsMultiPath(CmdTestsBase):
def setUp(self):
super(TestTestsMultiPath, self).setUp()
@pytest.fixture
def multipath(fixtures_dir):
path_list = [
str(fixtures_dir / "multi-path/yamldirs/" / p) for p in ["dir1", "dir2"]
]
return os.pathsep.join(path_list)
path_list = [
os.path.join(self.fixtures_path, "multi-path/yamldirs/", p)
for p in ["dir1", "dir2"]
]
self.multipath = os.pathsep.join(path_list)
self.output_dir = tempfile.mkdtemp()
def check_dirs_match(self, expected_dir):
try:
self.assertThat(self.output_dir, MatchesDir(expected_dir))
except testtools.matchers.MismatchError:
raise
else:
shutil.rmtree(self.output_dir)
@pytest.fixture
def output_dir(tmp_path):
dir = tmp_path / "output"
dir.mkdir()
return str(dir)
def test_multi_path(self):
"""
Run test mode and pass multiple paths.
"""
args = [
"--conf",
self.default_config_file,
"test",
"-o",
self.output_dir,
self.multipath,
]
self.execute_jenkins_jobs_with_args(args)
self.check_dirs_match(
os.path.join(self.fixtures_path, "multi-path/output_simple")
)
def test_multi_path(
fixtures_dir, default_config_file, execute_jenkins_jobs, output_dir, multipath
):
"""
Run test mode and pass multiple paths.
"""
args = [
"--conf",
default_config_file,
"test",
"-o",
output_dir,
multipath,
]
def test_recursive_multi_path_command_line(self):
"""
Run test mode and pass multiple paths with recursive path option.
"""
args = [
"--conf",
self.default_config_file,
"test",
"-o",
self.output_dir,
"-r",
self.multipath,
]
execute_jenkins_jobs(args)
assert_that(output_dir, MatchesDir(fixtures_dir / "multi-path/output_simple"))
self.execute_jenkins_jobs_with_args(args)
self.check_dirs_match(
os.path.join(self.fixtures_path, "multi-path/output_recursive")
)
def test_recursive_multi_path_config_file(self):
# test recursive set in configuration file
args = [
"--conf",
os.path.join(self.fixtures_path, "multi-path/builder-recursive.ini"),
"test",
"-o",
self.output_dir,
self.multipath,
]
self.execute_jenkins_jobs_with_args(args)
self.check_dirs_match(
os.path.join(self.fixtures_path, "multi-path/output_recursive")
)
def test_recursive_multi_path_command_line(
fixtures_dir, default_config_file, execute_jenkins_jobs, output_dir, multipath
):
"""
Run test mode and pass multiple paths with recursive path option.
"""
args = [
"--conf",
default_config_file,
"test",
"-o",
output_dir,
"-r",
multipath,
]
def test_recursive_multi_path_with_excludes(self):
"""
Run test mode and pass multiple paths with recursive path option.
"""
exclude_path = os.path.join(self.fixtures_path, "multi-path/yamldirs/dir2/dir1")
args = [
"--conf",
self.default_config_file,
"test",
"-x",
exclude_path,
"-o",
self.output_dir,
"-r",
self.multipath,
]
execute_jenkins_jobs(args)
assert_that(output_dir, MatchesDir(fixtures_dir / "multi-path/output_recursive"))
self.execute_jenkins_jobs_with_args(args)
self.check_dirs_match(
os.path.join(
self.fixtures_path, "multi-path/output_recursive_with_excludes"
)
)
def test_recursive_multi_path_config_file(
fixtures_dir, execute_jenkins_jobs, output_dir, multipath
):
# test recursive set in configuration file
args = [
"--conf",
str(fixtures_dir / "multi-path/builder-recursive.ini"),
"test",
"-o",
output_dir,
multipath,
]
execute_jenkins_jobs(args)
assert_that(output_dir, MatchesDir(fixtures_dir / "multi-path/output_recursive"))
def test_recursive_multi_path_with_excludes(
fixtures_dir, default_config_file, execute_jenkins_jobs, output_dir, multipath
):
"""
Run test mode and pass multiple paths with recursive path option.
"""
exclude_path = fixtures_dir / "multi-path/yamldirs/dir2/dir1"
args = [
"--conf",
default_config_file,
"test",
"-x",
str(exclude_path),
"-o",
output_dir,
"-r",
multipath,
]
execute_jenkins_jobs(args)
assert_that(
output_dir,
MatchesDir(fixtures_dir / "multi-path/output_recursive_with_excludes"),
)


@@ -18,107 +18,110 @@
# of actions by the JJB library, usually through interaction with the
# python-jenkins library.
import os
import six
from unittest import mock
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
import pytest
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class UpdateTests(CmdTestsBase):
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.job_exists")
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.get_all_jobs")
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.reconfig_job")
def test_update_jobs(
self, jenkins_reconfig_job, jenkins_get_jobs, jenkins_job_exists
):
"""
Test update_job is called
"""
path = os.path.join(self.fixtures_path, "cmd-002.yaml")
args = ["--conf", self.default_config_file, "update", path]
def test_update_jobs(mocker, fixtures_dir, default_config_file, execute_jenkins_jobs):
"""
Test update_job is called
"""
mocker.patch("jenkins_jobs.builder.jenkins.Jenkins.job_exists")
mocker.patch("jenkins_jobs.builder.jenkins.Jenkins.get_all_jobs")
reconfig_job = mocker.patch("jenkins_jobs.builder.jenkins.Jenkins.reconfig_job")
self.execute_jenkins_jobs_with_args(args)
path = fixtures_dir / "cmd-002.yaml"
args = ["--conf", default_config_file, "update", str(path)]
jenkins_reconfig_job.assert_has_calls(
[
mock.call(job_name, mock.ANY)
for job_name in ["bar001", "bar002", "baz001", "bam001"]
],
any_order=True,
)
execute_jenkins_jobs(args)
@mock.patch("jenkins_jobs.builder.JenkinsManager.is_job", return_value=True)
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_jobs")
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_job_md5")
@mock.patch("jenkins_jobs.builder.JenkinsManager.update_job")
def test_update_jobs_decode_job_output(
self, update_job_mock, get_job_md5_mock, get_jobs_mock, is_job_mock
):
"""
Test that job xml output has been decoded before attempting to update
"""
# don't care about the value returned here
update_job_mock.return_value = ([], 0)
reconfig_job.assert_has_calls(
[
mock.call(job_name, mock.ANY)
for job_name in ["bar001", "bar002", "baz001", "bam001"]
],
any_order=True,
)
path = os.path.join(self.fixtures_path, "cmd-002.yaml")
args = ["--conf", self.default_config_file, "update", path]
self.execute_jenkins_jobs_with_args(args)
self.assertTrue(isinstance(update_job_mock.call_args[0][1], six.text_type))
def test_update_jobs_decode_job_output(
mocker, fixtures_dir, default_config_file, execute_jenkins_jobs
):
"""
Test that job xml output has been decoded before attempting to update
"""
mocker.patch("jenkins_jobs.builder.JenkinsManager.is_job", return_value=True)
mocker.patch("jenkins_jobs.builder.JenkinsManager.get_jobs")
mocker.patch("jenkins_jobs.builder.JenkinsManager.get_job_md5")
update_job_mock = mocker.patch("jenkins_jobs.builder.JenkinsManager.update_job")
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.job_exists")
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.get_all_jobs")
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.reconfig_job")
@mock.patch("jenkins_jobs.builder.jenkins.Jenkins.delete_job")
def test_update_jobs_and_delete_old(
self,
jenkins_delete_job,
jenkins_reconfig_job,
jenkins_get_all_jobs,
jenkins_job_exists,
):
"""Test update behaviour with --delete-old option.
# don't care about the value returned here
update_job_mock.return_value = ([], 0)
* mock out a call to jenkins.Jenkins.get_jobs() to return a known list
of job names.
* mock out a call to jenkins.Jenkins.reconfig_job() and
jenkins.Jenkins.delete_job() to detect calls being made to determine
that JJB does correctly delete the jobs it should delete when passed
a specific set of inputs.
* mock out a call to jenkins.Jenkins.job_exists() to always return
True.
"""
yaml_jobs = ["bar001", "bar002", "baz001", "bam001"]
extra_jobs = ["old_job001", "old_job002", "unmanaged"]
path = fixtures_dir / "cmd-002.yaml"
args = ["--conf", default_config_file, "update", str(path)]
path = os.path.join(self.fixtures_path, "cmd-002.yaml")
args = ["--conf", self.default_config_file, "update", "--delete-old", path]
execute_jenkins_jobs(args)
assert isinstance(update_job_mock.call_args[0][1], str)
jenkins_get_all_jobs.return_value = [
{"fullname": name} for name in yaml_jobs + extra_jobs
]
with mock.patch(
"jenkins_jobs.builder.JenkinsManager.is_managed",
side_effect=(lambda name: name != "unmanaged"),
):
self.execute_jenkins_jobs_with_args(args)
def test_update_jobs_and_delete_old(
mocker, fixtures_dir, default_config_file, execute_jenkins_jobs
):
"""Test update behaviour with --delete-old option.
jenkins_reconfig_job.assert_has_calls(
[mock.call(job_name, mock.ANY) for job_name in yaml_jobs], any_order=True
)
calls = [mock.call(name) for name in extra_jobs if name != "unmanaged"]
jenkins_delete_job.assert_has_calls(calls)
# to ensure only the calls we expected were made, have to check
# there were no others, as no API call for assert_has_only_calls
self.assertEqual(jenkins_delete_job.call_count, len(calls))
* mock out a call to jenkins.Jenkins.get_jobs() to return a known list
of job names.
* mock out a call to jenkins.Jenkins.reconfig_job() and
jenkins.Jenkins.delete_job() to detect calls being made to determine
that JJB does correctly delete the jobs it should delete when passed
a specific set of inputs.
* mock out a call to jenkins.Jenkins.job_exists() to always return
True.
"""
mocker.patch("jenkins_jobs.builder.jenkins.Jenkins.job_exists")
jenkins_get_all_jobs = mocker.patch(
"jenkins_jobs.builder.jenkins.Jenkins.get_all_jobs"
)
jenkins_reconfig_job = mocker.patch(
"jenkins_jobs.builder.jenkins.Jenkins.reconfig_job"
)
jenkins_delete_job = mocker.patch("jenkins_jobs.builder.jenkins.Jenkins.delete_job")
def test_update_timeout_not_set(self):
"""Validate update timeout behavior when timeout not explicitly configured."""
self.skipTest("TODO: Develop actual update timeout test approach.")
yaml_jobs = ["bar001", "bar002", "baz001", "bam001"]
extra_jobs = ["old_job001", "old_job002", "unmanaged"]
def test_update_timeout_set(self):
"""Validate update timeout behavior when timeout is explicitly configured."""
self.skipTest("TODO: Develop actual update timeout test approach.")
path = fixtures_dir / "cmd-002.yaml"
args = ["--conf", default_config_file, "update", "--delete-old", str(path)]
jenkins_get_all_jobs.return_value = [
{"fullname": name} for name in yaml_jobs + extra_jobs
]
mocker.patch(
"jenkins_jobs.builder.JenkinsManager.is_managed",
side_effect=(lambda name: name != "unmanaged"),
)
execute_jenkins_jobs(args)
jenkins_reconfig_job.assert_has_calls(
[mock.call(job_name, mock.ANY) for job_name in yaml_jobs], any_order=True
)
calls = [mock.call(name) for name in extra_jobs if name != "unmanaged"]
jenkins_delete_job.assert_has_calls(calls)
# to ensure only the calls we expected were made, have to check
# there were no others, as no API call for assert_has_only_calls
assert jenkins_delete_job.call_count == len(calls)
@pytest.mark.skip(reason="TODO: Develop actual update timeout test approach.")
def test_update_timeout_not_set():
"""Validate update timeout behavior when timeout not explicitly configured."""
pass
@pytest.mark.skip(reason="TODO: Develop actual update timeout test approach.")
def test_update_timeout_set():
"""Validate update timeout behavior when timeout is explicitly configured."""
pass


@@ -1,37 +1,11 @@
import os
import pytest
from jenkins_jobs.cli import entry
from tests import base
from tests.base import mock
class CmdTestsBase(base.BaseTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
def setUp(self):
super(CmdTestsBase, self).setUp()
# Testing the cmd module can sometimes result in the JobCache class
# attempting to create the cache directory multiple times as the tests
# are run in parallel. Stub out the JobCache to ensure that each
# test can safely create the cache directory without risk of
# interference.
cache_patch = mock.patch("jenkins_jobs.builder.JobCache", autospec=True)
self.cache_mock = cache_patch.start()
self.addCleanup(cache_patch.stop)
self.default_config_file = os.path.join(self.fixtures_path, "empty_builder.ini")
def execute_jenkins_jobs_with_args(self, args):
jenkins_jobs = entry.JenkinsJobs(args)
jenkins_jobs.execute()
class TestCmd(CmdTestsBase):
def test_with_empty_args(self):
"""
User passes no args, should fail with SystemExit
"""
with mock.patch("sys.stderr"):
self.assertRaises(SystemExit, entry.JenkinsJobs, [])
def test_with_empty_args(mocker):
"""
User passes no args, should fail with SystemExit
"""
with pytest.raises(SystemExit):
entry.JenkinsJobs([])


@@ -1,160 +1,177 @@
import io
import os
from pathlib import Path
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
import pytest
from jenkins_jobs.cli import entry
from jenkins_jobs import builder
patch = mock.patch
global_conf = "/etc/jenkins_jobs/jenkins_jobs.ini"
user_conf = Path.home() / ".config" / "jenkins_jobs" / "jenkins_jobs.ini"
local_conf = Path(__file__).parent / "jenkins_jobs.ini"
@mock.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info", mock.MagicMock)
class TestConfigs(CmdTestsBase):
def test_use_global_config(mocker, default_config_file):
"""
Verify that JJB uses the global config file by default
"""
mocker.patch("jenkins_jobs.builder.JenkinsManager.get_plugins_info")
global_conf = "/etc/jenkins_jobs/jenkins_jobs.ini"
user_conf = os.path.join(
os.path.expanduser("~"), ".config", "jenkins_jobs", "jenkins_jobs.ini"
)
local_conf = os.path.join(os.path.dirname(__file__), "jenkins_jobs.ini")
args = ["test", "foo"]
def test_use_global_config(self):
"""
Verify that JJB uses the global config file by default
"""
default_io_open = io.open
args = ["test", "foo"]
conffp = io.open(self.default_config_file, "r", encoding="utf-8")
def io_open(file, *args, **kw):
if file == global_conf:
return default_io_open(default_config_file, "r", encoding="utf-8")
else:
return default_io_open(file, *args, **kw)
with patch("os.path.isfile", return_value=True) as m_isfile:
def isfile(path):
if path == global_conf:
return True
return False
def side_effect(path):
if path == self.global_conf:
return True
return False
mocker.patch("os.path.isfile", side_effect=isfile)
mocked_open = mocker.patch("io.open", side_effect=io_open)
m_isfile.side_effect = side_effect
entry.JenkinsJobs(args, config_file_required=True)
with patch("io.open", return_value=conffp) as m_open:
entry.JenkinsJobs(args, config_file_required=True)
m_open.assert_called_with(self.global_conf, "r", encoding="utf-8")
mocked_open.assert_called_with(global_conf, "r", encoding="utf-8")
def test_use_config_in_user_home(self):
"""
Verify that JJB uses config file in user home folder
"""
args = ["test", "foo"]
def test_use_config_in_user_home(mocker, default_config_file):
"""
Verify that JJB uses config file in user home folder
"""
conffp = io.open(self.default_config_file, "r", encoding="utf-8")
with patch("os.path.isfile", return_value=True) as m_isfile:
args = ["test", "foo"]
def side_effect(path):
if path == self.user_conf:
return True
return False
default_io_open = io.open
m_isfile.side_effect = side_effect
with patch("io.open", return_value=conffp) as m_open:
entry.JenkinsJobs(args, config_file_required=True)
m_open.assert_called_with(self.user_conf, "r", encoding="utf-8")
def io_open(file, *args, **kw):
if file == str(user_conf):
return default_io_open(default_config_file, "r", encoding="utf-8")
else:
return default_io_open(file, *args, **kw)
def test_non_existing_config_dir(self):
"""
Run test mode and pass a non-existing configuration directory
"""
args = ["--conf", self.default_config_file, "test", "foo"]
jenkins_jobs = entry.JenkinsJobs(args)
self.assertRaises(IOError, jenkins_jobs.execute)
def isfile(path):
if path == str(user_conf):
return True
return False
def test_non_existing_config_file(self):
"""
Run test mode and pass a non-existing configuration file
"""
args = ["--conf", self.default_config_file, "test", "non-existing.yaml"]
jenkins_jobs = entry.JenkinsJobs(args)
self.assertRaises(IOError, jenkins_jobs.execute)
mocker.patch("os.path.isfile", side_effect=isfile)
mocked_open = mocker.patch("io.open", side_effect=io_open)
def test_config_options_not_replaced_by_cli_defaults(self):
"""
Run test mode and check config settings from conf file retained
when none of the global CLI options are set.
"""
config_file = os.path.join(self.fixtures_path, "settings_from_config.ini")
args = ["--conf", config_file, "test", "dummy.yaml"]
jenkins_jobs = entry.JenkinsJobs(args)
jjb_config = jenkins_jobs.jjb_config
self.assertEqual(jjb_config.jenkins["user"], "jenkins_user")
self.assertEqual(jjb_config.jenkins["password"], "jenkins_password")
self.assertEqual(jjb_config.builder["ignore_cache"], True)
self.assertEqual(jjb_config.builder["flush_cache"], True)
self.assertEqual(jjb_config.builder["update"], "all")
self.assertEqual(jjb_config.yamlparser["allow_empty_variables"], True)
entry.JenkinsJobs(args, config_file_required=True)
mocked_open.assert_called_with(str(user_conf), "r", encoding="utf-8")
def test_config_options_overriden_by_cli(self):
"""
Run test mode and check config settings from conf file retained
when none of the global CLI options are set.
"""
args = [
"--user",
"myuser",
"--password",
"mypassword",
"--ignore-cache",
"--flush-cache",
"--allow-empty-variables",
"test",
"dummy.yaml",
]
jenkins_jobs = entry.JenkinsJobs(args)
jjb_config = jenkins_jobs.jjb_config
self.assertEqual(jjb_config.jenkins["user"], "myuser")
self.assertEqual(jjb_config.jenkins["password"], "mypassword")
self.assertEqual(jjb_config.builder["ignore_cache"], True)
self.assertEqual(jjb_config.builder["flush_cache"], True)
self.assertEqual(jjb_config.yamlparser["allow_empty_variables"], True)
@mock.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager")
def test_update_timeout_not_set(self, jenkins_mock):
"""Check that timeout is left unset
def test_non_existing_config_dir(default_config_file):
"""
Run test mode and pass a non-existing configuration directory
"""
args = ["--conf", default_config_file, "test", "foo"]
jenkins_jobs = entry.JenkinsJobs(args)
with pytest.raises(IOError):
jenkins_jobs.execute()
Test that the Jenkins object has the timeout set on it only when
provided via the config option.
"""
path = os.path.join(self.fixtures_path, "cmd-002.yaml")
args = ["--conf", self.default_config_file, "update", path]
def test_non_existing_config_file(default_config_file):
"""
Run test mode and pass a non-existing configuration file
"""
args = ["--conf", default_config_file, "test", "non-existing.yaml"]
jenkins_jobs = entry.JenkinsJobs(args)
with pytest.raises(IOError):
jenkins_jobs.execute()
jenkins_mock.return_value.update_jobs.return_value = ([], 0)
jenkins_mock.return_value.update_views.return_value = ([], 0)
self.execute_jenkins_jobs_with_args(args)
# validate that the JJBConfig used to initialize builder.Jenkins
# contains the expected timeout value.
def test_config_options_not_replaced_by_cli_defaults(fixtures_dir):
"""
Run test mode and check config settings from conf file retained
when none of the global CLI options are set.
"""
config_file = fixtures_dir / "settings_from_config.ini"
args = ["--conf", str(config_file), "test", "dummy.yaml"]
jenkins_jobs = entry.JenkinsJobs(args)
jjb_config = jenkins_jobs.jjb_config
assert jjb_config.jenkins["user"] == "jenkins_user"
assert jjb_config.jenkins["password"] == "jenkins_password"
assert jjb_config.builder["ignore_cache"]
assert jjb_config.builder["flush_cache"]
assert jjb_config.builder["update"] == "all"
assert jjb_config.yamlparser["allow_empty_variables"]
jjb_config = jenkins_mock.call_args[0][0]
self.assertEqual(jjb_config.jenkins["timeout"], builder._DEFAULT_TIMEOUT)
@mock.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager")
def test_update_timeout_set(self, jenkins_mock):
"""Check that timeout is set correctly
def test_config_options_overriden_by_cli():
"""
Run test mode and check config settings from conf file retained
when none of the global CLI options are set.
"""
args = [
"--user",
"myuser",
"--password",
"mypassword",
"--ignore-cache",
"--flush-cache",
"--allow-empty-variables",
"test",
"dummy.yaml",
]
jenkins_jobs = entry.JenkinsJobs(args)
jjb_config = jenkins_jobs.jjb_config
assert jjb_config.jenkins["user"] == "myuser"
assert jjb_config.jenkins["password"] == "mypassword"
assert jjb_config.builder["ignore_cache"]
assert jjb_config.builder["flush_cache"]
assert jjb_config.yamlparser["allow_empty_variables"]
Test that the Jenkins object has the timeout set on it only when
provided via the config option.
"""
path = os.path.join(self.fixtures_path, "cmd-002.yaml")
config_file = os.path.join(self.fixtures_path, "non-default-timeout.ini")
args = ["--conf", config_file, "update", path]
def test_update_timeout_not_set(mocker, fixtures_dir, default_config_file):
"""Check that timeout is left unset
jenkins_mock.return_value.update_jobs.return_value = ([], 0)
jenkins_mock.return_value.update_views.return_value = ([], 0)
self.execute_jenkins_jobs_with_args(args)
Test that the Jenkins object has the timeout set on it only when
provided via the config option.
"""
jenkins_mock = mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager")
# validate that the JJBConfig used to initialize builder.Jenkins
# contains the expected timeout value.
path = fixtures_dir / "cmd-002.yaml"
args = ["--conf", default_config_file, "update", str(path)]
jjb_config = jenkins_mock.call_args[0][0]
self.assertEqual(jjb_config.jenkins["timeout"], 0.2)
jenkins_mock.return_value.update_jobs.return_value = ([], 0)
jenkins_mock.return_value.update_views.return_value = ([], 0)
jenkins_jobs = entry.JenkinsJobs(args)
jenkins_jobs.execute()
# validate that the JJBConfig used to initialize builder.Jenkins
# contains the expected timeout value.
jjb_config = jenkins_mock.call_args[0][0]
assert jjb_config.jenkins["timeout"] == builder._DEFAULT_TIMEOUT
def test_update_timeout_set(mocker, fixtures_dir):
"""Check that timeout is set correctly
Test that the Jenkins object has the timeout set on it only when
provided via the config option.
"""
jenkins_mock = mocker.patch("jenkins_jobs.cli.subcommand.update.JenkinsManager")
path = fixtures_dir / "cmd-002.yaml"
config_file = fixtures_dir / "non-default-timeout.ini"
args = ["--conf", str(config_file), "update", str(path)]
jenkins_mock.return_value.update_jobs.return_value = ([], 0)
jenkins_mock.return_value.update_views.return_value = ([], 0)
jenkins_jobs = entry.JenkinsJobs(args)
jenkins_jobs.execute()
# validate that the JJBConfig used to initialize builder.Jenkins
# contains the expected timeout value.
jjb_config = jenkins_mock.call_args[0][0]
assert jjb_config.jenkins["timeout"] == 0.2

View File

@ -1,7 +1,4 @@
import os
from tests.base import mock
import testtools
from pathlib import Path
from jenkins_jobs import utils
@@ -26,114 +23,104 @@ def fake_os_walk(paths):
return os_walk
# Testing the utils module can sometimes result in the JobCache class
# attempting to create the cache directory multiple times as the tests
# are run in parallel. Stub out the JobCache to ensure that each
# test can safely create the object without effect.
@mock.patch("jenkins_jobs.builder.JobCache", mock.MagicMock)
class CmdRecursePath(testtools.TestCase):
@mock.patch("jenkins_jobs.utils.os.walk")
def test_recursive_path_option_exclude_pattern(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using pattern
excludes.
def test_recursive_path_option_exclude_pattern(mocker):
"""
Test paths returned by the recursive processing when using pattern
excludes.
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
os_walk_paths = [
("/jjb_configs", (["dir1", "dir2", "dir3", "test3"], ())),
("/jjb_configs/dir1", (["test1"], ("file"))),
("/jjb_configs/dir2", (["test2"], ())),
("/jjb_configs/dir3", (["bar"], ())),
("/jjb_configs/dir3/bar", ([], ())),
("/jjb_configs/test3/bar", None),
("/jjb_configs/test3/baz", None),
]
os_walk_paths = [
("/jjb_configs", (["dir1", "dir2", "dir3", "test3"], ())),
("/jjb_configs/dir1", (["test1"], ("file"))),
("/jjb_configs/dir2", (["test2"], ())),
("/jjb_configs/dir3", (["bar"], ())),
("/jjb_configs/dir3/bar", ([], ())),
("/jjb_configs/test3/bar", None),
("/jjb_configs/test3/baz", None),
]
paths = [k for k, v in os_walk_paths if v is not None]
paths = [k for k, v in os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
self.assertEqual(paths, utils.recurse_path("/jjb_configs", ["test*"]))
mocker.patch("jenkins_jobs.utils.os.walk", side_effect=fake_os_walk(os_walk_paths))
assert paths == utils.recurse_path("/jjb_configs", ["test*"])
@mock.patch("jenkins_jobs.utils.os.walk")
def test_recursive_path_option_exclude_absolute(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using absolute
excludes.
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
def test_recursive_path_option_exclude_absolute(mocker):
"""
Test paths returned by the recursive processing when using absolute
excludes.
os_walk_paths = [
("/jjb_configs", (["dir1", "dir2", "dir3", "test3"], ())),
("/jjb_configs/dir1", None),
("/jjb_configs/dir2", (["test2"], ())),
("/jjb_configs/dir3", (["bar"], ())),
("/jjb_configs/test3", (["bar", "baz"], ())),
("/jjb_configs/dir2/test2", ([], ())),
("/jjb_configs/dir3/bar", ([], ())),
("/jjb_configs/test3/bar", ([], ())),
("/jjb_configs/test3/baz", ([], ())),
]
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
paths = [k for k, v in os_walk_paths if v is not None]
os_walk_paths = [
("/jjb_configs", (["dir1", "dir2", "dir3", "test3"], ())),
("/jjb_configs/dir1", None),
("/jjb_configs/dir2", (["test2"], ())),
("/jjb_configs/dir3", (["bar"], ())),
("/jjb_configs/test3", (["bar", "baz"], ())),
("/jjb_configs/dir2/test2", ([], ())),
("/jjb_configs/dir3/bar", ([], ())),
("/jjb_configs/test3/bar", ([], ())),
("/jjb_configs/test3/baz", ([], ())),
]
oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
paths = [k for k, v in os_walk_paths if v is not None]
self.assertEqual(
paths, utils.recurse_path("/jjb_configs", ["/jjb_configs/dir1"])
)
mocker.patch("jenkins_jobs.utils.os.walk", side_effect=fake_os_walk(os_walk_paths))
@mock.patch("jenkins_jobs.utils.os.walk")
def test_recursive_path_option_exclude_relative(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using relative
excludes.
assert paths == utils.recurse_path("/jjb_configs", ["/jjb_configs/dir1"])
testing paths
./jjb_configs/dir1/test/
./jjb_configs/dir1/file
./jjb_configs/dir2/test/
./jjb_configs/dir3/bar/
./jjb_configs/test3/bar/
./jjb_configs/test3/baz/
"""
os_walk_paths = [
("jjb_configs", (["dir1", "dir2", "dir3", "test3"], ())),
("jjb_configs/dir1", (["test"], ("file"))),
("jjb_configs/dir2", (["test2"], ())),
("jjb_configs/dir3", (["bar"], ())),
("jjb_configs/test3", (["bar", "baz"], ())),
("jjb_configs/dir1/test", ([], ())),
("jjb_configs/dir2/test2", ([], ())),
("jjb_configs/dir3/bar", ([], ())),
("jjb_configs/test3/bar", None),
("jjb_configs/test3/baz", ([], ())),
]
def test_recursive_path_option_exclude_relative(mocker):
"""
Test paths returned by the recursive processing when using relative
excludes.
rel_os_walk_paths = [
(os.path.abspath(os.path.join(os.path.curdir, k)), v)
for k, v in os_walk_paths
]
testing paths
./jjb_configs/dir1/test/
./jjb_configs/dir1/file
./jjb_configs/dir2/test/
./jjb_configs/dir3/bar/
./jjb_configs/test3/bar/
./jjb_configs/test3/baz/
"""
paths = [k for k, v in rel_os_walk_paths if v is not None]
os_walk_paths = [
("jjb_configs", (["dir1", "dir2", "dir3", "test3"], ())),
("jjb_configs/dir1", (["test"], ("file"))),
("jjb_configs/dir2", (["test2"], ())),
("jjb_configs/dir3", (["bar"], ())),
("jjb_configs/test3", (["bar", "baz"], ())),
("jjb_configs/dir1/test", ([], ())),
("jjb_configs/dir2/test2", ([], ())),
("jjb_configs/dir3/bar", ([], ())),
("jjb_configs/test3/bar", None),
("jjb_configs/test3/baz", ([], ())),
]
oswalk_mock.side_effect = fake_os_walk(rel_os_walk_paths)
rel_os_walk_paths = [
(str(Path.cwd().joinpath(k).absolute()), v) for k, v in os_walk_paths
]
self.assertEqual(
paths, utils.recurse_path("jjb_configs", ["jjb_configs/test3/bar"])
)
paths = [k for k, v in rel_os_walk_paths if v is not None]
mocker.patch(
"jenkins_jobs.utils.os.walk", side_effect=fake_os_walk(rel_os_walk_paths)
)
assert paths == utils.recurse_path("jjb_configs", ["jjb_configs/test3/bar"])

tests/conftest.py Normal file
View File

@@ -0,0 +1,169 @@
import configparser
import pkg_resources
import xml.etree.ElementTree as XML
from pathlib import Path
import pytest
from jenkins_jobs.alphanum import AlphanumSort
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.modules import project_externaljob
from jenkins_jobs.modules import project_flow
from jenkins_jobs.modules import project_githuborg
from jenkins_jobs.modules import project_matrix
from jenkins_jobs.modules import project_maven
from jenkins_jobs.modules import project_multibranch
from jenkins_jobs.modules import project_multijob
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.registry import ModuleRegistry
from jenkins_jobs.xml_config import XmlJob, XmlJobGenerator
import jenkins_jobs.local_yaml as yaml
# Avoid writing to ~/.cache/jenkins_jobs.
@pytest.fixture(autouse=True)
def job_cache_mocked(mocker):
mocker.patch("jenkins_jobs.builder.JobCache", autospec=True)
@pytest.fixture
def config_path(scenario):
return scenario.config_path
@pytest.fixture
def jjb_config(config_path):
config = JJBConfig(config_path)
config.validate()
return config
@pytest.fixture
def mock_iter_entry_points():
config = configparser.ConfigParser()
config.read(Path(__file__).parent / "../setup.cfg")
groups = {}
for key in config["entry_points"]:
groups[key] = list()
for line in config["entry_points"][key].split("\n"):
if "" == line.strip():
continue
groups[key].append(
pkg_resources.EntryPoint.parse(line, dist=pkg_resources.Distribution())
)
def iter_entry_points(group, name=None):
return (entry for entry in groups[group] if name is None or name == entry.name)
return iter_entry_points
@pytest.fixture
def input(scenario):
return yaml.load(scenario.in_path.read_text())
@pytest.fixture
def plugins_info(scenario):
if not scenario.plugins_info_path.exists():
return None
return yaml.load(scenario.plugins_info_path.read_text())
@pytest.fixture
def registry(mocker, mock_iter_entry_points, jjb_config, plugins_info):
mocker.patch("pkg_resources.iter_entry_points", side_effect=mock_iter_entry_points)
return ModuleRegistry(jjb_config, plugins_info)
@pytest.fixture
def project(input, registry):
type_to_class = {
"maven": project_maven.Maven,
"matrix": project_matrix.Matrix,
"flow": project_flow.Flow,
"githuborg": project_githuborg.GithubOrganization,
"multijob": project_multijob.MultiJob,
"multibranch": project_multibranch.WorkflowMultiBranch,
"multibranch-defaults": project_multibranch.WorkflowMultiBranchDefaults,
"externaljob": project_externaljob.ExternalJob,
}
try:
class_name = input["project-type"]
except KeyError:
return None
if class_name == "freestyle":
return None
cls = type_to_class[class_name]
return cls(registry)
@pytest.fixture
def expected_output(scenario):
return "".join(path.read_text() for path in sorted(scenario.out_paths))
def check_folder(scenario, jjb_config, input):
if "name" not in input:
return
parser = YamlParser(jjb_config)
*dirs, name = parser._getfullname(input).split("/")
input_dir = scenario.in_path.parent
expected_out_dirs = [input_dir.joinpath(*dirs)]
actual_out_dirs = [path.parent for path in scenario.out_paths]
assert expected_out_dirs == actual_out_dirs
@pytest.fixture
def check_generator(scenario, input, expected_output, jjb_config, registry, project):
registry.set_parser_data({})
if project:
xml = project.root_xml(input)
else:
xml = XML.Element("project")
def check(Generator):
generator = Generator(registry)
generator.gen_xml(xml, input)
check_folder(scenario, jjb_config, input)
pretty_xml = XmlJob(xml, "fixturejob").output().decode()
assert expected_output == pretty_xml
return check
@pytest.fixture
def check_parser(jjb_config, registry):
parser = YamlParser(jjb_config)
def check(in_path):
parser.parse(str(in_path))
_ = parser.expandYaml(registry)
return check
@pytest.fixture
def check_job(scenario, expected_output, jjb_config, registry):
parser = YamlParser(jjb_config)
def check():
parser.parse(str(scenario.in_path))
registry.set_parser_data(parser.data)
job_data_list, view_data_list = parser.expandYaml(registry)
generator = XmlJobGenerator(registry)
job_xml_list = generator.generateXML(job_data_list)
job_xml_list.sort(key=AlphanumSort)
pretty_xml = (
"\n".join(job.output().decode() for job in job_xml_list)
.strip()
.replace("\n\n", "\n")
)
stripped_expected_output = (
expected_output.strip().replace("<BLANKLINE>", "").replace("\n\n", "\n")
)
assert stripped_expected_output == pretty_xml
return check
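These fixtures resolve each other by name: ``check_job`` pulls in ``jjb_config`` and ``registry``, which in turn pull in ``scenario``. For readers new to pytest, a minimal self-contained sketch of that chaining, with illustrative names only::

    import pytest


    @pytest.fixture(params=["alpha", "beta"], ids=str)
    def scenario(request):
        return request.param


    @pytest.fixture
    def expected_output(scenario):
        # Derived fixtures are recomputed for every scenario parameter.
        return scenario.upper()


    def test_chain(scenario, expected_output):
        assert expected_output == scenario.upper()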

View File

@@ -13,24 +13,30 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
from testtools import ExpectedException
import pytest
from jenkins_jobs.errors import JenkinsJobsException
from tests import base
from tests.base import mock
from tests.enum_scenarios import scenario_list
class TestCaseModuleDuplicates(base.SingleJobTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
fixtures_dir = Path(__file__).parent / "fixtures"
@mock.patch("jenkins_jobs.builder.logger", autospec=True)
def test_yaml_snippet(self, mock_logger):
if os.path.basename(self.in_filename).startswith("exception_"):
with ExpectedException(JenkinsJobsException, "^Duplicate .*"):
super(TestCaseModuleDuplicates, self).test_yaml_snippet()
else:
super(TestCaseModuleDuplicates, self).test_yaml_snippet()
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(scenario, check_job):
if scenario.in_path.name.startswith("exception_"):
with pytest.raises(JenkinsJobsException) as excinfo:
check_job()
assert str(excinfo.value).startswith("Duplicate ")
else:
check_job()

tests/enum_scenarios.py Normal file
View File

@@ -0,0 +1,42 @@
#!/usr/bin/env python
#
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
Scenario = namedtuple(
    "Scenario", "name in_path out_paths config_path plugins_info_path"
)
def scenario_list(fixtures_dir, in_ext=".yaml", out_ext=".xml"):
for path in fixtures_dir.rglob(f"*{in_ext}"):
if path.name.endswith("plugins_info.yaml"):
continue
out_path = path.with_suffix(out_ext)
out_path_list = list(fixtures_dir.rglob(out_path.name))
yield Scenario(
name=path.stem,
in_path=path,
out_paths=out_path_list,
# When the config file is missing, its path is still passed (not None),
# so JJBConfig will prefer it over the system and user configs.
config_path=path.with_suffix(".conf"),
plugins_info_path=path.with_suffix(".plugins_info.yaml"),
)
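A hedged illustration of what ``scenario_list`` yields, assuming a hypothetical fixtures directory containing job1.yaml, job1.xml and job1.conf::

    from pathlib import Path

    from tests.enum_scenarios import scenario_list

    # Hypothetical layout; real fixture names live under tests/*/fixtures.
    for scenario in scenario_list(Path("tests/general/fixtures")):
        print(scenario.name)         # e.g. "job1"
        print(scenario.in_path)      # .../job1.yaml
        print(scenario.out_paths)    # [.../job1.xml]
        print(scenario.config_path)  # .../job1.conf (may not exist)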

View File

@@ -1,7 +1,6 @@
from testtools import ExpectedException
import pytest
from jenkins_jobs import errors
from tests import base
def dispatch(exc, *args):
@@ -21,65 +20,67 @@ def gen_xml(exc, *args):
raise exc(*args)
class TestInvalidAttributeError(base.BaseTestCase):
def test_no_valid_values(self):
# When given no valid values, InvalidAttributeError simply displays a
# message indicating the invalid value, the component type, the
# component name, and the attribute name.
message = "'{0}' is an invalid value for attribute {1}.{2}".format(
"fnord", "type.name", "fubar"
)
with ExpectedException(errors.InvalidAttributeError, message):
dispatch(errors.InvalidAttributeError, "fubar", "fnord")
def test_with_valid_values(self):
# When given valid values, InvalidAttributeError displays a message
# indicating the invalid value, the component type, the component name,
# and the attribute name; additionally, it lists the valid values for
# the current component type & name.
valid_values = ["herp", "derp"]
message = "'{0}' is an invalid value for attribute {1}.{2}".format(
"fnord", "type.name", "fubar"
)
message += "\nValid values include: {0}".format(
", ".join("'{0}'".format(value) for value in valid_values)
)
with ExpectedException(errors.InvalidAttributeError, message):
dispatch(errors.InvalidAttributeError, "fubar", "fnord", valid_values)
def test_no_valid_values():
# When given no valid values, InvalidAttributeError simply displays a
# message indicating the invalid value, the component type, the
# component name, and the attribute name.
message = "'{0}' is an invalid value for attribute {1}.{2}".format(
"fnord", "type.name", "fubar"
)
with pytest.raises(errors.InvalidAttributeError) as excinfo:
dispatch(errors.InvalidAttributeError, "fubar", "fnord")
assert str(excinfo.value) == message
class TestMissingAttributeError(base.BaseTestCase):
def test_with_single_missing_attribute(self):
# When passed a single missing attribute, display a message indicating
# * the missing attribute
# * which component type and component name is missing it.
missing_attribute = "herp"
message = "Missing {0} from an instance of '{1}'".format(
missing_attribute, "type.name"
)
def test_with_valid_values():
# When given valid values, InvalidAttributeError displays a message
# indicating the invalid value, the component type, the component name,
# and the attribute name; additionally, it lists the valid values for
# the current component type & name.
valid_values = ["herp", "derp"]
message = "'{0}' is an invalid value for attribute {1}.{2}".format(
"fnord", "type.name", "fubar"
)
message += "\nValid values include: {0}".format(
", ".join("'{0}'".format(value) for value in valid_values)
)
with ExpectedException(errors.MissingAttributeError, message):
dispatch(errors.MissingAttributeError, missing_attribute)
with pytest.raises(errors.InvalidAttributeError) as excinfo:
dispatch(errors.InvalidAttributeError, "fubar", "fnord", valid_values)
assert str(excinfo.value) == message
with ExpectedException(
errors.MissingAttributeError, message.replace("type.name", "module")
):
gen_xml(errors.MissingAttributeError, missing_attribute)
def test_with_multiple_missing_attributes(self):
# When passed multiple missing attributes, display a message indicating
# * the missing attributes
# * which component type and component name is missing it.
missing_attribute = ["herp", "derp"]
message = "One of {0} must be present in '{1}'".format(
", ".join("'{0}'".format(value) for value in missing_attribute), "type.name"
)
def test_with_single_missing_attribute():
# When passed a single missing attribute, display a message indicating
# * the missing attribute
# * which component type and component name is missing it.
missing_attribute = "herp"
message = "Missing {0} from an instance of '{1}'".format(
missing_attribute, "type.name"
)
with ExpectedException(errors.MissingAttributeError, message):
dispatch(errors.MissingAttributeError, missing_attribute)
with pytest.raises(errors.MissingAttributeError) as excinfo:
dispatch(errors.MissingAttributeError, missing_attribute)
assert str(excinfo.value) == message
with ExpectedException(
errors.MissingAttributeError, message.replace("type.name", "module")
):
gen_xml(errors.MissingAttributeError, missing_attribute)
with pytest.raises(errors.MissingAttributeError) as excinfo:
gen_xml(errors.MissingAttributeError, missing_attribute)
assert str(excinfo.value) == message.replace("type.name", "module")
def test_with_multiple_missing_attributes():
# When passed multiple missing attributes, display a message indicating
# * the missing attributes
# * which component type and component name is missing it.
missing_attribute = ["herp", "derp"]
message = "One of {0} must be present in '{1}'".format(
", ".join("'{0}'".format(value) for value in missing_attribute), "type.name"
)
with pytest.raises(errors.MissingAttributeError) as excinfo:
dispatch(errors.MissingAttributeError, missing_attribute)
assert str(excinfo.value) == message
with pytest.raises(errors.MissingAttributeError) as excinfo:
gen_xml(errors.MissingAttributeError, missing_attribute)
assert str(excinfo.value) == message.replace("type.name", "module")

View File

@@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import general
from tests import base
class TestCaseModuleGeneral(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
klass = general.General
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(general.General)

View File

@@ -13,13 +13,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from tests import base
import os
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import project_githuborg
class TestCaseGithubOrganization(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
default_config_file = "/dev/null"
klass = project_githuborg.GithubOrganization
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(project_githuborg.GithubOrganization)

View File

@@ -12,13 +12,25 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import hipchat_notif
from tests import base
class TestCaseModulePublishers(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
klass = hipchat_notif.HipChat
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(hipchat_notif.HipChat)

View File

@@ -14,72 +14,72 @@
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
from jenkins_jobs.config import JJBConfig
import jenkins_jobs.builder
from tests import base
from tests.base import mock
_plugins_info = {}
_plugins_info["plugin1"] = {"longName": "", "shortName": "", "version": ""}
@mock.patch("jenkins_jobs.builder.JobCache", mock.MagicMock)
class TestCaseTestJenkinsManager(base.BaseTestCase):
def setUp(self):
super(TestCaseTestJenkinsManager, self).setUp()
self.jjb_config = JJBConfig()
self.jjb_config.validate()
@pytest.fixture
def jjb_config():
config = JJBConfig()
config.validate()
return config
def test_plugins_list(self):
self.jjb_config.builder["plugins_info"] = _plugins_info
self.builder = jenkins_jobs.builder.JenkinsManager(self.jjb_config)
self.assertEqual(self.builder.plugins_list, _plugins_info)
def test_plugins_list(jjb_config):
jjb_config.builder["plugins_info"] = _plugins_info
@mock.patch.object(
builder = jenkins_jobs.builder.JenkinsManager(jjb_config)
assert builder.plugins_list == _plugins_info
def test_plugins_list_from_jenkins(mocker, jjb_config):
mocker.patch.object(
jenkins_jobs.builder.jenkins.Jenkins, "get_plugins", return_value=_plugins_info
)
def test_plugins_list_from_jenkins(self, jenkins_mock):
# Trigger fetching the plugins from jenkins when accessing the property
self.jjb_config.builder["plugins_info"] = {}
self.builder = jenkins_jobs.builder.JenkinsManager(self.jjb_config)
# See https://github.com/formiaczek/multi_key_dict/issues/17
# self.assertEqual(self.builder.plugins_list, k)
for key_tuple in self.builder.plugins_list.keys():
for key in key_tuple:
self.assertEqual(self.builder.plugins_list[key], _plugins_info[key])
# Trigger fetching the plugins from jenkins when accessing the property
jjb_config.builder["plugins_info"] = {}
builder = jenkins_jobs.builder.JenkinsManager(jjb_config)
# See https://github.com/formiaczek/multi_key_dict/issues/17
# self.assertEqual(self.builder.plugins_list, k)
for key_tuple in builder.plugins_list.keys():
for key in key_tuple:
assert builder.plugins_list[key] == _plugins_info[key]
def test_delete_managed(self):
self.jjb_config.builder["plugins_info"] = {}
self.builder = jenkins_jobs.builder.JenkinsManager(self.jjb_config)
with mock.patch.multiple(
"jenkins_jobs.builder.JenkinsManager",
get_jobs=mock.DEFAULT,
is_job=mock.DEFAULT,
is_managed=mock.DEFAULT,
delete_job=mock.DEFAULT,
) as patches:
patches["get_jobs"].return_value = [
{"fullname": "job1"},
{"fullname": "job2"},
]
patches["is_managed"].side_effect = [True, True]
patches["is_job"].side_effect = [True, True]
def test_delete_managed(mocker, jjb_config):
jjb_config.builder["plugins_info"] = {}
builder = jenkins_jobs.builder.JenkinsManager(jjb_config)
self.builder.delete_old_managed()
self.assertEqual(patches["delete_job"].call_count, 2)
patches = mocker.patch.multiple(
"jenkins_jobs.builder.JenkinsManager",
get_jobs=mock.DEFAULT,
is_job=mock.DEFAULT,
is_managed=mock.DEFAULT,
delete_job=mock.DEFAULT,
)
patches["get_jobs"].return_value = [
{"fullname": "job1"},
{"fullname": "job2"},
]
patches["is_managed"].side_effect = [True, True]
patches["is_job"].side_effect = [True, True]
def _get_plugins_info_error_test(self, error_string):
builder = jenkins_jobs.builder.JenkinsManager(self.jjb_config)
exception = jenkins_jobs.builder.jenkins.JenkinsException(error_string)
with mock.patch.object(builder.jenkins, "get_plugins", side_effect=exception):
plugins_info = builder.get_plugins_info()
self.assertEqual([_plugins_info["plugin1"]], plugins_info)
builder.delete_old_managed()
assert patches["delete_job"].call_count == 2
def test_get_plugins_info_handles_connectionrefused_errors(self):
self._get_plugins_info_error_test("Connection refused")
def test_get_plugins_info_handles_forbidden_errors(self):
self._get_plugins_info_error_test("Forbidden")
@pytest.mark.parametrize("error_string", ["Connection refused", "Forbidden"])
def test_get_plugins_info_error(mocker, jjb_config, error_string):
builder = jenkins_jobs.builder.JenkinsManager(jjb_config)
exception = jenkins_jobs.builder.jenkins.JenkinsException(error_string)
mocker.patch.object(builder.jenkins, "get_plugins", side_effect=exception)
plugins_info = builder.get_plugins_info()
assert [_plugins_info["plugin1"]] == plugins_info

View File

@@ -15,11 +15,24 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
from tests import base
import pytest
from tests.enum_scenarios import scenario_list
class TestCaseModuleJsonParser(base.SingleJobTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path, in_ext="json", out_ext="xml")
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir, in_ext=".json"),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_job):
check_job()

View File

@@ -14,128 +14,169 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from io import StringIO
from pathlib import Path
from yaml import safe_dump
from testtools import ExpectedException
import json
import pytest
from yaml.composer import ComposerError
import jenkins_jobs.local_yaml as yaml
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.registry import ModuleRegistry
from tests import base
from tests.enum_scenarios import scenario_list
def _exclude_scenarios(input_filename):
return os.path.basename(input_filename).startswith("custom_")
fixtures_dir = Path(__file__).parent / "fixtures"
class TestCaseLocalYamlInclude(base.JsonTestCase):
@pytest.fixture
def read_input(scenario):
def read():
return yaml.load(
scenario.in_path.read_text(),
search_path=[str(fixtures_dir)],
)
return read
@pytest.mark.parametrize(
"scenario",
[
pytest.param(s, id=s.name)
for s in scenario_list(fixtures_dir, out_ext=".json")
if not s.name.startswith(("custom_", "exception_"))
],
)
def test_include(read_input, expected_output):
"""
Verify application specific tags independently of any changes to
modules XML parsing behaviour
"""
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(
fixtures_path, "yaml", "json", filter_func=_exclude_scenarios
)
def test_yaml_snippet(self):
if os.path.basename(self.in_filename).startswith("exception_"):
with ExpectedException(ComposerError, "^found duplicate anchor .*"):
super(TestCaseLocalYamlInclude, self).test_yaml_snippet()
else:
super(TestCaseLocalYamlInclude, self).test_yaml_snippet()
input = read_input()
pretty_json = json.dumps(input, indent=4, separators=(",", ": "))
assert expected_output.rstrip() == pretty_json
class TestCaseLocalYamlAnchorAlias(base.YamlTestCase):
@pytest.mark.parametrize(
"scenario",
[
pytest.param(s, id=s.name)
for s in scenario_list(fixtures_dir, out_ext=".json")
if s.name.startswith("exception_")
],
)
def test_include_error(read_input, expected_output):
with pytest.raises(ComposerError) as excinfo:
_ = read_input()
assert str(excinfo.value).startswith("found duplicate anchor ")
@pytest.mark.parametrize(
"scenario",
[
pytest.param(s, id=s.name)
for s in scenario_list(fixtures_dir, in_ext=".iyaml", out_ext=".oyaml")
],
)
def test_anchor_alias(read_input, expected_output):
"""
Verify yaml input is expanded to the expected yaml output when using yaml
anchors and aliases.
"""
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path, "iyaml", "oyaml")
input = read_input()
data = StringIO(json.dumps(input))
pretty_yaml = safe_dump(json.load(data), default_flow_style=False)
assert expected_output == pretty_yaml
class TestCaseLocalYamlIncludeAnchors(base.BaseTestCase):
def test_include_anchors():
"""
Verify that anchors/aliases only span use of '!include' tag
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
To ensure that any yaml loaded by the include tag is in the same
space as the top level file, but individual top level yaml definitions
are treated by the yaml loader as independent.
"""
def test_multiple_same_anchor_in_multiple_toplevel_yaml(self):
"""
Verify that anchors/aliases only span use of '!include' tag
config = JJBConfig()
config.jenkins["url"] = "http://example.com"
config.jenkins["user"] = "jenkins"
config.jenkins["password"] = "password"
config.builder["plugins_info"] = []
config.validate()
To ensure that any yaml loaded by the include tag is in the same
space as the top level file, but individual top level yaml definitions
are treated by the yaml loader as independent.
"""
files = [
"custom_same_anchor-001-part1.yaml",
"custom_same_anchor-001-part2.yaml",
]
files = [
"custom_same_anchor-001-part1.yaml",
"custom_same_anchor-001-part2.yaml",
]
jjb_config = JJBConfig()
jjb_config.jenkins["url"] = "http://example.com"
jjb_config.jenkins["user"] = "jenkins"
jjb_config.jenkins["password"] = "password"
jjb_config.builder["plugins_info"] = []
jjb_config.validate()
j = YamlParser(jjb_config)
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
parser = YamlParser(config)
# Should not raise ComposerError.
parser.load_files([str(fixtures_dir / name) for name in files])
class TestCaseLocalYamlRetainAnchors(base.BaseTestCase):
def test_retain_anchor_default():
"""
Verify that anchors are NOT retained across files by default.
"""
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
config = JJBConfig()
config.validate()
def test_retain_anchors_default(self):
"""
Verify that anchors are NOT retained across files by default.
"""
files = [
"custom_retain_anchors_include001.yaml",
"custom_retain_anchors.yaml",
]
files = ["custom_retain_anchors_include001.yaml", "custom_retain_anchors.yaml"]
parser = YamlParser(config)
with pytest.raises(ComposerError) as excinfo:
parser.load_files([str(fixtures_dir / name) for name in files])
assert "found undefined alias" in str(excinfo.value)
jjb_config = JJBConfig()
# use the default value for retain_anchors
jjb_config.validate()
j = YamlParser(jjb_config)
with ExpectedException(yaml.composer.ComposerError, "found undefined alias.*"):
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
def test_retain_anchors_enabled(self):
"""
Verify that anchors are retained across files if retain_anchors is
enabled in the config.
"""
def test_retain_anchors_enabled():
"""
Verify that anchors are retained across files if retain_anchors is
enabled in the config.
"""
files = ["custom_retain_anchors_include001.yaml", "custom_retain_anchors.yaml"]
config = JJBConfig()
config.yamlparser["retain_anchors"] = True
config.validate()
jjb_config = JJBConfig()
jjb_config.yamlparser["retain_anchors"] = True
jjb_config.validate()
j = YamlParser(jjb_config)
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
files = [
"custom_retain_anchors_include001.yaml",
"custom_retain_anchors.yaml",
]
def test_retain_anchors_enabled_j2_yaml(self):
"""
Verify that anchors are retained across files and are properly retained when using !j2-yaml.
"""
parser = YamlParser(config)
# Should not raise ComposerError.
parser.load_files([str(fixtures_dir / name) for name in files])
files = [
"custom_retain_anchors_j2_yaml_include001.yaml",
"custom_retain_anchors_j2_yaml.yaml",
]
jjb_config = JJBConfig()
jjb_config.yamlparser["retain_anchors"] = True
jjb_config.validate()
j = YamlParser(jjb_config)
j.load_files([os.path.join(self.fixtures_path, f) for f in files])
def test_retain_anchors_enabled_j2_yaml():
"""
Verify that anchors are retained across files and are properly retained when using !j2-yaml.
"""
registry = ModuleRegistry(jjb_config, None)
jobs, _ = j.expandYaml(registry)
self.assertEqual(jobs[0]["builders"][0]["shell"], "docker run ubuntu:latest")
config = JJBConfig()
config.yamlparser["retain_anchors"] = True
config.validate()
files = [
"custom_retain_anchors_j2_yaml_include001.yaml",
"custom_retain_anchors_j2_yaml.yaml",
]
parser = YamlParser(config)
parser.load_files([str(fixtures_dir / name) for name in files])
registry = ModuleRegistry(config, None)
jobs, _ = parser.expandYaml(registry)
assert "docker run ubuntu:latest" == jobs[0]["builders"][0]["shell"]

View File

@@ -15,11 +15,24 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
from tests import base
import pytest
from tests.enum_scenarios import scenario_list
class TestCaseModuleSCMMacro(base.SingleJobTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_job):
check_job()

View File

@@ -1,140 +1,146 @@
import pkg_resources
from collections import namedtuple
from operator import attrgetter
from testtools.content import text_content
import testscenarios
import pytest
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.registry import ModuleRegistry
from tests import base
class ModuleRegistryPluginInfoTestsWithScenarios(
testscenarios.TestWithScenarios, base.BaseTestCase
):
scenarios = [
("s1", dict(v1="1.0.0", op="__gt__", v2="0.8.0")),
("s2", dict(v1="1.0.1alpha", op="__gt__", v2="1.0.0")),
("s3", dict(v1="1.0", op="__eq__", v2="1.0.0")),
("s4", dict(v1="1.0", op="__eq__", v2="1.0")),
("s5", dict(v1="1.0", op="__lt__", v2="1.8.0")),
("s6", dict(v1="1.0.1alpha", op="__lt__", v2="1.0.1")),
("s7", dict(v1="1.0alpha", op="__lt__", v2="1.0.0")),
("s8", dict(v1="1.0-alpha", op="__lt__", v2="1.0.0")),
("s9", dict(v1="1.1-alpha", op="__gt__", v2="1.0")),
("s10", dict(v1="1.0-SNAPSHOT", op="__lt__", v2="1.0")),
("s11", dict(v1="1.0.preview", op="__lt__", v2="1.0")),
("s12", dict(v1="1.1-SNAPSHOT", op="__gt__", v2="1.0")),
("s13", dict(v1="1.0a-SNAPSHOT", op="__lt__", v2="1.0a")),
(
"s14",
dict(
v1="1.4.6-SNAPSHOT (private-0986edd9-example)", op="__lt__", v2="1.4.6"
),
),
(
"s15",
dict(
v1="1.4.6-SNAPSHOT (private-0986edd9-example)", op="__gt__", v2="1.4.5"
),
),
("s16", dict(v1="1.0.1-1.v1", op="__gt__", v2="1.0.1")),
("s17", dict(v1="1.0.1-1.v1", op="__lt__", v2="1.0.2")),
("s18", dict(v1="1.0.2-1.v1", op="__gt__", v2="1.0.1")),
("s19", dict(v1="1.0.2-1.v1", op="__gt__", v2="1.0.1-2")),
Scenario = namedtuple("Scnenario", "name v1 op v2")
scenarios = [
Scenario("s1", v1="1.0.0", op="__gt__", v2="0.8.0"),
Scenario("s2", v1="1.0.1alpha", op="__gt__", v2="1.0.0"),
Scenario("s3", v1="1.0", op="__eq__", v2="1.0.0"),
Scenario("s4", v1="1.0", op="__eq__", v2="1.0"),
Scenario("s5", v1="1.0", op="__lt__", v2="1.8.0"),
Scenario("s6", v1="1.0.1alpha", op="__lt__", v2="1.0.1"),
Scenario("s7", v1="1.0alpha", op="__lt__", v2="1.0.0"),
Scenario("s8", v1="1.0-alpha", op="__lt__", v2="1.0.0"),
Scenario("s9", v1="1.1-alpha", op="__gt__", v2="1.0"),
Scenario("s10", v1="1.0-SNAPSHOT", op="__lt__", v2="1.0"),
Scenario("s11", v1="1.0.preview", op="__lt__", v2="1.0"),
Scenario("s12", v1="1.1-SNAPSHOT", op="__gt__", v2="1.0"),
Scenario("s13", v1="1.0a-SNAPSHOT", op="__lt__", v2="1.0a"),
Scenario(
"s14", v1="1.4.6-SNAPSHOT (private-0986edd9-example)", op="__lt__", v2="1.4.6"
),
Scenario(
"s15", v1="1.4.6-SNAPSHOT (private-0986edd9-example)", op="__gt__", v2="1.4.5"
),
Scenario("s16", v1="1.0.1-1.v1", op="__gt__", v2="1.0.1"),
Scenario("s17", v1="1.0.1-1.v1", op="__lt__", v2="1.0.2"),
Scenario("s18", v1="1.0.2-1.v1", op="__gt__", v2="1.0.1"),
Scenario("s19", v1="1.0.2-1.v1", op="__gt__", v2="1.0.1-2"),
]
@pytest.fixture(
params=scenarios,
ids=attrgetter("name"),
)
def scenario(request):
return request.param
@pytest.fixture
def config():
config = JJBConfig()
config.validate()
return config
@pytest.fixture
def registry(config, scenario):
plugin_info = [
{
"shortName": "HerpDerpPlugin",
"longName": "Blah Blah Blah Plugin",
},
{
"shortName": "JankyPlugin1",
"longName": "Not A Real Plugin",
"version": scenario.v1,
},
]
return ModuleRegistry(config, plugin_info)
def setUp(self):
super(ModuleRegistryPluginInfoTestsWithScenarios, self).setUp()
jjb_config = JJBConfig()
jjb_config.validate()
def test_get_plugin_info_dict(registry):
"""
The goal of this test is to validate that the plugin_info returned by
ModuleRegistry.get_plugin_info is a dictionary whose key 'shortName' is
the same value as the string argument passed to
ModuleRegistry.get_plugin_info.
"""
plugin_name = "JankyPlugin1"
plugin_info = registry.get_plugin_info(plugin_name)
plugin_info = [
{"shortName": "HerpDerpPlugin", "longName": "Blah Blah Blah Plugin"}
]
plugin_info.append(
{
"shortName": "JankyPlugin1",
"longName": "Not A Real Plugin",
"version": self.v1,
}
)
assert isinstance(plugin_info, dict)
assert plugin_info["shortName"] == plugin_name
self.addDetail("plugin_info", text_content(str(plugin_info)))
self.registry = ModuleRegistry(jjb_config, plugin_info)
def tearDown(self):
super(ModuleRegistryPluginInfoTestsWithScenarios, self).tearDown()
def test_get_plugin_info_dict_using_longName(registry):
"""
The goal of this test is to validate that the plugin_info returned by
ModuleRegistry.get_plugin_info is a dictionary whose key 'longName' is
the same value as the string argument passed to
ModuleRegistry.get_plugin_info.
"""
plugin_name = "Blah Blah Blah Plugin"
plugin_info = registry.get_plugin_info(plugin_name)
def test_get_plugin_info_dict(self):
"""
The goal of this test is to validate that the plugin_info returned by
ModuleRegistry.get_plugin_info is a dictionary whose key 'shortName' is
the same value as the string argument passed to
ModuleRegistry.get_plugin_info.
"""
plugin_name = "JankyPlugin1"
plugin_info = self.registry.get_plugin_info(plugin_name)
assert isinstance(plugin_info, dict)
assert plugin_info["longName"] == plugin_name
self.assertIsInstance(plugin_info, dict)
self.assertEqual(plugin_info["shortName"], plugin_name)
def test_get_plugin_info_dict_using_longName(self):
"""
The goal of this test is to validate that the plugin_info returned by
ModuleRegistry.get_plugin_info is a dictionary whose key 'longName' is
the same value as the string argument passed to
ModuleRegistry.get_plugin_info.
"""
plugin_name = "Blah Blah Blah Plugin"
plugin_info = self.registry.get_plugin_info(plugin_name)
def test_get_plugin_info_dict_no_plugin(registry):
"""
The goal of this test case is to validate the behavior of
ModuleRegistry.get_plugin_info when the given plugin cannot be found in
ModuleRegistry's internal representation of the plugins_info.
"""
plugin_name = "PluginDoesNotExist"
plugin_info = registry.get_plugin_info(plugin_name)
self.assertIsInstance(plugin_info, dict)
self.assertEqual(plugin_info["longName"], plugin_name)
assert isinstance(plugin_info, dict)
assert plugin_info == {}
def test_get_plugin_info_dict_no_plugin(self):
"""
The goal of this test case is to validate the behavior of
ModuleRegistry.get_plugin_info when the given plugin cannot be found in
ModuleRegistry's internal representation of the plugins_info.
"""
plugin_name = "PluginDoesNotExist"
plugin_info = self.registry.get_plugin_info(plugin_name)
self.assertIsInstance(plugin_info, dict)
self.assertEqual(plugin_info, {})
def test_get_plugin_info_dict_no_version(registry):
"""
The goal of this test case is to validate the behavior of
ModuleRegistry.get_plugin_info when the given plugin shortName returns
plugin_info dict that has no version string. In a sane world where
plugin frameworks like Jenkins' are sane this should never happen, but
I am including this test and the corresponding default behavior
because, well, it's Jenkins.
"""
plugin_name = "HerpDerpPlugin"
plugin_info = registry.get_plugin_info(plugin_name)
def test_get_plugin_info_dict_no_version(self):
"""
The goal of this test case is to validate the behavior of
ModuleRegistry.get_plugin_info when the given plugin shortName returns
plugin_info dict that has no version string. In a sane world where
plugin frameworks like Jenkins' are sane this should never happen, but
I am including this test and the corresponding default behavior
because, well, it's Jenkins.
"""
plugin_name = "HerpDerpPlugin"
plugin_info = self.registry.get_plugin_info(plugin_name)
assert isinstance(plugin_info, dict)
assert plugin_info["shortName"] == plugin_name
assert plugin_info["version"] == "0"
self.assertIsInstance(plugin_info, dict)
self.assertEqual(plugin_info["shortName"], plugin_name)
self.assertEqual(plugin_info["version"], "0")
    def test_plugin_version_comparison(self):
        """
        The goal of this test case is to validate that valid tuple versions are
        ordinally correct. That is, for each given scenario, v1.op(v2)==True
        where 'op' is the equality operator defined for the scenario.
        """
        plugin_name = "JankyPlugin1"
        plugin_info = self.registry.get_plugin_info(plugin_name)
        v1 = plugin_info.get("version")
        op = getattr(pkg_resources.parse_version(v1), self.op)
        test = op(pkg_resources.parse_version(self.v2))

        self.assertTrue(
            test,
            msg="Unexpectedly found {0} {2} {1} == False "
            "when comparing versions!".format(v1, self.v2, self.op),
        )


def test_plugin_version_comparison(registry, scenario):
    """
    The goal of this test case is to validate that valid tuple versions are
    ordinally correct. That is, for each given scenario, v1.op(v2)==True
    where 'op' is the equality operator defined for the scenario.
    """
    plugin_name = "JankyPlugin1"
    plugin_info = registry.get_plugin_info(plugin_name)
    v1 = plugin_info.get("version")

    op = getattr(pkg_resources.parse_version(v1), scenario.op)
    test = op(pkg_resources.parse_version(scenario.v2))

    assert test, (
        f"Unexpectedly found {v1} {scenario.op} {scenario.v2} == False"
        " when comparing versions!"
    )
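The scenario table relies on ``pkg_resources.parse_version`` ranking pre-release and snapshot suffixes below the final release; three of the cases above, replayed directly::

    from pkg_resources import parse_version

    assert parse_version("1.0.1alpha") > parse_version("1.0.0")  # s2
    assert parse_version("1.0") == parse_version("1.0.0")        # s3
    assert parse_version("1.0-SNAPSHOT") < parse_version("1.0")  # s10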

View File

@@ -13,10 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from testtools.matchers import Equals
import xml.etree.ElementTree as XML
import yaml
import pytest
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import MissingAttributeError
from jenkins_jobs.errors import JenkinsJobsException
@@ -24,111 +25,108 @@ from jenkins_jobs.modules.helpers import (
convert_mapping_to_xml,
check_mutual_exclusive_data_args,
)
from tests import base
class TestCaseTestHelpers(base.BaseTestCase):
def test_convert_mapping_to_xml(self):
"""
Tests the test_convert_mapping_to_xml_fail_required function
"""
def test_convert_mapping_to_xml():
"""
Tests the test_convert_mapping_to_xml_fail_required function
"""
# Test default values
default_root = XML.Element("testdefault")
default_data = yaml.safe_load("string: hello")
default_mappings = [("default-string", "defaultString", "default")]
# Test default values
default_root = XML.Element("testdefault")
default_data = yaml.safe_load("string: hello")
default_mappings = [("default-string", "defaultString", "default")]
convert_mapping_to_xml(
default_root, default_data, default_mappings, fail_required=True
)
result = default_root.find("defaultString").text
result == "default"
# Test user input
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: hello")
user_input_mappings = [("user-input-string", "userInputString", "user-input")]
convert_mapping_to_xml(
user_input_root, user_input_data, user_input_mappings, fail_required=True
)
result = user_input_root.find("userInputString").text
result == "hello"
# Test missing required input
required_root = XML.Element("testrequired")
required_data = yaml.safe_load("string: hello")
required_mappings = [("required-string", "requiredString", None)]
with pytest.raises(MissingAttributeError):
    convert_mapping_to_xml(
        required_root, required_data, required_mappings, fail_required=True
    )
result = default_root.find("defaultString").text
self.assertThat(result, Equals("default"))
# Test user input
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: hello")
user_input_mappings = [("user-input-string", "userInputString", "user-input")]
convert_mapping_to_xml(
user_input_root, user_input_data, user_input_mappings, fail_required=True
)
result = user_input_root.find("userInputString").text
self.assertThat(result, Equals("hello"))
# Test missing required input
required_root = XML.Element("testrequired")
required_data = yaml.safe_load("string: hello")
required_mappings = [("required-string", "requiredString", None)]
self.assertRaises(
MissingAttributeError,
convert_mapping_to_xml,
required_root,
required_data,
required_mappings,
fail_required=True,
)
# Test invalid user input for list
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: bye")
valid_inputs = ["hello"]
user_input_mappings = [
("user-input-string", "userInputString", "user-input", valid_inputs)
]
# Test invalid user input for list
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: bye")
valid_inputs = ["hello"]
user_input_mappings = [
("user-input-string", "userInputString", "user-input", valid_inputs)
]
self.assertRaises(
InvalidAttributeError,
convert_mapping_to_xml,
with pytest.raises(InvalidAttributeError):
convert_mapping_to_xml(
user_input_root,
user_input_data,
user_input_mappings,
)
# Test invalid user input for dict
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: later")
valid_inputs = {"hello": "world"}
user_input_mappings = [
("user-input-string", "userInputString", "user-input", valid_inputs)
]
# Test invalid user input for dict
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: later")
valid_inputs = {"hello": "world"}
user_input_mappings = [
("user-input-string", "userInputString", "user-input", valid_inputs)
]
self.assertRaises(
InvalidAttributeError,
convert_mapping_to_xml,
with pytest.raises(InvalidAttributeError):
convert_mapping_to_xml(
user_input_root,
user_input_data,
user_input_mappings,
)
# Test invalid key for dict
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: world")
valid_inputs = {"hello": "world"}
user_input_mappings = [
("user-input-string", "userInputString", "user-input", valid_inputs)
]
# Test invalid key for dict
user_input_root = XML.Element("testUserInput")
user_input_data = yaml.safe_load("user-input-string: world")
valid_inputs = {"hello": "world"}
user_input_mappings = [
("user-input-string", "userInputString", "user-input", valid_inputs)
]
self.assertRaises(
InvalidAttributeError,
convert_mapping_to_xml,
with pytest.raises(InvalidAttributeError):
convert_mapping_to_xml(
user_input_root,
user_input_data,
user_input_mappings,
)
def test_check_mutual_exclusive_data_args_no_mutual_exclusive(self):
@check_mutual_exclusive_data_args(0, "foo", "bar")
@check_mutual_exclusive_data_args(0, "foo", "baz")
def func(data):
pass
func({"baz": "qaz", "bar": "qaz"})
def test_check_mutual_exclusive_data_args_no_mutual_exclusive():
@check_mutual_exclusive_data_args(0, "foo", "bar")
@check_mutual_exclusive_data_args(0, "foo", "baz")
def func(data):
pass
def test_check_mutual_exclusive_data_args_mutual_exclusive(self):
@check_mutual_exclusive_data_args(0, "foo", "bar")
@check_mutual_exclusive_data_args(0, "foo", "baz")
def func(data):
pass
func({"baz": "qaz", "bar": "qaz"})
self.assertRaises(JenkinsJobsException, func, {"foo": "qaz", "bar": "qaz"})
def test_check_mutual_exclusive_data_args_mutual_exclusive():
@check_mutual_exclusive_data_args(0, "foo", "bar")
@check_mutual_exclusive_data_args(0, "foo", "baz")
def func(data):
pass
with pytest.raises(JenkinsJobsException):
func({"foo": "qaz", "bar": "qaz"})

View File

@@ -13,15 +13,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from tests import base
from tests.base import mock
import os
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import project_multibranch
@mock.patch("uuid.uuid4", mock.Mock(return_value="1-1-1-1-1"))
class TestCaseMultibranchPipeline(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
default_config_file = "/dev/null"
klass = project_multibranch.WorkflowMultiBranch
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(project_multibranch.WorkflowMultiBranch)
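This scenario fixture pattern repeats across the module test files below: scenario_list() enumerates the YAML fixtures in a directory, and each one becomes an individually selectable pytest parameter. A hypothetical sketch of the objects it yields (the real helper lives in tests/enum_scenarios.py; the names here are illustrative only):

from collections import namedtuple

Scenario = namedtuple("Scenario", "name in_path out_paths")  # assumed shape

def scenario_list_sketch(fixtures_dir, in_ext=".yaml", out_ext=".xml"):
    # one scenario per input fixture; ids=attrgetter("name") reads .name
    return [
        Scenario(
            name=path.stem,
            in_path=path,
            out_paths=sorted(fixtures_dir.glob(path.stem + "*" + out_ext)),
        )
        for path in sorted(fixtures_dir.rglob("*" + in_ext))
    ]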

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import notifications
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(notifications.Notifications)

View File

@ -15,53 +15,52 @@
import time
from multiprocessing import cpu_count
from jenkins_jobs.parallel import concurrent
def test_parallel_correct_order():
expected = list(range(10, 20))
@concurrent
def parallel_test(num_base, num_extra):
return num_base + num_extra
parallel_args = [{"num_extra": num} for num in range(10)]
result = parallel_test(10, concurrent=parallel_args)
assert result == expected
def test_parallel_time_less_than_serial():
@concurrent
def wait(secs):
time.sleep(secs)
before = time.time()
# ten threads to make it as fast as possible
wait(concurrent=[{"secs": 1} for _ in range(10)], n_workers=10)
after = time.time()
assert after - before < 5
def test_parallel_single_thread():
expected = list(range(10, 20))
@concurrent
def parallel_test(num_base, num_extra):
return num_base + num_extra
parallel_args = [{"num_extra": num} for num in range(10)]
result = parallel_test(10, concurrent=parallel_args, n_workers=1)
assert result == expected
def test_use_auto_detect_cores(mocker):
mock = mocker.patch("jenkins_jobs.parallel.cpu_count", wraps=cpu_count)
@concurrent
def parallel_test():
return True
result = parallel_test(concurrent=[{} for _ in range(10)], n_workers=0)
assert result == [True for _ in range(10)]
mock.assert_called_once_with()
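Usage of @concurrent outside the tests follows the same shape: one call with a concurrent= list of per-invocation kwargs, an optional n_workers, and results returned in submission order (a usage sketch derived from the assertions above, given the module's concurrent import):

@concurrent
def add(base, offset):
    return base + offset

# fans four calls out over two workers; result order matches the args list
results = add(100, concurrent=[{"offset": n} for n in range(4)], n_workers=2)
assert results == [100, 101, 102, 103]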

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import parameters
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(parameters.Parameters)

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import properties
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(properties.Properties)

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import publishers
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(publishers.Publishers)

View File

@ -14,13 +14,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import reporters
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(reporters.Reporters)

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import scm
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(scm.SCM)

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import triggers
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(triggers.Triggers)

View File

@ -12,47 +12,62 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from jenkins_jobs.modules import view_all
from jenkins_jobs.modules import view_delivery_pipeline
from jenkins_jobs.modules import view_list
from jenkins_jobs.modules import view_nested
from jenkins_jobs.modules import view_pipeline
from jenkins_jobs.modules import view_sectioned
from tests.enum_scenarios import scenario_list
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
# The fixture is named 'project', but for view modules it actually produces a view.
@pytest.fixture
def project(input, registry):
type_to_class = {
"all": view_all.All,
"delivery_pipeline": view_delivery_pipeline.DeliveryPipeline,
"list": view_list.List,
"nested": view_nested.Nested,
"pipeline": view_pipeline.Pipeline,
"sectioned": view_sectioned.Sectioned,
}
try:
class_name = input["view-type"]
except KeyError:
raise RuntimeError("'view-type' element is expected in input yaml")
cls = type_to_class[class_name]
return cls(registry)
view_class_list = [
view_all.All,
view_delivery_pipeline.DeliveryPipeline,
view_list.List,
view_nested.Nested,
view_pipeline.Pipeline,
view_sectioned.Sectioned,
]
@pytest.mark.parametrize(
"view_class", [pytest.param(cls, id=cls.__name__) for cls in view_class_list]
)
def test_view(view_class, check_generator):
check_generator(view_class)
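The payoff of this fixture/parametrize conversion is per-scenario selection from the command line, e.g. (illustrative invocations; the exact test ids depend on the generated parameter names):

# run only the Sectioned view tests
pytest tests/views/test_views.py -k Sectioned
# list the generated per-scenario test ids without running them
pytest tests/views/test_views.py --collect-only -q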

View File

@ -15,13 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
from jenkins_jobs.modules import wrappers
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_generator):
check_generator(wrappers.Wrappers)

View File

@ -12,65 +12,67 @@
# License for the specific language governing permissions and limitations
# under the License.
from pathlib import Path
import pytest
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.registry import ModuleRegistry
from jenkins_jobs.xml_config import XmlJobGenerator, XmlViewGenerator
fixtures_dir = Path(__file__).parent / "exceptions"
@pytest.fixture
def config():
config = JJBConfig()
config.validate()
return config
@pytest.fixture
def parser(config):
return YamlParser(config)
@pytest.fixture
def registry(config):
return ModuleRegistry(config)
def test_invalid_project(parser, registry):
parser.parse(str(fixtures_dir / "invalid_project.yaml"))
jobs, views = parser.expandYaml(registry)
generator = XmlJobGenerator(registry)
with pytest.raises(JenkinsJobsException) as excinfo:
generator.generateXML(jobs)
assert "Unrecognized project-type:" in str(excinfo.value)
def test_invalid_view(parser, registry):
parser.parse(str(fixtures_dir / "invalid_view.yaml"))
jobs, views = parser.expandYaml(registry)
generator = XmlViewGenerator(registry)
with pytest.raises(JenkinsJobsException) as excinfo:
generator.generateXML(views)
assert "Unrecognized view-type:" in str(excinfo.value)
def test_template_params(caplog, parser, registry):
parser.parse(str(fixtures_dir / "failure_formatting_component.yaml"))
registry.set_parser_data(parser.data)
jobs, views = parser.expandYaml(registry)
generator = XmlJobGenerator(registry)
with pytest.raises(Exception):
generator.generateXML(jobs)
assert "Failure formatting component" in caplog.text
assert "Problem formatting with args" in caplog.text

View File

@ -0,0 +1,41 @@
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from operator import attrgetter
from pathlib import Path
import pytest
from tests.enum_scenarios import scenario_list
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(
params=scenario_list(fixtures_dir),
ids=attrgetter("name"),
)
def scenario(request):
return request.param
def test_yaml_snippet(check_job):
# Some tests use configs with 'include_path' and expect the JJB root to be the current directory.
os.chdir(Path(__file__).parent / "../..")
check_job()
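Since os.chdir here mutates process-global state for every test that follows, an equivalent using pytest's built-in monkeypatch fixture would restore the working directory automatically (an alternative sketch, not what this change does):

def test_yaml_snippet(check_job, monkeypatch):
    # chdir is undone automatically at test teardown
    monkeypatch.chdir(Path(__file__).parent / "../..")
    check_job()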

View File

@ -0,0 +1,53 @@
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from pathlib import Path
import pytest
exceptions_dir = Path(__file__).parent / "exceptions"
# Override to avoid scenarios usage.
@pytest.fixture
def config_path():
return os.devnull
# Override to avoid scenarios usage.
@pytest.fixture
def plugins_info():
return None
def test_incorrect_template_dimensions(caplog, check_parser):
in_path = exceptions_dir / "incorrect_template_dimensions.yaml"
with pytest.raises(Exception) as excinfo:
check_parser(in_path)
assert "'NoneType' object is not iterable" in str(excinfo.value)
assert "- branch: current\n current: null" in caplog.text
@pytest.mark.parametrize("name", ["template", "params"])
def test_failure_formatting(caplog, check_parser, name):
in_path = exceptions_dir / f"failure_formatting_{name}.yaml"
with pytest.raises(Exception):
check_parser(in_path)
assert f"Failure formatting {name}" in caplog.text
assert "Problem formatting with args" in caplog.text

View File

@ -1,67 +0,0 @@
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from jenkins_jobs import parser
from jenkins_jobs import registry
from tests import base
class TestCaseModuleYamlInclude(base.SingleJobTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
scenarios = base.get_scenarios(fixtures_path)
class TestYamlParserExceptions(base.BaseTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "exceptions")
def test_incorrect_template_dimensions(self):
self.conf_filename = None
config = self._get_config()
yp = parser.YamlParser(config)
yp.parse(os.path.join(self.fixtures_path, "incorrect_template_dimensions.yaml"))
reg = registry.ModuleRegistry(config)
e = self.assertRaises(Exception, yp.expandYaml, reg)
self.assertIn("'NoneType' object is not iterable", str(e))
self.assertIn("- branch: current\n current: null", self.logger.output)
class TestYamlParserFailureFormattingExceptions(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), "exceptions")
scenarios = [("s1", {"name": "template"}), ("s2", {"name": "params"})]
def test_yaml_snippet(self):
self.conf_filename = None
config = self._get_config()
yp = parser.YamlParser(config)
yp.parse(
os.path.join(
self.fixtures_path, "failure_formatting_{}.yaml".format(self.name)
)
)
reg = registry.ModuleRegistry(config)
self.assertRaises(Exception, yp.expandYaml, reg)
self.assertIn("Failure formatting {}".format(self.name), self.logger.output)
self.assertIn("Problem formatting with args", self.logger.output)

View File

@ -22,7 +22,7 @@ commands =
- find . -type d -name "__pycache__" -delete
# test that we can call jjb using both variants with same results
bash {toxinidir}/tools/test-commands.sh
pytest {posargs}
whitelist_externals =
bash
find
@ -34,16 +34,14 @@ commands =
bash -c "if [ -d {toxinidir}/../python-jenkins ]; then \
pip install -q -U -e 'git+file://{toxinidir}/../python-jenkins#egg=python-jenkins' ; else \
pip install -q -U -e 'git+https://git.openstack.org/openstack/python-jenkins@master#egg=python-jenkins' ; fi "
pytest {posargs}
[testenv:cover]
setenv =
{[testenv]setenv}
commands =
{[tox]install_test_deps}
coverage run --source jenkins_jobs -m pytest {posargs}
coverage html -d cover
coverage xml -o cover/coverage.xml
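With stestr gone, the cover env drives pytest under coverage directly; the same run can be reproduced locally (standard tox/coverage usage):

tox -e cover
# or without tox:
coverage run --source jenkins_jobs -m pytest
coverage html -d cover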