Compare Rally test results before/after patching
If Rally tests are specified in 'erratum.yaml' for a bug, run the
benchmarks before and after patching and compare the results.

Change-Id: I43b1f4ee06e07143f5f12307bc3cba4147577ef3
Implements: blueprint mos-patching-tests
parent 2100390eac
commit 963a65bf3b
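The change keys off a 'rally' entry in the bug's erratum.yaml. The commit does not show a sample file, but the map_test() hunk below implies the file parses to a mapping with a list of benchmark tags under 'rally'; a hypothetical parsed form (tag names are illustrative):

    # Hypothetical result of loading an erratum.yaml that requests Rally
    # runs; only the 'rally' key (a list of tags) is implied by the diff.
    errata = {
        'rally': ['nova', 'neutron'],  # illustrative tag names
    }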
@@ -135,6 +135,11 @@ def map_test(target):
                                       env_distro,
                                       target)
 
+    if 'rally' in errata.keys():
+        if len(errata['rally']) > 0:
+            settings.PATCHING_RUN_RALLY = True
+            settings.RALLY_TAGS = errata['rally']
+
     if settings.PATCHING_CUSTOM_TEST:
         deployment_test = settings.PATCHING_CUSTOM_TEST
         settings.PATCHING_SNAPSHOT = \
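The nested guard above is equivalent to a single truthiness check, since a missing key and an empty list should both leave Rally disabled; a compact alternative (a sketch, not the commit's code):

    # Same behavior as the two nested ifs in map_test() above.
    if errata.get('rally'):
        settings.PATCHING_RUN_RALLY = True
        settings.RALLY_TAGS = errata['rally']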
@@ -428,6 +428,7 @@ PATCHING_PKGS = os.environ.get("PATCHING_PKGS", None)
 PATCHING_SNAPSHOT = os.environ.get("PATCHING_SNAPSHOT", None)
 PATCHING_CUSTOM_TEST = os.environ.get("PATCHING_CUSTOM_TEST", None)
 PATCHING_DISABLE_UPDATES = get_var_as_bool('PATCHING_DISABLE_UPDATES', False)
+PATCHING_RUN_RALLY = get_var_as_bool("PATCHING_RUN_RALLY", False)
 
 DOWNLOAD_LINK = os.environ.get(
     'DOWNLOAD_LINK', 'http://releases.ubuntu.com/14.04.2/'
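PATCHING_RUN_RALLY is read through the repository's get_var_as_bool() helper, which this diff does not show. A minimal sketch of such a helper, assuming conventional truthy-string handling (the real implementation may differ):

    import os

    def get_var_as_bool(name, default):
        # Sketch: fall back to the default when the variable is unset,
        # otherwise accept common truthy spellings case-insensitively.
        value = os.environ.get(name)
        if value is None:
            return default
        return value.strip().lower() in ('true', 'yes', '1', 'on')

Exporting PATCHING_RUN_RALLY=true therefore forces the benchmark steps on even when map_test() has not enabled them from erratum.yaml (the tags themselves still have to come from the 'rally' list).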
@@ -24,6 +24,8 @@ from fuelweb_test import logger
 from fuelweb_test import settings
 from fuelweb_test.helpers import patching
 from fuelweb_test.helpers.decorators import log_snapshot_after_test
+from fuelweb_test.helpers.rally import RallyBenchmarkTest
+from fuelweb_test.helpers.rally import RallyResult
 from fuelweb_test.helpers.utils import install_pkg
 from fuelweb_test.tests.base_test_case import TestBasic
 
@@ -72,7 +74,18 @@ class PatchingTests(TestBasic):
         assert_is_not_none(cluster_id, 'Environment for patching not found.')
 
         # Step #2
-        # Run Rally benchmarks, coming soon...
+        if settings.PATCHING_RUN_RALLY:
+            rally_benchmarks = {}
+            benchmark_results1 = {}
+            for tag in set(settings.RALLY_TAGS):
+                rally_benchmarks[tag] = RallyBenchmarkTest(
+                    container_repo=settings.RALLY_DOCKER_REPO,
+                    environment=self.env,
+                    cluster_id=cluster_id,
+                    test_type=tag
+                )
+                benchmark_results1[tag] = rally_benchmarks[tag].run()
+                logger.debug(benchmark_results1[tag].show())
 
         # Step #3
         patching_repos = patching.add_remote_repositories(
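Note that iterating over set(settings.RALLY_TAGS) deduplicates the tag list, so each benchmark type is created and run once even if erratum.yaml repeats a tag, and keying rally_benchmarks by tag lets Step #8 re-run exactly the same objects:

    tags = ['nova', 'nova', 'neutron']  # hypothetical RALLY_TAGS value
    assert len(set(tags)) == 2          # two benchmarks, not three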
@@ -110,8 +123,23 @@ class PatchingTests(TestBasic):
         self.fuel_web.run_ostf(cluster_id=cluster_id)
 
         # Step #8
-        # Run Rally benchmarks, compare new results with previous,
-        # coming soon...
+        if settings.PATCHING_RUN_RALLY:
+            benchmark_results2 = {}
+            for tag in set(settings.RALLY_TAGS):
+                benchmark_results2[tag] = rally_benchmarks[tag].run()
+                logger.debug(benchmark_results2[tag].show())
+
+            rally_benchmarks_passed = True
+
+            for tag in set(settings.RALLY_TAGS):
+                if not RallyResult.compare(benchmark_results1[tag],
+                                           benchmark_results2[tag],
+                                           deviation=0.2):
+                    rally_benchmarks_passed = False
+
+            assert_true(rally_benchmarks_passed,
+                        "Rally benchmarks show performance degradation "
+                        "after packages patching.")
 
 
 @test(groups=["patching_master_tests"])
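fuelweb_test/helpers/rally.py itself is not part of this diff, so the semantics of RallyResult.compare() are not visible here. One plausible sketch of a deviation-based comparison matching the deviation=0.2 call above (the class layout and field names are assumptions, not the real API):

    class RallyResult(object):
        # Sketch of a benchmark result holder; not the real class.
        def __init__(self, avg_duration):
            self.avg_duration = avg_duration  # hypothetical aggregate metric

        @staticmethod
        def compare(result1, result2, deviation=0.2):
            # Pass if the post-patching average duration grew by at most
            # `deviation` (20% here) over the pre-patching baseline.
            return result2.avg_duration <= \
                result1.avg_duration * (1 + deviation)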