Retire repository
Fuel (from openstack namespace) and fuel-ccp (in x namespace)
repositories are unused and ready to retire.

This change removes all content from the repository and adds the
usual README file to point out that the repository is retired,
following the process from
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project

See also
http://lists.openstack.org/pipermail/openstack-discuss/2019-December/011647.html

Depends-On: https://review.opendev.org/699362
Change-Id: I37b6a82c9c3c3893bb4b9b6a4c4b5a83a6d8193c
parent be9b52bcf2
commit 08db3ad05f
@@ -1,5 +0,0 @@
[run]
source =
    core
omit =
    core/_tests/*
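The five deleted lines above are a coverage.py run configuration: measure the
core package and omit its bundled tests from the report. A minimal sketch of
the same options set through the coverage.py API (illustrative only, assuming
coverage.py is installed; this snippet is not part of the repository):

    # Sketch: programmatic equivalent of the deleted [run] section.
    import coverage

    cov = coverage.Coverage(source=['core'], omit=['core/_tests/*'])
    cov.start()
    # ... import and exercise the code under test here ...
    cov.stop()
    cov.report()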
.gitignore (vendored, 63 lines deleted)
@@ -1,63 +0,0 @@
*.py[cod]

# C extensions
*.so

# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
MANIFEST
TAGS
.venv

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.tox
nosetests.xml
unit.xml
/htmlcov/

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject
.idea

# Local example
example_local.py

# Local settings
local_settings.py

# Documentation
doc/_build/

# Logs
/logs
*.log

# Certs
/ca.crt
/ca.pem

# Cache
/.cache
/core/.cache
__pycache__
.pylintrc (481 lines deleted)
@@ -1,481 +0,0 @@
[MASTER]

# Specify a configuration file.
#rcfile=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS, tox, logs

# Pickle collected data for later comparisons.
persistent=yes

# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=

# Use multiple processes to speed up Pylint.
jobs=1

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=

# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality.
optimize-ast=no


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.

# old-style-class (C1001)
# return-arg-in-generator (E0106)
# slots-on-old-class (E1001)
# super-on-old-class (E1002)
# missing-super-argument (E1004)
# print-statement (E1601)
# parameter-unpacking (E1602)
# unpacking-in-except (E1603)
# old-raise-syntax (E1604)
# backtick (E1605)
# long-suffix (E1606)
# old-ne-operator (E1607)
# old-octal-literal (E1608)
# import-star-module-level (E1609)
# lowercase-l-suffix (W0332)
# deprecated-module (W0402)
# invalid-encoded-data (W0512)
# property-on-old-class (W1001)
# boolean-datetime (W1502)
# deprecated-method (W1505)
# apply-builtin (W1601)
# basestring-builtin (W1602)
# buffer-builtin (W1603)
# cmp-builtin (W1604)
# coerce-builtin (W1605)
# execfile-builtin (W1606)
# file-builtin (W1607)
# long-builtin (W1608)
# raw_input-builtin (W1609)
# reduce-builtin (W1610)
# standarderror-builtin (W1611)
# unicode-builtin (W1612)
# xrange-builtin (W1613)
# coerce-method (W1614)
# delslice-method (W1615)
# getslice-method (W1616)
# setslice-method (W1617)
# old-division (W1619)
# dict-iter-method (W1620)
# dict-view-method (W1621)
# next-method-called (W1622)
# metaclass-assignment (W1623)
# indexing-exception (W1624)
# raising-string (W1625)
# reload-builtin (W1626)
# oct-method (W1627)
# hex-method (W1628)
# nonzero-method (W1629)
# cmp-method (W1630)
# input-builtin (W1632)
# round-builtin (W1633)
# intern-builtin (W1634)
# unichr-builtin (W1635)
# map-builtin-not-iterating (W1636)
# zip-builtin-not-iterating (W1637)
# range-builtin-not-iterating (W1638)
# filter-builtin-not-iterating (W1639)
# filter-builtin-not-iterating (W1639)
# using-cmp-argument (W1640)

enable = E0106,C1001,E1001,E1002,E1004,E1601,E1602,E1603,E1604,E1605,E1606,E1607,E1608,E1609,W0332,W0402,W0512,W1001,W1502,W1505,W1601,W1602,W1603,W1604,W1605,W1606,W1607,W1608,W1609,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1619,W1620,W1621,W1622,W1623,W1624,W1625,W1626,W1627,W1628,W1629,W1630,W1632,W1633,W1634,W1635,W1636,W1637,W1638,W1639,W1640,

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"

# Disabling pointless reports:
# RP0401: External dependencies
# RP0402: Modules dependencies graph
# RP0801: Duplication
# R0801: Duplication
# cyclic-import (R0401) - produces false-negative results

# Disabling messages:
# pointless-string-statement (W0105)
# unnecessary-lambda (W0108)
# deprecated-lambda (W0110)
# bad-builtin (W0141)
# fixme (W0511)
# unused-argument (W0613)
# redefined-outer-name (W0621)
# cell-var-from-loop (W0640)
# bare-except (W0702)
# broad-except (W0703)
# logging-format-interpolation (W1202)
# anomalous-backslash-in-string (W1401) - DO NOT ENABLE, INCORRECTLY PARSES REGEX
# no-absolute-import (W1618):
#   import missing `from __future__ import absolute_import` Used when an import is not accompanied by from __future__ import absolute_import (default behaviour in Python 3)

# invalid-name (C0103)
# missing-docstring (C0111)
# misplaced-comparison-constant (C0122)
# too-many-lines (C0302)
# bad-continuation (C0330)

# too-many-ancestors (R0901)
# too-many-public-methods (R0904)
# too-few-public-methods (R0903)
# too-many-return-statements (R0911)
# too-many-branches (R0912)
# too-many-arguments (R0913)
# too-many-locals (R0914)
# too-many-statements (R0915)

# locally-disabled (I0011)
# locally-enabled (I0012)

disable=RP0401,RP0402,RP0801,R0801,W0141,W1618,W0621,W1202,W1401,W0703,W0702,C0111,W0640,C0122,W0511, W0613, C0103, R0903, C0330, C0302, R0915, R0914, R0912, W0105, R0904, R0911, W0108, W0110, R0913, R0901, R0401, I0011, I0012


[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=colorized
#output-format=parseable

# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no

# Tells whether to display a full report or only the messages
reports=yes

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=


[VARIABLES]

# Tells whether we should check for unused import in __init__ files.
init-import=no

# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb


[TYPECHECK]

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
ignored-classes=

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=


[SPELLING]

# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no


[SIMILARITIES]

# Minimum lines number of a similarity.
min-similarity-lines=10

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=no


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO


[LOGGING]

# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=80

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no

# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator

# Maximum number of lines in a module
max-module-lines=1500

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=


[BASIC]

# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,input

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_,x,e,ip

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Include a hint for the correct naming format with invalid-name
include-naming-hint=no

# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$

# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1


[ELIF]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5


[IMPORTS]

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branches=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=15

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of boolean expressions in a if statement
max-bool-expr=5


[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
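A note on the "evaluation" expression in the [REPORTS] section above: it maps
message counts to pylint's 0-10 score, weighting errors five times heavier
than other messages. A quick worked check with made-up counts (the variable
names are the ones pylint injects into the expression):

    # Illustrative only: 1 error, 4 warnings, 3 refactors, 2 conventions
    # across 200 analyzed statements.
    error, warning, refactor, convention, statement = 1, 4, 3, 2, 200
    score = 10.0 - ((float(5 * error + warning + refactor + convention)
                     / statement) * 10)
    print(score)  # 10.0 - (14 / 200.0) * 10 = 9.3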
.pylintrc_gerrit (482 lines deleted)
@@ -1,482 +0,0 @@
[MASTER]

# Specify a configuration file.
#rcfile=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS, tox, logs

# Pickle collected data for later comparisons.
persistent=yes

# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=

# Use multiple processes to speed up Pylint.
jobs=1

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=

# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality.
optimize-ast=no


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.

# old-style-class (C1001)
# return-arg-in-generator (E0106)
# slots-on-old-class (E1001)
# super-on-old-class (E1002)
# missing-super-argument (E1004)
# print-statement (E1601)
# parameter-unpacking (E1602)
# unpacking-in-except (E1603)
# old-raise-syntax (E1604)
# backtick (E1605)
# long-suffix (E1606)
# old-ne-operator (E1607)
# old-octal-literal (E1608)
# import-star-module-level (E1609)
# lowercase-l-suffix (W0332)
# deprecated-module (W0402)
# invalid-encoded-data (W0512)
# property-on-old-class (W1001)
# boolean-datetime (W1502)
# deprecated-method (W1505)
# apply-builtin (W1601)
# basestring-builtin (W1602)
# buffer-builtin (W1603)
# cmp-builtin (W1604)
# coerce-builtin (W1605)
# execfile-builtin (W1606)
# file-builtin (W1607)
# long-builtin (W1608)
# raw_input-builtin (W1609)
# reduce-builtin (W1610)
# standarderror-builtin (W1611)
# unicode-builtin (W1612)
# xrange-builtin (W1613)
# coerce-method (W1614)
# delslice-method (W1615)
# getslice-method (W1616)
# setslice-method (W1617)
# old-division (W1619)
# dict-iter-method (W1620)
# dict-view-method (W1621)
# next-method-called (W1622)
# metaclass-assignment (W1623)
# indexing-exception (W1624)
# raising-string (W1625)
# reload-builtin (W1626)
# oct-method (W1627)
# hex-method (W1628)
# nonzero-method (W1629)
# cmp-method (W1630)
# input-builtin (W1632)
# round-builtin (W1633)
# intern-builtin (W1634)
# unichr-builtin (W1635)
# map-builtin-not-iterating (W1636)
# zip-builtin-not-iterating (W1637)
# range-builtin-not-iterating (W1638)
# filter-builtin-not-iterating (W1639)
# filter-builtin-not-iterating (W1639)
# using-cmp-argument (W1640)

enable = E0106,C1001,E1001,E1002,E1004,E1601,E1602,E1603,E1604,E1605,E1606,E1607,E1608,E1609,W0332,W0402,W0512,W1001,W1502,W1505,W1601,W1602,W1603,W1604,W1605,W1606,W1607,W1608,W1609,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1619,W1620,W1621,W1622,W1623,W1624,W1625,W1626,W1627,W1628,W1629,W1630,W1632,W1633,W1634,W1635,W1636,W1637,W1638,W1639,W1640,

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"

# Disabling pointless reports:
# RP0401: External dependencies
# RP0402: Modules dependencies graph
# RP0801: Duplication
# R0801: Duplication
# cyclic-import (R0401) - produces false-negative results

# Disabling messages:
# pointless-string-statement (W0105)
# unnecessary-lambda (W0108)
# deprecated-lambda (W0110)
# bad-builtin (W0141)
# protected-access (W0212)
# fixme (W0511)
# unused-argument (W0613)
# redefined-outer-name (W0621)
# cell-var-from-loop (W0640)
# bare-except (W0702)
# broad-except (W0703)
# logging-format-interpolation (W1202)
# anomalous-backslash-in-string (W1401) - DO NOT ENABLE, INCORRECTLY PARSES REGEX
# no-absolute-import (W1618):
#   import missing `from __future__ import absolute_import` Used when an import is not accompanied by from __future__ import absolute_import (default behaviour in Python 3)

# invalid-name (C0103)
# missing-docstring (C0111)
# misplaced-comparison-constant (C0122)
# too-many-lines (C0302)
# bad-continuation (C0330)

# too-many-ancestors (R0901)
# too-many-public-methods (R0904)
# too-few-public-methods (R0903)
# too-many-return-statements (R0911)
# too-many-branches (R0912)
# too-many-arguments (R0913)
# too-many-locals (R0914)
# too-many-statements (R0915)

# locally-disabled (I0011)
# locally-enabled (I0012)

disable=RP0401,RP0402,RP0801,R0801, W0141,W1618,W0621,W1202,W1401,W0703,W0702,C0111,W0640,C0122,W0511, W0613, C0103, R0903, C0330, W0212, C0302, R0915, R0914, R0912, W0105, R0904, R0911, W0108, W0110, R0913, R0901, R0401, I0011, I0012


[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
#output-format=parseable

# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no

# Tells whether to display a full report or only the messages
reports=yes

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=


[VARIABLES]

# Tells whether we should check for unused import in __init__ files.
init-import=no

# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb


[TYPECHECK]

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
ignored-classes=

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=


[SPELLING]

# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no


[SIMILARITIES]

# Minimum lines number of a similarity.
min-similarity-lines=10

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=no


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO


[LOGGING]

# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=80

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no

# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator

# Maximum number of lines in a module
max-module-lines=1500

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=


[BASIC]

# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,input

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_,x,e,ip

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Include a hint for the correct naming format with invalid-name
include-naming-hint=no

# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$

# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1


[ELIF]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5


[IMPORTS]

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branches=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=15

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of boolean expressions in a if statement
max-bool-expr=5


[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
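The .pylintrc_gerrit above is almost identical to the .pylintrc before it: the
visible differences are the extra protected-access (W0212) entry in the
"Disabling messages" comments and the disable option, and output-format=text
instead of colorized. A quick way to confirm the delta from a checkout that
still contains both files (stdlib only; illustrative, not part of the change):

    # Sketch: print a unified diff of the two pylint configs.
    import difflib

    with open('.pylintrc') as a, open('.pylintrc_gerrit') as b:
        for line in difflib.unified_diff(
                a.readlines(), b.readlines(),
                fromfile='.pylintrc', tofile='.pylintrc_gerrit'):
            print(line, end='')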
MAINTAINERS (72 lines deleted)
@@ -1,72 +0,0 @@
---
description:
  For Fuel team structure and contribution policy, see [1].

  This is repository level MAINTAINERS file. All contributions to this
  repository must be approved by one or more Core Reviewers [2].
  If you are contributing to files (or create new directories) in
  root folder of this repository, please contact Core Reviewers for
  review and merge requests.

  If you are contributing to subfolders of this repository, please
  check 'maintainers' section of this file in order to find maintainers
  for those specific modules.

  It is mandatory to get +1 from one or more maintainers before asking
  Core Reviewers for review/merge in order to decrease a load on Core Reviewers [3].
  Exceptions are when maintainers are actually cores, or when maintainers
  are not available for some reason (e.g. on vacation).

  [1] https://specs.openstack.org/openstack/fuel-specs/policy/team-structure
  [2] https://review.openstack.org/#/admin/groups/662,members
  [3] http://lists.openstack.org/pipermail/openstack-dev/2015-August/072406.html

  Please keep this file in YAML format in order to allow helper scripts
  to read this as a configuration data.

maintainers:

- ./:
  - name: Vladimir Khlyunev
    email: vkhlyunev@mirantis.com
    IRC: vkhlyunev

  - name: Alexandr Kostrikov
    email: akostrikov@mirantis.com
    IRC: akostrikov_mirantis

  - name: Artem Grechanichenko
    email: agrechanichenko@mirantis.com
    IRC: agrechanicheko

  - name: Maksym Strukov
    email: mstrukov@mirantis.com
    IRC: mstrukov

  - name: Aleksandr Kurenyshev
    email: akurenyshev@mirantis.com
    IRC: akurenyshev

- fuelweb_test/tests/plugins/plugin_zabbix/:

  - name: Swann Croiset
    email: scroiset@mirantis.com
    IRC: swann

  - name: Simon Pasquier
    email: spasquier@mirantis.com
    IRC: pasquier-s

  - name: Maciej Relewicz
    email: mrelewicz@mirantis.com
    IRC: rlu

  - name: Bartosz Kupidura
    email: bkupidura@mirantis.com
    IRC: zynzel

- fuelweb_test/tests/tests_extra_computes/:

  - name: Victor Ryzhenkin
    email: vryzhenkin@mirantis.com
    IRC: freerunner
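The MAINTAINERS file above is kept in YAML precisely so that helper scripts
can read it as configuration data. A minimal sketch of such a consumer
(assuming PyYAML and the structure shown above; the real helper scripts are
not part of this listing):

    # Sketch: list the maintainers recorded for each path.
    import yaml

    with open('MAINTAINERS') as f:
        data = yaml.safe_load(f)

    for entry in data['maintainers']:
        for path, people in entry.items():
            names = ', '.join(person['name'] for person in people)
            print('{}: {}'.format(path, names))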
README.md (25 lines deleted)
@@ -1,25 +0,0 @@
Team and repository tags
========================

[![Team and repository tags](http://governance.openstack.org/badges/fuel-qa.svg)](http://governance.openstack.org/reference/tags/index.html)

<!-- Change things from this point on -->

Tests documentation
-------------------

[Devops documentation](http://docs.fuel-infra.org/fuel-dev/devops.html)

Code-generated documentation
----------------------------

You need to run `make doc-html` to generate them.

Output is stored in `doc/_build/html/index.html`.


For 'make iso'
--------------

[Building ISO documentation](http://docs.fuel-infra.org/fuel-dev/develop/env.html#building-the-fuel-iso)
README.rst (new file, 10 lines added)
@@ -0,0 +1,10 @@
This project is no longer maintained.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".

For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
Freenode.
@@ -1,17 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

logger = logging.getLogger('fuel-qa.{}'.format(__name__))
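The module above only creates a named logger, but the 'fuel-qa.' prefix makes
every module logger a child of one 'fuel-qa' parent, so the whole suite can be
tuned in one place. A minimal sketch of that standard logging behaviour (not
project code):

    # Setting the parent logger's level affects all 'fuel-qa.*' children.
    import logging

    logging.basicConfig(level=logging.INFO)
    logging.getLogger('fuel-qa').setLevel(logging.DEBUG)
    logging.getLogger('fuel-qa.core.helpers').debug('visible via the parent')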
@@ -1,265 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

import logging
import unittest

# pylint: disable=import-error
import mock
from mock import call
from mock import Mock
from mock import patch
# pylint: enable=import-error

from core.helpers import log_helpers

# pylint: disable=no-self-use


@mock.patch('core.helpers.log_helpers.logger', autospec=True)
class TestLogWrap(unittest.TestCase):
    def test_no_args(self, logger):
        @log_helpers.logwrap
        def func():
            return 'No args'

        result = func()
        self.assertEqual(result, 'No args')
        logger.assert_has_calls((
            mock.call.log(
                level=logging.DEBUG,
                msg="Calling: \n'func'()"
            ),
            mock.call.log(
                level=logging.DEBUG,
                msg="Done: 'func' with result:\n{}".format(
                    log_helpers.pretty_repr(result))
            ),
        ))

    def test_args_simple(self, logger):
        arg = 'test arg'

        @log_helpers.logwrap
        def func(tst):
            return tst

        result = func(arg)
        self.assertEqual(result, arg)
        logger.assert_has_calls((
            mock.call.log(
                level=logging.DEBUG,
                msg="Calling: \n'func'(\n    'tst'={},\n)".format(
                    log_helpers.pretty_repr(
                        arg, indent=8, no_indent_start=True)
                )
            ),
            mock.call.log(
                level=logging.DEBUG,
                msg="Done: 'func' with result:\n{}".format(
                    log_helpers.pretty_repr(result))
            ),
        ))

    def test_args_defaults(self, logger):
        arg = 'test arg'

        @log_helpers.logwrap
        def func(tst=arg):
            return tst

        result = func()
        self.assertEqual(result, arg)
        logger.assert_has_calls((
            mock.call.log(
                level=logging.DEBUG,
                msg="Calling: \n'func'(\n    'tst'={},\n)".format(
                    log_helpers.pretty_repr(
                        arg, indent=8, no_indent_start=True))
            ),
            mock.call.log(
                level=logging.DEBUG,
                msg="Done: 'func' with result:\n{}".format(
                    log_helpers.pretty_repr(result))
            ),
        ))

    def test_args_complex(self, logger):
        string = 'string'
        dictionary = {'key': 'dictionary'}

        @log_helpers.logwrap
        def func(param_string, param_dictionary):
            return param_string, param_dictionary

        result = func(string, dictionary)
        self.assertEqual(result, (string, dictionary))
        # raise ValueError(logger.mock_calls)
        logger.assert_has_calls((
            mock.call.log(
                level=logging.DEBUG,
                msg="Calling: \n'func'("
                    "\n    'param_string'={string},"
                    "\n    'param_dictionary'={dictionary},\n)".format(
                        string=log_helpers.pretty_repr(
                            string,
                            indent=8, no_indent_start=True),
                        dictionary=log_helpers.pretty_repr(
                            dictionary,
                            indent=8, no_indent_start=True)
                    )
            ),
            mock.call.log(
                level=logging.DEBUG,
                msg="Done: 'func' with result:\n{}".format(
                    log_helpers.pretty_repr(result))
            ),
        ))

    def test_args_kwargs(self, logger):
        targs = ['string1', 'string2']
        tkwargs = {'key': 'tkwargs'}

        @log_helpers.logwrap
        def func(*args, **kwargs):
            return tuple(args), kwargs

        result = func(*targs, **tkwargs)
        self.assertEqual(result, (tuple(targs), tkwargs))
        # raise ValueError(logger.mock_calls)
        logger.assert_has_calls((
            mock.call.log(
                level=logging.DEBUG,
                msg="Calling: \n'func'("
                    "\n    'args'={args},"
                    "\n    'kwargs'={kwargs},\n)".format(
                        args=log_helpers.pretty_repr(
                            tuple(targs),
                            indent=8, no_indent_start=True),
                        kwargs=log_helpers.pretty_repr(
                            tkwargs,
                            indent=8, no_indent_start=True)
                    )
            ),
            mock.call.log(
                level=logging.DEBUG,
                msg="Done: 'func' with result:\n{}".format(
                    log_helpers.pretty_repr(result))
            ),
        ))

    def test_negative(self, logger):
        @log_helpers.logwrap
        def func():
            raise ValueError('as expected')

        with self.assertRaises(ValueError):
            func()

        logger.assert_has_calls((
            mock.call.log(
                level=logging.DEBUG,
                msg="Calling: \n'func'()"
            ),
            mock.call.log(
                level=logging.ERROR,
                msg="Failed: \n'func'()",
                exc_info=True
            ),
        ))

    def test_negative_substitutions(self, logger):
        new_logger = mock.Mock(spec=logging.Logger, name='logger')
        log = mock.Mock(name='log')
        new_logger.attach_mock(log, 'log')

        @log_helpers.logwrap(
            log=new_logger,
            log_level=logging.INFO,
            exc_level=logging.WARNING
        )
        def func():
            raise ValueError('as expected')

        with self.assertRaises(ValueError):
            func()

        self.assertEqual(len(logger.mock_calls), 0)
        log.assert_has_calls((
            mock.call(
                level=logging.INFO,
                msg="Calling: \n'func'()"
            ),
            mock.call(
                level=logging.WARNING,
                msg="Failed: \n'func'()",
                exc_info=True
            ),
        ))


@patch('logging.StreamHandler')
@patch('core.helpers.log_helpers.logger', autospec=True)
class TestQuietLogger(unittest.TestCase):
    def test_default(self, logger_obj, handler_cls):
        handler = Mock()
        handler.configure_mock(level=logging.INFO)
        handler_cls.return_value = handler

        with log_helpers.QuietLogger():
            log_helpers.logger.warning('Test')

        handler.assert_has_calls((
            call.setLevel(logging.INFO + 1),
            call.setLevel(logging.INFO)
        ))

        logger_obj.assert_has_calls((call.warning('Test'), ))

    def test_upper_level(self, logger_obj, handler_cls):
        handler = Mock()
        handler.configure_mock(level=logging.INFO)
        handler_cls.return_value = handler

        with log_helpers.QuietLogger(logging.WARNING):
            log_helpers.logger.warning('Test')

        handler.assert_has_calls((
            call.setLevel(logging.WARNING + 1),
            call.setLevel(logging.INFO)
        ))

        logger_obj.assert_has_calls((call.warning('Test'), ))

    def test_lower_level(self, logger_obj, handler_cls):
        handler = Mock()
        handler.configure_mock(level=logging.INFO)
        handler_cls.return_value = handler

        with log_helpers.QuietLogger(logging.DEBUG):
            log_helpers.logger.warning('Test')

        handler.assert_has_calls((
            call.setLevel(logging.INFO),
        ))

        logger_obj.assert_has_calls((
            call.debug(
                'QuietLogger requested lower level, than is already set. '
                'Not changing level'),
            call.warning('Test'),
        ))
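The tests above pin down the contract of log_helpers.logwrap: a "Calling:"
debug line with the resolved arguments before the call, then either a "Done:"
line with the result or a "Failed:" line with exc_info. A hypothetical usage
sketch, using only the decorator forms and keyword names the tests exercise:

    # Sketch: bare and parametrized forms of logwrap, as seen in the tests.
    import logging

    from core.helpers import log_helpers

    @log_helpers.logwrap
    def deploy(node):
        return 'deployed {}'.format(node)

    @log_helpers.logwrap(log_level=logging.INFO, exc_level=logging.WARNING)
    def risky():
        raise ValueError('as expected')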
@ -1,255 +0,0 @@
|
||||
# Copyright 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
# pylint: disable=import-error
|
||||
from mock import call
|
||||
from mock import patch
|
||||
# pylint: enable=import-error
|
||||
|
||||
from core.helpers import setup_teardown
|
||||
|
||||
|
||||
# Get helpers names (python will try to mangle it inside classes)
|
||||
get_arg_names = setup_teardown.__get_arg_names
|
||||
getcallargs = setup_teardown.__getcallargs
|
||||
call_in_context = setup_teardown.__call_in_context
|
||||
|
||||
|
||||
class TestWrappers(unittest.TestCase):
|
||||
def test_get_arg_names(self):
|
||||
def func_no_args():
|
||||
pass
|
||||
|
||||
def func_arg(single):
|
||||
pass
|
||||
|
||||
def func_args(first, last):
|
||||
pass
|
||||
|
||||
self.assertEqual(
|
||||
get_arg_names(func_no_args),
|
||||
[]
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
get_arg_names(func_arg),
|
||||
['single']
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
get_arg_names(func_args),
|
||||
['first', 'last']
|
||||
)
|
||||
|
||||
def test_getcallargs(self):
|
||||
def func_no_def(arg1, arg2):
|
||||
pass
|
||||
|
||||
def func_def(arg1, arg2='arg2'):
|
||||
pass
|
||||
|
||||
self.assertEqual(
|
||||
dict(getcallargs(func_no_def, *['arg1', 'arg2'], **{})),
|
||||
{'arg1': 'arg1', 'arg2': 'arg2'}
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
dict(getcallargs(func_no_def, *['arg1'], **{'arg2': 'arg2'})),
|
||||
{'arg1': 'arg1', 'arg2': 'arg2'}
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
dict(getcallargs(
|
||||
func_no_def, *[], **{'arg1': 'arg1', 'arg2': 'arg2'})),
|
||||
{'arg1': 'arg1', 'arg2': 'arg2'}
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
dict(getcallargs(func_def, *['arg1'], **{})),
|
||||
            {'arg1': 'arg1', 'arg2': 'arg2'}
        )

        self.assertEqual(
            dict(getcallargs(func_def, *[], **{'arg1': 'arg1'})),
            {'arg1': 'arg1', 'arg2': 'arg2'}
        )

        self.assertEqual(
            dict(getcallargs(
                func_def, *[], **{'arg1': 'arg1', 'arg2': 2})),
            {'arg1': 'arg1', 'arg2': 2}
        )

    def test_call_in_context(self):
        def func_no_args():
            return None

        def func_args(first='first', last='last'):
            return first, last

        def func_self_arg(self):
            return self

        def func_cls_arg(cls):
            return cls

        class Tst(object):
            @classmethod
            def tst(cls):
                return cls

        self.assertIsNone(
            call_in_context(
                func=func_no_args,
                context_args={}
            )
        )

        self.assertIsNone(
            call_in_context(
                func=func_no_args,
                context_args={'test': 'val'}
            )
        )

        self.assertEqual(
            call_in_context(
                func=func_args,
                context_args={'first': 0, 'last': -1}
            ),
            (0, -1)
        )

        with self.assertRaises(ValueError):
            call_in_context(
                func=func_args,
                context_args={}
            )

        self.assertEqual(
            call_in_context(
                func=func_self_arg,
                context_args={'self': self}
            ),
            self
        )

        self.assertEqual(
            call_in_context(
                func=func_cls_arg,
                context_args={'cls': self.__class__}
            ),
            self.__class__
        )

        self.assertEqual(
            call_in_context(
                func=func_cls_arg,
                context_args={'self': self}
            ),
            self.__class__
        )

        self.assertEqual(
            call_in_context(
                func=Tst.tst,
                context_args={'cls': self.__class__}
            ),
            Tst,
            'cls was not filtered from @classmethod!'
        )

        # Allow replacing the function with None in special cases
        self.assertIsNone(
            call_in_context(None, {'test_arg': 'test_val'})
        )


@patch('core.helpers.setup_teardown.__getcallargs', return_value={'arg': True})
@patch('core.helpers.setup_teardown.__call_in_context')
class TestSetupTeardown(unittest.TestCase):
    def test_basic(self, call_in, getargs):
        arg = True

        @setup_teardown.setup_teardown()
        def positive_example(arg):
            return arg

        self.assertEqual(positive_example(arg), arg)

        # The real function is wrapped by the decorator,
        # so we cannot make a full check
        getargs.assert_called_once()

        call_in.assert_has_calls((
            call(None, {'arg': arg}),
            call(None, {'arg': arg}),
        ))

    def test_applied(self, call_in, getargs):
        arg = True

        def setup_func():
            pass

        def teardown_func():
            pass

        @setup_teardown.setup_teardown(
            setup=setup_func,
            teardown=teardown_func
        )
        def positive_example(arg):
            return arg

        self.assertEqual(positive_example(arg), arg)

        # The real function is wrapped by the decorator,
        # so we cannot make a full check
        getargs.assert_called_once()

        call_in.assert_has_calls((
            call(setup_func, {'arg': arg}),
            call(teardown_func, {'arg': arg}),
        ))

    def test_exception_applied(self, call_in, getargs):
        arg = True

        def setup_func():
            pass

        def teardown_func():
            pass

        @setup_teardown.setup_teardown(
            setup=setup_func,
            teardown=teardown_func
        )
        def positive_example(arg):
            raise ValueError(arg)

        with self.assertRaises(ValueError):
            positive_example(arg)

        # The real function is wrapped by the decorator,
        # so we cannot make a full check
        getargs.assert_called_once()

        call_in.assert_has_calls((
            call(setup_func, {'arg': arg}),
            call(teardown_func, {'arg': arg}),
        ))
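The contract pinned down by the assertions above is compact enough to restate. The following is an illustrative sketch for readers, not the retired module's code (the real implementation, __call_in_context, appears later in this diff), and it omits the self/cls special-casing that the classmethod assertions exercise:

    import inspect

    def call_in_context_sketch(func, context_args):
        # Illustrative restatement of the rules asserted above.
        if func is None:               # None is tolerated and simply skipped
            return None
        names = list(inspect.signature(func).parameters)
        if not names:                  # zero-arg callables ignore the context
            return func()
        try:                           # a missing context key -> ValueError
            return func(*[context_args[n] for n in names])
        except KeyError as e:
            raise ValueError("Argument {} is missing".format(e))

    assert call_in_context_sketch(lambda: 'x', {'unused': 1}) == 'x'
    assert call_in_context_sketch(None, {'test_arg': 'test_val'}) is None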
@ -1,115 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

import unittest

# pylint: disable=import-error
from mock import Mock
# pylint: enable=import-error

from core.models.fuel_client import base_client

# pylint: disable=no-self-use


class TestAdapter(unittest.TestCase):
    def test_init_default(self):
        session = Mock(spec='keystoneauth1.session.Session')
        obj = base_client.Adapter(session=session)

        self.assertEqual(obj.service_type, 'fuel')
        self.assertEqual(obj.session, session)

        self.assertEqual(
            repr(obj),
            (
                "{cls}("
                "session=<Session(original_ip=original_ip, verify=verify)"
                " id={sess_id}>,"
                "service_type={svc}"
                ") id={id}".format(
                    cls=base_client.Adapter.__name__,
                    sess_id=hex(id(session)),
                    svc=obj.service_type,
                    id=hex(id(obj))
                ))
        )

    def test_init_svc(self):
        session = Mock(spec='keystoneauth1.session.Session')

        service_type = 'ostf'
        obj = base_client.Adapter(session=session, service_type=service_type)

        self.assertEqual(obj.service_type, service_type)
        self.assertEqual(obj.session, session)

        self.assertEqual(
            repr(obj),
            (
                "{cls}("
                "session=<Session(original_ip=original_ip, verify=verify)"
                " id={sess_id}>,"
                "service_type={svc}"
                ") id={id}".format(
                    cls=base_client.Adapter.__name__,
                    sess_id=hex(id(session)),
                    svc=obj.service_type,
                    id=hex(id(obj))
                ))
        )

    def test_methods(self):
        session = Mock(spec='keystoneauth1.session.Session')
        get = Mock(name='get')
        post = Mock(name='post')
        put = Mock(name='put')
        delete = Mock(name='delete')

        session.attach_mock(get, 'get')
        session.attach_mock(post, 'post')
        session.attach_mock(put, 'put')
        session.attach_mock(delete, 'delete')

        url = 'test'

        obj = base_client.Adapter(session=session)

        obj.get(url=url)
        obj.post(url=url)
        obj.put(url=url)
        obj.delete(url=url)

        get.assert_called_once_with(
            connect_retries=1,
            endpoint_filter={'service_type': obj.service_type},
            url=url)

        post.assert_called_once_with(
            connect_retries=1,
            endpoint_filter={'service_type': obj.service_type},
            url=url)

        put.assert_called_once_with(
            connect_retries=1,
            endpoint_filter={'service_type': obj.service_type},
            url=url)

        delete.assert_called_once_with(
            connect_retries=1,
            endpoint_filter={'service_type': obj.service_type},
            url=url)
@ -1,52 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

import unittest

# pylint: disable=import-error
from mock import call
from mock import Mock
from mock import patch
# pylint: enable=import-error

from core.models.fuel_client import client

# pylint: disable=no-self-use


@patch('core.models.fuel_client.client.logger', autospec=True)
@patch('core.models.fuel_client.base_client.Adapter', autospec=True)
class TestClient(unittest.TestCase):
    def test_init(self, adapter, logger):
        session = Mock(spec='keystoneauth1.session.Session')
        session.attach_mock(Mock(), 'auth')
        session.auth.auth_url = 'http://127.0.0.1'

        obj = client.Client(session=session)

        self.assertIn(
            call(service_type=u'ostf', session=session),
            adapter.mock_calls
        )

        logger.assert_has_calls((
            call.info(
                'Initialization of NailgunClient using shared session \n'
                '(auth_url={})'.format(session.auth.auth_url)),
        ))

        self.assertIn('ostf', dir(obj))
@ -1,128 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

import unittest

# pylint: disable=import-error
from mock import Mock
from mock import patch
# pylint: enable=import-error

from core.models.fuel_client.ostf_client import OSTFClient

# pylint: disable=no-self-use


@patch('core.models.fuel_client.ostf_client.logwrap', autospec=True)
class TestOSTFClient(unittest.TestCase):
    @staticmethod
    def prepare_session():
        session = Mock(spec='keystoneauth1.session.Session')
        session.attach_mock(Mock(), 'auth')
        session.auth.auth_url = 'http://127.0.0.1'
        get = Mock(name='get')
        post = Mock(name='post')
        put = Mock(name='put')
        delete = Mock(name='delete')

        session.attach_mock(get, 'get')
        session.attach_mock(post, 'post')
        session.attach_mock(put, 'put')
        session.attach_mock(delete, 'delete')

        return session

    def test_basic(self, logwrap):
        session = self.prepare_session()
        client = OSTFClient(session)

        cluster_id = 0

        client.get_test_sets(cluster_id=cluster_id)

        session.get.assert_called_once_with(
            url="/testsets/{}".format(cluster_id))

        session.reset_mock()

        client.get_tests(cluster_id=cluster_id)

        session.get.assert_called_once_with(
            url="/tests/{}".format(cluster_id))

        session.reset_mock()

        client.get_test_runs()

        session.get.assert_called_once_with(url="/testruns")

    def test_test_runs(self, logwrap):
        session = self.prepare_session()
        client = OSTFClient(session)

        cluster_id = 0
        testrun_id = 0xff

        client.get_test_runs(testrun_id=testrun_id)
        session.get.assert_called_once_with(
            url="/testruns/{}".format(testrun_id))

        session.reset_mock()

        client.get_test_runs(testrun_id=testrun_id, cluster_id=cluster_id)

        session.get.assert_called_once_with(
            url="/testruns/{}/{}".format(testrun_id, cluster_id))

        session.reset_mock()

        client.get_test_runs(cluster_id=cluster_id)

        session.get.assert_called_once_with(
            url="/testruns/last/{}".format(cluster_id))

    def test_run_tests(self, logwrap):
        session = self.prepare_session()
        client = OSTFClient(session)

        cluster_id = 0

        test_sets = ['smoke']

        test_name = 'test'

        client.run_tests(cluster_id=cluster_id, test_sets=test_sets)

        json = [
            {'metadata': {'cluster_id': str(cluster_id), 'config': {}},
             'testset': test_sets[0]}]

        session.post.assert_called_once_with(
            "/testruns", json=json
        )

        session.reset_mock()

        # noinspection PyTypeChecker
        client.run_tests(
            cluster_id=cluster_id, test_sets=test_sets, test_name=test_name)

        json[0]['tests'] = [test_name]

        session.post.assert_called_once_with(
            "/testruns", json=json
        )
@ -1,153 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

import unittest

# pylint: disable=import-error
from mock import call
from mock import patch
# pylint: enable=import-error

from core.models.collector_client import CollectorClient

ip = '127.0.0.1'
endpoint = 'fake'
url = "http://{0}/{1}".format(ip, endpoint)


@patch('requests.get')
class TestCollectorClient(unittest.TestCase):
    def setUp(self):
        self.client = CollectorClient(collector_ip=ip, endpoint=endpoint)

    def test_init(self, get):
        self.assertEqual(self.client.url, url)
        get.assert_not_called()

    def test_get(self, get):
        tgt = '/tst'
        self.client._get(tgt)
        get.assert_called_once_with(url=url + tgt)

    def test_get_oswls(self, get):
        master_node_uid = '0'
        self.client.get_oswls(master_node_uid=master_node_uid)
        get.assert_has_calls((
            call(url=url + '/oswls/{0}'.format(master_node_uid)),
            call().json(),
        ))

    def test_get_installation_info(self, get):
        master_node_uid = '0'
        self.client.get_installation_info(master_node_uid=master_node_uid)
        get.assert_has_calls((
            call(url=url + '/installation_info/{0}'.format(
                master_node_uid)),
            call().json(),
        ))

    def test_get_action_logs(self, get):
        master_node_uid = '0'
        self.client.get_action_logs(master_node_uid=master_node_uid)
        get.assert_has_calls((
            call(url=url + '/action_logs/{0}'.format(master_node_uid)),
            call().json(),
        ))

    def test_get_oswls_by_resource(self, get):
        master_node_uid = '0'
        resource = '1'
        self.client.get_oswls_by_resource(
            master_node_uid=master_node_uid,
            resource=resource
        )
        get.assert_has_calls((
            call(url=url + "/oswls/{0}/{1}".format(master_node_uid, resource)),
            call().json(),
        ))

    @patch(
        'core.models.collector_client.CollectorClient.get_oswls_by_resource',
        return_value={
            'objs': [
                {'resource_data': 'test0'},
                {'resource_data': 'test1'},
            ]
        }
    )
    def test_get_oswls_by_resource_data(self, get_oswls, get):
        master_node_uid = '0'
        resource = '1'
        result = self.client.get_oswls_by_resource_data(
            master_node_uid=master_node_uid,
            resource=resource
        )
        get_oswls.assert_called_once_with(
            master_node_uid,
            resource
        )
        self.assertEqual(result, 'test0')

    @patch(
        'core.models.collector_client.CollectorClient.get_action_logs',
        return_value=[
            {'id': 0, 'body': {'additional_info': 'test0'}},
            {'id': 1, 'body': {'additional_info': 'test1'}},
            {'id': 2, 'body': {'additional_info': 'test2'}},
        ]
    )
    def test_get_action_logs_ids(self, logs, get):
        master_node_uid = 0
        result = self.client.get_action_logs_ids(master_node_uid)
        logs.assert_called_once_with(master_node_uid)
        self.assertEqual(result, [0, 1, 2])

    @patch(
        'core.models.collector_client.CollectorClient.get_action_logs',
        return_value=[
            {'id': 0, 'body': {'additional_info': 'test0'}},
            {'id': 1, 'body': {'additional_info': 'test1'}},
            {'id': 2, 'body': {'additional_info': 'test2'}},
        ]
    )
    def test_get_action_logs_additional_info_by_id(self, logs, get):
        master_node_uid = 0
        action_id = 1
        result = self.client.get_action_logs_additional_info_by_id(
            master_node_uid, action_id)
        logs.assert_called_once_with(master_node_uid)
        self.assertEqual(result, ['test1'])

    @patch(
        'core.models.collector_client.CollectorClient.get_action_logs_ids',
        return_value=[0, 1, 2]
    )
    def test_get_action_logs_count(self, get_ids, get):
        master_node_uid = 0
        result = self.client.get_action_logs_count(master_node_uid)
        get_ids.assert_called_once_with(master_node_uid)
        self.assertEqual(result, 3)

    @patch(
        'core.models.collector_client.CollectorClient.get_installation_info',
        return_value={'structure': 'test_result'}
    )
    def test_get_installation_info_data(self, get_inst_info, get):
        master_node_uid = 0
        result = self.client.get_installation_info_data(master_node_uid)
        get_inst_info.assert_called_once_with(master_node_uid)
        self.assertEqual(result, 'test_result')
@ -1,86 +0,0 @@
from copy import deepcopy
import unittest

from core.models.value_objects import FuelAccessParams

EXAMPLE_YAML_DICT = {
    'OS_USERNAME': 'root',
    'OS_TENANT_NAME': 'project',
    'OS_PASSWORD': 'password',
    'SERVER_ADDRESS': '127.0.0.1',
    'SERVER_PORT': '8000',
    'KEYSTONE_PORT': '5000'
}

EXPECTED_OPENRC_CONTENT = 'export OS_USERNAME="root"\n' \
                          'export OS_PASSWORD="password"\n' \
                          'export OS_TENANT_NAME="project"\n' \
                          'export SERVICE_URL="https://127.0.0.1:8000"\n' \
                          'export OS_AUTH_URL="https://127.0.0.1:5000"\n'


class TestFuelAccessParams(unittest.TestCase):
    def test_simple_init(self):
        fuel_access = FuelAccessParams()

        fuel_access.username = 'root'
        self.assertEqual(fuel_access.username, 'root')

        fuel_access.password = 'password'
        self.assertEqual(fuel_access.password, 'password')

        fuel_access.project = 'tenant'
        self.assertEqual(fuel_access.project, 'tenant')

        fuel_access.service_address = '127.0.0.1'
        self.assertEqual(fuel_access.service_address, '127.0.0.1')

        fuel_access.service_port = '777'
        self.assertEqual(fuel_access.service_port, '777')

        fuel_access.keystone_address = '127.0.0.1'
        self.assertEqual(fuel_access.keystone_address, '127.0.0.1')

        fuel_access.keystone_port = '5000'
        self.assertEqual(fuel_access.keystone_port, '5000')

    def test_tls_init(self):
        fuel_access = FuelAccessParams(tls_keystone_enabled=True,
                                       tls_service_enabled=False)
        fuel_access.service_address = '127.0.0.1'
        fuel_access.service_port = '777'

        fuel_access.keystone_address = '127.0.0.1'
        fuel_access.keystone_port = '5000'

        self.assertEqual(fuel_access.service_url, 'http://127.0.0.1:777')
        self.assertEqual(fuel_access.os_auth_url, 'https://127.0.0.1:5000')

    def test_init_from_yaml_content(self):
        fuel_access = FuelAccessParams.from_yaml_params(EXAMPLE_YAML_DICT)
        self.assertEqual(fuel_access.service_address, '127.0.0.1')
        self.assertEqual(fuel_access.os_auth_url, 'http://127.0.0.1:5000')

    def test_init_from_yaml_content_with_tls(self):
        fuel_access = FuelAccessParams.from_yaml_params(
            EXAMPLE_YAML_DICT,
            tls_service_enabled=True,
            tls_keystone_enabled=True
        )
        self.assertEqual(fuel_access.service_address, '127.0.0.1')
        self.assertEqual(fuel_access.os_auth_url, 'https://127.0.0.1:5000')
        self.assertEqual(fuel_access.service_url, 'https://127.0.0.1:8000')

    def test_failed_from_yaml_content_when_key_absents(self):
        yaml_from_content = deepcopy(EXAMPLE_YAML_DICT)
        yaml_from_content.pop('OS_PASSWORD', None)
        with self.assertRaises(KeyError):
            FuelAccessParams.from_yaml_params(yaml_from_content)

    def test_export_to_openrc(self):
        openrc_content = FuelAccessParams.from_yaml_params(
            EXAMPLE_YAML_DICT,
            tls_service_enabled=True,
            tls_keystone_enabled=True
        ).to_openrc_content()
        self.assertEqual(EXPECTED_OPENRC_CONTENT, openrc_content)
@ -1,270 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

import collections
import functools
import inspect
import logging
import sys
import warnings

import six

from core import logger


# pylint: disable=no-member
def _get_arg_names(func):
    """get argument names for function

    :param func: func
    :return: list of function argnames
    :rtype: list

    >>> def tst_1():
    ...     pass

    >>> _get_arg_names(tst_1)
    []

    >>> def tst_2(arg):
    ...     pass

    >>> _get_arg_names(tst_2)
    ['arg']
    """
    # noinspection PyUnresolvedReferences
    return (
        [arg for arg in inspect.getargspec(func=func).args] if six.PY2 else
        list(inspect.signature(obj=func).parameters.keys())
    )


def _getcallargs(func, *positional, **named):
    """get real function call arguments without calling function

    :rtype: dict
    """
    # noinspection PyUnresolvedReferences
    if sys.version_info[0:2] < (3, 5):  # apply_defaults is a py35 feature
        orig_args = inspect.getcallargs(func, *positional, **named)
        # Construct OrderedDict as Py3
        arguments = collections.OrderedDict(
            [(key, orig_args[key]) for key in _get_arg_names(func)]
        )
        if six.PY2:
            # args and kwargs are not bound in py27
            # Note: py27 inspect is not unicode
            missed = (
                (key, val)
                for key, val in orig_args.items()
                if key not in arguments)
            args, kwargs = (), ()
            for record in missed:
                if isinstance(record[1], (list, tuple)):
                    args = record
                elif isinstance(record[1], dict):
                    kwargs = record

            if args:
                arguments[args[0]] = args[1]
            if kwargs:
                arguments[kwargs[0]] = kwargs[1]
        return arguments
    sig = inspect.signature(func).bind(*positional, **named)
    sig.apply_defaults()  # after bind we don't have defaults
    return sig.arguments
# pylint:enable=no-member


def _simple(item):
    """Check for nested iterations: True, if not"""
    return not isinstance(item, (list, set, tuple, dict))


_formatters = {
    'simple': "{spc:<{indent}}{val!r}".format,
    'text': "{spc:<{indent}}{prefix}'''{string}'''".format,
    'dict': "\n{spc:<{indent}}{key!r:{size}}: {val},".format,
}


def pretty_repr(src, indent=0, no_indent_start=False, max_indent=20):
    """Make human readable repr of object

    :param src: object to process
    :type src: object
    :param indent: start indentation, all next levels is +4
    :type indent: int
    :param no_indent_start: do not indent open bracket and simple parameters
    :type no_indent_start: bool
    :param max_indent: maximal indent before classic repr() call
    :type max_indent: int
    :return: formatted string
    """
    if _simple(src) or indent >= max_indent:
        indent = 0 if no_indent_start else indent
        if isinstance(src, (six.binary_type, six.text_type)):
            if isinstance(src, six.binary_type):
                string = src.decode(
                    encoding='utf-8',
                    errors='backslashreplace'
                )
                prefix = 'b'
            else:
                string = src
                prefix = 'u'
            return _formatters['text'](
                spc='',
                indent=indent,
                prefix=prefix,
                string=string
            )
        return _formatters['simple'](
            spc='',
            indent=indent,
            val=src
        )
    if isinstance(src, dict):
        prefix, suffix = '{', '}'
        result = ''
        max_len = len(max([repr(key) for key in src])) if src else 0
        for key, val in src.items():
            result += _formatters['dict'](
                spc='',
                indent=indent + 4,
                size=max_len,
                key=key,
                val=pretty_repr(val, indent + 8, no_indent_start=True)
            )
        return (
            '\n{start:>{indent}}'.format(
                start=prefix,
                indent=indent + 1
            ) +
            result +
            '\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)
        )
    if isinstance(src, list):
        prefix, suffix = '[', ']'
    elif isinstance(src, tuple):
        prefix, suffix = '(', ')'
    else:
        prefix, suffix = '{', '}'
    result = ''
    for elem in src:
        if _simple(elem):
            result += '\n'
        result += pretty_repr(elem, indent + 4) + ','
    return (
        '\n{start:>{indent}}'.format(
            start=prefix,
            indent=indent + 1) +
        result +
        '\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)
    )


def logwrap(log=logger, log_level=logging.DEBUG, exc_level=logging.ERROR):
    """Log function calls

    :type log: logging.Logger
    :type log_level: int
    :type exc_level: int
    :rtype: callable
    """
    warnings.warn(
        'logwrap is moved to fuel-devops 3.0.3,'
        ' please change imports after switch',
        DeprecationWarning)

    def real_decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            call_args = _getcallargs(func, *args, **kwargs)
            args_repr = ""
            if len(call_args) > 0:
                args_repr = "\n " + "\n ".join((
                    "{key!r}={val},".format(
                        key=key,
                        val=pretty_repr(val, indent=8, no_indent_start=True)
                    )
                    for key, val in call_args.items())
                ) + '\n'
            log.log(
                level=log_level,
                msg="Calling: \n{name!r}({arguments})".format(
                    name=func.__name__,
                    arguments=args_repr
                )
            )
            try:
                result = func(*args, **kwargs)
                log.log(
                    level=log_level,
                    msg="Done: {name!r} with result:\n{result}".format(
                        name=func.__name__,
                        result=pretty_repr(result))
                )
            except BaseException:
                log.log(
                    level=exc_level,
                    msg="Failed: \n{name!r}({arguments})".format(
                        name=func.__name__,
                        arguments=args_repr,
                    ),
                    exc_info=True
                )
                raise
            return result
        return wrapped

    if not isinstance(log, logging.Logger):
        func, log = log, logger
        return real_decorator(func)

    return real_decorator


class QuietLogger(object):
    """Reduce logging level while context is executed."""

    def __init__(self, upper_log_level=None):
        """Reduce logging level while context is executed.

        :param upper_log_level: log level to ignore
        :type upper_log_level: int
        """
        self.log_level = upper_log_level
        self.level = None

    def __enter__(self):
        console = logging.StreamHandler()
        self.level = console.level
        if self.log_level is None:
            self.log_level = self.level
        elif self.log_level < self.level:
            logger.debug(
                'QuietLogger requested a lower level than is already set. '
                'Not changing the level.')
            return
        console.setLevel(self.log_level + 1)

    def __exit__(self, exc_type, exc_value, exc_tb):
        logging.StreamHandler().setLevel(self.level)


__all__ = ['logwrap', 'QuietLogger', 'logger']
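For orientation, a minimal usage sketch of the helpers above; the logging wiring is assumed and the printed output is indicative only:

    import logging

    logging.basicConfig(level=logging.DEBUG)

    @logwrap  # bare form: logwrap detects that its argument is the function
    def make_snapshot(name, force=False):
        return name

    make_snapshot('before-deploy')  # emits "Calling: ..." / "Done: ..." at DEBUG

    with QuietLogger(logging.WARNING):
        make_snapshot('noisy-step')  # intended to mute console records at
                                     # WARNING and below for this block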
@ -1,337 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

import functools
import inspect

import six

# Setup/Teardown decorators, which are missing in Proboscis.
# Usage: like in Nose.


# pylint: disable=no-member
def __getcallargs(func, *positional, **named):
    """get real function call arguments without calling function

    :rtype: dict
    """
    # noinspection PyUnresolvedReferences
    if six.PY2:
        return inspect.getcallargs(func, *positional, **named)
    sig = inspect.signature(func).bind(*positional, **named)
    sig.apply_defaults()  # after bind we don't have defaults
    return sig.arguments


def __get_arg_names(func):
    """get argument names for function

    :param func: func
    :return: list of function argnames
    :rtype: list

    >>> def tst_1():
    ...     pass

    >>> __get_arg_names(tst_1)
    []

    >>> def tst_2(arg):
    ...     pass

    >>> __get_arg_names(tst_2)
    ['arg']
    """
    # noinspection PyUnresolvedReferences
    return (
        [arg for arg in inspect.getargspec(func=func).args] if six.PY2 else
        list(inspect.signature(obj=func).parameters.keys())
    )
# pylint:enable=no-member


def __call_in_context(func, context_args):
    """call function with substitute arguments from dict

    :param func: function or None
    :param context_args: dict
    :type context_args: dict
    :return: function call results

    >>> __call_in_context(None, {})

    >>> def print_print():
    ...     print('print')

    >>> __call_in_context(print_print, {})
    print

    >>> __call_in_context(print_print, {'val': 1})
    print

    >>> def print_val(val):
    ...     print(val)

    >>> __call_in_context(print_val, {'val': 1})
    1
    """
    if func is None:
        return

    func_args = __get_arg_names(func)
    if not func_args:
        return func()

    if inspect.ismethod(func) and 'cls' in func_args:
        func_args.remove('cls')
        # cls is used in @classmethod and cannot be passed
        # via args or kwargs, so classmethod decorators always have access
        # to their own class only, except via a direct class argument
    elif 'self' in context_args:
        context_args.setdefault('cls', context_args['self'].__class__)
    try:
        arg_values = [context_args[k] for k in func_args]
    except KeyError as e:
        raise ValueError("Argument '{}' is missing".format(str(e)))

    return func(*arg_values)


def setup_teardown(setup=None, teardown=None):
    """Add setup and teardown for functions and methods.

    :param setup: function
    :param teardown: function
    :return: decorator

    >>> def setup_func():
    ...     print('setup_func called')

    >>> def teardown_func():
    ...     print('teardown_func called')

    >>> @setup_teardown(setup=setup_func, teardown=teardown_func)
    ... def positive_example(arg):
    ...     print(arg)

    >>> positive_example(arg=1)
    setup_func called
    1
    teardown_func called

    >>> def print_call(text):
    ...     print(text)

    >>> @setup_teardown(
    ...     setup=lambda: print_call('setup lambda'),
    ...     teardown=lambda: print_call('teardown lambda'))
    ... def positive_example_lambda(arg):
    ...     print(arg)

    >>> positive_example_lambda(arg=1)
    setup lambda
    1
    teardown lambda

    >>> def setup_with_self(self):
    ...     print(
    ...         'setup_with_self: '
    ...         'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...             cls_val=self.cls_val, val=self.val))

    >>> def teardown_with_self(self):
    ...     print(
    ...         'teardown_with_self: '
    ...         'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...             cls_val=self.cls_val, val=self.val))

    >>> def setup_with_cls(cls):
    ...     print(
    ...         'setup_with_cls: cls.cls_val = {cls_val!s}'.format(
    ...             cls_val=cls.cls_val))

    >>> def teardown_with_cls(cls):
    ...     print('teardown_with_cls: cls.cls_val = {cls_val!s}'.format(
    ...         cls_val=cls.cls_val))

    >>> class HelpersBase(object):
    ...     cls_val = None
    ...     def __init__(self):
    ...         self.val = None
    ...     @classmethod
    ...     def cls_setup(cls):
    ...         print(
    ...             'cls_setup: cls.cls_val = {cls_val!s}'.format(
    ...                 cls_val=cls.cls_val))
    ...     @classmethod
    ...     def cls_teardown(cls):
    ...         print(
    ...             'cls_teardown: cls.cls_val = {cls_val!s}'.format(
    ...                 cls_val=cls.cls_val))
    ...     def self_setup(self):
    ...         print(
    ...             'self_setup: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=self.cls_val, val=self.val))
    ...     def self_teardown(self):
    ...         print(
    ...             'self_teardown: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=self.cls_val, val=self.val))

    >>> class Test(HelpersBase):
    ...     @setup_teardown(
    ...         setup=HelpersBase.self_setup,
    ...         teardown=HelpersBase.self_teardown)
    ...     def test_self_self(self, cls_val=0, val=0):
    ...         print(
    ...             'test_self_self: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=cls_val, val=val))
    ...         self.val = val
    ...         self.cls_val = cls_val
    ...     @setup_teardown(
    ...         setup=HelpersBase.cls_setup,
    ...         teardown=HelpersBase.cls_teardown)
    ...     def test_self_cls(self, cls_val=1, val=1):
    ...         print(
    ...             'test_self_cls: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=cls_val, val=val))
    ...         self.val = val
    ...         self.cls_val = cls_val
    ...     @setup_teardown(
    ...         setup=setup_func,
    ...         teardown=teardown_func)
    ...     def test_self_none(self, cls_val=2, val=2):
    ...         print(
    ...             'test_self_none: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=cls_val, val=val))
    ...         self.val = val
    ...         self.cls_val = cls_val
    ...     @setup_teardown(
    ...         setup=setup_with_self,
    ...         teardown=teardown_with_self)
    ...     def test_self_ext_self(self, cls_val=-1, val=-1):
    ...         print(
    ...             'test_self_ext_self: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=cls_val, val=val))
    ...         self.val = val
    ...         self.cls_val = cls_val
    ...     @setup_teardown(
    ...         setup=setup_with_cls,
    ...         teardown=teardown_with_cls)
    ...     def test_self_ext_cls(self, cls_val=-2, val=-2):
    ...         print(
    ...             'test_self_ext_cls: '
    ...             'self.cls_val = {cls_val!s}, self.val = {val!s}'.format(
    ...                 cls_val=cls_val, val=val))
    ...         self.val = val
    ...         self.cls_val = cls_val
    ...     @classmethod
    ...     @setup_teardown(
    ...         setup=HelpersBase.cls_setup,
    ...         teardown=HelpersBase.cls_teardown)
    ...     def test_cls_cls(cls, cls_val=3):
    ...         print(
    ...             'test_cls_cls: cls.cls_val = {cls_val!s}'.format(
    ...                 cls_val=cls_val))
    ...         cls.cls_val = cls_val
    ...     @classmethod
    ...     @setup_teardown(
    ...         setup=setup_func,
    ...         teardown=teardown_func)
    ...     def test_cls_none(cls, cls_val=4):
    ...         print(
    ...             'test_cls_none: cls.cls_val = {cls_val!s}'.format(
    ...                 cls_val=cls_val))
    ...         cls.cls_val = cls_val
    ...     @classmethod
    ...     @setup_teardown(
    ...         setup=setup_with_cls,
    ...         teardown=teardown_with_cls)
    ...     def test_cls_ext_cls(cls, cls_val=-3):
    ...         print(
    ...             'test_cls_ext_cls: cls.cls_val = {cls_val!s}'.format(
    ...                 cls_val=cls_val))
    ...         cls.cls_val = cls_val
    ...     @staticmethod
    ...     @setup_teardown(setup=setup_func, teardown=teardown_func)
    ...     def test_none_none():
    ...         print('test')

    >>> test = Test()

    >>> test.test_self_self()
    self_setup: self.cls_val = None, self.val = None
    test_self_self: self.cls_val = 0, self.val = 0
    self_teardown: self.cls_val = 0, self.val = 0

    >>> test.test_self_cls()
    cls_setup: cls.cls_val = None
    test_self_cls: self.cls_val = 1, self.val = 1
    cls_teardown: cls.cls_val = None

    >>> test.test_self_none()
    setup_func called
    test_self_none: self.cls_val = 2, self.val = 2
    teardown_func called

    >>> test.test_self_ext_self()
    setup_with_self: self.cls_val = 2, self.val = 2
    test_self_ext_self: self.cls_val = -1, self.val = -1
    teardown_with_self: self.cls_val = -1, self.val = -1

    >>> test.test_self_ext_cls()
    setup_with_cls: cls.cls_val = None
    test_self_ext_cls: self.cls_val = -2, self.val = -2
    teardown_with_cls: cls.cls_val = None

    >>> test.test_cls_cls()
    cls_setup: cls.cls_val = None
    test_cls_cls: cls.cls_val = 3
    cls_teardown: cls.cls_val = None

    >>> test.test_cls_none()
    setup_func called
    test_cls_none: cls.cls_val = 4
    teardown_func called

    >>> test.test_cls_ext_cls()
    setup_with_cls: cls.cls_val = 4
    test_cls_ext_cls: cls.cls_val = -3
    teardown_with_cls: cls.cls_val = -3

    >>> test.test_none_none()
    setup_func called
    test
    teardown_func called
    """
    def real_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            real_args = __getcallargs(func, *args, **kwargs)
            __call_in_context(setup, real_args)
            try:
                result = func(*args, **kwargs)
            finally:
                __call_in_context(teardown, real_args)
            return result
        return wrapper
    return real_decorator
@ -1,79 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

import requests

from core.helpers.log_helpers import logwrap


class CollectorClient(object):
    """CollectorClient."""  # TODO: documentation

    def __init__(self, collector_ip, endpoint):
        url = "http://{0}/{1}".format(collector_ip, endpoint)
        self.__url = url
        super(CollectorClient, self).__init__()

    @property
    def url(self):
        return self.__url

    def _get(self, endpoint):
        return requests.get(url=self.url + endpoint)

    @logwrap
    def get_oswls(self, master_node_uid):
        return self._get("/oswls/{0}".format(master_node_uid)).json()

    @logwrap
    def get_installation_info(self, master_node_uid):
        return self._get("/installation_info/{0}".format(
            master_node_uid)).json()

    @logwrap
    def get_action_logs(self, master_node_uid):
        return self._get("/action_logs/{0}".format(
            master_node_uid)).json()

    @logwrap
    def get_oswls_by_resource(self, master_node_uid, resource):
        return self._get("/oswls/{0}/{1}".format(master_node_uid,
                                                 resource)).json()

    @logwrap
    def get_oswls_by_resource_data(self, master_node_uid, resource):
        return self.get_oswls_by_resource(
            master_node_uid, resource)['objs'][0]['resource_data']

    @logwrap
    def get_action_logs_ids(self, master_node_uid):
        return [actions['id']
                for actions in self.get_action_logs(master_node_uid)]

    @logwrap
    def get_action_logs_count(self, master_node_uid):
        return len(self.get_action_logs_ids(master_node_uid))

    @logwrap
    def get_action_logs_additional_info_by_id(
            self, master_node_uid, action_id):
        return [actions['body']['additional_info']
                for actions in self.get_action_logs(master_node_uid)
                if actions['id'] == action_id]

    @logwrap
    def get_installation_info_data(self, master_node_uid):
        return self.get_installation_info(master_node_uid)['structure']
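For context, a short driving sketch for this client; the address, endpoint and uid below are placeholders rather than values from this repository:

    client = CollectorClient(collector_ip='127.0.0.1', endpoint='api/v1/json')

    uid = 'deadbeef'  # hypothetical master node uid
    logs_count = client.get_action_logs_count(uid)
    structure = client.get_installation_info_data(uid)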
@ -1,17 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from core.models.fuel_client.client import Client

__all__ = ['Client']
@ -1,59 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals


class Adapter(object):
    def __init__(self, session, service_type='fuel'):
        self.session = session
        self.service_type = service_type

    def __repr__(self):
        return (
            "{cls}("
            "session=<Session(original_ip=original_ip, verify=verify)"
            " id={sess_id}>,"
            "service_type={svc}"
            ") id={id}".format(
                cls=self.__class__.__name__,
                sess_id=hex(id(self.session)),
                svc=self.service_type,
                id=hex(id(self))
            ))

    def get(self, url, **kwargs):
        kwargs.setdefault(
            'endpoint_filter', {'service_type': self.service_type})
        return self.session.get(url=url, connect_retries=1, **kwargs)

    def delete(self, url, **kwargs):
        kwargs.setdefault(
            'endpoint_filter', {'service_type': self.service_type})
        return self.session.delete(url=url, connect_retries=1, **kwargs)

    def post(self, url, **kwargs):
        kwargs.setdefault(
            'endpoint_filter', {'service_type': self.service_type})
        return self.session.post(url=url, connect_retries=1, **kwargs)

    def put(self, url, **kwargs):
        kwargs.setdefault(
            'endpoint_filter', {'service_type': self.service_type})
        return self.session.put(url=url, connect_retries=1, **kwargs)


class BaseClient(object):
    def __init__(self, client):
        self._client = client
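A hedged usage sketch for the Adapter above, assuming a keystoneauth1 v2 session against the Fuel master; all credentials and URLs are placeholders:

    from keystoneauth1 import session
    from keystoneauth1.identity import v2

    auth = v2.Password(
        auth_url='http://10.20.0.2:5000/v2.0',  # placeholder keystone URL
        username='admin', password='admin', tenant_name='admin')
    sess = session.Session(auth=auth, verify=False)

    ostf = Adapter(session=sess, service_type='ostf')
    resp = ostf.get(url='/testsets/1')  # endpoint_filter is filled in by get()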
@ -1,35 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

from core import logger
from core.models.fuel_client import base_client
from core.models.fuel_client import ostf_client


class Client(object):
    def __init__(self, session):
        logger.info(
            'Initialization of NailgunClient using shared session \n'
            '(auth_url={})'.format(session.auth.auth_url))

        ostf_clnt = base_client.Adapter(session=session, service_type='ostf')
        # TODO(astepanov): use for FUEL functionality:
        # clnt = base_client.Adapter(session=session)

        self.ostf = ostf_client.OSTFClient(ostf_clnt)


__all__ = ['Client']
@ -1,79 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

from core.helpers.log_helpers import logwrap
from core.models.fuel_client import base_client


class OSTFClient(base_client.BaseClient):
    @logwrap
    def get_test_sets(self, cluster_id):
        """get all test sets for a cluster

        :type cluster_id: int
        """
        return self._client.get(
            url="/testsets/{}".format(cluster_id),
        ).json()

    @logwrap
    def get_tests(self, cluster_id):
        """get all tests for a cluster

        :type cluster_id: int
        """
        return self._client.get(
            url="/tests/{}".format(cluster_id),
        ).json()

    @logwrap
    def get_test_runs(self, testrun_id=None, cluster_id=None):
        """get test runs results

        :type testrun_id: int
        :type cluster_id: int
        """
        url = '/testruns'
        if testrun_id is not None:
            url += '/{}'.format(testrun_id)
            if cluster_id is not None:
                url += '/{}'.format(cluster_id)
        elif cluster_id is not None:
            url += '/last/{}'.format(cluster_id)
        return self._client.get(url=url).json()

    @logwrap
    def run_tests(self, cluster_id, test_sets, test_name=None):
        """run tests on specified cluster

        :type cluster_id: int
        :type test_sets: list
        :type test_name: str
        """
        # fetch the tests first, otherwise a 500 error will be thrown
        self.get_tests(cluster_id)
        json = []
        for test_set in test_sets:
            record = {
                'metadata': {'cluster_id': str(cluster_id), 'config': {}},
                'testset': test_set
            }
            if test_name is not None:
                record['tests'] = [test_name]

            json.append(record)

        return self._client.post("/testruns", json=json).json()
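Putting the facade together, a sketch of a typical OSTF round trip (sess as in the Adapter sketch earlier; the cluster id is illustrative):

    fuel = Client(session=sess)

    fuel.ostf.run_tests(cluster_id=1, test_sets=['smoke'])
    last_runs = fuel.ostf.get_test_runs(cluster_id=1)  # GET /testruns/last/1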
@ -1,182 +0,0 @@
# pylint: disable=too-many-instance-attributes
class FuelAccessParams(object):
    """Value object to represent and map yaml file values of fuel master node
    access to an openrc file.

    Should not use any api."""

    def __init__(self,
                 tls_service_enabled=False,
                 tls_keystone_enabled=False):
        self.__username = None  # type: str
        self.__password = None  # type: str
        self.__project = None  # type: str
        self.__service_address = None  # type: str
        self.__service_port = None  # type: str
        self.__keystone_address = None  # type: str
        self.__keystone_port = None  # type: str
        self.__tls_service_enabled = tls_service_enabled  # type: bool
        self.__tls_keystone_enabled = tls_keystone_enabled  # type: bool

    @property
    def username(self):
        return self.__username

    @username.setter
    def username(self, value):
        """Set up username

        :type value: str
        """
        self.__username = value

    @property
    def password(self):
        return self.__password

    @password.setter
    def password(self, value):
        """Set up password

        :type value: str
        """
        self.__password = value

    @property
    def project(self):
        return self.__project

    @project.setter
    def project(self, value):
        """Set up project

        :type value: str
        """
        self.__project = value

    @property
    def service_address(self):
        return self.__service_address

    @service_address.setter
    def service_address(self, value):
        """Set up service address

        :type value: str
        """
        self.__service_address = value

    @property
    def service_port(self):
        return self.__service_port

    @service_port.setter
    def service_port(self, value):
        """Set up service port

        :type value: str
        """
        self.__service_port = value

    @property
    def keystone_address(self):
        address = self.service_address
        if self.__keystone_address:
            address = self.__keystone_address
        return address

    @keystone_address.setter
    def keystone_address(self, value):
        """Set up keystone address

        :type value: str
        """
        self.__keystone_address = value

    @property
    def keystone_port(self):
        return self.__keystone_port

    @keystone_port.setter
    def keystone_port(self, value):
        """Set up keystone port

        :type value: str
        """
        self.__keystone_port = value

    @property
    def os_auth_url(self):
        """Get url of authentication endpoint

        :rtype: str
        :return: The url of the os auth endpoint
        """
        protocol = 'https' if self.__tls_keystone_enabled else 'http'

        return "{protocol}://{keystone_address}:{keystone_port}".format(
            protocol=protocol,
            keystone_address=self.keystone_address,
            keystone_port=self.keystone_port
        )

    @property
    def service_url(self):
        """Get url of nailgun service endpoint

        :rtype: str
        :return: The url of the nailgun endpoint
        """
        protocol = 'https' if self.__tls_service_enabled else 'http'

        return "{protocol}://{service_address}:{service_port}".format(
            protocol=protocol,
            service_address=self.service_address,
            service_port=self.service_port
        )

    def to_openrc_content(self):
        """Method to represent access credentials in openrc format.

        :rtype: str
        :return: string content for openrc file
        """
        env_template = ('export OS_USERNAME="{username}"\n'
                        'export OS_PASSWORD="{password}"\n'
                        'export OS_TENANT_NAME="{project}"\n'
                        'export SERVICE_URL="{service_url}"\n'
                        'export OS_AUTH_URL="{os_auth_url}"\n')

        return env_template.format(
            username=self.username,
            password=self.password,
            project=self.project,
            service_url=self.service_url,
            os_auth_url=self.os_auth_url,
        )

    @classmethod
    def from_yaml_params(cls,
                         yaml_content,
                         tls_service_enabled=False,
                         tls_keystone_enabled=False):
        """The method to initialize the value object from yaml parsed on the
        master node.

        :type yaml_content: dict[str]
        :type tls_service_enabled: boolean
        :type tls_keystone_enabled: boolean
        :rtype: FuelAccessParams
        :return: instance, which can be used
        """
        access_params = cls(
            tls_service_enabled=tls_service_enabled,
            tls_keystone_enabled=tls_keystone_enabled)
        access_params.username = yaml_content['OS_USERNAME']
        access_params.password = yaml_content['OS_PASSWORD']
        access_params.project = yaml_content['OS_TENANT_NAME']
        access_params.service_address = yaml_content['SERVER_ADDRESS']
        access_params.service_port = yaml_content['SERVER_PORT']
        access_params.keystone_port = yaml_content['KEYSTONE_PORT']

        return access_params
# pylint: enable=too-many-instance-attributes
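An end-to-end sketch for the value object above; the yaml path is hypothetical, and the file must carry the OS_*/SERVER_*/KEYSTONE_PORT keys that from_yaml_params reads:

    import yaml

    with open('/root/keystone_creds.yaml') as src:  # hypothetical path
        creds = yaml.safe_load(src)                 # must carry OS_USERNAME etc.

    access = FuelAccessParams.from_yaml_params(
        creds, tls_service_enabled=True, tls_keystone_enabled=True)

    with open('openrc', 'w') as dst:
        dst.write(access.to_openrc_content())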
@ -1,3 +0,0 @@
[pytest]
addopts = -vvv -s -p no:django -p no:ipdb
testpaths = _tests
27
doc/Makefile
@ -1,27 +0,0 @@
.PHONY: clean-doc doc-html

SPHINXBUILD = sphinx-build
DOC_BUILDDIR = _build
SPHINXOPTS = -d $(DOC_BUILDDIR)/doctrees .

help:
	@echo 'Build directives (can be overridden by environment variables'
	@echo 'or by command line parameters):'
	@echo '  DOC_BUILDDIR: $(DOC_BUILDDIR)'
	@echo
	@echo 'Available targets:'
	@echo '  doc-html  - build html documentation based on the source code of the product'
	@echo '  clean-doc - clean generated docs'
	@echo


doc-html:
	$(SPHINXBUILD) -b html $(SPHINXOPTS) $(DOC_BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(DOC_BUILDDIR)/html."

clean-doc:
	rm -rf $(DOC_BUILDDIR)
	@echo
	@echo "Cleaned documentation output."
@ -1,768 +0,0 @@
|
||||
.. index:: Base tests
|
||||
|
||||
General OpenStack/Fuel Tests
|
||||
****************************
|
||||
|
||||
General tests
|
||||
=============
|
||||
|
||||
Base Test Case
|
||||
--------------
|
||||
.. automodule:: fuelweb_test.tests.base_test_case
|
||||
:members:
|
||||
|
||||
Admin Node Tests
|
||||
----------------
|
||||
.. automodule:: fuelweb_test.tests.test_admin_node
|
||||
:members:
|
||||
|
||||
Test Admin Node Backup-Restore
|
||||
------------------------------
|
||||
.. automodule:: fuelweb_test.tests.test_backup_restore
|
||||
:members:
|
||||
|
||||
Test Bonding base
|
||||
-----------------
|
||||
.. automodule:: fuelweb_test.tests.test_bonding_base
|
||||
:members:
|
||||
|
||||
Test Bonding
|
||||
------------
|
||||
.. automodule:: fuelweb_test.tests.test_bonding
|
||||
:members:
|
||||
|
||||
Test Bond offloading types
|
||||
--------------------------
|
||||
.. automodule:: fuelweb_test.tests.test_bond_offloading
|
||||
:members:
|
||||
|
||||
Test Ceph
|
||||
---------
|
||||
.. automodule:: fuelweb_test.tests.test_ceph
|
||||
:members:
|
||||
|
||||
Test Cli
|
||||
--------
|
||||
.. automodule:: fuelweb_test.tests.test_cli
|
||||
:members:
|
||||
|
||||
Test Cli Base
|
||||
-------------
|
||||
.. automodule:: fuelweb_test.tests.test_cli_base
|
||||
:members:
|
||||
|
||||
Test Cli role component (creade/update/delete role)
|
||||
---------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_cli.test_cli_role
    :members:

Test Cli deploy (deploy neutron tun)
------------------------------------
.. automodule:: fuelweb_test.tests.tests_cli.test_cli_deploy
    :members:

Test Cli deploy ceph neutron tun
--------------------------------
.. automodule:: fuelweb_test.tests.tests_cli.test_cli_deploy_ceph
    :members:

Test custom hostname
--------------------
.. automodule:: fuelweb_test.tests.test_custom_hostname
    :members:

Test custom graph
-----------------
.. automodule:: fuelweb_test.tests.tests_custom_graph.test_custom_graph
    :members:


Prepare target image file
-------------------------
.. automodule:: fuelweb_test.config_templates.prepare_release_image
    :members:

Test DPDK
---------
.. automodule:: fuelweb_test.tests.test_dpdk
    :members:

Test Environment Action
-----------------------
.. automodule:: fuelweb_test.tests.test_environment_action
    :members:

Test ha NeutronTUN deployment group 1 (controller+baseos multirole and ceph for images/objects)
-----------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_tun.test_ha_tun_group_1
    :members:

Test ha NeutronTUN deployment group 2 (ceph for all, baseos node and ceph for all, untag networks and changed OS credentials)
-----------------------------------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_tun.test_ha_tun_group_2
    :members:

Test ha NeutronTUN deployment group 3 (5 controllers, ceph for images/ephemeral and no volumes, ceph for images/ephemeral)
--------------------------------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_tun.test_ha_tun_group_3
    :members:

Test ha neutron vlan deployment group 1 (cinder/ceph for images and ceph for volumes/swift)
-------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_1
    :members:

Test ha neutron vlan deployment group 2 (cinder/ceph for ephemeral and cinder/ceph for images/ephemeral)
--------------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_2
    :members:

Test ha neutron vlan deployment group 3 (no volumes storage/ceph volumes, ephemeral)
------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_3
    :members:

Test ha neutron vlan deployment group 4 (cinder volumes, ceph images and rados gw/ default storage)
----------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_4
    :members:

Test ha neutron vlan deployment group 5 (ceph for volumes/images/ephemeral/rados and cinder/ceph for images/ephemeral/rados)
----------------------------------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_5
    :members:

Test ha neutron vlan deployment group 6 (no volumes and ceph for images/ephemeral/rados and ceph for volumes/images/ephemeral)
------------------------------------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_6
    :members:

Test ha neutron vlan deployment group 7 (no volumes/ceph for images and cinder/swift/base os)
---------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_deployments.tests_neutron_vlan.test_ha_vlan_group_7
    :members:

Test Sahara OS component with vlan and ceph
-------------------------------------------
.. automodule:: fuelweb_test.tests.tests_os_components.test_sahara_os_component
    :members:

Test Murano OS component with vlan
----------------------------------
.. automodule:: fuelweb_test.tests.tests_os_components.test_murano_os_component
    :members:

Test mixed OS components
------------------------
.. automodule:: fuelweb_test.tests.tests_os_components.test_mixed_os_components
    :members:

Test failover group 1
---------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover_group_1
    :members:

Test failover group 2
---------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover_group_2
    :members:

Test failover group 3
---------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover_group_3
    :members:

Test failover mongo
-------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover_mongo
    :members:

Test Mongo Multirole
--------------------
.. automodule:: fuelweb_test.tests.tests_multirole.test_mongo_multirole
    :members:

Test scale neutron vlan deployment add/delete compute/cinder+cinder+ceph
------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_5
    :members:

Test scale neutron tun deployment add/delete compute+cinder+ceph+ephemeral
--------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_6
    :members:

Test High Availability on one controller
----------------------------------------
.. automodule:: fuelweb_test.tests.test_ha_one_controller
    :members:

Test High Availability on one controller base
---------------------------------------------
.. automodule:: fuelweb_test.tests.test_ha_one_controller_base
    :members:

Test jumbo frames
-----------------
.. automodule:: fuelweb_test.tests.test_jumbo_frames
    :members:

Test manual VIP allocation
--------------------------
.. automodule:: fuelweb_test.tests.test_manual_vip_allocation
    :members:

Test Multiple Networks
----------------------
.. automodule:: fuelweb_test.tests.test_multiple_networks
    :members:

Test multirole group 1 (controller+ceph/compute+cinder and controller+ceph+cinder/compute+ceph+cinder)
------------------------------------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_multirole.test_multirole_group_1
    :members:

Test network templates base
---------------------------
.. automodule:: fuelweb_test.tests.test_net_templates_base
    :members:

Test network templates
----------------------
.. automodule:: fuelweb_test.tests.test_net_templates
    :members:

Test multiple networks templates
--------------------------------
.. automodule:: fuelweb_test.tests.test_net_templates_multiple_networks
    :members:

Test Neutron
------------
.. automodule:: fuelweb_test.tests.test_neutron
    :members:

Test Neutron Public
-------------------
.. automodule:: fuelweb_test.tests.test_neutron_public
    :members:

Test Neutron VXLAN
------------------
.. automodule:: fuelweb_test.tests.test_neutron_tun
    :members:

Test Neutron VXLAN base
-----------------------
.. automodule:: fuelweb_test.tests.test_neutron_tun_base
    :members:

Test Neutron IPv6 base functionality
------------------------------------
.. automodule:: fuelweb_test.tests.test_neutron_ipv6
    :members:

Test Node reinstallation
------------------------
.. automodule:: fuelweb_test.tests.test_node_reinstallation
    :members:

Test offloading types
---------------------
.. automodule:: fuelweb_test.tests.test_offloading_types
    :members:

Test public api
---------------
.. automodule:: fuelweb_test.tests.test_public_api
    :members:

Test Pull Requests
------------------
.. automodule:: fuelweb_test.tests.test_pullrequest
    :members:

Test Reduced Footprint
----------------------
.. automodule:: fuelweb_test.tests.test_reduced_footprint
    :members:

Test scale group 1 (add controllers with stop and add ceph nodes with stop)
---------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_1
    :members:

Test scale group 2 (replace primary controller and remove 2 controllers)
------------------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_2
    :members:

Test scale group 3 (add/delete compute and add/delete cinder)
-------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_3
    :members:

Test scale group 4 (add/delete ceph and add/delete cinder+ceph)
---------------------------------------------------------------
.. automodule:: fuelweb_test.tests.tests_scale.test_scale_group_4
    :members:

Test Services
-------------
.. automodule:: fuelweb_test.tests.test_services
    :members:

Test Ubuntu bootstrap
---------------------
.. automodule:: fuelweb_test.tests.test_ubuntu_bootstrap
    :members:

Test Ubuntu Cloud Archive
-------------------------
.. automodule:: fuelweb_test.tests.tests_uca.test_uca
    :members:

Test Ironic
-----------
.. automodule:: fuelweb_test.tests.test_ironic_base
    :members:

Test Services reconfiguration
-----------------------------
.. automodule:: fuelweb_test.tests.test_services_reconfiguration
    :members:

Test Support HugePages
----------------------
.. automodule:: fuelweb_test.tests.test_support_hugepages
    :members:

Test CPU pinning
----------------
.. automodule:: fuelweb_test.tests.test_cpu_pinning
    :members:

Test extra computes
-------------------
.. automodule:: fuelweb_test.tests.tests_extra_computes.base_extra_computes
.. automodule:: fuelweb_test.tests.tests_extra_computes.test_rh_basic_actions
.. automodule:: fuelweb_test.tests.tests_extra_computes.test_rh_migration
.. automodule:: fuelweb_test.tests.tests_extra_computes.test_ol_basic_actions
.. automodule:: fuelweb_test.tests.tests_extra_computes.test_ol_migration
    :members:

Test Daemon Resource Allocation Control
---------------------------------------
.. automodule:: fuelweb_test.tests.test_cgroups
    :members:

Test LCM base
-------------
.. automodule:: fuelweb_test.tests.tests_lcm.base_lcm_test
    :members:

Test task idempotency
---------------------
.. automodule:: fuelweb_test.tests.tests_lcm.test_idempotency
    :members:

Test task ensurability
----------------------
.. automodule:: fuelweb_test.tests.tests_lcm.test_ensurability
    :members:

Test task coverage by LCM tests
-------------------------------
.. automodule:: fuelweb_test.tests.tests_lcm.test_task_coverage
    :members:

Test unlock settings tab
------------------------
.. automodule:: fuelweb_test.tests.test_unlock_settings_tab
    :members:

Test for unlock settings tab from different cluster states
----------------------------------------------------------
.. automodule:: fuelweb_test.tests.test_states_unlock_settings_tab
    :members:

Gating tests
============

Test Fuel agent
---------------
.. automodule:: gates_tests.tests.test_review_in_fuel_agent
    :members:

Test Fuel cli
-------------
.. automodule:: gates_tests.tests.test_review_in_fuel_client
    :members:

Test Fuel astute
----------------
.. automodule:: gates_tests.tests.test_review_in_astute
    :members:

Test Fuel nailgun agent
-----------------------
.. automodule:: gates_tests.tests.test_nailgun_agent
    :members:

Test Fuel web
-------------
.. automodule:: gates_tests.tests.test_review_fuel_web
    :members:

Fuel mirror verification
========================

Tests to check that mirror is created in various scenarios
----------------------------------------------------------
Fuel create mirror simplifies the process of mirror creation for our
customers who do not have internet access on-site. It has been rewritten
from bash to python.

Fuel create mirror features:

1) Minimize the size of packages in a mirror;

2) Download packages in parallel.

Such features can cause some problems:

1) While resolving packages to minimize mirror size, we found these issues:

1.1) Incorrect versions. When we have multiple mirrors, some version can be
skipped due to name duplication, even though it is still needed by
bootstrap/deploy.

1.2) Mirror/version collisions. Sometimes a package is present in a number of
mirrors, and the correct version does not always correspond to the correct
site.

1.3) There is a special mirror on the Fuel iso, which differs from
http://mirror.fuel-infra.org/ .

2) With concurrent package fetching, the complications are:

2.1) Some mirrors cannot handle downloads in multiple threads and either fail
or reject concurrent connections. In such cases we abandon concurrent
downloads on those mirrors (see the sketch after this list).

2.2) Common concurrency pitfalls: race conditions for shared resources such
as lists of items to process.

2.3) Problems with offset-based downloads. Some packages were broken, and
this was discovered only during package installation.
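
A minimal sketch of the fallback described in 2.1 (hypothetical helper names,
not the actual fuel-mirror code): fetch a mirror's packages with a thread
pool and, if the mirror rejects parallel clients, retry the batch serially::

    import concurrent.futures

    def fetch_all(mirror_url, packages, fetch_one, workers=8):
        """Download packages concurrently; fall back to serial on failure."""
        try:
            with concurrent.futures.ThreadPoolExecutor(workers) as pool:
                # list() forces all futures; raises if any download fails
                list(pool.map(lambda pkg: fetch_one(mirror_url, pkg),
                              packages))
        except Exception:
            # The mirror cannot handle parallel clients: abandon concurrency.
            for pkg in packages:
                fetch_one(mirror_url, pkg)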

.. automodule:: fuelweb_test.tests.tests_mirrors.test_create_mirror
    :members:

Tests to verify installation from packages mirrors
--------------------------------------------------
After a mirror is created, we should be able to deploy an environment with it.

Fuel-mirror updates the default repo urls for deployment, so we do not have to
set them up for new environments. But be careful: if you want to deploy
environments with the vanilla mirrors from the iso, you should update the
settings in the environment. Currently there is no option to update the
default mirrors from the UI/cli.

Fuel-mirror updates the repo list with internal structures:
https://github.com/bgaifullin/packetary/blob/packetary3/contrib/fuel_mirror/fuel_mirror/commands/create.py#L224-L243

The repository should be able to do two things:

1) Create a bootstrap iso for provisioning;

2) Provide packages for deployment. The packages pulled in as dependencies
from http://mirror.fuel-infra.org/ do not cover all the needed packages,
so we need to mix in a list of required packages (see the sketch below):
https://github.com/bgaifullin/packetary/blob/packetary3/contrib/fuel_mirror/etc/config.yaml#L46-L96
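
A rough illustration of that mixing step (the ``packages`` key is an
assumption based on the config.yaml linked above, not a verified schema):
union the dependency-resolved package set with the explicit required list::

    import yaml

    def packages_to_mirror(resolved_deps, config_path):
        """Merge dependency-resolved packages with the required list."""
        with open(config_path) as f:
            config = yaml.safe_load(f)
        required = set(config.get('packages', []))
        return set(resolved_deps) | required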

Problems:

1) We need to install not only 'depends', but also 'recommends' packages:
https://wiki.ubuntu.com/LucidLynx/ReleaseNotes/#Recommended_packages_installed_by_default
http://askubuntu.com/questions/18545/installing-suggested-recommended-packages

2) We have a problem with support for a custom packages list. It is tracked
only via system test failures, with no specific team assigned to the job.
Also, debootstrap and other tools are not informative about package errors:
they may fail with 'unable to mount', '/proc not mounted' or 'file not found'
even if the actual problem is a missing package.

.. automodule:: fuelweb_test.tests.tests_mirrors.test_use_mirror
    :members:


Plugins tests
=============

Contrail tests
--------------
.. automodule:: fuelweb_test.tests.plugins.plugin_contrail.test_fuel_plugin_contrail
    :members:

Emc tests
---------
.. automodule:: fuelweb_test.tests.plugins.plugin_emc.test_plugin_emc
    :members:

Example tests
-------------
.. automodule:: fuelweb_test.tests.plugins.plugin_example.test_fuel_plugin_example
    :members:

Example tests for plugin installation after cluster create
----------------------------------------------------------
.. automodule:: fuelweb_test.tests.plugins.plugin_example.test_fuel_plugin_example_postdeploy
    :members:

Glusterfs tests
---------------
.. automodule:: fuelweb_test.tests.plugins.plugin_glusterfs.test_plugin_glusterfs
    :members:

Lbaas tests
-----------
.. automodule:: fuelweb_test.tests.plugins.plugin_lbaas.test_plugin_lbaas
    :members:

Reboot tests
------------
.. automodule:: fuelweb_test.tests.plugins.plugin_reboot.test_plugin_reboot_task
    :members:

Vip reservation tests
---------------------
.. automodule:: fuelweb_test.tests.plugins.plugin_vip_reservation.test_plugin_vip_reservation
    :members:

Zabbix tests
------------
.. automodule:: fuelweb_test.tests.plugins.plugin_zabbix.test_plugin_zabbix
    :members:

Murano Tests
------------
.. automodule:: fuelweb_test.tests.plugins.plugin_murano.test_plugin_murano
    :members:

Patching tests
==============

Patching tests
--------------
.. automodule:: fuelweb_test.tests.tests_patching.test_patching
    :members:


Security tests
==============

Nessus scan tests
-----------------
.. automodule:: fuelweb_test.tests.tests_security.test_run_nessus
    :members:

Lynis audit tests
-----------------
.. automodule:: fuelweb_test.tests.tests_security.test_lynis_audit
    :members:

Strength tests
==============

Cic maintenance mode tests
--------------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_cic_maintenance_mode
    :members:

Failover tests
--------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover
    :members:

Base failover tests
-------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover_base
    :members:

Failover with CEPH tests
------------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_failover_with_ceph
    :members:

Huge environments tests
-----------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_huge_environments
    :members:

Image based tests
-----------------
.. automodule:: fuelweb_test.tests.tests_strength.test_image_based
    :members:

Base load tests
---------------
.. automodule:: fuelweb_test.tests.tests_strength.test_load_base
    :members:

Load tests
----------
.. automodule:: fuelweb_test.tests.tests_strength.test_load
    :members:

Master node failover tests
--------------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_master_node_failover
    :members:

Neutron tests
-------------
.. automodule:: fuelweb_test.tests.tests_strength.test_neutron
    :members:

Base Neutron tests
------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_neutron_base
    :members:

OSTF repeatable tests
---------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_ostf_repeatable_tests
    :members:

Repetitive restart tests
------------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_repetitive_restart
    :members:

Restart tests
-------------
.. automodule:: fuelweb_test.tests.tests_strength.test_restart
    :members:

Upgrade tests
=============

Test Data-Driven Upgrade
------------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_clone_env
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_base
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_ceph_ha
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_net_tmpl
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_no_cluster
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_plugin
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_smoke
.. automodule:: fuelweb_test.tests.tests_upgrade.test_data_driven_upgrade_multirack_deployment
.. automodule:: fuelweb_test.tests.tests_upgrade.test_node_reassignment
.. automodule:: fuelweb_test.tests.tests_upgrade.upgrader_tool
    :members:

OS upgrade tests
================

Test OpenStack Upgrades
-----------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.upgrade_base
    :members:

.. automodule:: fuelweb_test.tests.tests_upgrade.test_os_upgrade
    :members:

Tests for separated services
============================

Test for separate haproxy service
---------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_haproxy
    :members:

Test for separate horizon service
---------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_horizon
    :members:

Test for separate multiroles
----------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_multiroles
    :members:

Test for separate rabbitmq service
----------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_rabbitmq
    :members:

Test for separate rabbitmq service and ceph
-------------------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_separate_rabbitmq_ceph
    :members:

Deployment with platform components
-----------------------------------
.. automodule:: fuelweb_test.tests.tests_separate_services.test_deploy_platform_components
    :members:

Test for ssl components
-----------------------
.. automodule:: fuelweb_test.tests.test_ssl
    :members:

Test for network outage
-----------------------
.. automodule:: fuelweb_test.tests.tests_strength.test_network_outage
    :members:

Test for separate master node deployment
----------------------------------------
.. automodule:: system_test.tests.test_centos_master_deploy_ceph
    :members:

Test for multipath devices
--------------------------
.. automodule:: fuelweb_test.tests.test_multipath_devices
    :members:

Test for Image Based Provisioning
---------------------------------
.. automodule:: fuelweb_test.tests.tests_ibp.test_ibp
    :members:

Tests for configDB api
----------------------
.. automodule:: fuelweb_test.tests.tests_configdb.test_configdb_api
    :members:

Tests for cinder block device driver
------------------------------------
.. automodule:: fuelweb_test.tests.test_bdd

Tests for configDB cli
----------------------
.. automodule:: fuelweb_test.tests.tests_configdb.test_configdb_cli
    :members:

Test for tracking /etc dir by etckeeper plugin
----------------------------------------------
.. automodule:: fuelweb_test.tests.plugins.plugin_etckeeper.test_plugin_etckeeper
    :members:

Test SR-IOV
-----------
.. automodule:: fuelweb_test.tests.test_sriov
    :members:

Test graph extension
--------------------
.. automodule:: fuelweb_test.tests.test_graph_extension
    :members:

Test Multiqueue
---------------
.. automodule:: fuelweb_test.tests.test_multiqueue
    :members:

Test OVS firewall driver
------------------------
.. automodule:: fuelweb_test.tests.test_ovs_firewall
    :members:
51
doc/conf.py
@@ -1,51 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys

sys.path.insert(0,
                os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]

autodoc_default_flags = ['members', 'show-inheritance', 'inherited-members']
autodoc_member_order = 'bysource'

source_suffix = '.rst'

master_doc = 'index'

project = 'Fuel QA'
copyright = 'Copyright 2015 Mirantis, Inc.' \
            ' Licensed under the Apache License, Version 2.0' \
            ' (the "License"); you may not use this file except in' \
            ' compliance with the License. You may obtain a copy' \
            ' of the License at http://www.apache.org/licenses/LICENSE-2.0'

exclude_patterns = ['_build']

pygments_style = 'sphinx'

html_theme = 'sphinxdoc'
htmlhelp_basename = 'fuel-qadoc'

intersphinx_mapping = {'http://docs.python.org/': None}
@@ -1,34 +0,0 @@
.. index:: Fuel tests

Fuel tests
**********

PyTest test config
==================

Conftest for Tests
------------------
.. automodule:: fuel_tests.tests.conftest
    :members:

Models
======

Manager
----------
.. automodule:: fuel_tests.models.manager
    :members:

Tests
=====

Ceph Tests
----------
.. automodule:: fuel_tests.tests.test_ceph
    :members:

Neutron Tests
-------------
.. automodule:: fuel_tests.tests.test_neutron
    :members:

@@ -1,19 +0,0 @@
.. index:: General

General
=======

Proboscis test runner
---------------------
.. automodule:: fuelweb_test.run_tests
    :members:

Settings
--------
.. automodule:: fuelweb_test.settings
    :members:

OSTF tests mapping
------------------
.. automodule:: fuelweb_test.ostf_test_mapping
    :members:
170
doc/helpers.rst
@@ -1,170 +0,0 @@
.. index:: Helpers

Helpers
*******

General Helpers
===============

Ceph
----
.. automodule:: fuelweb_test.helpers.ceph
    :members:

Checkers
--------
.. automodule:: fuelweb_test.helpers.checkers
    :members:

CIC Maintenance Mode
--------------------
.. automodule:: fuelweb_test.helpers.cic_maintenance_mode
    :members:

Cloud Image
-----------
.. automodule:: fuelweb_test.helpers.cloud_image
    :members:

Common
------
.. automodule:: fuelweb_test.helpers.common
    :members:

Decorators
----------
.. automodule:: fuelweb_test.helpers.decorators
    :members:

Metaclasses
-----------
.. automodule:: fuelweb_test.helpers.metaclasses
    :members:

Eb tables
---------
.. automodule:: fuelweb_test.helpers.eb_tables
    :members:

Fuel Actions
------------
.. automodule:: fuelweb_test.helpers.fuel_actions
    :members:

Fuel Release Hacks
------------------
.. automodule:: fuelweb_test.helpers.fuel_release_hacks
    :members:

Granular Deployment Checkers
----------------------------
.. automodule:: fuelweb_test.helpers.granular_deployment_checkers
    :members:

Ironic Actions
--------------
.. automodule:: fuelweb_test.helpers.ironic_actions
    :members:

Log Server
----------
.. automodule:: fuelweb_test.helpers.log_server
    :members:

Multiple Networks Hacks
-----------------------
.. automodule:: fuelweb_test.helpers.multiple_networks_hacks
    :members:

Nessus REST Client
------------------
.. automodule:: fuelweb_test.helpers.nessus
    :members:

Os Actions
----------
.. automodule:: fuelweb_test.helpers.os_actions
    :members:

Ovs helper
----------
.. automodule:: fuelweb_test.helpers.ovs
    :members:

Pacemaker
---------
.. automodule:: fuelweb_test.helpers.pacemaker
    :members:

Patching
--------
.. automodule:: fuelweb_test.helpers.patching
    :members:

Rally
-----
.. automodule:: fuelweb_test.helpers.rally
    :members:

Regenerate Repo
---------------
.. automodule:: fuelweb_test.helpers.regenerate_repo
    :members:

Replace Repositories
--------------------
.. automodule:: fuelweb_test.helpers.replace_repos
    :members:

Security
--------
.. automodule:: fuelweb_test.helpers.security
    :members:

SSH Manager
-----------
.. automodule:: fuelweb_test.helpers.ssh_manager
    :members:

Ssl
---
.. automodule:: fuelweb_test.helpers.ssl_helpers
    :members:

UCA
---
.. automodule:: fuelweb_test.helpers.uca
    :members:

Utils
-----
.. automodule:: fuelweb_test.helpers.utils
    :members:

Gerrit
======

Client
------
.. automodule:: fuelweb_test.helpers.gerrit.gerrit_client
    :members:

Info provider
-------------
.. automodule:: fuelweb_test.helpers.gerrit.gerrit_info_provider
    :members:

Utils
-----
.. automodule:: fuelweb_test.helpers.gerrit.utils
    :members:

Rules
-----
.. automodule:: fuelweb_test.helpers.gerrit.rules
    :members:

Content parser
--------------
.. automodule:: fuelweb_test.helpers.gerrit.content_parser
    :members:
@@ -1,14 +0,0 @@
Documentation for the QA test code repo
***************************************

.. toctree::
    :numbered:
    :maxdepth: 3

    general.rst
    models.rst
    helpers.rst
    base_tests.rst
    testrail.rst
    system_tests.rst
    fuel_tests.rst
@@ -1,24 +0,0 @@
.. index:: Models

Models
======

Collector client
----------------
.. automodule:: fuelweb_test.models.collector_client
    :members:

Environment
-----------
.. automodule:: fuelweb_test.models.environment
    :members:

Fuel Web Client
---------------
.. automodule:: fuelweb_test.models.fuel_web_client
    :members:

Nailgun Client
--------------
.. automodule:: fuelweb_test.models.nailgun_client
    :members:
@@ -1 +0,0 @@
sphinx==1.3.1
@@ -1,137 +0,0 @@
.. index:: System tests

System tests
************

Core
====

Repository
----------
.. automodule:: system_test.core.repository
    :members:

Discover
--------
.. automodule:: system_test.core.discover
    :members:

Decorators
----------
.. automodule:: system_test.core.decorators
    :members:

Factory
-------
.. automodule:: system_test.core.factory
    :members:

Config
------
.. automodule:: system_test.core.config
    :members:

Actions
=======

Base actions
------------
.. automodule:: system_test.actions.base
    :members:

Fuelmaster actions
------------------
.. automodule:: system_test.actions.fuelmaster_actions
    :members:

OSTF actions
------------
.. automodule:: system_test.actions.ostf_actions
    :members:

Plugins actions
---------------
.. automodule:: system_test.actions.plugins_actions
    :members:

Strength actions
----------------
.. automodule:: system_test.actions.strength_actions
    :members:

General tests
=============

ActionTest
----------
.. automodule:: system_test.tests.base
    :members:

Case deploy Environment
-----------------------
.. automodule:: system_test.tests.test_create_deploy_ostf
    :members:

Deploy cluster and check RadosGW
--------------------------------
.. automodule:: system_test.tests.test_deploy_check_rados
    :members:

Delete cluster after deploy
---------------------------
.. automodule:: system_test.tests.test_delete_after_deploy
    :members:

Redeploy cluster after stop
---------------------------
.. automodule:: system_test.tests.test_redeploy_after_stop
    :members:

Redeploy cluster after reset
----------------------------
.. automodule:: system_test.tests.test_redeploy_after_reset
    :members:

Fuel master migration
---------------------
.. automodule:: system_test.tests.test_fuel_migration
    :members:

Strength tests
==============

Destroy controllers
-------------------
.. automodule:: system_test.tests.strength.test_destroy_controllers
    :members:

Fill root and check pacemaker
-----------------------------
.. automodule:: system_test.tests.strength.test_filling_root
    :members:

Plugin tests
============

Example plugin Base
-------------------
.. automodule:: system_test.tests.plugins.plugin_example
    :members:

Example plugin
--------------
.. automodule:: system_test.tests.plugins.plugin_example.test_plugin_example
    :members:

Example plugin v3
-----------------
.. automodule:: system_test.tests.plugins.plugin_example.test_plugin_example_v3
    :members:

Helpers
=======

Decorators
----------
.. automodule:: system_test.helpers.decorators
    :members:
@@ -1,69 +0,0 @@
.. index:: Testrail

Testrail
========

Builds
------
.. automodule:: fuelweb_test.testrail.builds
    :members:

Launchpad client
----------------
.. automodule:: fuelweb_test.testrail.launchpad_client
    :members:

Report
------
.. automodule:: fuelweb_test.testrail.report
    :members:

Report partner integration
--------------------------
.. automodule:: fuelweb_test.testrail.report_pi
    :members:

Report tempest results
----------------------
.. automodule:: fuelweb_test.testrail.report_tempest_results
    :members:

Settings
--------
.. automodule:: fuelweb_test.testrail.settings
    :members:

Testrail
--------
.. automodule:: fuelweb_test.testrail.testrail
    :members:

Testrail Client
---------------
.. automodule:: fuelweb_test.testrail.testrail_client
    :members:

Upload Cases Description
------------------------
.. automodule:: fuelweb_test.testrail.upload_cases_description
    :members:

Upload tempest test suite
-------------------------
.. automodule:: fuelweb_test.testrail.upload_tempest_test_suite
    :members:

Generate bugs statistics for TestPlan
-------------------------------------
.. automodule:: fuelweb_test.testrail.generate_statistics
    :members:

Datetime utils for Testrail
---------------------------
.. automodule:: fuelweb_test.testrail.datetime_util
    :members:

Generate failure statistics for TestPlan
----------------------------------------
.. automodule:: fuelweb_test.testrail.generate_failure_group_statistics
    :members:
@@ -1,329 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# pylint: disable=redefined-builtin
# noinspection PyUnresolvedReferences
from six.moves import xrange
# pylint: enable=redefined-builtin

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.decorators import create_diagnostic_snapshot
from fuelweb_test.helpers.utils import TimeStat
from fuelweb_test.tests.base_test_case import TestBasic as Basic

from system_test.core.discover import load_yaml


class Manager(Basic):
    """Manager class for tests."""

    def __init__(self, config_file, cls):
        super(Manager, self).__init__()
        self.full_config = None
        self.env_config = None
        self.env_settings = None
        self.config_name = None
        self._devops_config = None
        self._start_time = 0
        self.config_file = config_file
        if config_file:
            self._load_config()
        self._context = cls
        self.assigned_slaves = set()

    def _cluster_from_template(self):
        """Create cluster from template file."""

        slaves = int(self.full_config['template']['slaves'])
        cluster_name = self.env_config['name']
        snapshot_name = "ready_cluster_{}".format(cluster_name)
        if self.check_run(snapshot_name):
            self.env.revert_snapshot(snapshot_name)
            cluster_id = self.fuel_web.client.get_cluster_id(cluster_name)
            self._context._storage['cluster_id'] = cluster_id
            logger.info("Got deployed cluster from snapshot")
            return True
        elif self.get_ready_slaves(slaves):
            self.env.sync_time()
            logger.info("Create env {}".format(
                self.env_config['name']))
            cluster_settings = {
                "sahara": self.env_settings['components'].get(
                    'sahara', False),
                "ceilometer": self.env_settings['components'].get(
                    'ceilometer', False),
                "ironic": self.env_settings['components'].get(
                    'ironic', False),
                "user": self.env_config.get("user", "admin"),
                "password": self.env_config.get("password", "admin"),
                "tenant": self.env_config.get("tenant", "admin"),
                "volumes_lvm": self.env_settings['storages'].get(
                    "volume-lvm", False),
                "volumes_ceph": self.env_settings['storages'].get(
                    "volume-ceph", False),
                "images_ceph": self.env_settings['storages'].get(
                    "image-ceph", False),
                "ephemeral_ceph": self.env_settings['storages'].get(
                    "ephemeral-ceph", False),
                "objects_ceph": self.env_settings['storages'].get(
                    "rados-ceph", False),
                "osd_pool_size": str(self.env_settings['storages'].get(
                    "replica-ceph", 2)),
                "net_provider": self.env_config['network'].get(
                    'provider', 'neutron'),
                "net_segment_type": self.env_config['network'].get(
                    'segment-type', 'vlan'),
                "assign_to_all_nodes": self.env_config['network'].get(
                    'pubip-to-all',
                    False),
                "neutron_l3_ha": self.env_config['network'].get(
                    'neutron-l3-ha', False),
                "neutron_dvr": self.env_config['network'].get(
                    'neutron-dvr', False),
                "neutron_l2_pop": self.env_config['network'].get(
                    'neutron-l2-pop', False)
            }

            cluster_id = self.fuel_web.create_cluster(
                name=self.env_config['name'],
                mode=settings.DEPLOYMENT_MODE,
                release_name=self.env_config['release'],
                settings=cluster_settings)

            self._context._storage['cluster_id'] = cluster_id
            logger.info("Add nodes to env {}".format(cluster_id))
            names = "slave-{:02}"
            num = iter(xrange(1, slaves + 1))
            nodes = {}
            for new in self.env_config['nodes']:
                for _ in xrange(new['count']):
                    name = names.format(next(num))
                    while name in self.assigned_slaves:
                        name = names.format(next(num))

                    self.assigned_slaves.add(name)
                    nodes[name] = new['roles']
                    logger.info("Set roles {} to node {}".format(
                        new['roles'], name))
            self.fuel_web.update_nodes(cluster_id, nodes)
            self.fuel_web.verify_network(cluster_id)
            self.fuel_web.deploy_cluster_wait(cluster_id)
            self.fuel_web.verify_network(cluster_id)
            self.env.make_snapshot(snapshot_name, is_make=True)
            self.env.resume_environment()
            return True
        else:
            logger.error("Can't deploy cluster because snapshot"
                         " with bootstrapped nodes didn't revert")
            raise RuntimeError("Can't deploy cluster because snapshot"
                               " with bootstrapped nodes didn't revert")

    def _cluster_from_config(self, config):
        """Create cluster from predefined config."""

        slaves = len(config.get('nodes'))
        cluster_name = config.get('name', self._context.__name__)
        snapshot_name = "ready_cluster_{}".format(cluster_name)
        if self.check_run(snapshot_name):
            self.env.revert_snapshot(snapshot_name)
            cluster_id = self.fuel_web.client.get_cluster_id(cluster_name)
            self._context._storage['cluster_id'] = cluster_id
            logger.info("Got deployed cluster from snapshot")
            return True
        elif self.get_ready_slaves(slaves):
            self.env.sync_time()
            logger.info("Create env {}".format(cluster_name))
            cluster_id = self.fuel_web.create_cluster(
                name=cluster_name,
                mode=config.get('mode', settings.DEPLOYMENT_MODE),
                settings=config.get('settings', {})
            )
            self._context._storage['cluster_id'] = cluster_id
            self.fuel_web.update_nodes(
                cluster_id,
                config.get('nodes')
            )
            self.fuel_web.verify_network(cluster_id)
            self.fuel_web.deploy_cluster_wait(cluster_id)
            self.fuel_web.verify_network(cluster_id)
            self.env.make_snapshot(snapshot_name, is_make=True)
            self.env.resume_environment()
            return True
        else:
            logger.error("Can't deploy cluster because snapshot"
                         " with bootstrapped nodes didn't revert")
            raise RuntimeError("Can't deploy cluster because snapshot"
                               " with bootstrapped nodes didn't revert")

    def check_run(self, snapshot_name):
        """Check if a run of the current test is required.

        :param snapshot_name: Name of the snapshot the function should make
        :type snapshot_name: str

        """
        if snapshot_name:
            return self.env.d_env.has_snapshot(snapshot_name)

    def _load_config(self):
        """Read cluster config from yaml file."""

        config = load_yaml(self.config_file)
        self.full_config = config
        self.env_config = config[
            'template']['cluster_template']
        self.env_settings = config[
            'template']['cluster_template']['settings']
        self.config_name = config['template']['name']

        if 'devops_settings' in config['template']:
            self._devops_config = config

    def get_ready_setup(self):
        """Create virtual environment and install Fuel master node.
        """

        logger.info("Getting ready setup")
        if self.check_run("empty"):
            self.env.revert_snapshot("empty")
            return True
        else:
            with TimeStat("setup_environment", is_uniq=True):
                if list(self.env.d_env.get_nodes(role='fuel_master')):
                    self.env.setup_environment()
                    self.fuel_post_install_actions()

                elif list(self.env.d_env.get_nodes(role='centos_master')):
                    # need to use centos_master.yaml devops template
                    hostname = ''.join((settings.FUEL_MASTER_HOSTNAME,
                                        settings.DNS_SUFFIX))
                    self.centos_setup_fuel(hostname)
                else:
                    raise RuntimeError(
                        "No Fuel master nodes found!")

            self.env.make_snapshot("empty", is_make=True)
            self.env.resume_environment()
            return True

    def get_ready_release(self):
        """Make changes in release configuration."""

        logger.info("Getting ready release")
        if self.check_run("ready"):
            self.env.revert_snapshot("ready")
            logger.info("Got ready release from snapshot")
            return True
        elif self.get_ready_setup():
            self.env.sync_time()
            self.fuel_web.get_nailgun_version()
            self.fuel_web.change_default_network_settings()

            if (settings.REPLACE_DEFAULT_REPOS and
                    settings.REPLACE_DEFAULT_REPOS_ONLY_ONCE):
                self.fuel_web.replace_default_repos()

            self.env.make_snapshot("ready", is_make=True)
            self.env.resume_environment()
            return True
        else:
            logger.error("Can't config releases setup "
                         "snapshot didn't revert")
            raise RuntimeError("Can't config releases setup "
                               "snapshot didn't revert")

    def get_ready_slaves(self, slaves=None):
        """Bootstrap slave nodes."""

        logger.info("Getting ready slaves")
        if not slaves:
            if hasattr(self._context, 'cluster_config'):
                slaves = len(self._context.cluster_config.get('nodes'))
            elif self.full_config:
                slaves = int(self.full_config['template']['slaves'])
            else:
                logger.error("Unable to count slaves")
                raise RuntimeError("Unable to count slaves")
        snapshot_name = "ready_with_{}_slaves".format(slaves)
        if self.check_run(snapshot_name):
            self.env.revert_snapshot(snapshot_name)
            logger.info("Got ready slaves from snapshot")
            return True
        elif self.get_ready_release():
            self.env.sync_time()
            logger.info("Bootstrap {} nodes".format(slaves))
            self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:slaves],
                                     skip_timesync=True)
            self.env.make_snapshot(snapshot_name, is_make=True)
            self.env.resume_environment()
            return True
        logger.error(
            "Can't bootstrap nodes because release snapshot didn't revert")
        raise RuntimeError(
            "Can't bootstrap nodes because release snapshot didn't revert")

    def get_ready_cluster(self, config=None):
        """Create and deploy cluster."""

        logger.info("Getting deployed cluster")
        config = config or self._context.cluster_config or None
        if config:
            self._cluster_from_config(config=config)
        else:
            self._cluster_from_template()

    def show_step(self, step, details='', initialize=False):
        """Show a description of the step taken from the docstring.

        :param int/str step: step number to show
        :param str details: additional info for a step
        """
        test_func = self._context._current_test
        test_func_name = test_func.__name__

        if initialize or step == 1:
            self.current_log_step = step
        else:
            self.current_log_step += 1
            if self.current_log_step != step:
                error_message = 'The step {} should be {} at {}'
                error_message = error_message.format(
                    step,
                    self.current_log_step,
                    test_func_name
                )
                logger.error(error_message)

        docstring = test_func.__doc__
        docstring = '\n'.join([s.strip() for s in docstring.split('\n')])
        steps = {s.split('. ')[0]: s for s in
                 docstring.split('\n') if s and s[0].isdigit()}
        if details:
            details_msg = ': {0} '.format(details)
        else:
            details_msg = ''
        if str(step) in steps:
            logger.info("\n" + " " * 55 + "<<< {0} {1}>>>"
                        .format(steps[str(step)], details_msg))
        else:
            logger.info("\n" + " " * 55 + "<<< {0}. (no step description "
                        "in scenario) {1}>>>".format(str(step), details_msg))

    def make_diagnostic_snapshot(self, status, name):
        self.env.resume_environment()
        create_diagnostic_snapshot(self.env, status, name)

    def save_env_snapshot(self, name):
        self.env.make_snapshot(name, is_make=True)
@@ -1,193 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division

import time

import pytest

from fuel_tests.models.manager import Manager

from fuelweb_test import logger
from fuelweb_test import settings

from system_test.core.discover import config_filter


# pylint: disable=no-member


@pytest.fixture(scope='session')
def config_file(request):
    """Fixture which provides the config for a test."""
    template = settings.FUELQA_TEMPLATE
    if template:
        return config_filter([template])[template]
    else:
        return None


@pytest.fixture(scope='class', autouse=True)
def manager(request, config_file):
    """Fixture which links a Manager instance to each test class."""
    manager = Manager(config_file, request.cls)
    request.cls.manager = manager
    request.cls._storage = dict()
    request.cls._logger = logger

    def get_env(self):
        return self.manager.env

    request.cls.env = property(get_env)
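
    # Usage note: because this fixture is autouse and class-scoped, every
    # test class in this package gets ``self.manager`` plus a ``self.env``
    # property for free -- see TestAdminNode in test_admin_node.py below,
    # which uses self.env without requesting any fixture explicitly.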


@pytest.fixture(scope='function', autouse=True)
def snapshot(request):
    """Fixture which provides collection of artifacts after a test.

    Markers:
        get_logs - create a diagnostic snapshot with logs
        fail_snapshot - create an environment snapshot

    Example:

        @pytest.mark.get_logs
        @pytest.mark.fail_snapshot
        def test_ha_deploy():
            pass
    """
    get_logs = request.keywords.get('get_logs', None)
    fail_snapshot = request.keywords.get('fail_snapshot', None)

    def test_fin():
        if request.node.rep_setup.failed:
            if get_logs:
                request.instance.manager.make_diagnostic_snapshot(
                    status="prepare_failed",
                    name=request.node.function.__name__)
            if fail_snapshot:
                request.instance.manager.save_env_snapshot(
                    name="prep_fail_{}".format(request.node.function.__name__))
        elif request.node.rep_call.passed:
            if get_logs:
                request.instance.manager.make_diagnostic_snapshot(
                    status="test_pass",
                    name=request.node.function.__name__)
        elif request.node.rep_call.failed:
            if get_logs:
                request.instance.manager.make_diagnostic_snapshot(
                    status="test_failed",
                    name=request.node.function.__name__)
            if fail_snapshot:
                request.instance.manager.save_env_snapshot(
                    name="fail_{}".format(request.node.function.__name__))

    request.addfinalizer(test_fin)


@pytest.fixture(scope='function', autouse=True)
def prepare(request, snapshot):
    """Fixture for preparing the environment for a test.

    Provides four marker behaviours:
        need_ready_cluster marker if the test needs an already deployed cluster
        need_ready_slaves marker if the test needs already provisioned slaves
        need_ready_release marker if the test needs an already configured release
        need_ready_master marker if the test needs an already installed master node

    Example:

        @pytest.mark.need_ready_cluster
        def test_ha_deploy():
            pass

        @pytest.mark.need_ready_slaves
        def test_ha_deploy():
            pass

    """
    need_ready_cluster = request.keywords.get('need_ready_cluster', None)
    need_ready_slaves = request.keywords.get('need_ready_slaves', None)
    need_ready_release = request.keywords.get('need_ready_release', None)
    need_ready_master = request.keywords.get('need_ready_master', None)
    if need_ready_cluster:
        request.instance.manager.get_ready_cluster()
    elif need_ready_slaves:
        request.instance.manager.get_ready_slaves()
    elif need_ready_release:
        request.instance.manager.get_ready_release()
    elif need_ready_master:
        request.instance.manager.get_ready_setup()


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach the test result to each test item."""
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()

    # set a report attribute for each phase of a call, which can
    # be "setup", "call", "teardown"

    setattr(item, "rep_" + rep.when, rep)
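
# Note: after this hook runs, every test item carries ``rep_setup``,
# ``rep_call`` and ``rep_teardown`` report objects; the ``snapshot``
# fixture above inspects them to decide which artifacts to collect.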
|
||||
|
||||
|
||||
test_names = set()
|
||||
test_groups = []
|
||||
|
||||
|
||||
@pytest.hookimpl()
|
||||
def pytest_collection_finish(session):
|
||||
def _get_groups(kws):
|
||||
return (
|
||||
kw for kw, val in kws.keywords.items() if hasattr(val, 'name'))
|
||||
|
||||
# pylint: disable=global-statement
|
||||
global test_names
|
||||
global test_groups
|
||||
# pylint: enable=global-statement
|
||||
|
||||
test_groups = [{tuple(_get_groups(kws)): kws} for kws in session.items]
|
||||
|
||||
test_names = {kw for kws in session.items for kw in _get_groups(kws)}
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
"""Hook which run before test start."""
|
||||
item.cls._current_test = item.function
|
||||
item._start_time = time.time()
|
||||
head = "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5
|
||||
head = head.format(item.function.__name__)
|
||||
steps = ''.join(item.function.__doc__)
|
||||
start_step = "\n{head}\n{steps}".format(head=head, steps=steps)
|
||||
logger.info(start_step)
|
||||
|
||||
|
||||
def pytest_runtest_teardown(item):
|
||||
"""Hook which run after test."""
|
||||
step_name = item.function.__name__
|
||||
if hasattr(item, '_start_time'):
|
||||
spent_time = time.time() - item._start_time
|
||||
else:
|
||||
spent_time = 0
|
||||
minutes = spent_time // 60
|
||||
# pylint: disable=round-builtin
|
||||
seconds = int(round(spent_time)) % 60
|
||||
# pylint: enable=round-builtin
|
||||
finish_step = "FINISH {} TEST. TOOK {} min {} sec".format(
|
||||
step_name, minutes, seconds)
|
||||
foot = "\n" + "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5
|
||||
foot = foot.format(finish_step)
|
||||
logger.info(foot)
|
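For orientation, here is a minimal sketch (a hypothetical test module, not from the repository) of how the pieces above combine: the marker tells the autouse prepare() fixture what to set up, while the hookwrapper attaches rep_setup/rep_call to the item so the snapshot finalizer can decide what to collect. The base class providing self.manager is assumed.

import pytest


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
class TestExample(object):

    def test_cluster_is_operational(self):
        # 'need_ready_cluster' made prepare() call get_ready_cluster()
        # before this body runs; an assertion failure here sets
        # item.rep_call.failed, which triggers the "test_failed"
        # diagnostic snapshot in test_fin().
        assert self.manager.fuel_web is not None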
@ -1,91 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import division

import pytest

from devops.helpers.helpers import http
from devops.helpers.helpers import wait

from fuelweb_test import logger
from fuelweb_test.helpers.ssh_manager import SSHManager
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
from six.moves.xmlrpc_client import ServerProxy
# pylint: enable=import-error

# pylint: disable=no-member
# pylint: disable=no-self-use
ssh_manager = SSHManager()


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_master
@pytest.mark.thread_1
class TestAdminNode(object):
    """TestAdminNode."""  # TODO documentation

    @pytest.mark.test_cobbler_alive
    def test_cobbler_alive(self):
        """Test that the current installation has correctly set up Cobbler

        API and Cobbler HTTP server are alive

        Scenario:
            1. Revert snapshot "empty"
            2. Test Cobbler API and HTTP server by sending HTTP requests

        Duration 1m

        """
        wait(
            lambda: http(host=self.env.get_admin_node_ip(), url='/cobbler_api',
                         waited_code=501),
            timeout=60
        )
        server = ServerProxy(
            'http://%s/cobbler_api' % self.env.get_admin_node_ip())

        config = self.env.admin_actions.get_fuel_settings()
        username = config['cobbler']['user']
        password = config['cobbler']['password']

        # raises an error if something isn't right
        server.login(username, password)

    @pytest.mark.test_astuted_alive
    def test_astuted_alive(self):
        """Test astute master and worker processes are alive on master node

        Scenario:
            1. Revert snapshot "empty"
            2. Search for master and child processes

        Duration 1m

        """
        ps_output = ssh_manager.execute(
            ssh_manager.admin_ip, 'ps ax')['stdout']
        astute_master = [
            master for master in ps_output if 'astute master' in master]
        logger.info("Found astute processes: {:s}".format(astute_master))
        assert len(astute_master) == 1
        astute_workers = [
            worker for worker in ps_output if 'astute worker' in worker]
        logger.info(
            "Found {length:d} astute worker processes: {workers!s}"
            "".format(length=len(astute_workers), workers=astute_workers))
        assert len(astute_workers) > 1
@ -1,115 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.ssh_manager import SSHManager


# pylint: disable=no-member


ssh_manager = SSHManager()


# noinspection PyUnresolvedReferences
class TestCephRadosGW(object):
    """Test class containing the tests for a cluster with Ceph and RadosGW"""

    # This cluster config is used for all tests in this class
    cluster_config = {
        'name': "TestCephRadosGW",
        'mode': settings.DEPLOYMENT_MODE,
        'settings': {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'tenant': 'rados',
            'user': 'rados',
            'password': 'rados'
        },
        'nodes': {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute', 'ceph-osd'],
            'slave-05': ['compute', 'ceph-osd'],
            'slave-06': ['compute', 'ceph-osd']
        }
    }

    @pytest.mark.get_logs
    @pytest.mark.fail_snapshot
    @pytest.mark.need_ready_cluster
    @pytest.mark.pytest_bvt_2
    def test_ceph_rados_gw(self):
        """Deploy ceph HA with RadosGW for objects

        Scenario:
            1. Create cluster with Neutron
            2. Add 3 nodes with controller role
            3. Add 3 nodes with compute and ceph-osd role
            4. Deploy the cluster
            5. Network check
            6. Check HAProxy backends
            7. Check ceph status
            8. Run OSTF tests
            9. Check the radosgw daemon is started

        Duration 90m

        """

        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        self.manager.show_step(5)

        # HAProxy backend checking
        self.manager.show_step(6)
        fuel_web = self.manager.fuel_web
        controller_nodes = fuel_web.get_nailgun_cluster_nodes_by_roles(
            self._storage['cluster_id'], ['controller'])

        for node in controller_nodes:
            logger.info("Check all HAProxy backends on {}".format(
                node['meta']['system']['fqdn']))
            haproxy_status = checkers.check_haproxy_backend(node['ip'])
            msg = "HAProxy backends are DOWN. {0}".format(haproxy_status)
            assert haproxy_status['exit_code'] == 1, msg

        self.manager.show_step(7)
        fuel_web.check_ceph_status(self._storage['cluster_id'])

        self.manager.show_step(8)
        # Run ostf
        fuel_web.run_ostf(cluster_id=self._storage['cluster_id'],
                          test_sets=['ha', 'smoke', 'sanity'])

        self.manager.show_step(9)
        # Check the radosgw daemon is started
        for node in controller_nodes:
            logger.info("Check radosgw daemon is started on {}".format(
                node['meta']['system']['fqdn']))

            cmd = "pkill -0 radosgw"
            ip = node['ip']
            err_msg = "radosgw daemon not started on {}".format(
                node['meta']['system']['fqdn'])
            ssh_manager.execute_on_remote(ip=ip, cmd=cmd, err_msg=err_msg)
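The `pkill -0 radosgw` check above relies on signal 0: no signal is actually delivered, but the matching and permission checks still run, so the command exits 0 exactly when a matching process exists. A standalone sketch of the same liveness check, assuming a local shell instead of the SSHManager:

import subprocess


def process_alive(name):
    # 'pkill -0' matches processes by name and sends signal 0:
    # nothing is killed, but the exit code reports whether a match exists.
    return subprocess.run(['pkill', '-0', name]).returncode == 0


if process_alive('radosgw'):
    print('radosgw daemon is running')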
@ -1,193 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import division

import re

import pytest

from fuelweb_test import settings
from fuelweb_test import logger
from fuelweb_test.helpers.eb_tables import Ebtables

# pylint: disable=no-member


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestNodeDiskSizes(object):
    """TestNodeDiskSizes."""  # TODO documentation

    cluster_config = {
        'name': "TestNodeDiskSizes",
        'mode': settings.DEPLOYMENT_MODE,
        'nodes': {
            'slave-01': ['controller'],
            'slave-02': ['compute'],
            'slave-03': ['cinder']
        }
    }

    @pytest.mark.need_ready_slaves
    @pytest.mark.check_nodes_notifications
    def test_check_nodes_notifications(self):
        """Verify nailgun notifications for discovered nodes

        Scenario:
            1. Setup master and bootstrap 3 slaves
            2. Verify hard drive sizes for discovered nodes in /api/nodes
            3. Verify hard drive sizes for discovered nodes in notifications

        Duration 5m

        """
        # self.env.revert_snapshot("ready_with_3_slaves")
        fuel_web = self.manager.fuel_web
        # assert /api/nodes
        disk_size = settings.NODE_VOLUME_SIZE * 1024 ** 3
        nailgun_nodes = fuel_web.client.list_nodes()
        for node in nailgun_nodes:
            for disk in node['meta']['disks']:
                assert disk['size'] == disk_size, 'Disk size'

        hdd_size = "{0:.3} TB HDD".format((disk_size * 3 / (10 ** 9)) / 1000)
        notifications = fuel_web.client.get_notifications()

        for node in nailgun_nodes:
            # assert /api/notifications
            for notification in notifications:
                discover = notification['topic'] == 'discover'
                current_node = notification['node_id'] == node['id']
                if current_node and discover and \
                        "discovered" in notification['message']:
                    assert hdd_size in notification['message'], (
                        '"{size}" not found in notification message '
                        '"{note}" for node {node} '
                        '(hostname {host})!'.format(
                            size=hdd_size,
                            note=notification['message'],
                            node=node['name'],
                            host=node['hostname']))

            # assert disks
            disks = fuel_web.client.get_node_disks(node['id'])
            for disk in disks:
                expected_size = settings.NODE_VOLUME_SIZE * 1024 - 500
                assert disk['size'] == expected_size, (
                    'Disk size {0} does not equal the expected {1}'.format(
                        disk['size'], expected_size))

    @pytest.mark.check_nodes_disks
    @pytest.mark.need_ready_cluster
    def test_check_nodes_disks(self):
        """Verify hard drive sizes for deployed nodes

        Scenario:
            1. Create cluster
            2. Add 1 controller
            3. Add 1 compute
            4. Add 1 cinder
            5. Deploy cluster
            6. Verify hard drive sizes for deployed nodes
            7. Run network verify
            8. Run OSTF

        Duration 15m
        """

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        self.manager.show_step(5)
        self.manager.show_step(6)
        # assert node disks after deployment
        for node_name in self.cluster_config['nodes']:
            str_block_devices = fuel_web.get_cluster_block_devices(
                node_name)

            logger.debug("Block device:\n{}".format(str_block_devices))

            expected_regexp = re.compile(
                "vda\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(
                    settings.NODE_VOLUME_SIZE))
            assert expected_regexp.search(str_block_devices), (
                "Unable to find vda block device for {}G in: {}".format(
                    settings.NODE_VOLUME_SIZE, str_block_devices))

            expected_regexp = re.compile(
                "vdb\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(
                    settings.NODE_VOLUME_SIZE))
            assert expected_regexp.search(str_block_devices), (
                "Unable to find vdb block device for {}G in: {}".format(
                    settings.NODE_VOLUME_SIZE, str_block_devices))

            expected_regexp = re.compile(
                "vdc\s+\d+:\d+\s+0\s+{}G\s+0\s+disk".format(
                    settings.NODE_VOLUME_SIZE))
            assert expected_regexp.search(str_block_devices), (
                "Unable to find vdc block device for {}G in: {}".format(
                    settings.NODE_VOLUME_SIZE, str_block_devices))

        self.manager.show_step(7)
        fuel_web.verify_network(cluster_id)

        self.manager.show_step(8)
        fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['ha', 'smoke', 'sanity'])


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestMultinicBootstrap(object):
    """MultinicBootstrap."""  # TODO documentation

    @pytest.mark.multinic_bootstrap_booting
    @pytest.mark.need_ready_release
    @pytest.mark.check_nodes_disks
    def test_multinic_bootstrap_booting(self):
        """Verify slaves booting with blocked mac address

        Scenario:
            1. Revert snapshot "ready"
            2. Block traffic for first slave node (by mac)
            3. Restore mac addresses and boot first slave
            4. Verify that the slave MAC address equals the unblocked one

        Duration 2m

        """
        slave = self.env.d_env.get_node(name='slave-01')
        mac_addresses = [interface.mac_address for interface in
                         slave.interfaces.filter(network__name='internal')]
        try:
            for mac in mac_addresses:
                Ebtables.block_mac(mac)
            for mac in mac_addresses:
                Ebtables.restore_mac(mac)
                slave.destroy()
                self.env.d_env.get_node(name='admin').revert("ready")
                nailgun_slave = self.env.bootstrap_nodes([slave])[0]
                assert mac.upper() == nailgun_slave['mac'].upper()
                Ebtables.block_mac(mac)
        finally:
            for mac in mac_addresses:
                Ebtables.restore_mac(mac)
@ -1,244 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest

from devops.helpers.helpers import get_admin_remote
from devops.helpers.helpers import icmp_ping
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import wait

from fuelweb_test import logger
from fuelweb_test import settings

# pylint: disable=no-member


@pytest.fixture(scope='function')
def fuel_master_migration(request):
    """Fixture which migrates the Fuel Master to a compute"""

    instance = request.node.instance
    cluster_id = instance._storage['cluster_id']
    instance.start_fuel_migration()
    instance.check_migration_status()
    instance.run_checkers()
    instance.manager.fuel_web.verify_network(cluster_id)
    instance.manager.fuel_web.run_ostf(cluster_id=cluster_id)


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.fuel_master_migrate
class TestFuelMasterMigrate(object):

    compute = None
    cluster_config = {
        'name': "FuelMasterMigrate",
        'mode': settings.DEPLOYMENT_MODE,
        'nodes': {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
        }
    }

    @pytest.mark.need_ready_cluster
    @pytest.mark.usefixtures("fuel_master_migration")
    @pytest.mark.test_fuel_master_migrate
    def test_fuel_master_migrate(self):
        """Fuel master migration to VM

        Scenario:
            1. Create environment with two computes and three controllers
            2. Run network checker
            3. Deploy environment
            4. Run network checker
            5. Migrate Fuel Master to the compute node
            6. Run network checker
            7. Run OSTF
        """

        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        self.manager.show_step(5)
        self.manager.show_step(6)
        self.manager.show_step(7)

    @pytest.mark.need_ready_cluster
    @pytest.mark.usefixtures("fuel_master_migration")
    @pytest.mark.test_compute_hard_restart
    def test_compute_hard_restart(self):
        """Check Fuel Master node functionality after hard restart of the
        compute where the Fuel Master node is located

        Scenario:
            1. Deploy cluster with two computes and three controllers
            2. Migrate Fuel Master
            3. Hard restart for compute node where Fuel Master node was
               migrated to
            4. Reconnect to Fuel Master
            5. Check status for master's services
            6. Run OSTF
        """
        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        self.manager.show_step(1)
        self.manager.show_step(2)

        self.manager.show_step(3)
        self.compute_hard_restart()
        self.node_rebooted(self.env.get_admin_node_ip())

        self.manager.show_step(4)
        self.run_checkers()

        self.manager.show_step(5)
        fuel_web.verify_network(cluster_id)

        self.manager.show_step(6)
        fuel_web.run_ostf(cluster_id=cluster_id)

    @pytest.mark.need_ready_cluster
    @pytest.mark.usefixtures("fuel_master_migration")
    @pytest.mark.test_compute_warm_restart
    def test_compute_warm_restart(self):
        """Check Fuel Master node functionality after warm restart of the
        compute where the Fuel Master node is located

        Scenario:
            1. Deploy cluster with two computes and three controllers
            2. Migrate Fuel Master
            3. Warm restart for compute node where Fuel Master node was
               migrated to
            4. Reconnect to Fuel Master
            5. Check status for master's services
            6. Run OSTF
        """

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        self.manager.show_step(1)
        self.manager.show_step(2)

        self.manager.show_step(3)
        self.compute_warm_restart()
        self.node_rebooted(self.env.get_admin_node_ip())

        self.manager.show_step(4)
        self.run_checkers()

        self.manager.show_step(5)
        fuel_web.verify_network(cluster_id)

        self.manager.show_step(6)
        fuel_web.run_ostf(cluster_id=cluster_id)

    def start_fuel_migration(self):
        """Migrate Fuel Master to a compute"""

        # Get a compute to migrate Fuel Master to
        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web
        self.compute = fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        logger.info(
            'Fuel Master will be migrated to {0} '
            'compute'.format(self.compute['name']))

        # Start migrating Fuel Master
        with self.env.d_env.get_admin_remote() as remote:
            remote.execute('fuel-migrate {0} >/dev/null &'.
                           format(self.compute['ip']))

    def check_migration_status(self):
        """Periodically check the status of the Fuel Master migration process"""

        logger.info(
            'Rebooting to begin the data sync process for fuel migrate')
        self.node_rebooted(self.env.get_admin_node_ip())

        logger.info('Fuel Master is migrating..')
        self.node_rebooted(self.env.get_admin_node_ip(), interval=0.5,
                           timeout=60 * 45)

        logger.info('Waiting for appearance of /tmp/migration-done file..')
        with get_admin_remote(self.env.d_env) as remote:
            wait(lambda: remote.exists("/tmp/migration-done"),
                 timeout=60 * 5,
                 timeout_msg="File /tmp/migration-done did not appear")

    @staticmethod
    def node_rebooted(ip, interval=5, timeout=60 * 15):
        wait(lambda: not icmp_ping(ip), interval=interval, timeout=timeout,
             timeout_msg=("Node with ip: {} has not become offline after "
                          "starting reboot").format(ip))
        wait(lambda: icmp_ping(ip), interval=interval, timeout=timeout,
             timeout_msg="Node with ip: {} has not become online "
                         "after reboot".format(ip))

    def wait_nailgun_nodes(self):
        """Wait for cluster nodes online state in nailgun"""

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        fuel_web.wait_cluster_nodes_get_online_state(cluster_id)

    def wait_mcollective_nodes(self):
        """Wait for mcollective online status of cluster nodes"""

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        wait(lambda: fuel_web.mcollective_nodes_online(cluster_id),
             timeout=60 * 5, timeout_msg="Cluster nodes don't become available"
                                         " via mcollective in allotted time.")

    def wait_nailgun_available(self):
        """Check status for Nailgun"""

        fuel_web = self.manager.fuel_web

        wait_pass(fuel_web.get_nailgun_version,
                  timeout=60 * 20)

    def compute_hard_restart(self):
        """Hard restart the compute with the Fuel Master node"""

        fuel_web = self.manager.fuel_web
        fuel_web.cold_restart_nodes(
            [fuel_web.get_devops_node_by_nailgun_node(self.compute)],
            wait_offline=False, wait_online=False, wait_after_destroy=5
        )

    def compute_warm_restart(self):
        """Warm restart of the compute with the Fuel Master node"""

        logger.debug('Reboot (warm restart) ip {0}'.format(self.compute['ip']))
        with self.env.d_env.get_ssh_to_remote(self.compute['ip']) as remote:
            remote.execute('/sbin/shutdown -r now')

    def run_checkers(self):
        """Run set of checkers"""

        self.wait_nailgun_available()
        self.wait_mcollective_nodes()
        self.wait_nailgun_nodes()
@ -1,153 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest

from fuelweb_test import settings
from fuelweb_test.settings import iface_alias

# pylint: disable=no-member


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestL2NetworkConfig(object):
    """TestL2NetworkConfig."""  # TODO documentation

    cluster_config = {
        'name': "TestL2NetworkConfig",
        'mode': settings.DEPLOYMENT_MODE,
        'nodes': {
            'slave-01': ['controller'],
            'slave-02': ['compute'],
            'slave-03': ['cinder']
        }
    }

    @pytest.mark.need_ready_slaves
    @pytest.mark.deploy_node_multiple_interfaces
    def test_deploy_node_multiple_interfaces(self):
        """Deploy cluster with networks allocated on different interfaces

        Scenario:
            1. Create cluster in HA mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Deploy the cluster
            7. Verify network configuration on each deployed node
            8. Run network verification

        Duration 25m
        Snapshot: deploy_node_multiple_interfaces

        """
        # self.env.revert_snapshot("ready_with_3_slaves")

        fuel_web = self.manager.fuel_web
        interfaces_dict = {
            iface_alias('eth0'): ['fuelweb_admin'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['storage'],
            iface_alias('eth3'): ['private'],
            iface_alias('eth4'): ['management'],
        }
        self.manager.show_step(1)
        cluster_id = fuel_web.create_cluster(
            name=self.cluster_config['name'],
            mode=self.cluster_config['mode'],
        )
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        fuel_web.update_nodes(
            cluster_id,
            self.cluster_config['nodes']
        )
        self.manager.show_step(5)
        nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            fuel_web.update_node_networks(node['id'], interfaces_dict)

        self.manager.show_step(6)
        fuel_web.deploy_cluster_wait(cluster_id)

        self.manager.show_step(7)
        fuel_web.verify_network(cluster_id)

    @pytest.mark.skip(reason="Disabled in fuelweb_test")
    @pytest.mark.untagged_networks_negative
    @pytest.mark.need_ready_slaves
    def test_untagged_networks_negative(self):
        """Verify network verification fails with untagged network on eth0

        Scenario:
            1. Create cluster in ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Remove VLAN tagging from networks which are on eth0
            7. Run network verification (assert it fails)
            8. Start cluster deployment (assert it fails)

        Duration 30m

        """
        fuel_web = self.manager.fuel_web
        vlan_turn_off = {'vlan_start': None}
        interfaces = {
            iface_alias('eth0'): ["fixed"],
            iface_alias('eth1'): ["public"],
            iface_alias('eth2'): ["management", "storage"],
            iface_alias('eth3'): []
        }

        self.manager.show_step(1)
        cluster_id = fuel_web.create_cluster(
            name=self.cluster_config['name'],
            mode=self.cluster_config['mode'],
        )
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        fuel_web.update_nodes(
            cluster_id,
            self.cluster_config['nodes']
        )

        self.manager.show_step(5)
        nets = fuel_web.client.get_networks(cluster_id)['networks']
        nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            fuel_web.update_node_networks(node['id'], interfaces)

        self.manager.show_step(6)
        # select networks that will be untagged:
        for net in nets:
            net.update(vlan_turn_off)

        # stop using VLANs:
        fuel_web.client.update_network(cluster_id, networks=nets)

        self.manager.show_step(7)
        # run network check:
        fuel_web.verify_network(cluster_id, success=False)

        self.manager.show_step(8)
        # deploy cluster:
        task = fuel_web.deploy_cluster(cluster_id)
        fuel_web.assert_task_failed(task)
@ -1,261 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.ssh_manager import SSHManager

# pylint: disable=no-member
ssh_manager = SSHManager()


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
@pytest.mark.ha_neutron
class TestNeutronTunHa(object):
    """NeutronTunHa.

    Old groups: ha_neutron, neutron, ha, classic_provisioning
    """  # TODO documentation

    cluster_config = {
        "name": "NeutronTunHa",
        "mode": settings.DEPLOYMENT_MODE,
        "settings": {
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'tenant': 'haTun',
            'user': 'haTun',
            'password': 'haTun'
        },
        "nodes": {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute']
        }
    }

    @pytest.mark.deploy_neutron_gre_ha
    @pytest.mark.ha_neutron_gre
    def test_deploy_neutron_gre_ha(self):
        """Deploy cluster in HA mode with Neutron TUN

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 2 nodes with compute role
            4. Deploy the cluster
            5. Run network verification
            6. Run OSTF

        Duration 80m
        Snapshot deploy_neutron_gre_ha

        """
        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        self.manager.show_step(5)

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web
        cluster = fuel_web.client.get_cluster(cluster_id)
        assert str(cluster['net_provider']) == settings.NEUTRON

        devops_node = fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        logger.debug("devops node name is {0}".format(devops_node.name))

        _ip = fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
        for _ in range(5):
            try:
                checkers.check_swift_ring(_ip)
                break
            except AssertionError:
                cmd = "/usr/local/bin/swift-rings-rebalance.sh"
                result = ssh_manager.execute(ip=_ip, cmd=cmd)
                logger.debug("command execution result is {0}"
                             .format(result['exit_code']))
        else:
            checkers.check_swift_ring(_ip)

        self.manager.show_step(6)
        fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['ha', 'smoke', 'sanity'])


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
@pytest.mark.ha_neutron
class TestNeutronVlanHa(object):
    """NeutronVlanHa.


    Old groups: neutron, ha, ha_neutron

    """  # TODO documentation

    cluster_config = {
        "name": "NeutronVlanHa",
        "mode": settings.DEPLOYMENT_MODE,
        "settings": {
            "net_provider": settings.NEUTRON,
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            'tenant': 'haVlan',
            'user': 'haVlan',
            'password': 'haVlan'
        },
        "nodes": {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute']
        }
    }

    @pytest.mark.deploy_neutron_vlan_ha
    @pytest.mark.neutron_vlan_ha
    def test_deploy_neutron_vlan_ha(self):
        """Deploy cluster in HA mode with Neutron VLAN

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 2 nodes with compute role
            4. Deploy the cluster
            5. Run network verification
            6. Run OSTF

        Duration 80m
        Snapshot deploy_neutron_vlan_ha

        """
        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        self.manager.show_step(5)

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        cluster = fuel_web.client.get_cluster(cluster_id)
        assert str(cluster['net_provider']) == settings.NEUTRON

        os_conn = os_actions.OpenStackActions(
            fuel_web.get_public_vip(cluster_id),
            user=self.cluster_config['settings']['user'],
            passwd=self.cluster_config['settings']['password'],
            tenant=self.cluster_config['settings']['tenant'])

        fuel_web.check_fixed_network_cidr(
            cluster_id, os_conn)

        fuel_web.verify_network(cluster_id)
        devops_node = fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        logger.debug("devops node name is {0}".format(devops_node.name))

        _ip = fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
        for _ in range(5):
            try:
                checkers.check_swift_ring(_ip)
                break
            except AssertionError:
                cmd = "/usr/local/bin/swift-rings-rebalance.sh"
                result = ssh_manager.execute(ip=_ip, cmd=cmd)
                logger.debug("command execution result is {0}"
                             .format(result['exit_code']))
        else:
            checkers.check_swift_ring(_ip)

        self.manager.show_step(6)
        fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
@pytest.mark.thread_1
@pytest.mark.neutron
class TestNeutronVlan(object):
    """NeutronVlan."""  # TODO documentation

    cluster_config = {
        "name": "NeutronVlan",
        "mode": settings.DEPLOYMENT_MODE,
        "settings": {
            "net_provider": settings.NEUTRON,
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            'tenant': 'simpleVlan',
            'user': 'simpleVlan',
            'password': 'simpleVlan'
        },
        "nodes": {
            'slave-01': ['controller'],
            'slave-02': ['compute'],
            'slave-03': ['compute']
        }
    }

    @pytest.mark.deploy_neutron_vlan
    @pytest.mark.ha_one_controller_neutron_vlan
    @pytest.mark.deployment
    @pytest.mark.nova
    @pytest.mark.nova_compute
    def test_deploy_neutron_vlan(self):
        """Deploy cluster in ha mode with 1 controller and Neutron VLAN

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 2 nodes with compute role
            4. Deploy the cluster
            5. Run network verification
            6. Run OSTF

        Duration 35m
        Snapshot deploy_neutron_vlan

        """
        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)

        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        cluster = fuel_web.client.get_cluster(cluster_id)
        assert str(cluster['net_provider']) == settings.NEUTRON

        self.manager.show_step(4)
        fuel_web.verify_network(cluster_id)

        self.manager.show_step(5)
        fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_vlan", is_make=True)
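The swift ring checks above lean on Python's for/else: the else branch runs only when the loop finishes without hitting break, so after five failed repair attempts the final check_swift_ring call runs once more and lets the AssertionError propagate. A distilled sketch of the pattern, with hypothetical check/repair callables:

def retry_with_repair(check, repair, attempts=5):
    # for/else: the 'else' suite executes only if no 'break' occurred,
    # i.e. every attempt failed and was followed by a repair.
    for _ in range(attempts):
        try:
            check()
            break
        except AssertionError:
            repair()
    else:
        check()  # final attempt; re-raises the AssertionError on failure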
@ -1,246 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

import pytest
from paramiko import ChannelException
from devops.helpers.ssh_client import SSHAuth
from devops.helpers.helpers import wait

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers import os_actions

cirros_auth = SSHAuth(**settings.SSH_IMAGE_CREDENTIALS)

# pylint: disable=no-member


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.need_ready_cluster
@pytest.mark.neutron
@pytest.mark.thread_1
class TestNeutronIPv6(object):
    """NeutronIPv6."""

    cluster_config = {
        "name": "NeutronVlan",
        "mode": settings.DEPLOYMENT_MODE,
        "settings": {
            "net_provider": settings.NEUTRON,
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            'tenant': 'simpleVlan',
            'user': 'simpleVlan',
            'password': 'simpleVlan'
        },
        "nodes": {
            'slave-01': ['controller'],
            'slave-02': ['compute'],
            'slave-03': ['compute']
        }
    }

    @pytest.mark.deploy_neutron_ip_v6
    @pytest.mark.nova
    @pytest.mark.nova_compute
    @pytest.mark.neutron_ipv6
    def test_deploy_neutron_ip_v6(self):
        """Check IPv6-only functionality for Neutron VLAN

        Scenario:
            1. Revert deploy_neutron_vlan snapshot
            2. Create two dual-stack networks with IPv6 subnets
               (should be in SLAAC mode,
               address spaces should not intersect).
            3. Create a virtual router and set the gateway.
            4. Attach these subnets to the router.
            5. Create a Security Group
               that allows SSH and ICMP for both IPv4 and IPv6.
            6. Launch two instances, one on each network.
            7. Lease a floating IP.
            8. Attach a floating IP to the main instance.
            9. SSH to the main instance and ping6 the other instance.

        Duration 10m
        Snapshot deploy_neutron_ip_v6

        """
        self.manager.show_step(1)
        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web
        public_vip = fuel_web.get_public_vip(cluster_id)
        logger.info('Public vip is %s', public_vip)

        os_conn = os_actions.OpenStackActions(
            controller_ip=public_vip,
            user='simpleVlan',
            passwd='simpleVlan',
            tenant='simpleVlan'
        )

        tenant = os_conn.get_tenant('simpleVlan')

        self.manager.show_step(2)
        net1 = os_conn.create_network(
            network_name='net1',
            tenant_id=tenant.id)['network']
        net2 = os_conn.create_network(
            network_name='net2',
            tenant_id=tenant.id)['network']

        subnet_1_v4 = os_conn.create_subnet(
            subnet_name='subnet_1_v4',
            network_id=net1['id'],
            cidr='192.168.100.0/24',
            ip_version=4)

        subnet_1_v6 = os_conn.create_subnet(
            subnet_name='subnet_1_v6',
            network_id=net1['id'],
            ip_version=6,
            cidr="2001:db8:100::/64",
            gateway_ip="2001:db8:100::1",
            ipv6_ra_mode="slaac",
            ipv6_address_mode="slaac")

        subnet_2_v4 = os_conn.create_subnet(
            subnet_name='subnet_2_v4',
            network_id=net2['id'],
            cidr='192.168.200.0/24',
            ip_version=4)

        subnet_2_v6 = os_conn.create_subnet(
            subnet_name='subnet_2_v6',
            network_id=net2['id'],
            ip_version=6,
            cidr="2001:db8:200::/64",
            gateway_ip="2001:db8:200::1",
            ipv6_ra_mode="slaac",
            ipv6_address_mode="slaac")

        self.manager.show_step(3)
        router = os_conn.create_router('test_router', tenant=tenant)

        self.manager.show_step(4)
        os_conn.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_1_v4["id"])

        os_conn.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_1_v6["id"])

        os_conn.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_2_v4["id"])

        os_conn.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_2_v6["id"])

        self.manager.show_step(5)
        security_group = os_conn.create_sec_group_for_ssh()

        self.manager.show_step(6)
        instance1 = os_conn.create_server(
            name='instance1',
            security_groups=[security_group],
            net_id=net1['id'],
        )

        instance2 = os_conn.create_server(
            name='instance2',
            security_groups=[security_group],
            net_id=net2['id'],
        )

        self.manager.show_step(7)
        self.manager.show_step(8)
        floating_ip = os_conn.assign_floating_ip(instance1)
        floating_ip2 = os_conn.assign_floating_ip(instance2)

        self.manager.show_step(9)

        instance1_ipv6 = [
            addr['addr'] for addr in instance1.addresses[net1['name']]
            if addr['version'] == 6].pop()

        instance2_ipv6 = [
            addr['addr'] for addr in instance2.addresses[net2['name']]
            if addr['version'] == 6].pop()

        logger.info(
            '\ninstance1:\n'
            '\tFloatingIP: {ip!s}\n'
            '\tIPv6 address: {ipv6!s}'.format(
                ip=floating_ip.ip,
                ipv6=instance1_ipv6))
        logger.info(
            '\ninstance2:\n'
            '\tFloatingIP: {ip!s}\n'
            '\tIPv6 address: {ipv6!s}'.format(
                ip=floating_ip2.ip,
                ipv6=instance2_ipv6))

        with fuel_web.get_ssh_for_node("slave-01") as remote:
            def ssh_ready(vm_host):
                try:
                    remote.execute_through_host(
                        hostname=vm_host,
                        cmd="ls -la",
                        auth=cirros_auth
                    )
                    return True
                except ChannelException:
                    return False

            for vm_host, hostname in (
                    (floating_ip.ip, instance1),
                    (floating_ip2.ip, instance2)
            ):
                wait(lambda: ssh_ready(vm_host), timeout=120,
                     timeout_msg='ssh is not ready on host '
                                 '{hostname:s} ({ip:s}) at timeout 120s'
                                 ''.format(hostname=hostname, ip=vm_host))

            res = remote.execute_through_host(
                hostname=floating_ip.ip,
                cmd="{ping:s} -q "
                    "-c{count:d} "
                    "-w{deadline:d} "
                    "-s{packetsize:d} "
                    "{dst_address:s}".format(
                        ping='ping6',
                        count=10,
                        deadline=20,
                        packetsize=1452,
                        dst_address=instance2_ipv6),
                auth=cirros_auth
            )

            logger.info(
                'Ping results: \n\t{res:s}'.format(res=res['stdout_str']))

            assert res['exit_code'] == 0, (
                'Ping failed with error code: {code:d}\n'
                '\tSTDOUT: {stdout:s}\n'
                '\tSTDERR: {stderr:s}'.format(
                    code=res['exit_code'],
                    stdout=res['stdout_str'],
                    stderr=res['stderr_str']))

        self.env.make_snapshot('deploy_neutron_ip_v6')
@ -1,92 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pytest

from fuelweb_test import settings

# pylint: disable=no-member


@pytest.mark.get_logs
@pytest.mark.fail_snapshot
@pytest.mark.thread_1
class TestHAOneControllerNeutronRestart(object):

    cluster_config = {
        'name': "TestHAOneControllerNeutronRestart",
        'mode': settings.DEPLOYMENT_MODE,
        'nodes': {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        }
    }

    @pytest.mark.need_ready_cluster
    @pytest.mark.ha_one_controller_neutron_warm_restart
    def test_ha_one_controller_neutron_warm_restart(self):
        """Warm restart for ha one controller environment

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Run network verification
            6. Run OSTF
            7. Warm restart
            8. Wait for HA services to be ready
            9. Wait for OS services to be ready
            10. Wait for Galera to be up
            11. Verify firewall rules
            12. Run network verification
            13. Run OSTF

        Duration 30m

        """
        cluster_id = self._storage['cluster_id']
        fuel_web = self.manager.fuel_web

        self.manager.show_step(1)
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)

        self.manager.show_step(5)
        fuel_web.verify_network(cluster_id)
        self.manager.show_step(6)
        fuel_web.run_ostf(cluster_id=cluster_id)

        self.manager.show_step(7)
        fuel_web.warm_restart_nodes(
            self.env.d_env.get_nodes(name__in=['slave-01', 'slave-02']))

        self.manager.show_step(8)
        fuel_web.assert_ha_services_ready(cluster_id)

        self.manager.show_step(9)
        fuel_web.assert_os_services_ready(cluster_id)

        self.manager.show_step(10)
        fuel_web.wait_mysql_galera_is_up(['slave-01'])

        self.manager.show_step(11)
        fuel_web.security.verify_firewall(cluster_id)

        self.manager.show_step(12)
        fuel_web.verify_network(cluster_id)

        self.manager.show_step(13)
        fuel_web.run_ostf(cluster_id=cluster_id)
@ -1,104 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import unicode_literals

import logging.config
import os
import warnings

from core.helpers.log_helpers import logwrap
from core.helpers.log_helpers import QuietLogger

from fuelweb_test.settings import LOGS_DIR

if not os.path.exists(LOGS_DIR):
    os.makedirs(LOGS_DIR)

_log_config = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '%(asctime)s - %(levelname)s %(filename)s:'
                      '%(lineno)d -- %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'default'
        },
        'tests_log': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'formatter': 'default',
            'filename': os.path.join(LOGS_DIR, 'sys_test.log'),
            'mode': 'w',
            'encoding': 'utf8',
        },
        'devops_log': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'formatter': 'default',
            'filename': os.path.join(LOGS_DIR, 'devops.log'),
            'mode': 'w',
            'encoding': 'utf8',
        },
        'null': {
            'level': 'CRITICAL',
            'class': 'logging.NullHandler',
        },
    },
    'loggers': {
        # Log everything to the log file, but by default only warnings.
        '': {
            'handlers': ['tests_log'],
            'level': 'WARNING',
        },
        'fuel-qa': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True
        },
        'devops': {
            'handlers': ['console', 'devops_log'],
            'level': 'DEBUG',
            'propagate': True  # Test log too
        },
        # py.warnings is changed by Django -> do not propagate
        'py.warnings': {
            'handlers': ['console', 'tests_log'],
            'level': 'WARNING',
            'propagate': False
        },
        'paramiko': {'level': 'WARNING'},
        'iso8601': {'level': 'WARNING'},
        'keystoneauth': {'level': 'WARNING'},
    }
}

logging.config.dictConfig(_log_config)
logging.captureWarnings(True)  # Log warnings
# Filter deprecation warnings: log only when deletion announced
warnings.filterwarnings(
    'default',
    message=r'.*(drop|remove)+.*',
    category=DeprecationWarning)

logger = logging.getLogger('fuel-qa.{}'.format(__name__))

__all__ = ['QuietLogger', 'logwrap', 'logger']
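A short sketch of how a module would plug into the hierarchy configured above: child loggers under the 'fuel-qa' namespace inherit its console handler at INFO, while propagation also delivers their records to the root 'tests_log' file handler at DEBUG. The exact routing depends on the dictConfig above; this is only an illustration.

import logging

# Any module that names its logger under 'fuel-qa.' inherits the
# handlers wired up by dictConfig in fuelweb_test/__init__.py.
log = logging.getLogger('fuel-qa.{}'.format(__name__))
log.debug('written to sys_test.log only')
log.info('written to the console and to sys_test.log')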
@ -1,13 +0,0 @@
meta:
  conflicts:
    - controller
    - compute
  description: New role
  has_primary: true
  name: Test primary role
  tags:
    - base-os
name: test-primary-role
volumes_roles_mapping:
  - allocate_size: min
    id: os
@ -1,12 +0,0 @@
meta:
  conflicts:
    - controller
    - compute
  description: New role
  name: Test role
  tags:
    - base-os
name: test-role
volumes_roles_mapping:
  - allocate_size: min
    id: os
@ -1,27 +0,0 @@
- id: custom_task_on_controller
  type: shell
  version: 2.0.0
  role: ['/(primary-)?controller/']
  parameters:
    cmd: 'echo "controller" >> /tmp/custom_task_log'

- id: custom_task_on_compute
  type: shell
  version: 2.0.0
  role: ['compute']
  parameters:
    cmd: 'echo "compute" >> /tmp/custom_task_log'

- id: custom_task_on_cinder
  type: shell
  version: 2.0.0
  role: ['cinder']
  parameters:
    cmd: 'echo "cinder" >> /tmp/custom_task_log'

- id: custom_task_on_ceph-osd
  type: shell
  version: 2.0.0
  role: ['ceph-osd']
  parameters:
    cmd: 'echo "ceph-osd" >> /tmp/custom_task_log'
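In these task definitions, a role written between slashes is treated as a regular expression over node roles, so '/(primary-)?controller/' covers both controller and primary-controller, and '/.*/' in the next file covers every role. A simplified sketch of that matching rule, for illustration only (not the actual Nailgun implementation):

import re


def role_matches(pattern, role):
    # Patterns wrapped in slashes are regular expressions;
    # anything else must match the role name exactly.
    if pattern.startswith('/') and pattern.endswith('/'):
        return re.search(pattern[1:-1], role) is not None
    return pattern == role


assert role_matches('/(primary-)?controller/', 'primary-controller')
assert role_matches('compute', 'compute')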
@ -1,9 +0,0 @@
- id: custom_task_on_all_nodes
  type: shell
  version: 2.0.0
  condition:
    yaql_exp: '$.uid in added($.nodes).uid'
  role: ['/.*/']
  requires: ['custom_task_on_controller']
  parameters:
    cmd: 'echo "yaql_task_on_all_nodes" >> /tmp/yaql_task_on_all_nodes'
@ -1,8 +0,0 @@
---
keystone_config:
  token/expiration:
    value: 300
    notify: Service[apache2]
service:
  apache2:
    ensure: running
@ -1,62 +0,0 @@
---
keystone_config:
  ldap/url:
    value: ldap://dc.example.com
  ldap/user:
    value: CN=ldap,OU=Users,DC=example,DC=com
  ldap/password:
    value: ldap_test
  ldap/suffix:
    value: DC=example,DC=com
  ldap/use_dumb_member:
    value: "True"
  ldap/dumb_member:
    value: CN=ldap,OU=Users,DC=example,DC=com
  ldap/user_tree_dn:
    value: OU=Users,DC=example,DC=com
  ldap/user_objectclass:
    value: person
  ldap/user_filter:
    ensure: absent
  ldap/user_id_attribute:
    value: cn
  ldap/user_name_attribute:
    value: cn
  ldap/user_mail_attribute:
    value: mail
  ldap/user_pass_attribute:
    ensure: absent
  ldap/user_enabled_attribute:
    value: userAccountControl
  ldap/user_enabled_mask:
    value: "2"
  ldap/user_enabled_default:
    value: "512"
  ldap/user_attribute_ignore:
    value: password,tenant_id,tenants
  ldap/user_allow_create:
    value: "False"
  ldap/user_allow_update:
    value: "False"
  ldap/user_allow_delete:
    value: "False"
  ldap/role_tree_dn:
    value: OU=Roles,DC=example,DC=com
  ldap/role_filter:
    ensure: absent
  ldap/role_objectclass:
    value: organizationalRole
  ldap/role_id_attribute:
    value: cn
  ldap/role_name_attribute:
    value: ou
  ldap/role_member_attribute:
    value: roleOccupant
  ldap/role_attribute_ignore:
    ensure: absent
  ldap/role_allow_create:
    value: "True"
  ldap/role_allow_update:
    value: "True"
  ldap/role_allow_delete:
    value: "True"
@ -1,22 +0,0 @@
- id: task_on_master_1
  type: shell
  version: 2.0.0
  role: ['master']
  required_for: ['task_on_master_2']
  parameters:
    cmd: 'echo 1 > /tmp/master_task'

- id: task_on_master_2
  type: shell
  version: 2.0.0
  role: ['master']
  parameters:
    cmd: 'echo 2 >> /tmp/master_task'

- id: task_on_master_3
  type: shell
  version: 2.0.0
  role: ['master']
  requires: ['task_on_master_2']
  parameters:
    cmd: 'echo 3 >> /tmp/master_task'
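The requires/required_for fields above are ordering edges: task_on_master_1 runs before task_on_master_2, which runs before task_on_master_3, so /tmp/master_task should end up containing 1, 2, 3. A minimal sketch of resolving such edges into an execution order (a simplified topological sort for illustration; it assumes the graph has no cycles and is not the real deployment-graph resolver):

def execution_order(tasks):
    # Collect prerequisites from 'requires', then fold each
    # 'required_for' edge into the successor's prerequisite set.
    deps = {t['id']: set(t.get('requires', [])) for t in tasks}
    for t in tasks:
        for successor in t.get('required_for', []):
            deps[successor].add(t['id'])
    order, done = [], set()
    while len(order) < len(tasks):
        # Emit every task whose prerequisites are already done.
        ready = [tid for tid, req in deps.items()
                 if tid not in done and req <= done]
        order.extend(sorted(ready))
        done.update(ready)
    return order


tasks = [
    {'id': 'task_on_master_1', 'required_for': ['task_on_master_2']},
    {'id': 'task_on_master_2'},
    {'id': 'task_on_master_3', 'requires': ['task_on_master_2']},
]
print(execution_order(tasks))
# ['task_on_master_1', 'task_on_master_2', 'task_on_master_3']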
@ -1,16 +0,0 @@
---
neutron_plugin_ml2:
  ml2_type_vlan/network_vlan_ranges:
    value: "physnet2:900:901,physnet1"
    notify: "Service[neutron-server]"
neutron_config:
  DEFAULT/verbose:
    ensure: "absent"
    notify: "Service[neutron-server]"
  DEFAULT/debug:
    value: "True"
    notify: "Service[neutron-server]"
service:
  neutron-server:
    ensure: running
@ -1,8 +0,0 @@
---
nova_config:
  fuel_qa/service_reconfiguration_8:
    value: "InProgress"
    notify: "Service[nova-compute]"
service:
  nova-compute:
    ensure: running
@ -1,62 +0,0 @@
---
neutron_config:
  fuel_qa/service_reconfiguration_1:
    value: "InProgress"
    notify: "Service[neutron-server]"
neutron_plugin_ml2:
  fuel_qa/service_reconfiguration_2:
    value: "InProgress"
    notify: "Service[neutron-server]"
neutron_dhcp_agent_config:
  fuel_qa/service_reconfiguration_4:
    value: "InProgress"
    notify: "Service[neutron-dhcp-agent]"
neutron_l3_agent_config:
  fuel_qa/service_reconfiguration_5:
    value: "InProgress"
    notify: "Service[neutron-l3-agent]"
neutron_metadata_agent_config:
  fuel_qa/service_reconfiguration_6:
    value: "InProgress"
    notify: "Service[neutron-metadata-agent]"
neutron_api_config:
  fuel_qa/service_reconfiguration_7:
    value: "InProgress"
    notify: "Service[neutron-server]"
keystone_config:
  fuel_qa/service_reconfiguration_8:
    value: "InProgress"
nova_config:
  fuel_qa/service_reconfiguration_9:
    value: "InProgress"
    notify:
      - "Service[nova-scheduler]"
      - "Service[nova-novncproxy]"
      - "Service[nova-conductor]"
      - "Service[nova-api]"
      - "Service[nova-consoleauth]"
      - "Service[nova-cert]"
service:
  neutron-server:
    ensure: running
  nova-scheduler:
    ensure: running
  nova-novncproxy:
    ensure: running
  nova-conductor:
    ensure: running
  nova-api:
    ensure: running
  nova-consoleauth:
    ensure: running
  nova-cert:
    ensure: running
  neutron-dhcp-agent:
    ensure: running
    provider: pacemaker
  neutron-l3-agent:
    ensure: running
    provider: pacemaker
  neutron-metadata-agent:
    ensure: running
    provider: pacemaker
@ -1,8 +0,0 @@
---
nova_config:
  DEFAULT/cpu_allocation_ratio:
    value: "1.0"
    notify: "Service[nova-scheduler]"
service:
  nova-scheduler:
    ensure: running
@ -1,8 +0,0 @@
---
nova_config:
  DEFAULT/cpu_allocation_ratio:
    ensure: absent
    notify: "Service[nova-scheduler]"
service:
  nova-scheduler:
    ensure: running
@ -1,8 +0,0 @@
---
nova_config:
  DEFAULT/default_ephemeral_format:
    value: ext4
    notify: "Service[nova-compute]"
service:
  nova-compute:
    ensure: running
@ -1,11 +0,0 @@
---
nova_config:
  DEFAULT/default_ephemeral_format:
    value: "ext3"
    notify: "Service[nova-compute]"
  DEFAULT/verbose:
    value: "False"
    notify: "Service[nova-compute]"
service:
  nova-compute:
    ensure: running
@ -1,13 +0,0 @@
---
nova_config:
  DEFAULT/quota_driver:
    value: nova.quota.DbQuotaDriver
    notify:
      - "Service[nova-api]"
  DEFAULT/quota_instances:
    value: "1"
    notify:
      - "Service[nova-api]"
service:
  nova-api:
    ensure: running
@ -1,59 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Script to prepare shell script to generate target image"""


def execute():
    """Function to prepare shell script to generate target image"""
    import sys

    import six

    from nailgun.settings import NailgunSettings
    from nailgun.objects.release import Release
    from nailgun import consts
    from nailgun.orchestrator import tasks_templates

    settings = NailgunSettings()
    master_ip = settings.config['MASTER_IP']
    release_id = sys.argv[1]

    rel = Release.get_by_uid(release_id)

    packages_str = \
        rel.attributes_metadata['editable']['provision']['packages']['value']
    packages = list(
        six.moves.filter(bool, (s.strip() for s in packages_str.split('\n'))))
    task = tasks_templates.make_provisioning_images_task(
        [consts.MASTER_NODE_UID],
        rel.attributes_metadata['editable']['repo_setup']['repos']['value'],
        rel.attributes_metadata['generated']['provision'],
        'prepare_release_ubuntu',
        packages)

    release_str = 'release_{release_id}'.format(release_id=release_id)
    with open('build_image.sh', 'w') as cmd_file:
        cmd_file.write(task['parameters']['cmd'].replace(
            "{cluster.release.environment_version}",
            rel.environment_version).replace(
            '{cluster.release.version}',
            rel.version).replace(
            '{settings.MASTER_IP}',
            master_ip).replace(
            "{cluster.id}",
            release_str))


if __name__ == '__main__':
    execute()
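A usage note for the script above: it expects a release id as its only argument and has to run where nailgun is importable, i.e. on the Fuel master. The file name is not visible in this hunk, so it is a stand-in here: `python build_image_script.py 1` would render the provisioning command for release 1 and write it to build_image.sh in the current directory.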
@ -1,7 +0,0 @@
- id: custom_task_on_all_nodes
  type: shell
  version: 2.0.0
  role: ['/.*/']
  requires: ['custom_task_on_controller']
  parameters:
    cmd: 'echo "custom_task_on_all_nodes" > /tmp/custom_task_on_all_nodes'
@ -1,276 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis.asserts import assert_equal

from fuelweb_test import logger
from fuelweb_test.helpers.utils import check_distribution
from fuelweb_test.settings import DNS_SUFFIX
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_CENTOS
from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
from fuelweb_test.settings import UBUNTU_SERVICE_PROVIDER


def start_monitor(remote):
    """Starts the ceph-mon service depending on the Linux distribution.

    :param remote: devops.helpers.helpers.SSHClient
    :return: None
    :raise: DistributionNotSupported
    """
    logger.debug("Starting Ceph monitor on {0}".format(remote.host))
    check_distribution()
    if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
        remote.check_call('start ceph-mon-all')
    if OPENSTACK_RELEASE_CENTOS in OPENSTACK_RELEASE:
        remote.check_call('/etc/init.d/ceph start')


def stop_monitor(remote):
    """Stops the ceph-mon service depending on the Linux distribution.

    :param remote: devops.helpers.helpers.SSHClient
    :return: None
    :raise: DistributionNotSupported
    """
    logger.debug("Stopping Ceph monitor on {0}".format(remote.host))
    check_distribution()
    if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
        remote.check_call('stop ceph-mon-all')
    if OPENSTACK_RELEASE_CENTOS in OPENSTACK_RELEASE:
        remote.check_call('/etc/init.d/ceph stop')


def restart_monitor(remote):
    """Restarts the ceph-mon service depending on the Linux distribution.

    :param remote: devops.helpers.helpers.SSHClient
    :return: None
    :raise: DistributionNotSupported
    """
    stop_monitor(remote)
    start_monitor(remote)


def get_health(remote):
    logger.debug("Checking Ceph cluster health on {0}".format(remote.host))
    cmd = 'ceph health -f json'
    return remote.check_call(cmd).stdout_json


def get_monitor_node_fqdns(remote):
    """Returns FQDNs of nodes where the Ceph monitor service is running.

    :param remote: devops.helpers.helpers.SSHClient
    :return: list of FQDNs
    """
    cmd = 'ceph mon_status -f json'
    result = remote.check_call(cmd).stdout_json
    fqdns = [i['name'] + DNS_SUFFIX for i in result['monmap']['mons']]
    msg = "Ceph monitor service is running on {0}".format(', '.join(fqdns))
    logger.debug(msg)
    return fqdns


def is_clock_skew(remote):
    """Checks whether there is a clock skew across the monitor nodes.

    :param remote: devops.helpers.helpers.SSHClient
    :return: bool
    """
    if is_health_warn(remote):
        if 'clock skew' in ' '.join(health_detail(remote)):
            return True

    return False


def get_node_fqdns_w_clock_skew(remote):
    """Returns node FQDNs with a clock skew.

    :param remote: devops.helpers.helpers.SSHClient
    :return: list of FQDNs
    """
    fqdns = []
    if not is_clock_skew(remote):
        return fqdns

    for i in get_health(remote)['timechecks']['mons']:
        if abs(float(i['skew'])) >= 0.05:
            fqdns.append(i['name'] + DNS_SUFFIX)
    logger.debug("Clock skew is found on {0}".format(', '.join(fqdns)))
    return fqdns


def check_disks(remote, nodes_ids):
    nodes_names = ['node-{0}'.format(node_id) for node_id in nodes_ids]
    disks_tree = get_osd_tree(remote)
    osd_ids = get_osd_ids(remote)
    logger.debug("Disks output information: \\n{0}".format(disks_tree))
    disks_ids = []
    for node in disks_tree['nodes']:
        if node['type'] == 'host' and node['name'] in nodes_names:
            disks_ids.extend(node['children'])
    for node in disks_tree['nodes']:
        if node['type'] == 'osd' and node['id'] in disks_ids:
            assert_equal(node['status'], 'up', 'OSD node {0} is down'.
                         format(node['id']))
    for node in disks_tree['stray']:
        if node['type'] == 'osd' and node['id'] in osd_ids:
            logger.info("WARNING! Ceph OSD '{0}' has no parent host!".
                        format(node['name']))
            assert_equal(node['status'], 'up', 'OSD node {0} is down'.
                         format(node['id']))


def check_service_ready(remote, exit_code=0):
    cmds = []
    if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
        if UBUNTU_SERVICE_PROVIDER == 'systemd':
            # Gather services on remote node
            cmd = 'systemctl show --property=Id ceph-mon*service '\
                  'ceph-osd*service ceph-radosgw*service'
            result = remote.execute(cmd)
            if result['exit_code'] != 0:
                return False

            ceph_services = []
            for line in result['stdout']:
                try:
                    _, value = line.strip().split('=', 1)
                    ceph_services.append(value)
                except ValueError:
                    pass
            for service in ceph_services:
                cmds.append('systemctl is-active -q {}'.format(service))
        else:
            cmds.append('service ceph-all status')
    else:
        cmds.append('service ceph status')

    if not cmds:
        raise Exception("Don't know how to check ceph status. "
                        "Perhaps ceph packages are not installed")

    for cmd in cmds:
        if remote.execute(cmd)['exit_code'] != exit_code:
            return False
    return True


def health_overall_status(remote):
    """Returns Ceph health overall status.

    Can be one of: 'HEALTH_OK', 'HEALTH_WARN', 'HEALTH_ERR', ...
    :param remote: devops.helpers.helpers.SSHClient
    :return: str
    """
    health = get_health(remote)
    return health['overall_status']


def health_detail(remote):
    """Returns 'detail' section of Ceph health.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    health = get_health(remote)
    return health['detail']


def is_health_ok(remote):
    """Checks whether Ceph health overall status is OK.

    :param remote: devops.helpers.helpers.SSHClient
    :return: bool
    """
    if health_overall_status(remote) == 'HEALTH_OK':
        return True
    if is_health_warn(remote):
        health = get_health(remote)
        if 'too many PGs' in health['summary'][0]['summary']:
            return True
    return False


def is_health_warn(remote):
    """Checks whether Ceph health overall status is WARN.

    :param remote: devops.helpers.helpers.SSHClient
    :return: bool
    """
    return health_overall_status(remote) == 'HEALTH_WARN'


def is_pgs_recovering(remote):
    """Checks whether Ceph PGs are being recovered.

    :param remote: devops.helpers.helpers.SSHClient
    :return: bool
    """
    keywords = ['degraded', 'recovery', 'osds', 'are', 'down']
    detail = ' '.join(health_detail(remote))
    if all(k in detail.split() for k in keywords):
        return True
    logger.debug('Ceph PGs are not being recovered. '
                 'Details: {0}'.format(detail))
    return False


def get_osd_tree(remote):
    """Returns OSDs according to their position in the CRUSH map.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    logger.debug("Fetching Ceph OSD tree")
    cmd = 'ceph osd tree -f json'
    return remote.check_call(cmd).stdout_json


def get_osd_ids(remote):
    """Returns all OSD ids.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    logger.debug("Fetching Ceph OSD ids")
    cmd = 'ceph osd ls -f json'
    return remote.check_call(cmd).stdout_json


def get_rbd_images_list(remote, pool):
    """Returns the list of RBD images in the given pool.

    :param remote: devops.helpers.helpers.SSHClient
    :param pool: string, can be: 'images', 'volumes', etc.
    :return: JSON-like object
    """
    cmd = 'rbd --pool {pool} --format json ls -l'.format(pool=pool)
    return remote.check_call(cmd).stdout_json


def get_version(remote):
    """Returns Ceph version.

    :param remote: devops.helpers.helpers.SSHClient
    :return: str
    """
    cmd = 'ceph --version'
    return remote.check_call(cmd).stdout[0].split(' ')[2]
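A hedged usage sketch of the Ceph helpers above; `remote` is assumed to be an SSHClient connected to a monitor node of a deployed cluster:

# sketch only: check overall health and report nodes with skewed clocks
if not is_health_ok(remote):
    if is_clock_skew(remote):
        for fqdn in get_node_fqdns_w_clock_skew(remote):
            logger.warning("Clock skew detected on {0}".format(fqdn))
    logger.info("Ceph health: {0}".format(health_overall_status(remote)))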
File diff suppressed because it is too large
@ -1,54 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from core.helpers.log_helpers import logwrap

from fuelweb_test.helpers.ssh_manager import SSHManager


ssh_manager = SSHManager()


@logwrap
def change_config(ip, umm=True, reboot_count=2, counter_reset_time=10):
    umm_string = 'yes' if umm else 'no'
    cmd = ("echo -e 'UMM={0}\n"
           "REBOOT_COUNT={1}\n"
           "COUNTER_RESET_TIME={2}' > /etc/umm.conf".format(umm_string,
                                                            reboot_count,
                                                            counter_reset_time)
           )
    result = ssh_manager.execute(
        ip=ip,
        cmd=cmd
    )
    return result


def check_available_mode(ip):
    command = ('umm status | grep runlevel &>/dev/null && echo "True" '
               '|| echo "False"')
    # Run the remote command once and reuse the result, instead of
    # executing the same command separately for the check and the output.
    result = ssh_manager.execute(ip, command)
    if result['exit_code'] == 0:
        return ''.join(result['stdout']).strip()
    else:
        return ''.join(result['stderr']).strip()


def check_auto_mode(ip):
    command = ('umm status | grep umm &>/dev/null && echo "True" '
               '|| echo "False"')
    result = ssh_manager.execute(ip, command)
    if result['exit_code'] == 0:
        return ''.join(result['stdout']).strip()
    else:
        return ''.join(result['stderr']).strip()
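With the defaults, change_config(ip) renders an /etc/umm.conf of the form:

UMM=yes
REBOOT_COUNT=2
COUNTER_RESET_TIME=10

Note that the two check_* helpers return the string 'True' or 'False' printed by the remote shell, not a Python bool.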
@ -1,101 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import subprocess

from fuelweb_test import logger


def generate_cloud_image_settings(cloud_image_settings_path, admin_network,
                                  interface_name, admin_ip, admin_netmask,
                                  gateway, dns, dns_ext,
                                  hostname, user, password):

    # create dir for meta_data, user_data and cloud_ISO
    dir_path = os.path.dirname(cloud_image_settings_path)

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    meta_data_path = os.path.join(dir_path,
                                  "meta-data")
    user_data_path = os.path.join(dir_path,
                                  "user-data")

    # create meta_data and user_data

    meta_data_context = {
        "interface_name": interface_name,
        "address": admin_ip,
        "network": admin_network,
        "netmask": admin_netmask,
        "gateway": gateway,
        "dns": dns,
        "dns_ext": dns_ext,
        "hostname": hostname
    }

    meta_data_content = ("instance-id: iid-local1\n"
                         "network-interfaces: |\n"
                         " auto {interface_name}\n"
                         " iface {interface_name} inet static\n"
                         " address {address}\n"
                         " network {network}\n"
                         " netmask {netmask}\n"
                         " gateway {gateway}\n"
                         " dns-nameservers {dns} {dns_ext}\n"
                         "local-hostname: {hostname}")

    logger.debug("meta_data contains next data: \n{}".format(
        meta_data_content.format(**meta_data_context)))

    with open(meta_data_path, 'w') as f:
        f.write(meta_data_content.format(**meta_data_context))

    user_data_context = {
        "interface_name": interface_name,
        "gateway": gateway,
        "user": user,
        "password": password
    }

    user_data_content = ("\n#cloud-config\n"
                         "ssh_pwauth: True\n"
                         "chpasswd:\n"
                         " list: |\n"
                         "  {user}:{password}\n"
                         " expire: False \n\n"
                         "runcmd:\n"
                         " - sudo ifup {interface_name}\n"
                         " - sudo sed -i -e '/^PermitRootLogin/s/^"
                         ".*$/PermitRootLogin yes/' /etc/ssh/sshd_config\n"
                         " - sudo service ssh restart\n"
                         " - sudo route add default gw "
                         "{gateway} {interface_name}")

    logger.debug("user_data contains next data: \n{}".format(
        user_data_content.format(**user_data_context)))

    with open(user_data_path, 'w') as f:
        f.write(user_data_content.format(**user_data_context))

    # Generate cloud_ISO
    cmd = "genisoimage -output {} " \
          "-volid cidata -joliet " \
          "-rock {} {}".format(cloud_image_settings_path,
                               user_data_path,
                               meta_data_path)

    subprocess.check_call(cmd, shell=True)
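For reference, the final subprocess call runs a command of the form (paths are placeholders): genisoimage -output cloud_settings.iso -volid cidata -joliet -rock user-data meta-data. That produces a standard cloud-init NoCloud seed ISO; the -volid cidata label is what cloud-init looks for when locating the seed.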
@ -1,315 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys
import time
import traceback

from cinderclient.client import Client as CinderClient
from heatclient.v1.client import Client as HeatClient
from glanceclient import Client as GlanceClient
from ironicclient.client import get_client as get_ironic_client
from keystoneauth1.exceptions import ClientException
from keystoneauth1.identity import V2Password
from keystoneauth1.session import Session as KeystoneSession
from keystoneclient.v2_0 import Client as KeystoneClient
from novaclient.client import Client as NovaClient
from neutronclient.v2_0.client import Client as NeutronClient
from proboscis.asserts import assert_equal
import six
# pylint: disable=redefined-builtin
# noinspection PyUnresolvedReferences
from six.moves import xrange
# pylint: enable=redefined-builtin
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
from six.moves import urllib
# pylint: enable=import-error

from core.helpers.log_helpers import logwrap

from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test import logger
from fuelweb_test.settings import DISABLE_SSL
from fuelweb_test.settings import PATH_TO_CERT
from fuelweb_test.settings import VERIFY_SSL


class Common(object):
    """Common."""  # TODO documentation

    def __make_endpoint(self, endpoint):
        parse = urllib.parse.urlparse(endpoint)
        return parse._replace(
            netloc='{}:{}'.format(
                self.controller_ip, parse.port)).geturl()

    def __init__(self, controller_ip, user, password, tenant):
        self.controller_ip = controller_ip

        self.keystone_session = None

        if DISABLE_SSL:
            auth_url = 'http://{0}:5000/v2.0/'.format(self.controller_ip)
            path_to_cert = None
        else:
            auth_url = 'https://{0}:5000/v2.0/'.format(self.controller_ip)
            path_to_cert = PATH_TO_CERT

        insecure = not VERIFY_SSL

        logger.debug('Auth URL is {0}'.format(auth_url))

        self.__keystone_auth = V2Password(
            auth_url=auth_url,
            username=user,
            password=password,
            tenant_name=tenant)  # TODO: in v3 project_name

        self.__start_keystone_session(ca_cert=path_to_cert, insecure=insecure)

    @property
    def keystone(self):
        return KeystoneClient(session=self.keystone_session)

    @property
    def glance(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='image'))
        return GlanceClient(
            version='1',
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def neutron(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='network'))
        return NeutronClient(
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def nova(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='compute'))
        return NovaClient(
            version='2',
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def cinder(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='volume'))
        return CinderClient(
            version='3',
            session=self.keystone_session,
            endpoint_override=endpoint)

    @property
    def heat(self):
        endpoint = self.__make_endpoint(
            self._get_url_for_svc(service_type='orchestration'))
        # TODO: parameter endpoint_override when heatclient will be fixed
        return HeatClient(
            session=self.keystone_session,
            endpoint=endpoint)

    @property
    def ironic(self):
        try:
            endpoint = self.__make_endpoint(
                self._get_url_for_svc(service_type='baremetal'))
            return get_ironic_client('1', session=self.keystone_session,
                                     insecure=True, ironic_url=endpoint)
        except ClientException as e:
            logger.warning('Could not initialize ironic client {0}'.format(e))
            raise

    @property
    def keystone_access(self):
        return self.__keystone_auth.get_access(session=self.keystone_session)

    def _get_url_for_svc(
            self, service_type=None, interface='public',
            region_name=None, service_name=None,
            service_id=None, endpoint_id=None
    ):
        return self.keystone_access.service_catalog.url_for(
            service_type=service_type, interface=interface,
            region_name=region_name, service_name=service_name,
            service_id=service_id, endpoint_id=endpoint_id
        )

    def goodbye_security(self):
        secgroup_list = self.nova.security_groups.list()
        logger.debug("Security list is {0}".format(secgroup_list))
        secgroup_id = [i.id for i in secgroup_list if i.name == 'default'][0]
        logger.debug("Id of security group default is {0}".format(
            secgroup_id))
        logger.debug('Permit all TCP and ICMP in security group default')
        self.nova.security_group_rules.create(secgroup_id,
                                              ip_protocol='tcp',
                                              from_port=1,
                                              to_port=65535)
        self.nova.security_group_rules.create(secgroup_id,
                                              ip_protocol='icmp',
                                              from_port=-1,
                                              to_port=-1)

    def update_image(self, image, **kwargs):
        self.glance.images.update(image.id, **kwargs)
        return self.glance.images.get(image.id)

    def delete_image(self, image_id):
        return self.glance.images.delete(image_id)

    def create_key(self, key_name):
        logger.debug('Try to create key {0}'.format(key_name))
        return self.nova.keypairs.create(key_name)

    def create_instance(self, flavor_name='test_flavor', ram=64, vcpus=1,
                        disk=1, server_name='test_instance', image_name=None,
                        neutron_network=True, label=None):
        logger.debug('Try to create instance')

        start_time = time.time()
        exc_type, exc_value, exc_traceback = None, None, None
        while time.time() - start_time < 100:
            try:
                if image_name:
                    image = [i.id for i in self.nova.images.list()
                             if i.name == image_name]
                else:
                    image = [i.id for i in self.nova.images.list()]
                break
            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logger.warning('Ignoring exception: {!r}'.format(e))
                logger.debug(traceback.format_exc())
        else:
            if all((exc_type, exc_traceback, exc_value)):
                six.reraise(exc_type, exc_value, exc_traceback)
            raise Exception('Can not get image')

        kwargs = {}
        if neutron_network:
            net_label = label if label else 'net04'
            network = self.nova.networks.find(label=net_label)
            kwargs['nics'] = [{'net-id': network.id, 'v4-fixed-ip': ''}]

        logger.info('image uuid is {0}'.format(image))
        flavor = self.nova.flavors.create(
            name=flavor_name, ram=ram, vcpus=vcpus, disk=disk)
        logger.info('flavor is {0}'.format(flavor.name))
        server = self.nova.servers.create(
            name=server_name, image=image[0], flavor=flavor, **kwargs)
        logger.info('server is {0}'.format(server.name))
        return server

    @logwrap
    def get_instance_detail(self, server):
        details = self.nova.servers.get(server)
        return details

    def verify_instance_status(self, server, expected_state):
        def _verify_instance_state():
            curr_state = self.get_instance_detail(server).status
            assert_equal(expected_state, curr_state)

        try:
            _verify_instance_state()
        except AssertionError:
            logger.debug('Instance is not {0}, lets provide it the last '
                         'chance and sleep 60 sec'.format(expected_state))
            time.sleep(60)
            _verify_instance_state()

    def delete_instance(self, server):
        logger.debug('Try to delete instance')
        self.nova.servers.delete(server)

    def create_flavor(self, name, ram, vcpus, disk, flavorid="auto",
                      ephemeral=0, extra_specs=None):
        flavor = self.nova.flavors.create(name, ram, vcpus, disk, flavorid,
                                          ephemeral=ephemeral)
        if extra_specs:
            flavor.set_keys(extra_specs)
        return flavor

    def delete_flavor(self, flavor):
        return self.nova.flavors.delete(flavor)

    def create_aggregate(self, name, availability_zone=None,
                         metadata=None, hosts=None):
        aggregate = self.nova.aggregates.create(
            name=name, availability_zone=availability_zone)
        for host in hosts or []:
            aggregate.add_host(host)
        if metadata:
            aggregate.set_metadata(metadata)
        return aggregate

    def delete_aggregate(self, aggregate, hosts=None):
        for host in hosts or []:
            self.nova.aggregates.remove_host(aggregate, host)
        return self.nova.aggregates.delete(aggregate)

    def __start_keystone_session(
            self, retries=3, ca_cert=None, insecure=not VERIFY_SSL):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if insecure:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=False)
                elif ca_cert:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=ca_cert)
                else:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth)
                self.keystone_session.get_auth_headers()
                return

            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone token, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()

    @staticmethod
    def rebalance_swift_ring(controller_ip, retry_count=5, sleep=600):
        """Check Swift ring and rebalance it if needed.

        Replication should be performed on primary controller node.
        Retry check several times. Wait for replication due to LP1498368.
        """
        ssh = SSHManager()
        cmd = "/usr/local/bin/swift-rings-rebalance.sh"
        logger.debug('Check swift ring and rebalance it.')
        for _ in xrange(retry_count):
            try:
                checkers.check_swift_ring(controller_ip)
                break
            except AssertionError:
                result = ssh.execute(controller_ip, cmd)
                logger.debug("command execution result is {0}".format(result))
        else:
            checkers.check_swift_ring(controller_ip)
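A hedged usage sketch of the Common wrapper above (the endpoint address and credentials are placeholders):

conn = Common('10.109.1.2', user='admin', password='admin', tenant='admin')
server = conn.create_instance(image_name='TestVM')  # 'TestVM' is assumed to exist
conn.verify_instance_status(server, 'ACTIVE')
conn.delete_instance(server)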
@ -1,533 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import inspect
import json
import os
from subprocess import call
import sys
import time
import traceback

from proboscis import SkipTest
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
from six.moves import urllib
# pylint: enable=import-error

# pylint: disable=unused-import
from core.helpers.setup_teardown import setup_teardown  # noqa
# pylint: enable=unused-import

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.checkers import check_action_logs
from fuelweb_test.helpers.checkers import check_repo_managment
from fuelweb_test.helpers.checkers import check_stats_on_collector
from fuelweb_test.helpers.checkers import check_stats_private_info
from fuelweb_test.helpers.checkers import count_stats_on_collector
from fuelweb_test.helpers.regenerate_repo import CustomRepo
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.helpers.utils import get_current_env
from fuelweb_test.helpers.utils import pull_out_logs_via_ssh
from fuelweb_test.helpers.utils import store_astute_yaml
from fuelweb_test.helpers.utils import store_packages_json
from fuelweb_test.helpers.utils import TimeStat
from gates_tests.helpers.exceptions import ConfigurationException


def save_logs(session, url, path, chunk_size=1024):
    logger.info('Saving logs to "%s" file', path)

    stream = session.get(url, stream=True, verify=False)
    if stream.status_code != 200:
        logger.error("%s %s: %s", stream.status_code, stream.reason,
                     stream.content)
        return

    with open(path, 'wb') as fp:
        for chunk in stream.iter_content(chunk_size=chunk_size):
            if chunk:
                fp.write(chunk)
                fp.flush()


def store_error_details(name, env):
    description = "Failed in method {:s}.".format(name)
    if env is not None:
        try:
            create_diagnostic_snapshot(env, "fail", name)
        except:
            logger.error("Fetching of diagnostic snapshot failed: {0}".format(
                traceback.format_exception_only(sys.exc_info()[0],
                                                sys.exc_info()[1])))
            logger.debug("Fetching of diagnostic snapshot failed: {0}".
                         format(traceback.format_exc()))
            try:
                with env.d_env.get_admin_remote()\
                        as admin_remote:
                    pull_out_logs_via_ssh(admin_remote, name)
            except:
                logger.error("Fetching of raw logs failed: {0}".format(
                    traceback.format_exception_only(sys.exc_info()[0],
                                                    sys.exc_info()[1])))
                logger.debug("Fetching of raw logs failed: {0}".
                             format(traceback.format_exc()))
        finally:
            try:
                env.make_snapshot(snapshot_name=name[-50:],
                                  description=description,
                                  is_make=True)
            except:
                logger.error(
                    "Error making the environment snapshot: {0}".format(
                        traceback.format_exception_only(sys.exc_info()[0],
                                                        sys.exc_info()[1])))
                logger.debug("Error making the environment snapshot:"
                             " {0}".format(traceback.format_exc()))


def log_snapshot_after_test(func):
    """Generate diagnostic snapshot after the end of the test.

    - Show test case method name and scenario from docstring.
    - Create a diagnostic snapshot of environment in cases:
        - if the test case passed;
        - if error occurred in the test case.
    - Fetch logs from master node if creating the diagnostic
      snapshot has failed.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]"
                    .format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}"
                    .format(''.join(func.__doc__)))
        try:
            result = func(*args, **kwargs)
        except SkipTest:
            raise
        except Exception:
            name = 'error_{:s}'.format(func.__name__)
            store_error_details(name, args[0].env)
            logger.error(traceback.format_exc())
            logger.info("<" * 5 + "*" * 100 + ">" * 5)
            raise
        else:
            if settings.ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:
                if args[0].env is None:
                    logger.warning("Can't get diagnostic snapshot: "
                                   "unexpected class is decorated.")
                    return result
                try:
                    args[0].env.resume_environment()
                    create_diagnostic_snapshot(args[0].env, "pass",
                                               func.__name__)
                except:
                    logger.error("Fetching of diagnostic snapshot failed: {0}".
                                 format(traceback.format_exc()))
            return result
    return wrapper


def json_parse(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        response = func(*args, **kwargs)
        return json.loads(response.read())
    return wrapped


def upload_manifests(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        try:
            if settings.UPLOAD_MANIFESTS:
                logger.info(
                    "Uploading new manifests from "
                    "{:s}".format(settings.UPLOAD_MANIFESTS_PATH))
                environment = get_current_env(args)
                if not environment:
                    logger.warning("Can't upload manifests: method of "
                                   "unexpected class is decorated.")
                    return result
                with environment.d_env.get_admin_remote() as remote:
                    remote.execute('rm -rf /etc/puppet/modules/*')
                    remote.upload(settings.UPLOAD_MANIFESTS_PATH,
                                  '/etc/puppet/modules/')
                    logger.info(
                        "Copying new site.pp from "
                        "{:s}".format(settings.SITEPP_FOR_UPLOAD))
                    remote.execute("cp %s /etc/puppet/manifests" %
                                   settings.SITEPP_FOR_UPLOAD)
                    if settings.SYNC_DEPL_TASKS:
                        remote.execute("fuel release --sync-deployment-tasks"
                                       " --dir /etc/puppet/")
        except Exception:
            logger.error("Could not upload manifests")
            raise
        return result
    return wrapper


def update_rpm_packages(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not settings.UPDATE_FUEL:
            return result
        try:
            environment = get_current_env(args)
            if not environment:
                logger.warning("Can't update packages: method of "
                               "unexpected class is decorated.")
                return result

            if settings.UPDATE_FUEL_MIRROR:
                for url in settings.UPDATE_FUEL_MIRROR:
                    repo_url = urllib.parse.urlparse(url)
                    cut_dirs = len(repo_url.path.strip('/').split('/'))
                    download_cmd = ('wget --recursive --no-parent'
                                    ' --no-verbose --reject "index'
                                    '.html*,*.gif" --exclude-directories'
                                    ' "{pwd}/repocache" '
                                    '--directory-prefix {path} -nH'
                                    ' --cut-dirs={cutd} {url}').\
                        format(pwd=repo_url.path.rstrip('/'),
                               path=settings.UPDATE_FUEL_PATH,
                               cutd=cut_dirs, url=repo_url.geturl())
                    return_code = call(download_cmd, shell=True)
                    assert_equal(return_code, 0, 'Mirroring of remote'
                                                 ' packages '
                                                 'repository failed')

            centos_files_count, _ = \
                environment.admin_actions.upload_packages(
                    local_packages_dir=settings.UPDATE_FUEL_PATH,
                    centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                    ubuntu_repo_path=None)

            if centos_files_count == 0:
                return result

            # Add temporary repo with new packages to YUM configuration
            conf_file = '/etc/yum.repos.d/temporary.repo'
            cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/"
                   "\ngpgcheck=0\npriority=1' > {1}").format(
                settings.LOCAL_MIRROR_CENTOS, conf_file)

            SSHManager().execute_on_remote(
                ip=SSHManager().admin_ip,
                cmd=cmd
            )
            update_command = 'yum clean expire-cache; yum update -y -d3 ' \
                             '2>>/var/log/yum-update-error.log'
            cmd_result = SSHManager().execute(ip=SSHManager().admin_ip,
                                              cmd=update_command)
            logger.debug('Result of "yum update" command on master node: '
                         '{0}'.format(cmd_result))
            assert_equal(int(cmd_result['exit_code']), 0,
                         'Packages update failed, '
                         'inspect logs for details')

            SSHManager().execute_on_remote(
                ip=SSHManager().admin_ip,
                cmd='rm -f {0}'.format(conf_file)
            )
        except Exception:
            logger.error("Could not update packages")
            raise
        return result
    return wrapper


def update_fuel(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if settings.UPDATE_FUEL:
            logger.info("Update fuel's packages from directory {0}."
                        .format(settings.UPDATE_FUEL_PATH))
            environment = get_current_env(args)
            if not environment:
                logger.warning("Decorator was triggered "
                               "from unexpected class.")
                return result

            centos_files_count, ubuntu_files_count = \
                environment.admin_actions.upload_packages(
                    local_packages_dir=settings.UPDATE_FUEL_PATH,
                    centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                    ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)
            if not centos_files_count and not ubuntu_files_count:
                raise ConfigurationException('Nothing to update,'
                                             ' packages to update values is 0')
            cluster_id = environment.fuel_web.get_last_created_cluster()

            if centos_files_count > 0:
                with environment.d_env.get_admin_remote() as remote:
                    # Update packages on master node
                    remote.execute(
                        'yum -y install yum-plugin-priorities;'
                        'yum clean expire-cache; yum update -y '
                        '2>>/var/log/yum-update-error.log')

                # Add auxiliary repository to the cluster attributes
                if settings.OPENSTACK_RELEASE_UBUNTU not in \
                        settings.OPENSTACK_RELEASE:
                    environment.fuel_web.add_local_centos_mirror(
                        cluster_id, path=settings.LOCAL_MIRROR_CENTOS,
                        priority=settings.AUX_RPM_REPO_PRIORITY)

            if ubuntu_files_count > 0:
                # Add auxiliary repository to the cluster attributes
                if settings.OPENSTACK_RELEASE_UBUNTU in \
                        settings.OPENSTACK_RELEASE:
                    environment.fuel_web.add_local_ubuntu_mirror(
                        cluster_id, name="Auxiliary",
                        path=settings.LOCAL_MIRROR_UBUNTU,
                        priority=settings.AUX_DEB_REPO_PRIORITY)
                else:
                    logger.error("{0} .DEB files uploaded but won't be used"
                                 " because of deploying wrong release!"
                                 .format(ubuntu_files_count))
            if settings.SYNC_DEPL_TASKS:
                with environment.d_env.get_admin_remote() as remote:
                    remote.execute("fuel release --sync-deployment-tasks"
                                   " --dir /etc/puppet/")
        return result
    return wrapper


def revert_info(snapshot_name, master_ip, description=""):
    logger.info("<" * 5 + "*" * 100 + ">" * 5)
    logger.info("{} Make snapshot: {}".format(description, snapshot_name))
    command = ("dos.py revert-resume {env} {name} "
               "&& ssh root@{master_ip}".format(
                   env=settings.ENV_NAME,
                   name=snapshot_name,
                   master_ip=master_ip))
    if settings.VIRTUAL_ENV:
        command = ('source {venv}/bin/activate; {command}'
                   .format(venv=settings.VIRTUAL_ENV, command=command))
    logger.info("You could revert and ssh to master node: [{command}]"
                .format(command=command))

    logger.info("<" * 5 + "*" * 100 + ">" * 5)


def create_diagnostic_snapshot(env, status, name="",
                               timeout=settings.LOG_SNAPSHOT_TIMEOUT):
    logger.debug('Starting log snapshot with '
                 'timeout {} seconds'.format(timeout))
    task = env.fuel_web.task_wait(env.fuel_web.client.generate_logs(), timeout)
    assert_true(task['status'] == 'ready',
                "Generation of diagnostic snapshot failed: {}".format(task))
    if settings.FORCE_HTTPS_MASTER_NODE:
        url = "https://{}:8443{}".format(env.get_admin_node_ip(),
                                         task['message'])
    else:
        url = "http://{}:8000{}".format(env.get_admin_node_ip(),
                                        task['message'])

    log_file_name = '{status}_{name}-{basename}'.format(
        status=status,
        name=name,
        basename=os.path.basename(task['message']))
    save_logs(
        session=env.fuel_web.client.session,
        url=url,
        path=os.path.join(settings.LOGS_DIR, log_file_name))


def retry(count=3, delay=30):
    def wrapped(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            i = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except:
                    i += 1
                    if i >= count:
                        raise
                    time.sleep(delay)
        return wrapper
    return wrapped


def custom_repo(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        custom_pkgs = CustomRepo()
        try:
            if settings.CUSTOM_PKGS_MIRROR:
                custom_pkgs.prepare_repository()

        except Exception:
            logger.error("Unable to get custom packages from {0}\n{1}"
                         .format(settings.CUSTOM_PKGS_MIRROR,
                                 traceback.format_exc()))
            raise

        try:
            return func(*args, **kwargs)
        except Exception:
            custom_pkgs.check_puppet_logs()
            raise
    return wrapper


def check_fuel_statistics(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not settings.FUEL_STATS_CHECK:
            return result
        logger.info('Test "{0}" passed. Checking stats.'.format(func.__name__))
        fuel_settings = args[0].env.admin_actions.get_fuel_settings()
        nailgun_actions = args[0].env.nailgun_actions
        postgres_actions = args[0].env.postgres_actions
        remote_collector = args[0].env.collector
        master_uuid = args[0].env.get_masternode_uuid()
        logger.info("Master Node UUID: '{0}'".format(master_uuid))
        nailgun_actions.force_fuel_stats_sending()

        if not settings.FUEL_STATS_ENABLED:
            assert_equal(0, int(count_stats_on_collector(remote_collector,
                                                         master_uuid)),
                         "Sending of Fuel stats is disabled in test, but "
                         "usage info was sent to collector!")
            assert_equal(args[0].env.postgres_actions.count_sent_action_logs(),
                         0, ("Sending of Fuel stats is disabled in test, but "
                             "usage info was sent to collector!"))
            return result

        test_scenario = inspect.getdoc(func)
        if 'Scenario' not in test_scenario:
            logger.warning(("Can't check that fuel statistics was gathered "
                            "and sent to collector properly because '{0}' "
                            "test doesn't contain correct testing scenario. "
                            "Skipping...").format(func.__name__))
            return func(*args, **kwargs)
        try:
            check_action_logs(test_scenario, postgres_actions)
            check_stats_private_info(remote_collector,
                                     postgres_actions,
                                     master_uuid,
                                     fuel_settings)
            check_stats_on_collector(remote_collector,
                                     postgres_actions,
                                     master_uuid)
            return result
        except Exception:
            logger.error(traceback.format_exc())
            raise
    return wrapper


def download_astute_yaml(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if settings.STORE_ASTUTE_YAML:
            environment = get_current_env(args)
            if environment:
                store_astute_yaml(environment)
            else:
                logger.warning("Can't download astute.yaml: "
                               "Unexpected class is decorated.")
        return result
    return wrapper


def download_packages_json(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        environment = get_current_env(args)
        if environment:
            store_packages_json(environment)
        else:
            logger.warning("Can't collect packages: "
                           "Unexpected class is decorated.")
        return result
    return wrapper


def duration(func):
    """Measuring execution time of the decorated method in context of a test.

    settings.TIMESTAT_PATH_YAML contains file name for collected data.
    Data are stored to YAML file in the following format:

    <name_of_system_test_method>:
      <name_of_decorated_method>_XX: <seconds>

    , where:

    - name_of_system_test_method: Name of the system test method started
                                  by proboscis;
    - name_of_decorated_method: Name of the method to which this decorator
                                is implemented. _XX is a number of the method
                                call while test is running, from _00 to _99
    - seconds: Time in seconds with floating point, consumed by the
               decorated method

    Thus, different tests can call the same decorated method multiple times
    and get the separate measurement for each call.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with TimeStat(func.__name__):
            return func(*args, **kwargs)
    return wrapper


def check_repos_management(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # FIXME: Enable me for all release after fix #1403088 and #1448114
        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            try:
                env = get_current_env(args)
                nailgun_nodes = env.fuel_web.client.list_cluster_nodes(
                    env.fuel_web.get_last_created_cluster())
                for n in nailgun_nodes:
                    logger.debug("Check repository management on {0}"
                                 .format(n['ip']))
                    check_repo_managment(n['ip'])
            except Exception:
                logger.error("An error happened during check repositories "
                             "management on nodes. Please see the debug log.")
        return result
    return wrapper


def token(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AssertionError:
            logger.info("Response code not equivalent to 200,"
                        " trying to update the token")
            args[0].login()
            return func(*args, **kwargs)
    return wrapper
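A usage sketch for the retry decorator defined above (the probed function and helper are hypothetical):

@retry(count=5, delay=10)
def flaky_probe():
    # hypothetical probe: any exception triggers another attempt; the
    # decorator re-raises after `count` failed tries, sleeping `delay`
    # seconds between attempts
    response = probe_endpoint()  # hypothetical helper
    assert_true(response)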
@ -1,84 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import subprocess

from core.helpers.log_helpers import logwrap


class Ebtables(object):
    """Ebtables."""  # TODO documentation

    def __init__(self, target_devs, vlans):
        super(Ebtables, self).__init__()
        self.target_devs = target_devs
        self.vlans = vlans

    @logwrap
    def restore_vlans(self):
        for vlan in self.vlans:
            for target_dev in self.target_devs:
                Ebtables.restore_vlan(target_dev, vlan)

    @logwrap
    def restore_first_vlan(self):
        for target_dev in self.target_devs:
            Ebtables.restore_vlan(target_dev, self.vlans[0])

    @logwrap
    def block_first_vlan(self):
        for target_dev in self.target_devs:
            Ebtables.block_vlan(target_dev, self.vlans[0])

    @staticmethod
    @logwrap
    def block_mac(mac):
        return subprocess.check_output(
            ['sudo', 'ebtables', '-t', 'filter', '-A', 'FORWARD', '-s',
             mac, '-j', 'DROP'],
            stderr=subprocess.STDOUT
        )

    @staticmethod
    @logwrap
    def restore_mac(mac):
        return subprocess.call(
            [
                'sudo', 'ebtables', '-t', 'filter',
                '-D', 'FORWARD', '-s', mac, '-j', 'DROP'
            ],
            stderr=subprocess.STDOUT,
        )

    @staticmethod
    @logwrap
    def restore_vlan(target_dev, vlan):
        return subprocess.call(
            [
                'sudo', 'ebtables', '-t', 'broute', '-D', 'BROUTING', '-i',
                target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP'
            ],
            stderr=subprocess.STDOUT,
        )

    @staticmethod
    @logwrap
    def block_vlan(target_dev, vlan):
        return subprocess.check_output(
            [
                'sudo', 'ebtables', '-t', 'broute', '-A', 'BROUTING', '-i',
                target_dev, '-p', '8021Q', '--vlan-id', str(vlan), '-j', 'DROP'
            ],
            stderr=subprocess.STDOUT
        )
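For reference, Ebtables.block_vlan(dev, vlan) shells out to: sudo ebtables -t broute -A BROUTING -i <dev> -p 8021Q --vlan-id <vlan> -j DROP, and restore_vlan issues the same rule with -D to remove it. Blocking uses check_output so a failed insertion raises, while restoring uses call so a missing rule is silently ignored.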
@ -1,579 +0,0 @@
|
||||
# Copyright 2014 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re

from devops.helpers.helpers import wait
from proboscis.asserts import assert_true
import yaml

from core.helpers.log_helpers import logwrap

from fuelweb_test import logger
from fuelweb_test.helpers.decorators import retry
from fuelweb_test.helpers.regenerate_repo import regenerate_centos_repo
from fuelweb_test.helpers.regenerate_repo import regenerate_ubuntu_repo
from fuelweb_test.helpers import replace_repos
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.helpers.utils import dict_merge
from fuelweb_test.settings import FUEL_PLUGIN_BUILDER_FROM_GIT
from fuelweb_test.settings import FUEL_PLUGIN_BUILDER_REPO
from fuelweb_test.settings import FUEL_USE_LOCAL_NTPD
from fuelweb_test.settings import KEYSTONE_CREDS
from fuelweb_test.settings import MIRROR_UBUNTU
from fuelweb_test.settings import PLUGIN_PACKAGE_VERSION
from fuelweb_test.settings import FUEL_SETTINGS_YAML
from fuelweb_test.helpers.utils import YamlEditor


class BaseActions(object):
    """BaseActions."""  # TODO documentation

    def __init__(self):
        self.ssh_manager = SSHManager()
        self.admin_ip = self.ssh_manager.admin_ip

    def __repr__(self):
        klass, obj_id = type(self), hex(id(self))
        return "[{klass}({obj_id})]".format(
            klass=klass,
            obj_id=obj_id)

    def restart_service(self, service):
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="systemctl restart {0}".format(service),
            err_msg="Failed to restart service {!r}, please inspect logs for "
                    "details".format(service))


class AdminActions(BaseActions):
    """All actions related to the admin node."""

    @logwrap
    def is_fuel_service_ready(self, service):
        result = self.ssh_manager.execute(
            ip=self.admin_ip,
            cmd="timeout 5 fuel-utils check_service {0}".format(service))
        return result['exit_code'] == 0

    @logwrap
    def is_fuel_ready(self):
        result = self.ssh_manager.execute(
            ip=self.admin_ip,
            cmd="timeout 15 fuel-utils check_all")
        return result['exit_code'] == 0

    @logwrap
    def wait_for_fuel_ready(self, timeout=300):
        # The predicate must call is_fuel_ready(); the bare bound method is
        # always truthy and would make wait() return immediately.
        wait(lambda: self.is_fuel_ready(), timeout=timeout,
             timeout_msg="Fuel services are not ready, please check the "
                         "output of 'fuel-utils check_all'")

    @logwrap
    @retry()
    def ensure_cmd(self, cmd):
        self.ssh_manager.execute_on_remote(ip=self.admin_ip, cmd=cmd)

    @logwrap
    def upload_plugin(self, plugin):
        """Upload a plugin to the master node."""
        logger.info("Uploading Fuel plugin from path {}.".format(plugin))
        return self.ssh_manager.upload_to_remote(
            ip=self.ssh_manager.admin_ip,
            source=plugin,
            target='/var',
            port=self.ssh_manager.admin_port)

    @logwrap
    def install_plugin(self, plugin_file_name):
        """Install a plugin on the master node."""
        return self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="cd /var && fuel plugins --install "
                "{plugin!s} ".format(plugin=plugin_file_name),
            port=self.ssh_manager.admin_port,
            err_msg='Install script failed'
        )

    @logwrap
    def modify_configs(self, router):
        # Slave nodes should use the gateway of 'admin' network as the default
        # gateway during provisioning and as an additional DNS server.
        fuel_settings = self.get_fuel_settings()
        fuel_settings['DEBUG'] = True
        fuel_settings['DNS_UPSTREAM'] = router
        fuel_settings['ADMIN_NETWORK']['dhcp_gateway'] = router
        fuel_settings["FUEL_ACCESS"]['user'] = KEYSTONE_CREDS['username']
        fuel_settings["FUEL_ACCESS"]['password'] = KEYSTONE_CREDS['password']

        if FUEL_USE_LOCAL_NTPD:
            # Try to use only ntpd on the host as the time source
            # for admin node
            cmd = 'ntpdate -p 4 -t 0.2 -ub {0}'.format(router)

            if not self.ssh_manager.execute(ip=self.admin_ip,
                                            cmd=cmd)['exit_code']:
                # Local ntpd on the host is alive, so
                # remove all NTP sources and add the host instead.
                logger.info("Switching NTPD on the Fuel admin node to use "
                            "{0} as the time source.".format(router))
                ntp_keys = [k for k in fuel_settings.keys()
                            if re.match(r'^NTP', k)]
                for key in ntp_keys:
                    fuel_settings.pop(key)
                fuel_settings['NTP1'] = router

        if MIRROR_UBUNTU:
            fuel_settings['BOOTSTRAP']['repos'] = \
                replace_repos.replace_ubuntu_repos(
                    {
                        'value': fuel_settings['BOOTSTRAP']['repos']
                    },
                    upstream_host='archive.ubuntu.com')
            logger.info("Replacing the default Ubuntu mirror URL for the "
                        "bootstrap image in Fuel settings")
        self.save_fuel_settings(fuel_settings)

    @logwrap
    def update_fuel_setting_yaml(self, path):
        """Override the Fuel settings YAML with values from a custom YAML.

        :param path: a string with the full path to the custom settings YAML
        """

        fuel_settings = self.get_fuel_settings()
        with open(path) as fyaml:
            custom_fuel_settings = yaml.load(fyaml)

        fuel_settings = dict_merge(fuel_settings, custom_fuel_settings)
        self.save_fuel_settings(fuel_settings)
        logger.debug('File /etc/fuel/astute.yaml was updated. '
                     'It is now {}'.format(fuel_settings))

    @logwrap
    def upload_packages(self, local_packages_dir, centos_repo_path,
                        ubuntu_repo_path, clean_target=False):
        logger.info("Uploading Fuel packages from directory {0}."
                    .format(local_packages_dir))

        centos_files_count = 0
        ubuntu_files_count = 0
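
        # Upload only the packages matching the regex conditions below, and
        # regenerate the repository metadata only when at least one new
        # package actually landed on the master node.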
        if centos_repo_path:
            centos_files_count = self.ssh_manager.cond_upload(
                ip=self.admin_ip,
                source=local_packages_dir,
                target=os.path.join(centos_repo_path, 'Packages'),
                condition=r"(?i).*\.rpm$",
                clean_target=clean_target
            )
            if centos_files_count > 0:
                regenerate_centos_repo(centos_repo_path)

        if ubuntu_repo_path:
            ubuntu_files_count = self.ssh_manager.cond_upload(
                ip=self.admin_ip,
                source=local_packages_dir,
                target=os.path.join(ubuntu_repo_path, 'pool/main'),
                condition=r"(?i).*\.deb$",
                clean_target=clean_target
            )
            if ubuntu_files_count > 0:
                regenerate_ubuntu_repo(ubuntu_repo_path)

        return centos_files_count, ubuntu_files_count

    @logwrap
    def clean_generated_image(self, distro):
        out = self.ssh_manager.execute(
            ip=self.admin_ip,
            cmd="find /var/www/nailgun/targetimages/ -name "
                "'env*{}*' -printf '%P\n'".format(distro.lower())
        )
        # execute() returns a result dict; join the stdout lines of find
        images = ''.join(out['stdout'])

        logger.debug("images are {}".format(images))
        self.ssh_manager.execute(
            ip=self.admin_ip,
            cmd="find /var/www/nailgun/targetimages/ -name 'env*{}*'"
                " -delete".format(distro.lower())
        )

    def get_fuel_settings(self):
        return YamlEditor(
            file_path=FUEL_SETTINGS_YAML,
            ip=self.admin_ip
        ).get_content()

    def save_fuel_settings(self, settings):
        with YamlEditor(
                file_path=FUEL_SETTINGS_YAML,
                ip=self.admin_ip
        ) as data:
            data.content = settings

    @logwrap
    def get_tasks_description(self, release=None):
        """Get tasks description

        :param release: a string with the release name
        :return: a dictionary with the tasks description
        """
        if not release:
            release = ''
        cmd = "cat `find /etc/puppet/{} -name tasks.yaml`".format(release)
        return self.ssh_manager.check_call(self.admin_ip, cmd).stdout_yaml


class NailgunActions(BaseActions):
    """NailgunActions."""  # TODO documentation

    def update_nailgun_settings(self, settings):
        cfg_file = '/etc/nailgun/settings.yaml'
        with YamlEditor(file_path=cfg_file, ip=self.admin_ip) as ng_settings:
            ng_settings.content.update(settings)

        logger.debug('Uploading new nailgun settings: {}'.format(
            ng_settings))
        self.restart_service("nailgun")

    def set_collector_address(self, host, port, ssl=False):
        base_cfg_file = ('/usr/lib/python2.7/site-packages/'
                         'nailgun/settings.yaml')
        assert_true(
            self.ssh_manager.exists_on_remote(
                self.ssh_manager.admin_ip, base_cfg_file),
            "Nailgun config file was not found at {!r}".format(base_cfg_file))

        server = "{!s}:{!s}".format(host, port)
        parameters = {'COLLECTOR_SERVER': server,
                      'OSWL_COLLECT_PERIOD': 0}
        if not ssl:
            # replace https endpoints with http endpoints
            with self.ssh_manager.open_on_remote(self.admin_ip,
                                                 base_cfg_file) as f:
                data = yaml.load(f)
                for key, value in data.items():
                    if (isinstance(key, str) and
                            key.startswith("COLLECTOR") and
                            key.endswith("URL") and
                            value.startswith("https")):
                        parameters[key] = "http" + value[len("https"):]
        logger.debug('Custom collector parameters: {!r}'.format(parameters))
        self.update_nailgun_settings(parameters)

    def force_fuel_stats_sending(self):
        log_file = '/var/log/nailgun/statsenderd.log'
        # Rotate logs on restart in order to get rid of old errors
        cmd = 'mv {0}{{,.backup_$(date +%s)}}'.format(log_file)
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip, cmd=cmd, raise_on_assert=False)
        self.restart_service('statsenderd')

        wait(lambda: self.ssh_manager.exists_on_remote(self.admin_ip,
                                                       log_file),
             timeout=10)
        cmd = 'grep -sw "ERROR" {0}'.format(log_file)
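        # grep exits with status 1 when no match is found, so asserting exit
        # code 1 below verifies that the log contains no ERROR entries.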
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip, cmd=cmd, assert_ec_equal=[1],
            err_msg=("Fuel stats were sent with errors! Check its logs"
                     " in {0} for details.").format(log_file))

    def force_oswl_collect(self, resources=None):
        resources = resources or ['vm', 'flavor', 'volume', 'image', 'tenant',
                                  'keystone_user']
        for resource in resources:
            self.restart_service("oswl_{}_collectord".format(resource))


class PostgresActions(BaseActions):
    """PostgresActions."""  # TODO documentation

    def run_query(self, db, query):
        cmd = "su - postgres -c 'psql -qt -d {0} -c \"{1};\"'".format(
            db, query)
        return self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=cmd)['stdout_str']
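
    # Illustrative usage (values hypothetical):
    #   run_query('nailgun', 'select count(id) from tasks')
    # returns the trimmed stdout of psql, e.g. '42'.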

    def action_logs_contain(self, action, group=False,
                            table='action_logs'):
        logger.info("Checking that '{0}' action was logged...".format(
            action))
        log_filter = "action_name" if not group else "action_group"
        q = "select id from {0} where {1} = '\"'\"'{2}'\"'\"'".format(
            table, log_filter, action)
        logs = [i.strip() for i in self.run_query('nailgun', q).split('\n')
                if re.compile(r'\d+').match(i.strip())]
        logger.info("Found log records with ids: {0}".format(logs))
        return len(logs) > 0

    def count_sent_action_logs(self, table='action_logs'):
        q = "select count(id) from {0} where is_sent = True".format(table)
        return int(self.run_query('nailgun', q))


class FuelPluginBuilder(BaseActions):
    """
    Basic class for Fuel plugin builder support in tests.

    Initializes BaseActions.
    """
    def fpb_install(self):
        """
        Installs the Fuel plugin builder on the master node

        :return: nothing
        """
        rpms = "createrepo dpkg-devel dpkg-dev rpm-build python-pip"
        fpb_package = "fuel-plugin-builder"
        if FUEL_PLUGIN_BUILDER_FROM_GIT:
            rpms += " tar git"
            fpb_package = "git+{}".format(FUEL_PLUGIN_BUILDER_REPO)

        self.ssh_manager.check_call(self.admin_ip,
                                    "yum -y install {}".format(rpms))
        self.ssh_manager.check_call(self.admin_ip,
                                    "pip install {}".format(fpb_package))

    def fpb_create_plugin(self, name, package_version=PLUGIN_PACKAGE_VERSION):
        """
        Creates a new plugin with the given name
        :param name: name of the plugin to create
        :param package_version: plugin package version to create template for
        :return: nothing
        """
        cmd = "fpb --create {0}".format(name)
        if package_version != '':
            cmd += ' --package-version {0}'.format(package_version)
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=cmd
        )

    def fpb_build_plugin(self, path):
        """
        Builds a plugin from the given path
        :param path: path to the plugin, e.g. /root/example_plugin
        :return: package name
        """
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="bash -c 'fpb --build {0}'".format(path)
        )
        packet_name = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="bash -c 'basename {0}/*.rpm'".format(path)
        )['stdout_str']
        return packet_name

    def fpb_update_release_in_metadata(self, path):
        """Update the Fuel version and the OpenStack release version

        :param path: path to the plugin's dir on the master node
        """
        metadata_path = os.path.join(path, 'metadata.yaml')
        output = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip, cmd="fuel2 fuel-version -f json",
            jsonify=True)['stdout_json']
        fuel_version = [str(output['release'])]
        openstack_version = str(output['openstack_version'])
        with YamlEditor(metadata_path, ip=self.admin_ip) as editor:
            editor.content['fuel_version'] = fuel_version
            editor.content['releases'][0]['version'] = openstack_version

    def fpb_validate_plugin(self, path):
        """
        Validates the plugin for errors
        :param path: path to the plugin to be verified
        :return: nothing
        """
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="fpb --check {0}".format(path))

    def fpb_replace_plugin_content(self, local_file, remote_file):
        """
        Replaces a remote file with the given local file
        :param local_file: path to the local file
        :param remote_file: remote file to be replaced
        :return: nothing
        """
        self.ssh_manager.rm_rf_on_remote(ip=self.admin_ip, path=remote_file)
        self.ssh_manager.upload_to_remote(
            ip=self.admin_ip,
            source=local_file,
            target=remote_file
        )

    def fpb_change_plugin_version(self, plugin_name, new_version):
        """
        Changes the plugin version to the given one
        :param plugin_name: plugin name
        :param new_version: new version to be used for the plugin
        :return: nothing
        """
        with YamlEditor('/root/{}/metadata.yaml'.format(plugin_name),
                        ip=self.admin_ip) as editor:
            editor.content['version'] = new_version

    def fpb_change_package_version(self, plugin_name, new_version):
        """
        Changes the plugin's package version
        :param plugin_name: plugin whose package version is changed
        :param new_version: new package version to set
        :return: nothing
        """
        with YamlEditor('/root/{}/metadata.yaml'.format(plugin_name),
                        ip=self.admin_ip) as editor:
            editor.content['package_version'] = new_version

    def fpb_copy_plugin(self, source, target):
        """
        Copies the new plugin from source to target
        :param source: initial plugin location
        :param target: target path
        :return: nothing
        """
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="cp {0} {1}".format(source, target))


class CobblerActions(BaseActions):
    """CobblerActions."""  # TODO documentation

    def add_dns_upstream_server(self, dns_server_ip):
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="sed '$anameserver {0}' -i /etc/dnsmasq.upstream".format(
                dns_server_ip))
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd='service dnsmasq restart')


class FuelBootstrapCliActions(AdminActions):
    def get_bootstrap_default_config(self):
        fuel_settings = self.get_fuel_settings()
        return fuel_settings["BOOTSTRAP"]

    @staticmethod
    def parse_uuid(message):
        uuid_regex = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-" \
                     r"[0-9a-f]{4}-[0-9a-f]{12}"

        # NOTE: Splitting for matching only the first uuid in case of parsing
        # the images list, because an image label could contain matching
        # strings
        message_lines = message.splitlines()
        uuids = []

        for line in message_lines:
            match = re.search(uuid_regex, line)
            if match is not None:
                uuids.append(match.group())

        if not uuids:
            raise Exception("Could not find uuid in fuel-bootstrap "
                            "output: {0}".format(message))
        return uuids
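
        # Illustrative example (UUID value hypothetical): a listing line like
        #   | 791e1809-8f07-4ce1-b104-3df0ca2f6bcf | ... | active |
        # yields '791e1809-8f07-4ce1-b104-3df0ca2f6bcf'.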

    def activate_bootstrap_image(self, uuid):
        command = "fuel-bootstrap activate {0}".format(uuid)
        result = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=command,
        )['stdout_str']

        return self.parse_uuid(result)[0]

    def build_bootstrap_image(self, **kwargs):
        simple_fields = \
            ("ubuntu-release", "http-proxy", "https-proxy", "script",
             "label", "extend-kopts", "kernel-flavor",
             "root-ssh-authorized-file", "output-dir", "image-build-dir")
        list_fields = ("repo", "direct-repo-addr", "package", "extra-dir")
        flag_fields = ("activate", )
        command = "fuel-bootstrap build "

        for field in simple_fields:
            if kwargs.get(field) is not None:
                command += "--{0} {1} ".format(field, kwargs.get(field))

        for field in list_fields:
            if kwargs.get(field) is not None:
                for value in kwargs.get(field):
                    command += "--{0} {1} ".format(field, value)

        for field in flag_fields:
            if kwargs.get(field) is not None:
                command += "--{0} ".format(field)

        logger.info("Building bootstrap image: {0}".format(command))
        result = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=command,
        )['stdout_str']

        logger.info("Bootstrap image has been built: {0}".format(result))
        uuid = self.parse_uuid(result)[0]
        path = os.path.join(kwargs.get("output-dir", "/tmp"),
                            "{0}.tar.gz".format(uuid))
        return uuid, path
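
    # Illustrative call (field values hypothetical):
    #   build_bootstrap_image(**{'ubuntu-release': 'trusty',
    #                            'activate': True})
    # assembles "fuel-bootstrap build --ubuntu-release trusty --activate "
    # and returns the new image UUID plus its expected tarball path.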

    def import_bootstrap_image(self, filename, activate=False):
        command = ("fuel-bootstrap import {0} {1}"
                   .format(filename,
                           "--activate" if activate else ""))

        result = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=command,
        )['stdout_str']
        return self.parse_uuid(result)[0]

    def list_bootstrap_images(self):
        command = "fuel-bootstrap list"
        result = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=command,
        )['stdout_str']
        return result

    def list_bootstrap_images_uuids(self):
        return self.parse_uuid(self.list_bootstrap_images())

    def get_active_bootstrap_uuid(self):
        command = "fuel-bootstrap list"
        bootstrap_images = \
            self.ssh_manager.execute_on_remote(
                ip=self.admin_ip,
                cmd=command)['stdout_str'].split('\n')

        for line in bootstrap_images:
            if "active" in line:
                return self.parse_uuid(line)[0]

        logger.warning("No active bootstrap. Fuel-bootstrap list:\n{0}"
                       .format("\n".join(bootstrap_images)))

    def delete_bootstrap_image(self, uuid):
        command = "fuel-bootstrap delete {0}".format(uuid)
        result = self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd=command,
        )['stdout_str']
        return self.parse_uuid(result)[0]
@ -1,68 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.helpers.utils import generate_yum_repos_config

from gates_tests.helpers import exceptions


def install_mos_repos():
    """
    Upload and install the fuel-release package with the MOS repo description
    and install the packages needed for a packetary Fuel installation
    :return: nothing
    """
    logger.info("upload fuel-release package")
    if not settings.FUEL_RELEASE_PATH:
        raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
    try:
        ssh = SSHManager()
        pack_path = '/tmp/'
        full_pack_path = os.path.join(pack_path,
                                      'fuel-release*.noarch.rpm')
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_RELEASE_PATH.rstrip('/'),
            target=pack_path)

        if settings.RPM_REPOS_YAML:
            with ssh.open_on_remote(
                    ip=ssh.admin_ip,
                    path='/etc/yum.repos.d/custom.repo',
                    mode="w") as f:
                f.write(generate_yum_repos_config(settings.RPM_REPOS_YAML))

        if settings.DEB_REPOS_YAML:
            ssh = SSHManager()
            pack_path = "/root/default_deb_repos.yaml"
            ssh.upload_to_remote(
                ip=ssh.admin_ip,
                source=settings.DEB_REPOS_YAML,
                target=pack_path)

    except Exception:
        logger.exception("Could not upload package")
        raise

    logger.debug("setup MOS repositories")
    cmd = "rpm -ivh {}".format(full_pack_path)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "yum install -y fuel-setup"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
@ -1,71 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

# pylint: disable=redefined-builtin
# noinspection PyUnresolvedReferences
from six.moves import xrange
# pylint: enable=redefined-builtin


class PuppetfileChangesParser(object):

    def __init__(self, review, path):
        self.review = review
        self.filepath = path

    def get_changed_modules(self):
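        # Pipeline: map the diff hunks to changed line numbers, walk each
        # changed line back to its enclosing "mod '...'" entry, and extract
        # the module names from those lines.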
        content = self.review.get_content_as_dict(self.filepath)
        diff = self.review.get_diff_as_dict(self.filepath)
        diff_lines_changed = self._get_lines_num_changed_from_diff(diff)
        mod_lines_changed = self._get_modules_line_num_changed_from_content(
            diff_lines_changed, content)
        return self._get_modules_from_lines_changed(mod_lines_changed,
                                                    content)

    @staticmethod
    def _get_lines_num_changed_from_diff(diff):
        lines_changed = []
        cursor = 1
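        # In Gerrit's file-diff JSON, each entry of 'content' is a dict keyed
        # by 'ab' (unchanged context), 'a' (removed lines) or 'b' (added
        # lines); only 'ab' and 'b' hunks advance the cursor through the new
        # revision of the file.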
        for content in diff['content']:
            diff_content = content.values()[0]
            if 'ab' in content.keys():
                cursor += len(diff_content)
            if 'b' in content.keys():
                lines_changed.extend(
                    xrange(cursor, len(diff_content) + cursor))
                cursor += len(diff_content)
        return lines_changed

    @staticmethod
    def _get_modules_line_num_changed_from_content(lines, content):
        modules_lines_changed = []
        for num in lines:
            index = num
            if content[index] == '' or content[index].startswith('#'):
                continue
            while not content[index].startswith('mod'):
                index -= 1
            modules_lines_changed.append(index)
        return modules_lines_changed

    def _get_modules_from_lines_changed(self, lines, content):
        modules = []
        pattern = re.compile(r"mod '([a-z]+)',")
        for num in lines:
            match = pattern.match(content[num])
            if match:
                module = match.group(1)
                modules.append((module, self.filepath))
        return modules
@ -1,120 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import os
import requests
from requests.utils import quote

from fuelweb_test.helpers.gerrit import utils


class BaseGerritClient(object):

    def __init__(self,
                 endpoint='https://review.openstack.org',
                 project=None,
                 branch=None,
                 change_id=None,
                 patchset_num=None):
        self.endpoint = endpoint
        self.project = project
        self.branch = branch
        self.change_id = change_id
        self.patchset_num = None if patchset_num is None else str(patchset_num)
        self.query = None

    def get_content(self, filename):
        self.query = self._build_revision_endpoint('files',
                                                   quote(filename, safe=''),
                                                   'content')
        return self._send_get_request()

    def get_diff(self, filename):
        self.query = self._build_revision_endpoint('files',
                                                   quote(filename, safe=''),
                                                   'diff')
        return self._send_get_request()

    def get_related_changes(self):
        self.query = self._build_revision_endpoint('related')
        return self._send_get_request()

    def list_files(self):
        self.query = self._build_revision_endpoint('files')
        return self._send_get_request()

    def _build_change_id(self):
        return '{}~{}~{}'.format(quote(self.project, safe=''),
                                 quote(self.branch, safe=''),
                                 self.change_id)

    def _build_full_change_id(self):
        return os.path.join(self.endpoint, 'changes', self._build_change_id())

    def _build_revision_endpoint(self, *args):
        return os.path.join(self._build_full_change_id(),
                            'revisions',
                            self.patchset_num,
                            *args)

    def _build_reviewer_endpoint(self, *args):
        return os.path.join(self._build_full_change_id(), 'reviewers', *args)

    def _send_get_request(self):
        return requests.get(self.query, verify=False)


class GerritClient(BaseGerritClient):

    def __init__(self, *args, **kwargs):
        super(GerritClient, self).__init__(*args, **kwargs)

    def get_files(self):
        r = self._request_file_list()
        text = r.text
        files = utils.filter_response_text(text)
        return set(filter(lambda x: x != '/COMMIT_MSG',
                          utils.json_to_dict(files).keys()))

    def get_content_as_dict(self, filename):
        content_decoded = self._request_content(filename).text
        content = base64.b64decode(content_decoded)
        return {num: line for num, line in enumerate(content.split('\n'), 1)}

    def get_diff_as_dict(self, filename):
        diff_raw = self._request_diff(filename).text
        diff_filtered = utils.filter_response_text(diff_raw)
        return utils.json_to_dict(diff_filtered)

    def get_dependencies_as_dict(self):
        dependencies_raw = self._request_related_changes().text
        dependencies_filtered = utils.filter_response_text(dependencies_raw)
        return utils.json_to_dict(dependencies_filtered)

    @utils.check_status_code(200)
    def _request_file_list(self):
        return self.list_files()

    @utils.check_status_code(200)
    def _request_content(self, filename):
        return self.get_content(filename)

    @utils.check_status_code(200)
    def _request_diff(self, filename):
        return self.get_diff(filename)

    @utils.check_status_code(200)
    def _request_related_changes(self):
        return self.get_related_changes()
@ -1,106 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.gerrit.gerrit_client import GerritClient
from fuelweb_test.helpers.gerrit import rules


class TemplateMap(object):

    M_PATH = 'deployment/puppet/'

    MAP = [
        {'deployment/Puppetfile':
            rules.get_changed_modules_inside_file},
        {os.path.join(M_PATH, 'osnailyfacter/modular/roles/'):
            rules.osnailyfacter_roles_rule},
        {os.path.join(M_PATH, 'osnailyfacter/modular/'):
            rules.osnailyfacter_modular_rule},
        {os.path.join(M_PATH, 'osnailyfacter/manifests/'):
            rules.osnailyfacter_manifest_rule},
        {os.path.join(M_PATH, 'osnailyfacter/templates/'):
            rules.osnailyfacter_templates_rule},
        {os.path.join(M_PATH, 'osnailyfacter/'):
            rules.no_rule},
        {os.path.join(M_PATH, 'openstack_tasks/Puppetfile'):
            rules.get_changed_modules_inside_file},
        {os.path.join(M_PATH, 'openstack_tasks/lib/facter/'):
            rules.openstack_tasks_libfacter_rule},
        {os.path.join(M_PATH, 'openstack_tasks/manifests/roles/'):
            rules.openstack_tasks_roles_rule},
        {os.path.join(M_PATH, 'openstack_tasks/examples/roles/'):
            rules.openstack_tasks_roles_rule},
        {os.path.join(M_PATH, 'openstack_tasks/manifests/'):
            rules.openstack_manifest_rule},
        {os.path.join(M_PATH, 'openstack_tasks/examples/'):
            rules.openstack_examples_rule},
        {os.path.join(M_PATH, 'openstack_tasks/'):
            rules.no_rule},
        {M_PATH:
            rules.common_rule},
    ]


class FuelLibraryModulesProvider(object):

    def __init__(self, review):
        self.changed_modules = {}
        self.review = review

    @classmethod
    def from_environment_vars(cls, endpoint='https://review.openstack.org'):
        review = GerritClient(endpoint,
                              project=settings.GERRIT_PROJECT,
                              branch=settings.GERRIT_BRANCH,
                              change_id=settings.GERRIT_CHANGE_ID,
                              patchset_num=settings.GERRIT_PATCHSET_NUMBER)
        return cls(review)

    def get_changed_modules(self):
        logger.debug('Review details: branch={0}, id={1}, patchset={2}'
                     .format(self.review.branch,
                             self.review.change_id,
                             self.review.patchset_num))
        files = self.review.get_files()
        for _file in files:
            self._apply_rule(review=self.review, _file=_file)
        return self.changed_modules

    def _add_module(self, module, module_path):
        logger.debug("Add module '{}' to changed modules".format(module))
        if module in self.changed_modules:
            self.changed_modules[module].add(module_path)
        else:
            self.changed_modules[module] = {module_path}

    def _add_modules(self, modules):
        for module, module_path in modules:
            self._add_module(module, module_path)

    def _apply_rule(self, review, _file):
        for path_rule in TemplateMap.MAP:
            tmpl, rule = next(iter(path_rule.items()))
            if _file.startswith(tmpl):
                logger.debug("Using '{0}' rule with '{1}' template "
                             "for '{2}' filename".format(rule.__name__,
                                                         tmpl,
                                                         _file))
                modules = rules.invoke_rule(review, _file, rule)
                if modules:
                    self._add_modules(modules)
                return
@ -1,101 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from fuelweb_test.helpers.gerrit.content_parser import PuppetfileChangesParser


FUEL_LIBRARY_PROJECT_NAME = 'fuel-library'


def invoke_rule(review, path, rule):
    if rule.__name__ == 'get_changed_modules_inside_file':
        return rule(review, path)
    else:
        return rule(path)


def get_changed_modules_inside_file(review, filename):
    parser = PuppetfileChangesParser(review=review, path=filename)
    return [(module, os.path.join(FUEL_LIBRARY_PROJECT_NAME, module_path))
            for module, module_path in parser.get_changed_modules()]


def no_rule(path):
    return []


def common_rule(path):
    return _apply_standard_rule(path=path, mod_depth=2)


def osnailyfacter_roles_rule(path):
    return _apply_subdir_rule(path=path, subdir='roles', mod_depth=5)


def osnailyfacter_modular_rule(path):
    return _apply_standard_rule(path=path)


def osnailyfacter_manifest_rule(path):
    return _apply_standard_rule(path=path)


def osnailyfacter_templates_rule(path):
    return _apply_standard_rule(path=path)


def openstack_tasks_libfacter_rule(path):
    return _apply_standard_rule(path=path, mod_depth=5)


def openstack_tasks_roles_rule(path):
    return _apply_subdir_rule(path=path, subdir='roles', mod_depth=4)


def openstack_manifest_rule(path):
    return _apply_standard_rule(path=path)


def openstack_examples_rule(path):
    return _apply_standard_rule(path=path)


def _join_module_path(split_path, depth):
    return os.path.join(FUEL_LIBRARY_PROJECT_NAME, *split_path[:depth])


def _apply_subdir_rule(path, subdir, mod_depth=4):
    """Return the module name and module path. When the path element at
    mod_depth equals the given subdir, the module name is combined from the
    subdir and the file name (e.g. 'roles/<filename>').
    """
    split_path = path.split('/')
    module = split_path[mod_depth]
    if module == subdir:
        filename, _ = os.path.splitext(os.path.basename(path))
        module = '{}/{}'.format(subdir, filename)
    module_path = _join_module_path(split_path, mod_depth + 2)
    return [(module, module_path)]


def _apply_standard_rule(path, mod_depth=4):
    """Return the module name and module path by applying the following rule:
    if the path element at mod_depth is a directory, use the directory name
    as the module name, otherwise use its file name without the extension.
    """
    split_path = path.split('/')
    module, _ = os.path.splitext(split_path[mod_depth])
    module_path = _join_module_path(split_path, mod_depth + 1)
    return [(module, module_path)]
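

# Illustrative examples (paths hypothetical):
#   common_rule('deployment/puppet/nova/manifests/init.pp')
#     -> [('nova', 'fuel-library/deployment/puppet/nova')]
#   openstack_tasks_roles_rule(
#       'deployment/puppet/openstack_tasks/manifests/roles/compute.pp')
#     -> [('roles/compute', 'fuel-library/deployment/puppet/'
#                           'openstack_tasks/manifests/roles/compute.pp')]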
@ -1,47 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json


def check_status_code(code):
    def outer_wrap(f):
        def inner_wrap(*args, **kwargs):
            r = f(*args, **kwargs)
            if r.status_code != code:
                raise Exception("Unexpected status code. "
                                "Wanted status code: {0}. "
                                "Got status code: {1}"
                                .format(code, r.status_code))
            return r
        return inner_wrap
    return outer_wrap
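

# Usage sketch (mirrors GerritClient elsewhere in this change): decorate any
# callable returning a requests.Response to assert on its status code:
#
#   @check_status_code(200)
#   def _request_content(self, filename):
#       return self.get_content(filename)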


def json_to_dict(data):
    return dict(json.loads(data))


def filter_gerrit_response_separator(data):
    return data.replace(")]}\'", "")


def filter_newlines(data):
    return data.replace('\n', '')


def filter_response_text(data):
    data = filter_gerrit_response_separator(data)
    data = filter_newlines(data)
    return data
@ -1,102 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time


from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true

from fuelweb_test import logger


def check_hiera_resources(remote, file_name=None):
    cmd_sh = 'if [ -d /etc/hiera ] ; then echo "fine" ; fi'
    output = ''.join(remote.execute(cmd_sh)['stdout'])
    assert_true('fine' in output, output)
    if not file_name:
        output_f = ''.join(remote.execute(
            'if [ -r /etc/hiera.yaml ] ; then echo "passed" ; fi')['stdout'])
        assert_true('passed' in output_f, output_f)
    else:
        output_f = ''.join(remote.execute(
            'if [ -r /etc/%s ] ; then echo "passed" ; fi' % file_name)[
            'stdout'])
        assert_true('passed' in output_f,
                    'Cannot find "passed" result in '
                    'output {0}'.format(output_f))


def get_hiera_data(remote, data):
    cmd = 'hiera {}'.format(data)
    res = remote.execute(cmd)['stdout']
    return res


def check_interface_status(remote, iname):
    cmd = 'ethtool {0} | grep "Link detected"'.format(iname)
    result = remote.execute(cmd)
    assert_equal(0, result['exit_code'],
                 "Non-zero exit code stderr {0}, "
                 "stdout {1}".format(result['stderr'], result['stdout']))

    assert_true('yes' in ''.join(result['stdout']),
                "No link detected for interface {0},"
                " actual stdout {1}".format(iname, result['stdout']))


def ping_remote_net(remote, ip):
    cmd = "ping -q -c1 -w10 {0}".format(ip)
    res = remote.execute(cmd)
    logger.debug('Current res from ping is {0}'.format(res))
    assert_equal(
        res['exit_code'], 0,
        "Ping of {0} ended with non-zero exit code. "
        "Stdout is {1}, stderr {2}".format(
            ip, ''.join(res['stdout']), ''.join(res['stderr'])))


def check_logging_task(remote, conf_name):
    cmd_sh = 'if [ -r /rsyslog.d/{0} ] ; then echo "fine" ; fi'.format(
        conf_name)
    output = ''.join(remote.execute(cmd_sh)['stdout'])
    assert_true('fine' in output, output)


def check_tools_task(remote, tool_name):
    cmd_sh = 'pgrep {0}'.format(tool_name)
    output = remote.execute(cmd_sh)
    assert_equal(
        0, output['exit_code'],
        "Command {0} failed with non-zero exit code, current output is:"
        " stdout {1}, stderr: {2}".format(
            cmd_sh, ''.join(output['stdout']), ''.join(output['stderr'])))


def run_check_from_task(remote, path):
    res = remote.execute('{0}'.format(path))
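    # If the first run fails, wait a minute and retry once before failing
    # for good (assumed intent: give the checked service time to settle).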
    try:
        assert_equal(
            0, res['exit_code'],
            "Check {0} finished with non-zero exit code, stderr is {1}, "
            "stdout is {2} on remote".format(
                path, res['stderr'], res['stdout']))
    except AssertionError:
        time.sleep(60)
        logger.info('remote is {0}'.format(remote))
        res = remote.execute('{0}'.format(path))
        assert_equal(
            0, res['exit_code'],
            "Check {0} finished with non-zero exit code, stderr is {1}, "
            "stdout is {2} on remote".format(
                path, res['stderr'], res['stdout']))
@ -1,10 +0,0 @@
#!/bin/sh

echo "Creating test file"
touch /home/test_file

echo "Creating volume mount script on instance"
echo -e '#!/bin/sh\nsudo /usr/sbin/mkfs.ext4 /dev/vdb | logger -t mount_volume.sh\nsudo mount -t ext4 /dev/vdb /mnt | logger -t mount_volume.sh\nmount | grep /mnt | logger -t mount_volume.sh' | tee /home/mount_volume.sh
chmod 777 /home/mount_volume.sh

echo -e "test\ntest" | passwd cirros
@ -1,118 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait

from fuelweb_test.helpers import os_actions


class IronicActions(os_actions.OpenStackActions):
    """IronicActions."""  # TODO documentation

    def __init__(self, controller_ip, user='admin',
                 passwd='admin', tenant='admin'):
        super(IronicActions, self).__init__(controller_ip,
                                            user, passwd,
                                            tenant)

    @staticmethod
    def upload_user_image(nailgun_node, ssh_manager, img_url):
        disk_info = [{"name": "vda", "extra": [], "free_space": 11000,
                      "type": "disk", "id": "vda", "size": 11000,
                      "volumes": [{"mount": "/", "type": "partition",
                                   "file_system": "ext4", "size": 10000}]}]
        cmd = ('. /root/openrc; cd /tmp/; '
               'curl {img_url} | tar -xzp; '
               'glance image-create --name virtual_trusty_ext4 '
               '--disk-format raw --container-format bare '
               '--file trusty-server-cloudimg-amd64.img --visibility public '
               '--property cpu_arch="x86_64" '
               '--property hypervisor_type="baremetal" '
               '--property fuel_disk_info=\'{disk_info}\'').format(
            disk_info=json.dumps(disk_info),
            img_url=img_url)

        ssh_manager.execute_on_remote(nailgun_node['ip'], cmd=cmd)

    def enroll_ironic_node(self, ironic_slave, hw_ip):
        deploy_kernel = self.get_image_by_name('ironic-deploy-linux')
        deploy_ramdisk = self.get_image_by_name('ironic-deploy-initramfs')
        deploy_squashfs = self.get_image_by_name('ironic-deploy-squashfs')

        libvirt_uri = 'qemu+tcp://{server_ip}/system'.format(
            server_ip=hw_ip)
        driver_info = {'libvirt_uri': libvirt_uri,
                       'deploy_kernel': deploy_kernel.id,
                       'deploy_ramdisk': deploy_ramdisk.id,
                       'deploy_squashfs': deploy_squashfs.id}

        mac_address = ironic_slave.interface_by_network_name(
            'ironic').mac_address

        properties = {'memory_mb': ironic_slave.memory,
                      'cpu_arch': ironic_slave.architecture,
                      'local_gb': '50',
                      'cpus': ironic_slave.vcpu}

        ironic_node = self.create_ironic_node(driver='fuel_libvirt',
                                              driver_info=driver_info,
                                              properties=properties)
        self.create_ironic_port(address=mac_address,
                                node_uuid=ironic_node.uuid)

    @staticmethod
    def wait_for_ironic_hypervisors(ironic_conn, ironic_slaves):

        def _wait_for_ironic_hypervisor():
            hypervisors = ironic_conn.get_hypervisors() or []
            ironic_hypervisors = [h for h in hypervisors if
                                  h.hypervisor_type == 'ironic']

            if len(ironic_slaves) == len(ironic_hypervisors):
                for hypervisor in ironic_hypervisors:
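                    # A hypervisor still reporting 0 MB of RAM has not
                    # finished registering its resources yet (assumed
                    # meaning of the check below).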
                    if hypervisor.memory_mb == 0:
                        return False
                return True
            return False

        wait(_wait_for_ironic_hypervisor,
             timeout=60 * 10,
             timeout_msg='Failed to update hypervisor details')

    def wait_for_vms(self, ironic_conn):
        srv_list = ironic_conn.get_servers()
        for srv in srv_list:
            wait(lambda: self.get_instance_detail(srv).status == "ACTIVE",
                 timeout=60 * 30, timeout_msg="Server didn't become active")

    @staticmethod
    def verify_vms_connection(ironic_conn):
        srv_list = ironic_conn.get_servers()
        for srv in srv_list:
            wait(lambda: tcp_ping(srv.networks['baremetal'][0], 22),
                 timeout=60 * 10, timeout_msg='Failed to connect to port 22')

    def delete_servers(self, ironic_conn):
        srv_list = ironic_conn.get_servers()
        for srv in srv_list:
            self.nova.servers.delete(srv)

    def create_ironic_node(self, **kwargs):
        return self.ironic.node.create(**kwargs)

    def create_ironic_port(self, **kwargs):
        return self.ironic.port.create(**kwargs)
@ -1,81 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import select
import socket
import threading

from core.helpers.log_helpers import logwrap


class LogServer(threading.Thread):
    """LogServer."""  # TODO documentation

    @logwrap
    def __init__(self, address="localhost", port=5514):
        super(LogServer, self).__init__()
        self.socket = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM
        )
        self.socket.bind((str(address), port))
        self.rlist = [self.socket]
        self._stop = threading.Event()
        self._handler = self.handler
        self._status = False

    def handler(self, message):
        pass

    def set_status(self, status):
        self._status = status

    def get_status(self):
        return self._status

    def set_handler(self, handler):
        self._handler = handler

    @logwrap
    def stop(self):
        self.socket.close()
        self._stop.set()

    def started(self):
        return not self._stop.is_set()

    def rude_join(self, timeout=None):
        self._stop.set()
        super(LogServer, self).join(timeout)

    def join(self, timeout=None):
        self.rude_join(timeout)

    @logwrap
    def run(self):
        while self.started():
            r, _, _ = select.select(self.rlist, [], [], 1)
            if self.socket in r:
                message, _ = self.socket.recvfrom(2048)
                self._handler(message)


class TriggeredLogServer(LogServer):
    """TriggeredLogServer."""  # TODO documentation

    def __init__(self, address="localhost", port=5514):
        super(TriggeredLogServer, self).__init__(address, port)
        self.set_handler(self.handler)

    def handler(self, message):
        self.set_status(True)
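

# Usage sketch (address and port illustrative): point a node's syslog at the
# server and poll get_status() to detect that at least one UDP message
# arrived:
#
#   server = TriggeredLogServer(address='0.0.0.0', port=5514)
#   server.start()
#   ...  # trigger remote logging
#   assert server.get_status()
#   server.stop()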
@ -1,37 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from warnings import warn

warn(
    'fuelweb_test.helpers.metaclasses.SingletonMeta is deprecated: '
    'the class has moved to devops.helpers.metaclasses.\n'
    'As it was the only metaclass in this file, the file will be removed '
    'soon!',
    DeprecationWarning
)


class SingletonMeta(type):
    """Metaclass for Singleton

    Main goal: no need to implement __new__ in singleton classes
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(
                SingletonMeta, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
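

# Usage sketch (Python 2 syntax, matching this code base): any class using
# SingletonMeta shares a single instance:
#
#   class Config(object):
#       __metaclass__ = SingletonMeta
#
#   assert Config() is Config()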
@ -1,83 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# TODO(apanchenko): This file contains hacks (e.g. configuring of dhcp-server
# or firewall on master node) which are used for testing multiple cluster
# networks feature:
# https://blueprints.launchpad.net/fuel/+spec/multiple-cluster-networks
# This code should be removed from tests as soon as automatic cobbler
# configuring for non-default admin (PXE) networks is implemented in Fuel

from proboscis.asserts import assert_equal

from core.helpers.log_helpers import logwrap

from fuelweb_test.helpers.ssh_manager import SSHManager


@logwrap
def configure_second_admin_dhcp(ip, interface):
    dhcp_conf_file = '/etc/cobbler/dnsmasq.template'
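    # The sed expression below duplicates the first 'interface=...' line of
    # the dnsmasq template, appending 'interface=<new iface>' right after it,
    # then re-syncs cobbler so the new configuration takes effect.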
    cmd = ("sed '0,/^interface.*/s//\\0\\ninterface={0}/' -i {1};"
           "cobbler sync").format(interface,
                                  dhcp_conf_file)
    result = SSHManager().execute(
        ip=ip,
        cmd=cmd
    )
    assert_equal(result['exit_code'], 0, ('Failed to add second admin '
                 'network to DHCP server: {0}').format(result))


@logwrap
def configure_second_admin_firewall(ip, network, netmask, interface,
                                    master_ip):
    # Allow input/forwarding for nodes from the second admin network and
    # enable source NAT for UDP (tftp) and HTTP (proxy server) traffic
    # on master node
    rules = [
        ('-I INPUT -i {0} -m comment --comment "input from admin network" '
         '-j ACCEPT').format(interface),
        ('-t nat -I POSTROUTING -s {0}/{1} -o e+ -m comment --comment '
         '"004 forward_admin_net2" -j MASQUERADE').
        format(network, netmask),
        ("-t nat -I POSTROUTING -o {0} -d {1}/{2} -p udp -m addrtype "
         "--src-type LOCAL -j SNAT --to-source {3}").format(interface,
                                                            network, netmask,
                                                            master_ip),
        ("-t nat -I POSTROUTING -d {0}/{1} -p tcp --dport 8888 -j SNAT "
         "--to-source {2}").format(network, netmask, master_ip),
        ('-I FORWARD -i {0} -m comment --comment '
         '"forward custom admin net" -j ACCEPT').format(interface)
    ]

    for rule in rules:
        cmd = 'iptables {0}'.format(rule)
        result = SSHManager().execute(
            ip=ip,
            cmd=cmd
        )
        assert_equal(result['exit_code'], 0,
                     ('Failed to add firewall rule for admin net on'
                      ' master node: {0}, {1}').format(rule, result))

    # Save new firewall configuration
    cmd = 'service iptables save'
    result = SSHManager().execute(
        ip=ip,
        cmd=cmd
    )
    assert_equal(result['exit_code'], 0,
                 ('Failed to save firewall configuration on master node:'
                  ' {0}').format(result))
@ -1,197 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os

from devops.helpers.helpers import wait
from proboscis import asserts
import requests
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
from six.moves import urllib
# pylint: enable=import-error

from fuelweb_test import logger
from fuelweb_test.helpers.decorators import token


class NessusClient(object):
    def __init__(self, hostname, port, username, password, ssl_verify=False):
        self.nessus_auth_token = None
        self.nessus_base_url = 'https://{0}:{1}'.format(hostname, port)
        self.nessus_username = username
        self.nessus_password = password
        self.ssl_verify = ssl_verify
        self.login()

    @staticmethod
    def log_request(url, method, request_headers, request_body,
                    status_code, response_headers, response_body):
        log_fmt = ("Request {method} {url}\n"
                   "Request - Headers: {request_headers}\n"
                   "          Body: {request_body}\n"
                   "Response status code: {status_code}\n"
                   "Response - Headers: {response_headers}\n"
                   "           Body: {response_body}\n")

        logger.info(log_fmt.format(url=url,
                                   method=method,
                                   request_headers=request_headers,
                                   request_body=request_body,
                                   status_code=status_code,
                                   response_headers=response_headers,
                                   response_body=response_body))

    @token
    def request(self, method, url, body=None, **kwargs):
        headers = {'X-Cookie': 'token={0}'.format(self.nessus_auth_token),
                   'Content-Type': 'application/json'}
        url = urllib.parse.urljoin(self.nessus_base_url, url)

        response = requests.request(
            method, url, data=body, headers=headers,
            verify=self.ssl_verify, **kwargs)

        self.log_request(url, method, headers, body,
                         response.status_code, response.headers,
                         response.content[:1024])

        asserts.assert_equal(
            response.status_code, 200,
            "Request failed: {0}\n{1}".format(response.status_code,
                                              response.content))

        return response

    def get(self, url, body=None):
        return self.request("GET", url, json.dumps(body)).json()

    def get_raw(self, url, body=None):
        return self.request("GET", url, json.dumps(body)).content

    def post(self, url, body=None):
        return self.request("POST", url, json.dumps(body)).json()

    def login(self):
        creds = {'username': self.nessus_username,
                 'password': self.nessus_password}

        self.nessus_auth_token = self.post('/session', creds)['token']

    def add_policy(self, policy_def):
        return self.post('/policies', policy_def)

    def list_policy_templates(self):
        return self.get('/editor/policy/templates')['templates']

    def add_cpa_policy(self, name, description, pid):
        policy_def = \
            {
                "uuid": pid,
                "settings": {
                    "name": name,
                    "description": description
                },
                "credentials": {
                    "add": {
                        "Host": {
                            "SSH": [
                                {
                                    "auth_method": "password",
                                    "username": "root",
                                    "password": "r00tme",
                                    "elevate_privileges_with": "Nothing"
                                }
                            ]
                        }
                    }
                }
            }

        return self.add_policy(policy_def)['policy_id']

    def add_wat_policy(self, name, desc, pid):
        policy_def = \
            {
                "uuid": pid,
                "settings": {
                    "name": name,
                    "description": desc,
                    "discovery_mode": "Port scan (all ports)",
                    "assessment_mode": "Scan for all web vulnerabilities "
                                       "(complex)"
                }
            }

        return self.add_policy(policy_def)['policy_id']

    def create_scan(self, name, description, target_ip,
                    policy_id, policy_template_id):
        scan_def = \
            {
                "uuid": policy_template_id,
                "settings": {
                    "name": name,
                    "description": description,
                    "scanner_id": "1",
                    "policy_id": policy_id,
                    "text_targets": target_ip,
                    "launch": "ONETIME",
                    "enabled": False,
                    "launch_now": False
                }
            }

        return self.post('/scans', scan_def)['scan']['id']

    def launch_scan(self, scan_id):
        return self.post('/scans/{0}/launch'.format(scan_id))['scan_uuid']

    def get_scan_history(self, scan_id, history_id):
        return self.get('/scans/{0}'.format(scan_id),
                        {'history_id': history_id})['info']

    def get_scan_status(self, scan_id, history_id):
        return self.get_scan_history(scan_id, history_id)['status']

    def list_scan_history_ids(self, scan_id):
        data = self.get('/scans/{0}'.format(scan_id))
        return dict((h['uuid'], h['history_id']) for h in data['history'])

    def check_scan_export_status(self, scan_id, file_id):
        return self.get('/scans/{0}/export/{1}/status'
                        .format(scan_id, file_id))['status'] == 'ready'

    def export_scan(self, scan_id, history_id, save_format):
        export_def = {'history_id': history_id,
                      'format': save_format,
                      'chapters': 'vuln_hosts_summary'}
        file_id = self.post('/scans/{0}/export'.format(scan_id),
                            body=export_def)['file']
        wait(lambda: self.check_scan_export_status(scan_id, file_id),
             interval=10, timeout=600,
             timeout_msg='Nessus export scan status != "ready" for '
                         ' scan_id={} file_id={}'.format(scan_id, file_id))
        return file_id
|
||||
|
||||
def download_scan_result(
|
||||
self, scan_id, file_id, scan_type, save_format, file_path):
|
||||
report = self.get_raw('/scans/{0}/export/{1}/download'
|
||||
.format(scan_id, file_id))
|
||||
|
||||
filename = 'nessus_report_scan_{0}_{1}.{2}'\
|
||||
.format(scan_id, scan_type, save_format)
|
||||
file_with_path = os.path.join(file_path, filename)
|
||||
logger.info("Saving Nessus scan report: {0}".format(file_with_path))
|
||||
with open(file_with_path, 'w') as report_file:
|
||||
report_file.write(report)
|
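
    # Illustrative sketch, not part of the original file: a full scan
    # round-trip built only from the methods above. The target IP and
    # output directory are placeholder assumptions; in a real run you
    # would also poll get_scan_status() until the scan completes before
    # exporting the report.
    def _example_scan_workflow(self):
        pid = self.list_policy_templates()[0]['uuid']
        policy_id = self.add_cpa_policy('cpa_policy', 'example', pid)
        scan_id = self.create_scan('cpa_scan', 'example', '10.109.0.2',
                                   policy_id, pid)
        scan_uuid = self.launch_scan(scan_id)
        history_id = self.list_scan_history_ids(scan_id)[scan_uuid]
        file_id = self.export_scan(scan_id, history_id, 'html')
        self.download_scan_result(scan_id, file_id, 'cpa', 'html', '/tmp')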
@ -1,804 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

from devops.error import TimeoutError
from devops.helpers import helpers
from proboscis import asserts

from fuelweb_test import logger
from fuelweb_test.helpers import common


class OpenStackActions(common.Common):
    """OpenStackActions."""  # TODO documentation

    def __init__(self, controller_ip, user='admin',
                 passwd='admin', tenant='admin'):
        super(OpenStackActions, self).__init__(controller_ip,
                                               user, passwd,
                                               tenant)

    def _get_cirros_image(self):
        for image in self.glance.images.list():
            if image.name.startswith("TestVM"):
                return image

    def get_image_by_name(self, name):
        for image in self.glance.images.list():
            if image.name.startswith(name):
                return image

    def get_hypervisors(self):
        hypervisors = self.nova.hypervisors.list()
        if hypervisors:
            return hypervisors

    def get_hypervisor_vms_count(self, hypervisor):
        hypervisor = self.nova.hypervisors.get(hypervisor.id)
        return getattr(hypervisor, "running_vms")

    def get_hypervisor_hostname(self, hypervisor):
        hypervisor = self.nova.hypervisors.get(hypervisor.id)
        return getattr(hypervisor, "hypervisor_hostname")

    def get_srv_hypervisor_name(self, srv):
        srv = self.nova.servers.get(srv.id)
        return getattr(srv, "OS-EXT-SRV-ATTR:hypervisor_hostname")

    def get_servers(self):
        servers = self.nova.servers.list()
        if servers:
            return servers

    def get_server_by_name(self, name):
        servers = self.get_servers()
        for srv in servers:
            if srv.name == name:
                return srv
        logger.warning("Instance with name {} was not found".format(name))
        return None

    def get_flavor_by_name(self, name):
        flavor_list = self.nova.flavors.list()
        for flavor in flavor_list:
            if flavor.name == name:
                return flavor
        logger.warning("Flavor with name {} was not found".format(name))
        return None

    def wait_for_server_is_active(self, server, timeout):
        """Wait until the server is in the active state

        :param server: nova server object
        :param timeout: int, timeout in sec
        """

        helpers.wait(
            lambda: self.get_instance_detail(server).status == "ACTIVE",
            timeout=timeout,
            timeout_msg="Create server {!r} failed by timeout. Please, take"
                        " a look at OpenStack logs".format(server.id))
        srv = self.get_instance_detail(server.id)
        logger.debug('The Instance {!r} booted successfully on {!r}'
                     .format(srv.id,
                             srv.to_dict()['OS-EXT-SRV-ATTR:host']))
        return srv

    def create_server(
            self,
            name=None,
            security_groups=None,
            flavor_id=None,
            net_id=None,
            timeout=100,
            image=None,
            **kwargs
    ):
        """Create a simple server, like in OSTF.

        :param name: server name, if None -> test-serv + random suffix
        :param security_groups: list, if None -> ssh + icmp v4 & icmp v6
        :param flavor_id: create a new flavor if None
        :param net_id: network id, could be omitted
        :param timeout: int=100
        :param image: TestVM if None.
        :return: Server, in started state
        """

        if not name:
            name = "test-serv" + str(random.randint(1, 0x7fffffff))
        if not security_groups:
            security_groups = [self.create_sec_group_for_ssh()]
        if not flavor_id:
            flavor = self.create_flavor('test_flavor_{}'.
                                        format(random.randint(10, 10000)),
                                        64, 1, 0)
            flavor_id = flavor.id
        if image is None:
            image = self._get_cirros_image().id

        nics = [{'net-id': net_id}] if net_id else None

        srv = self.nova.servers.create(
            name=name,
            image=image,
            flavor=flavor_id,
            security_groups=[sec_group.name for sec_group in security_groups],
            nics=nics,
            **kwargs)
        logger.debug('Start instance {!r} ...'.format(srv.id))
        self.wait_for_server_is_active(srv, timeout)
        return self.get_instance_detail(srv)
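
    # Illustrative sketch, not part of the original class: boot a server
    # with all defaults (auto-generated name, ssh/icmp security group, new
    # flavor, TestVM image) and report the compute host it landed on.
    def _example_boot_and_locate(self):
        srv = self.create_server(timeout=200)
        return self.get_srv_host_name(srv)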

    def create_server_for_migration(self, neutron=True, scenario='',
                                    timeout=100, filename=None, key_name=None,
                                    label=None, flavor_id=None, **kwargs):
        name = "test-serv" + str(random.randint(1, 0x7fffffff))
        security_group = {}
        try:
            if scenario:
                with open(scenario, "r+") as f:
                    scenario = f.read()
        except Exception as exc:
            logger.info("Error opening file: {!s}".format(exc))
            raise
        image_id = self._get_cirros_image().id
        security_group[self.keystone_access.tenant_id] =\
            self.create_sec_group_for_ssh()
        security_groups = [security_group[self.keystone_access.tenant_id].name]

        if neutron:
            net_label = label if label else 'net04'
            network = [net.id for net in self.nova.networks.list()
                       if net.label == net_label]

            kwargs.update({'nics': [{'net-id': network[0]}],
                           'security_groups': security_groups})
        else:
            kwargs.update({'security_groups': security_groups})

        if not flavor_id:
            flavor = self.create_flavor('test_flavor_{}'.
                                        format(random.randint(10, 10000)),
                                        64, 1, 0)
            flavor_id = flavor.id

        srv = self.nova.servers.create(name=name,
                                       image=image_id,
                                       flavor=flavor_id,
                                       userdata=scenario,
                                       files=filename,
                                       key_name=key_name,
                                       **kwargs)
        self.wait_for_server_is_active(srv, timeout)
        return self.get_instance_detail(srv)

    def create_server_from_volume(self, name=None, security_groups=None,
                                  flavor_id=None, net_id=None, timeout=100,
                                  image=None, **kwargs):
        bootable_volume = self.create_volume(
            image_id=image or self._get_cirros_image().id)
        kwargs['block_device_mapping'] = {'vda': bootable_volume.id + ':::0'}
        srv = self.create_server(name=name, security_groups=security_groups,
                                 flavor_id=flavor_id, net_id=net_id,
                                 timeout=timeout, image=image, **kwargs)
        return srv

    def is_srv_deleted(self, srv):
        for server in self.nova.servers.list():
            if srv.id == server.id:
                logger.info("Server found in server list")
                return False
        logger.info("Server was successfully deleted")
        return True

    def verify_srv_deleted(self, srv, timeout=150):
        helpers.wait(lambda: self.is_srv_deleted(srv),
                     interval=2, timeout=timeout,
                     timeout_msg="Server wasn't deleted in "
                                 "{0} seconds".format(timeout))

    def assign_floating_ip(self, srv, use_neutron=False):
        if use_neutron:
            # Find external net id for tenant
            nets = self.neutron.list_networks()['networks']
            err_msg = "Active external network not found in nets:{}"
            ext_net_ids = [
                net['id'] for net in nets
                if net['router:external'] and net['status'] == "ACTIVE"]
            asserts.assert_true(ext_net_ids, err_msg.format(nets))
            net_id = ext_net_ids[0]
            # Find instance port
            ports = self.neutron.list_ports(device_id=srv.id)['ports']
            err_msg = "No active ports found for instance:{}"
            asserts.assert_true(ports, err_msg.format(srv.id))
            port = ports[0]
            # Create floating IP
            body = {'floatingip': {'floating_network_id': net_id,
                                   'port_id': port['id']}}
            flip = self.neutron.create_floatingip(body)
            # Wait for the port to reach the active state
            port_id = flip['floatingip']['port_id']
            helpers.wait(lambda: self.neutron.show_port(
                port_id)['port']['status'] == "ACTIVE")
            return flip['floatingip']
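        # NOTE (added for clarity): the Neutron branch above returns a
        # floating IP dict from the Neutron API, while the nova-network
        # fallback below returns a novaclient FloatingIP object, so
        # callers must handle both shapes.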

        fl_ips_pool = self.nova.floating_ip_pools.list()
        if fl_ips_pool:
            floating_ip = self.nova.floating_ips.create(
                pool=fl_ips_pool[0].name)
            self.nova.servers.add_floating_ip(srv, floating_ip)
            return floating_ip

    def create_sec_group_for_ssh(self):
        name = "test-sg" + str(random.randint(1, 0x7fffffff))
        secgroup = self.nova.security_groups.create(
            name, "descr")

        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping6
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '::/0',
            }
        ]

        for ruleset in rulesets:
            self.nova.security_group_rules.create(
                secgroup.id, **ruleset)
        return secgroup

    def get_srv_host_name(self, srv):
        # Get the name of the host the server is currently running on
        srv = self.nova.servers.get(srv.id)
        return getattr(srv, "OS-EXT-SRV-ATTR:host")

    def get_srv_instance_name(self, srv):
        # Get the hypervisor instance name of the server
        server = self.nova.servers.get(srv.id)
        return getattr(server, "OS-EXT-SRV-ATTR:instance_name")

    def migrate_server(self, server, host, timeout):
        curr_host = self.get_srv_host_name(server)
        logger.debug("Current compute host is {0}".format(curr_host))
        logger.debug("Start live migration of instance")
        server.live_migrate(host._info['host_name'])
        try:
            helpers.wait(
                lambda: self.get_instance_detail(server).status == "ACTIVE",
                timeout=timeout)
        except TimeoutError:
            logger.debug("Instance did not become active after migration")
            asserts.assert_true(
                self.get_instance_detail(server).status == "ACTIVE",
                "Instance did not become ACTIVE after live migration, "
                "current status is {0}".format(
                    self.get_instance_detail(server).status))

        asserts.assert_true(
            self.get_srv_host_name(
                self.get_instance_detail(server)) != curr_host,
            "Server did not migrate")
        server = self.get_instance_detail(server.id)
        return server

    def create_volume(self, size=1, image_id=None, **kwargs):
        volume = self.cinder.volumes.create(size=size, imageRef=image_id,
                                            **kwargs)
        helpers.wait(
            lambda: self.cinder.volumes.get(volume.id).status == "available",
            timeout=100)
        logger.info("Created volume: '{0}', parent image: '{1}'"
                    .format(volume.id, image_id))
        return self.cinder.volumes.get(volume.id)

    def delete_volume(self, volume):
        return self.cinder.volumes.delete(volume)

    def delete_volume_and_wait(self, volume, timeout=60):
        self.delete_volume(volume)
        try:
            helpers.wait(
                lambda: volume not in self.cinder.volumes.list(),
                timeout=timeout)
        except TimeoutError:
            asserts.assert_false(
                volume in self.cinder.volumes.list(),
                "Volume wasn't deleted in {0} sec".format(timeout))

    def attach_volume(self, volume, server, mount='/dev/vdb'):
        self.cinder.volumes.attach(volume, server.id, mount)
        logger.debug('The volume {!r} was attached to instance {!r}'
                     .format(volume.id, server.id))
        return self.cinder.volumes.get(volume.id)

    def extend_volume(self, volume, newsize):
        self.cinder.volumes.extend(volume, newsize)
        return self.cinder.volumes.get(volume.id)

    def get_volume_status(self, volume):
        vol = self.cinder.volumes.get(volume.id)
        return vol._info['status']

    def get_hosts_for_migr(self, srv_host_name):
        # Determine which host is available for live migration
        return [
            host for host in self.nova.hosts.list()
            if host.host_name != srv_host_name and
            host._info['service'] == 'compute']

    def get_tenant(self, tenant_name):
        tenant_list = self.keystone.tenants.list()
        for ten in tenant_list:
            if ten.name == tenant_name:
                return ten
        return None

    def get_user(self, username):
        user_list = self.keystone.users.list()
        for user in user_list:
            if user.name == username:
                return user
        return None

    def create_tenant(self, tenant_name):
        tenant = self.get_tenant(tenant_name)
        if tenant:
            return tenant
        return self.keystone.tenants.create(enabled=True,
                                            tenant_name=tenant_name)

    def update_tenant(self, tenant_id, tenant_name=None, description=None,
                      enabled=None, **kwargs):
        self.keystone.tenants.update(tenant_id, tenant_name, description,
                                     enabled)
        return self.keystone.tenants.get(tenant_id)

    def delete_tenant(self, tenant):
        return self.keystone.tenants.delete(tenant)

    def create_user(self, username, passw, tenant):
        user = self.get_user(username)
        if user:
            return user
        return self.keystone.users.create(
            name=username,
            password=passw,
            tenant_id=tenant.id)

    def update_user_enabled(self, user, enabled=True):
        self.keystone.users.update_enabled(user, enabled)
        return self.keystone.users.get(user)

    def delete_user(self, user):
        return self.keystone.users.delete(user)

    def create_user_and_tenant(self, tenant_name, username, password):
        tenant = self.create_tenant(tenant_name)
        return self.create_user(username, password, tenant)

    def get_network(self, network_name):
        net_list = self.neutron.list_networks()
        for net in net_list['networks']:
            if net['name'] == network_name:
                return net
        return None

    def get_network_by_type(self, net_type):
        """Get the first network by type: external or internal

        :param net_type: str, value is external or internal
        :return: dict, network data
        """
        if net_type == 'external':
            flag = True
        elif net_type == 'internal':
            flag = False
        else:
            raise Exception('Type should be "external" or "internal".'
                            ' Your type is {!r}!'.format(net_type))
        net_list = self.neutron.list_networks()
        for net in net_list['networks']:
            if net['router:external'] == flag:
                return net
        return None

    def get_subnet(self, subnet_name):
        subnet_list = self.neutron.list_subnets()
        for subnet in subnet_list['subnets']:
            if subnet['name'] == subnet_name:
                return subnet
        return None

    def nova_get_net(self, net_name):
        for net in self.nova.networks.list():
            if net.human_id == net_name:
                return net
        return None

    def get_router(self, network):
        router_list = self.neutron.list_routers()
        for router in router_list['routers']:
            network_id = router['external_gateway_info'].get('network_id')
            if network_id == network['id']:
                return router
        return None

    def create_image(self, **kwargs):
        image = self.glance.images.create(**kwargs)
        logger.info("Created image: '{0}'".format(image.id))
        logger.info("Image status: '{0}'".format(image.status))
        return image

    def get_image_list(self):
        return self.glance.images.list()

    def update_image(self, image, **kwargs):
        self.glance.images.update(image, **kwargs)

    def get_image(self, image_name):
        image_list = self.get_image_list()
        for img in image_list:
            if img.name == image_name:
                return img
        return None

    def get_image_data(self, image_name):
        return self.glance.images.data(image_name)

    def get_security_group_list(self):
        return self.nova.security_groups.list()

    def get_security_group(self, sg_name):
        sg_list = self.get_security_group_list()
        for sg in sg_list:
            if sg.name == sg_name:
                return sg
        return None

    def get_nova_service_list(self):
        return self.nova.services.list()

    def get_nova_service_status(self, service):
        services = self.get_nova_service_list()
        for s in services:
            if s.host == service.host and s.binary == service.binary:
                return s.status

    def enable_nova_service(self, service, timeout=30):
        self.nova.services.enable(service.host, service.binary)
        helpers.wait(
            lambda: self.get_nova_service_status(service) == "enabled",
            timeout=timeout,
            timeout_msg="Service {0} on {1} does not reach enabled "
                        "state, current state "
                        "is {2}".format(service.binary, service.host,
                                        service.status))

    def disable_nova_service(self, service, timeout=30):
        self.nova.services.disable(service.host, service.binary)
        helpers.wait(
            lambda: self.get_nova_service_status(service) == "disabled",
            timeout=timeout,
            timeout_msg="Service {0} on {1} does not reach disabled "
                        "state, current state "
                        "is {2}".format(service.binary, service.host,
                                        service.status))

    def delete_nova_service(self, service_id):
        return self.nova.services.delete(service_id)

    def get_nova_network_list(self):
        return self.nova.networks.list()

    def get_neutron_router(self):
        return self.neutron.list_routers()

    def get_routers_ids(self):
        result = self.get_neutron_router()
        ids = [i['id'] for i in result['routers']]
        return ids

    def get_l3_for_router(self, router_id):
        return self.neutron.list_l3_agent_hosting_routers(router_id)

    def get_l3_agent_ids(self, router_id):
        result = self.get_l3_for_router(router_id)
        ids = [i['id'] for i in result['agents']]
        return ids

    def get_l3_agent_hosts(self, router_id):
        result = self.get_l3_for_router(router_id)
        hosts = [i['host'] for i in result['agents']]
        return hosts

    def remove_l3_from_router(self, l3_agent, router_id):
        return self.neutron.remove_router_from_l3_agent(l3_agent, router_id)

    def add_l3_to_router(self, l3_agent, router_id):
        return self.neutron.add_router_to_l3_agent(
            l3_agent, {"router_id": router_id})

    def list_agents(self):
        return self.neutron.list_agents()

    def get_available_l3_agents_ids(self, hosted_l3_agent_id):
        result = self.list_agents()
        ids = [i['id'] for i in result['agents']
               if i['binary'] == 'neutron-l3-agent']
        ids.remove(hosted_l3_agent_id)
        return ids

    def list_dhcp_agents_for_network(self, net_id):
        return self.neutron.list_dhcp_agent_hosting_networks(net_id)

    def get_node_with_dhcp_for_network(self, net_id):
        result = self.list_dhcp_agents_for_network(net_id)
        nodes = [i['host'] for i in result['agents']]
        return nodes

    def get_neutron_dhcp_ports(self, net_id):
        ports = self.neutron.list_ports()['ports']
        network_ports = [x for x in ports
                         if x['device_owner'] == 'network:dhcp' and
                         x['network_id'] == net_id]
        return network_ports

    def create_pool(self, pool_name):
        sub_net = self.neutron.list_subnets()
        body = {"pool": {"name": pool_name,
                         "lb_method": "ROUND_ROBIN",
                         "protocol": "HTTP",
                         "subnet_id": sub_net['subnets'][0]['id']}}
        return self.neutron.create_pool(body=body)

    def get_vips(self):
        return self.neutron.list_vips()

    def create_vip(self, name, protocol, port, pool):
        sub_net = self.neutron.list_subnets()
        logger.debug("subnet list is {0}".format(sub_net))
        logger.debug("pool is {0}".format(pool))
        body = {"vip": {
            "name": name,
            "protocol": protocol,
            "protocol_port": port,
            "subnet_id": sub_net['subnets'][0]['id'],
            "pool_id": pool['pool']['id']
        }}
        return self.neutron.create_vip(body=body)

    def delete_vip(self, vip):
        return self.neutron.delete_vip(vip)

    def get_vip(self, vip):
        return self.neutron.show_vip(vip)

    @staticmethod
    def get_nova_instance_ip(srv, net_name='novanetwork', addrtype='fixed'):
        for network_label, address_list in srv.addresses.items():
            if network_label != net_name:
                continue
            for addr in address_list:
                if addr['OS-EXT-IPS:type'] == addrtype:
                    return addr['addr']
        raise Exception("Instance {0} doesn't have {1} address for network "
                        "{2}, available addresses: {3}".format(srv.id,
                                                               addrtype,
                                                               net_name,
                                                               srv.addresses))

    def get_instance_mac(self, remote, srv):
        res = ''.join(remote.execute(
            'virsh dumpxml {0} | grep "mac address="'
            .format(self.get_srv_instance_name(srv)))['stdout'])
        return res.split('\'')[1]

    def create_network(self, network_name, **kwargs):
        body = {'network': {'name': network_name}}
        if kwargs:
            body['network'].update(kwargs)
        return self.neutron.create_network(body)

    def create_subnet(
            self, subnet_name, network_id, cidr, ip_version=4, **kwargs):
        body = {"subnet": {"name": subnet_name, "network_id": network_id,
                           "ip_version": ip_version, "cidr": cidr}}
        if kwargs:
            body['subnet'].update(kwargs)
        subnet = self.neutron.create_subnet(body)
        return subnet['subnet']

    def get_router_by_name(self, router_name):
        router_list = self.neutron.list_routers()
        for router in router_list['routers']:
            if router['name'] == router_name:
                return router
        return None

    def add_router_interface(self, router_id, subnet_id, port_id=None):
        body = {"router_id": router_id, "subnet_id": subnet_id}
        if port_id:
            body["port_id"] = port_id
        self.neutron.add_interface_router(router_id, body)
        return None

    def create_router(self, name, tenant):
        """Create a router in Neutron.

        :param name: str, router name
        :param tenant: tenant
        :return: router object
        """
        external_network = None
        for network in self.neutron.list_networks()["networks"]:
            if network.get("router:external"):
                external_network = network

        if not external_network:
            raise RuntimeError('Cannot find the external network.')

        gw_info = {
            "network_id": external_network["id"],
            "enable_snat": True
        }

        router_info = {
            "router": {
                "name": name,
                "external_gateway_info": gw_info,
                "tenant_id": tenant.id
            }
        }
        return self.neutron.create_router(router_info)['router']

    def get_keystone_endpoints(self):
        endpoints = self.keystone.endpoints.list()
        return endpoints

    def boot_parameterized_vms(self, attach_volume=False,
                               boot_vm_from_volume=False,
                               enable_floating_ips=False,
                               on_each_compute=False,
                               **kwargs):
        """Boot parameterized VMs

        :param attach_volume: bool, flag for attaching a volume to booted VM
        :param boot_vm_from_volume: bool, flag for booting the VM from volume
        :param enable_floating_ips: bool, flag for assigning a floating ip to
            the booted VM
        :param on_each_compute: bool, boot VMs on each compute or only one
        :param kwargs: dict, it includes the same keys as for
            nova.servers.create
        :return: list, list of vms data dicts
        """
        vms_data = []
        if on_each_compute:
            for hypervisor in self.get_hypervisors():
                kwargs['availability_zone'] = '{}:{}'.format(
                    'nova',
                    hypervisor.hypervisor_hostname)
                vms_data.extend(
                    self.boot_parameterized_vms(
                        attach_volume=attach_volume,
                        boot_vm_from_volume=boot_vm_from_volume,
                        enable_floating_ips=enable_floating_ips,
                        on_each_compute=False,
                        **kwargs))
            return vms_data

        fixed_network_id = self.get_network_by_type('internal')['id']
        if boot_vm_from_volume:
            server = self.create_server_from_volume(net_id=fixed_network_id,
                                                    **kwargs)
        else:
            server = self.create_server(net_id=fixed_network_id,
                                        **kwargs)
        vm_data = {'server': server.to_dict()}

        if attach_volume:
            volume = self.create_volume()
            self.attach_volume(volume, server)
            volume = self.cinder.volumes.get(volume.id)
            # cinderclient in Kilo does not provide a "to_dict" method for
            # the volume object
            vm_data['attached_volume'] = volume._info

        if enable_floating_ips:
            self.assign_floating_ip(server)

        server = self.get_instance_detail(server)
        vm_data['server'] = server.to_dict()
        vms_data.append(vm_data)
        return vms_data
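
    # Illustrative sketch, not part of the original class: one VM per
    # hypervisor, each booted from a volume with a floating IP assigned.
    def _example_boot_everywhere(self):
        return self.boot_parameterized_vms(attach_volume=True,
                                           boot_vm_from_volume=True,
                                           enable_floating_ips=True,
                                           on_each_compute=True)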

    def create_network_resources_for_ipv6_test(self, tenant):
        """Create network resources for the IPv6 test: two dualstack
        networks (each with an IPv4 subnet and an IPv6 subnet in SLAAC
        mode, with non-intersecting address spaces) plus a virtual router
        with the gateway set.

        :param tenant: obj, object of keystone tenant
        """
        net1 = self.create_network(
            network_name='net1',
            tenant_id=tenant.id)['network']
        net2 = self.create_network(
            network_name='net2',
            tenant_id=tenant.id)['network']

        subnet_1_v4 = self.create_subnet(
            subnet_name='subnet_1_v4',
            network_id=net1['id'],
            cidr='192.168.100.0/24',
            ip_version=4)

        subnet_1_v6 = self.create_subnet(
            subnet_name='subnet_1_v6',
            network_id=net1['id'],
            ip_version=6,
            cidr="2001:db8:100::/64",
            gateway_ip="2001:db8:100::1",
            ipv6_ra_mode="slaac",
            ipv6_address_mode="slaac")

        subnet_2_v4 = self.create_subnet(
            subnet_name='subnet_2_v4',
            network_id=net2['id'],
            cidr='192.168.200.0/24',
            ip_version=4)

        subnet_2_v6 = self.create_subnet(
            subnet_name='subnet_2_v6',
            network_id=net2['id'],
            ip_version=6,
            cidr="2001:db8:200::/64",
            gateway_ip="2001:db8:200::1",
            ipv6_ra_mode="slaac",
            ipv6_address_mode="slaac")

        router = self.create_router('test_router', tenant=tenant)

        self.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_1_v4["id"])

        self.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_1_v6["id"])

        self.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_2_v4["id"])

        self.add_router_interface(
            router_id=router["id"],
            subnet_id=subnet_2_v6["id"])
        return net1, net2
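
    # Illustrative sketch, not part of the original class: build the
    # dualstack topology for a tenant, then boot a test server on the
    # first of the two networks.
    def _example_ipv6_env(self, tenant):
        net1, _net2 = self.create_network_resources_for_ipv6_test(tenant)
        return self.create_server(net_id=net1['id'])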
@ -1,94 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from fuelweb_test import logger


def ovs_get_data(remote, table, columns=None):
    """Get data from a specified OpenVSwitch table

    :param SSHClient remote: fuel-devops.helpers.helpers object
    :param str table: ovs table name (see `ovsdb-client list-tables`)
    :param list columns:
        list of strings to get specified columns. if None - all columns
        will be requested.
    :return dict: data from JSON object
    """
    if columns:
        col = '--columns=' + ','.join(columns)
    else:
        col = ''
    cmd = ('ovs-vsctl --oneline --format=json {columns} list {table}'
           .format(columns=col, table=table))
    res = remote.check_call(cmd).stdout_json
    logger.debug("OVS output of the command '{0}': {1}".format(cmd, res))
    return res


def ovs_decode_columns(ovs_data):
    """Decode columns from OVS data format to a python dict

    :param str ovs_data: data from JSON object
    :return list: list of decoded dicts
    """
    data = ovs_data['data']
    headings = ovs_data['headings']
    res = []
    for fields in data:
        res_fields = {}
        for i, field in enumerate(fields):
            if isinstance(field, list):
                if field[0] == 'map':
                    d = {}
                    for f in field[1]:
                        d[f[0]] = f[1]
                    res_fields[headings[i]] = d
                elif field[0] == 'uuid':
                    res_fields[headings[i]] = {'uuid': field[1]}
                else:
                    res_fields[headings[i]] = field
            else:
                res_fields[headings[i]] = field
        res.append(res_fields)
    return res


def ovs_get_tag_by_port(remote, port):
    """Get the tag used for OVS interface by Neutron port ID

    :param SSHClient remote: fuel-devops.helpers.helpers object
    :param str port: Neutron port ID
    :return str: tag number
    """
    interfaces_raw = ovs_get_data(remote,
                                  table='Interface',
                                  columns=['external_ids', 'name'])
    interfaces = ovs_decode_columns(interfaces_raw)

    ports_ifaces = {x['external_ids']['iface-id']: x['name']
                    for x in interfaces if 'iface-id' in x['external_ids']}
    logger.debug("OVS interfaces: {0}".format(ports_ifaces))
    if port not in ports_ifaces:
        raise ValueError("Neutron port {0} not found in OVS interfaces."
                         .format(port))

    iface_id = ports_ifaces[port]

    ovs_port_raw = ovs_get_data(remote,
                                table='Port {0}'.format(iface_id),
                                columns=['tag'])
    ovs_port = ovs_decode_columns(ovs_port_raw)
    logger.debug("OVS tag for port {0}: {1}".format(iface_id, ovs_port))
    ovs_tag = ovs_port[0]['tag']

    return str(ovs_tag)
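

# Illustrative sketch, not part of the original file: resolve the VLAN tag
# of an instance's Neutron port on a node. 'remote' is an SSHClient to that
# node; the port ID here is a placeholder value.
def _example_port_tag(remote):
    return ovs_get_tag_by_port(
        remote, port='4c9f1c26-0000-0000-0000-000000000000')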
@ -1,131 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.etree import ElementTree

from fuelweb_test.helpers.ssh_manager import SSHManager

ssh_manager = SSHManager()


def get_pacemaker_nodes_attributes(cibadmin_status_xml):
    """Parse 'cibadmin --query --scope status'.

    :param cibadmin_status_xml: stdout from 'cibadmin --query --scope status'
    :return: nested dictionary with node-fqdn and attribute name as keys
    """

    """ Get cibadmin_status to a python dict:
        return:
            {
              fqdn: {
                'arch':
                'cpu_cores':
                'cpu_info':
                'cpu_load':
                'cpu_speed':
                'free_swap':
                'gtidd':
                'master-p_conntrackd':
                'master-p_rabbitmq-server':
                'os':
                '#health_disk':   # only on master if root_free < 100M
                'pingd':
                'rabbit-master':  # only on master
                'rabbit-start-time':
                'rabbit_get_alarms_timeouts':
                'rabbit_list_channels_timeouts':
                'ram_free':
                'ram_total':
                'root_free':
                'var_lib_glance_free':
                'var_lib_mysql_free':
                'var_log_free':
              },
              ...
            }
    """
    root = ElementTree.fromstring(cibadmin_status_xml)
    nodes = {}
    for node_state in root.iter('node_state'):
        node_name = node_state.get('uname')
        nodes[node_name] = {}
        for instance_attribute in node_state.iter('nvpair'):
            nodes[node_name][instance_attribute.get(
                'name')] = instance_attribute.get('value')
    return nodes


def get_pcs_nodes(pcs_status_xml):
    """Parse 'pcs status xml'. <Nodes> section

    :param pcs_status_xml: stdout from 'pcs status xml'
    :return: nested dictionary with node-fqdn and attribute name as keys
    """

    """ Get crm node attributes to a python dict:
        return:
            {
              fqdn: {
                'node name':
                'id':
                'online':
                'standby':
                'standby_on_fail':
                'maintenance':
                'pending':
                'unclean':
                'shutdown':
                'expected_up':
                'is_dc':
                'resources_running':
                'type':
              },
              ...
            }
    """

    root = ElementTree.fromstring(pcs_status_xml)
    nodes = {}
    for nodes_group in root.iter('nodes'):
        for node in nodes_group:
            nodes[node.get('name')] = node.attrib
    return nodes


def parse_pcs_status_xml(remote_ip):
    """Run 'pcs status xml' on a remote node.

    :param remote_ip: remote IP address
    :return: raw XML output of 'pcs status xml' as a string
    """
    pcs_status_dict = ssh_manager.execute_on_remote(
        remote_ip, 'pcs status xml')['stdout_str']
    return pcs_status_dict


def get_pacemaker_resource_name(remote_ip, resource_name):
    """Parse 'cibadmin -Q --scope resources' and check whether the resource
    is multistate. Return the parent resource name if it is, the resource
    name otherwise.

    :param remote_ip: remote IP address
    :param resource_name: resource name string
    :return: string with the proper resource name
    """
    cib = ssh_manager.execute_on_remote(
        remote_ip, 'cibadmin -Q --scope resources')['stdout_str']
    root = ElementTree.fromstring(cib)

    resource_parent = root.find(
        ".//primitive[@id='{0}']/..".format(resource_name))

    if resource_parent.tag in ['master', 'clone']:
        return resource_parent.attrib['id']
    else:
        return resource_name
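

# Illustrative sketch, not part of the original file: restart a (possibly
# multistate) resource under its effective Pacemaker name. The resource
# name is a placeholder; 'pcs resource restart' is a standard pcs command.
def _example_restart_resource(remote_ip):
    name = get_pacemaker_resource_name(remote_ip, 'p_rabbitmq-server')
    ssh_manager.execute_on_remote(remote_ip,
                                  'pcs resource restart {0}'.format(name))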
@ -1,622 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re
import sys
import traceback
import zlib
from xml.dom.minidom import parseString

from proboscis import register
from proboscis import TestProgram
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
# pylint: disable=import-error,wrong-import-order
# noinspection PyUnresolvedReferences
from six.moves.urllib.request import urlopen
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import urlparse
# pylint: enable=import-error,wrong-import-order
import yaml

from fuelweb_test import logger
from fuelweb_test import settings
from fuelweb_test.helpers.ssh_manager import SSHManager

patching_validation_schema = {
    'type': {
        'required': True,
        'values': ['service_stop', 'service_start', 'service_restart',
                   'server_down', 'server_up', 'server_reboot',
                   'run_command', 'upload_script', 'run_tasks'],
        'data_type': str
    },
    'target': {
        'required': True,
        'values': {'master', 'slaves', 'controller_role', 'compute_role',
                   'cinder_role', 'ceph-osd_role', 'mongo_role',
                   'zabbix-server_role', 'base-os_role'},
        'data_type': list
    },
    'service': {
        'required': False,
        'data_type': str
    },
    'command': {
        'required': False,
        'data_type': str
    },
    'script': {
        'required': False,
        'data_type': str
    },
    'upload_path': {
        'required': False,
        'data_type': str
    },
    'id': {
        'required': True,
        'data_type': int
    },
    'tasks': {
        'required': False,
        'data_type': list
    },
    'tasks_timeout': {
        'required': False,
        'data_type': int
    },
}
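
# A minimal apply-step example, added for illustration only (the command and
# target values are placeholders): 'id', 'type' and 'target' are the only
# required keys in the schema above, and this dict passes
# verify_fix_apply_step() defined below.
EXAMPLE_APPLY_STEP = {
    'id': 1,
    'type': 'run_command',
    'target': ['controller_role'],
    'command': 'service neutron-server restart',
}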


def map_test(target):
    assert_is_not_none(settings.PATCHING_BUG_ID,
                       "Bug ID wasn't specified, can't start patching tests!")
    errata = get_errata(path=settings.PATCHING_APPLY_TESTS,
                        bug_id=settings.PATCHING_BUG_ID)
    verify_errata(errata)
    if not any(target == e_target['type'] for e_target in errata['targets']):
        skip_patching_test(target, errata['targets'])
    env_distro = settings.OPENSTACK_RELEASE
    master_distro = settings.OPENSTACK_RELEASE_CENTOS
    if 'affected-pkgs' in errata.keys():
        if target == 'master':
            settings.PATCHING_PKGS = set(
                [re.split('=|<|>', package)[0] for package
                 in errata['affected-pkgs'][master_distro.lower()]])
        else:
            settings.PATCHING_PKGS = set(
                [re.split('=|<|>', package)[0] for package
                 in errata['affected-pkgs'][env_distro.lower()]])
    available_env_packages = set()
    available_master_packages = set()
    for repo in settings.PATCHING_MIRRORS:
        logger.debug(
            'Checking packages from "{0}" repository'.format(repo))
        available_env_packages.update(get_repository_packages(repo,
                                                              env_distro))
    for repo in settings.PATCHING_MASTER_MIRRORS:
        logger.debug(
            'Checking packages from "{0}" repository'.format(repo))
        available_master_packages.update(get_repository_packages(
            repo, master_distro))
    available_packages = available_env_packages | available_master_packages
    if not settings.PATCHING_PKGS:
        if target == 'master':
            settings.PATCHING_PKGS = available_master_packages
        else:
            settings.PATCHING_PKGS = available_env_packages
    else:
        assert_true(settings.PATCHING_PKGS <= available_packages,
                    "Patching repositories don't contain all packages "
                    "needed for tests. Need: {0}, available: {1}, "
                    "missed: {2}.".format(settings.PATCHING_PKGS,
                                          available_packages,
                                          settings.PATCHING_PKGS -
                                          available_packages))
    assert_not_equal(len(settings.PATCHING_PKGS), 0,
                     "No packages found in repository(s) for patching:"
                     " '{0} {1}'".format(settings.PATCHING_MIRRORS,
                                         settings.PATCHING_MASTER_MIRRORS))
    if target == 'master':
        tests_groups = get_packages_tests(settings.PATCHING_PKGS,
                                          master_distro,
                                          target)
    else:
        tests_groups = get_packages_tests(settings.PATCHING_PKGS,
                                          env_distro,
                                          target)

    if 'rally' in errata.keys():
        if len(errata['rally']) > 0:
            settings.PATCHING_RUN_RALLY = True
            settings.RALLY_TAGS = errata['rally']

    if settings.PATCHING_CUSTOM_TEST:
        deployment_test = settings.PATCHING_CUSTOM_TEST
        settings.PATCHING_SNAPSHOT = \
            'patching_after_{0}'.format(deployment_test)
        register(groups=['prepare_patching_environment'],
                 depends_on_groups=[deployment_test])
        register(groups=['prepare_patching_master_environment'],
                 depends_on_groups=[deployment_test])
    else:
        program = TestProgram(argv=['none'])
        deployment_test = None
        for my_test in program.plan.tests:
            if all(patching_group in my_test.entry.info.groups for
                   patching_group in tests_groups):
                deployment_test = my_test
                break
        if deployment_test:
            settings.PATCHING_SNAPSHOT = 'patching_after_{0}'.format(
                deployment_test.entry.method.im_func.func_name)
            if target == 'master':
                register(groups=['prepare_patching_master_environment'],
                         depends_on=[deployment_test.entry.home])
            else:
                register(groups=['prepare_patching_environment'],
                         depends_on=[deployment_test.entry.home])
        else:
            raise Exception(
                "Test with groups {0} not found.".format(tests_groups))


def get_repository_packages(remote_repo_url, repo_type):
    repo_url = urlparse(remote_repo_url)
    packages = []
    if repo_type == settings.OPENSTACK_RELEASE_UBUNTU:
        packages_url = '{0}/Packages'.format(repo_url.geturl())
        pkgs_raw = urlopen(packages_url).read()
        for pkg in pkgs_raw.split('\n'):
            match = re.search(r'^Package: (\S+)\s*$', pkg)
            if match:
                packages.append(match.group(1))
    else:
        packages_url = '{0}/repodata/primary.xml.gz'.format(repo_url.geturl())
        pkgs_xml = parseString(zlib.decompressobj(zlib.MAX_WBITS | 32).
                               decompress(urlopen(packages_url).read()))
        for pkg in pkgs_xml.getElementsByTagName('package'):
            packages.append(
                pkg.getElementsByTagName('name')[0].firstChild.nodeValue)
    return packages
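

# Illustrative call, not part of the original file (the mirror URL is a
# placeholder): list package names from an Ubuntu flat repository.
def _example_list_ubuntu_packages():
    return get_repository_packages('http://mirror.example.com/ubuntu',
                                   settings.OPENSTACK_RELEASE_UBUNTU)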


def _get_target_and_project(_pkg, _all_pkgs):
    for _installation_target in _all_pkgs.keys():
        for _project in _all_pkgs[_installation_target]['projects']:
            if _pkg in _project['packages']:
                return _installation_target, _project['name']


def get_package_test_info(package, pkg_type, tests_path, patch_target):
    packages_path = "{0}/{1}/packages.yaml".format(tests_path, pkg_type)
    tests = set()
    tests_file = 'test.yaml'
    all_packages = yaml.load(open(packages_path).read())
    assert_is_not_none(_get_target_and_project(package, all_packages),
                       "Package '{0}' doesn't belong to any installation "
                       "target / project".format(package))
    target, project = _get_target_and_project(package, all_packages)
    if patch_target == 'master':
        if target not in ['master', 'bootstrap']:
            return {None}
    if patch_target == 'environment':
        if target not in ['deployment', 'provisioning']:
            return {None}
    target_tests_path = "/".join((tests_path, pkg_type, target, tests_file))
    project_tests_path = "/".join((tests_path, pkg_type, target, project,
                                   tests_file))
    package_tests_path = "/".join((tests_path, pkg_type, target, project,
                                   package, tests_file))
    for path in (target_tests_path, project_tests_path, package_tests_path):
        try:
            test = yaml.load(open(path).read())
            if 'system_tests' in test.keys():
                tests.update(test['system_tests']['tags'])
        except IOError as e:
            logger.warning('Ignoring exception: {!r}'.format(e))
            logger.debug(traceback.format_exc())
    return tests


def get_packages_tests(packages, distro, target):
    assert_true(os.path.isdir(settings.PATCHING_PKGS_TESTS),
                "Path for packages tests doesn't exist: '{0}'".format(
                    settings.PATCHING_PKGS_TESTS))
    if distro == settings.OPENSTACK_RELEASE_UBUNTU:
        pkg_type = 'deb'
    else:
        pkg_type = 'rpm'
    packages_tests = set()
    for package in packages:
        tests = get_package_test_info(package,
                                      pkg_type,
                                      settings.PATCHING_PKGS_TESTS,
                                      target)
        assert_true(len(tests) > 0,
                    "Tests for package {0} not found".format(package))
        if None in tests:
            continue
        packages_tests.update(tests)
    return packages_tests


def mirror_remote_repository(admin_remote, remote_repo_url, local_repo_path):
    repo_url = urlparse(remote_repo_url)
    cut_dirs = len(repo_url.path.strip('/').split('/'))
    download_cmd = ('wget --recursive --no-parent --no-verbose --reject "index'
                    '.html*,*.gif" --exclude-directories "{pwd}/repocache" '
                    '--directory-prefix {path} -nH --cut-dirs={cutd} {url}').\
        format(pwd=repo_url.path.rstrip('/'), path=local_repo_path,
               cutd=cut_dirs, url=repo_url.geturl())
    result = admin_remote.execute(download_cmd)
    assert_equal(result['exit_code'], 0, 'Mirroring of remote packages '
                                         'repository failed: {0}'.format(
                                             result))


def add_remote_repositories(environment, mirrors, prefix_name='custom_repo'):
    repositories = set()
    for mir in mirrors:
        name = '{0}_{1}'.format(prefix_name, mirrors.index(mir))
        local_repo_path = '/'.join([settings.PATCHING_WEB_DIR, name])
        remote_repo_url = mir
        with environment.d_env.get_admin_remote() as remote:
            mirror_remote_repository(
                admin_remote=remote,
                remote_repo_url=remote_repo_url,
                local_repo_path=local_repo_path)
        repositories.add(name)
    return repositories


def connect_slaves_to_repo(environment, nodes, repo_name):
    repo_ip = environment.get_admin_node_ip()
    repo_port = '8080'
    repourl = 'http://{master_ip}:{repo_port}/{repo_name}/'.format(
        master_ip=repo_ip, repo_name=repo_name, repo_port=repo_port)
    if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_UBUNTU:
        cmds = [
            "echo -e '\ndeb {repourl} /' > /etc/apt/sources.list.d/{repo_name}"
            ".list".format(repourl=repourl, repo_name=repo_name),
            "apt-key add <(curl -s '{repourl}/Release.key') || :".format(
                repourl=repourl),
            # Set the highest priority for all repositories located on the
            # master node
            "echo -e 'Package: *\nPin: origin {0}\nPin-Priority: 1060' > "
            "/etc/apt/preferences.d/custom_repo".format(
                environment.get_admin_node_ip()),
            "apt-get update"
        ]
    else:
        cmds = [
            "yum-config-manager --add-repo {url}".format(url=repourl),
            "echo -e 'gpgcheck=0\npriority=20' >>/etc/yum.repos.d/{ip}_{port}_"
            "{repo}_.repo".format(ip=repo_ip, repo=repo_name, port=repo_port),
            "yum -y clean all",
        ]

    for slave in nodes:
        for cmd in cmds:
            SSHManager().execute_on_remote(
                ip=slave['ip'],
                cmd=cmd
            )


def connect_admin_to_repo(environment, repo_name):
    repo_ip = environment.get_admin_node_ip()
    repo_port = '8080'
    repourl = 'http://{master_ip}:{repo_port}/{repo_name}/'.format(
        master_ip=repo_ip, repo_name=repo_name, repo_port=repo_port)

    cmds = [
        "yum-config-manager --add-repo {url}".format(url=repourl),
        "echo -e 'gpgcheck=0\npriority=20' >>/etc/yum.repos.d/{ip}_{port}_"
        "{repo}_.repo".format(ip=repo_ip, repo=repo_name, port=repo_port),
        "yum -y clean all",
        # FIXME(apanchenko):
        # Temporarily disable this check in order to test package updates
        # inside Docker containers. When building of new images for containers
        # is implemented, we should check here that `yum check-update` returns
        # ONLY the `100` exit code (updates are available for the master node).
        "yum check-update; [[ $? -eq 100 || $? -eq 0 ]]"
    ]

    for cmd in cmds:
        SSHManager().execute_on_remote(
            ip=SSHManager().admin_ip,
            cmd=cmd
        )


def update_packages(environment, remote, packages, exclude_packages=None):
    if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_UBUNTU:
        cmds = [
            'apt-get -o Dpkg::Options::="--force-confdef" '
            '-o Dpkg::Options::="--force-confold" -y install '
            '--only-upgrade {0}'.format(' '.join(packages))
        ]
        if exclude_packages:
            exclude_commands = ["apt-mark hold {0}".format(pkg)
                                for pkg in exclude_packages]
            cmds = exclude_commands + cmds
    else:
        cmds = [
            "yum -y update --nogpgcheck {0} -x '{1}'".format(
                ' '.join(packages), ','.join(exclude_packages or []))
        ]
    for cmd in cmds:
        remote.check_call(cmd)


def update_packages_on_slaves(environment, slaves, packages=None,
                              exclude_packages=None):
    if not packages:
        # Install all updates
        packages = ' '
    for slave in slaves:
        with environment.d_env.get_ssh_to_remote(slave['ip']) as remote:
            update_packages(environment, remote, packages, exclude_packages)


def get_slaves_ips_by_role(slaves, role=None):
    if role:
        return [slave['ip'] for slave in slaves if role in slave['roles']]
    return [slave['ip'] for slave in slaves]


def get_devops_slaves_by_role(env, slaves, role=None):
    if role:
        return [env.fuel_web.find_devops_node_by_nailgun_fqdn(slave['fqdn'],
                env.d_env.nodes().slaves)
                for slave in slaves if role in slave['roles']]
    return [env.fuel_web.find_devops_node_by_nailgun_fqdn(slave['fqdn'],
            env.d_env.nodes().slaves) for slave in slaves]


def get_slaves_ids_by_role(slaves, role=None):
    if role:
        return [slave['id'] for slave in slaves if role in slave['roles']]
    return [slave['id'] for slave in slaves]


def verify_fix_apply_step(apply_step):
    validation_schema = patching_validation_schema
    for key in validation_schema:
        if key in apply_step.keys():
            is_exists = apply_step[key] is not None
        else:
            is_exists = False
        if validation_schema[key]['required']:
            assert_true(is_exists, "Required field '{0}' not found in patch "
                                   "apply scenario step".format(key))
        if not is_exists:
            continue
        is_valid = True
        if 'values' in validation_schema[key].keys():
            if validation_schema[key]['data_type'] == str:
                is_valid = apply_step[key] in validation_schema[key]['values']
            elif validation_schema[key]['data_type'] in (list, set):
                is_valid = set(apply_step[key]) <= \
                    validation_schema[key]['values']

            assert_true(is_valid, 'Step in patch apply actions scenario '
                                  'contains incorrect data: "{key}": "{value}"'
                                  '. Supported values for "{key}" are '
                                  '"{valid}"'.format(
                                      key=key,
                                      value=apply_step[key],
                                      valid=validation_schema[key]['values']))
        if 'data_type' in validation_schema[key].keys():
            assert_true(
                isinstance(
                    apply_step[key], validation_schema[key]['data_type']),
                "Unexpected data type in patch apply scenario step: '"
                "{key}' is '{type}', but expecting '{expect}'.".format(
                    key=key,
                    type=type(apply_step[key]),
                    expect=validation_schema[key]['data_type']))


def validate_fix_apply_step(apply_step, environment, slaves):
    verify_fix_apply_step(apply_step)
    slaves = [] if not slaves else slaves
    command = ''
    remotes_ips = set()
    devops_action = ''
    devops_nodes = set()
    nodes_ids = set()

    if apply_step['type'] == 'run_tasks':
        remotes_ips.add(environment.get_admin_node_ip())
        assert_true('master' not in apply_step['target'],
                    "Action type 'run_tasks' accepts only slaves (roles) "
                    "as target value, but 'master' is specified!")

        for target in apply_step['target']:
            if target == 'slaves':
                nodes_ids.update(get_slaves_ids_by_role(slaves, role=None))
            else:
                role = target.split('_role')[0]
                nodes_ids.update(get_slaves_ids_by_role(slaves, role=role))
    else:
        for target in apply_step['target']:
            if target == 'master':
                remotes_ips.add(environment.get_admin_node_ip())
                devops_nodes.add(
                    environment.d_env.nodes().admin)
            elif target == 'slaves':
                remotes_ips.update(get_slaves_ips_by_role(slaves, role=None))
                devops_nodes.update(get_devops_slaves_by_role(environment,
                                                              slaves))
            else:
                role = target.split('_role')[0]
                remotes_ips.update(get_slaves_ips_by_role(slaves, role))
                devops_nodes.update(get_devops_slaves_by_role(environment,
                                                              slaves,
                                                              role=role))
    if apply_step['type'] in ('service_stop', 'service_start',
                              'service_restart'):
        assert_true(len(apply_step['service'] or '') > 0,
                    "Step #{0} in apply patch scenario performs '{1}', but "
                    "service isn't specified".format(apply_step['id'],
                                                     apply_step['type']))
        action = apply_step['type'].split('service_')[1]
        command = (
            "find /etc/init.d/ -regex '/etc/init.d/{service}' -printf "
            "'%f\n' -quit | xargs -i service {{}} {action}".format(
                service=apply_step['service'], action=action), )
    elif apply_step['type'] in ('server_down', 'server_up', 'server_reboot'):
        assert_true('master' not in apply_step['target'],
                    'Action type "{0}" doesn\'t accept "master" node as '
                    'target! Use action "run_command" instead.'.format(
                        apply_step['type']))
        devops_action = apply_step['type'].split('server_')[1]
    elif apply_step['type'] == 'upload_script':
        assert_true(len(apply_step['script'] or '') > 0,
                    "Step #{0} in apply patch scenario performs '{1}', but "
                    "script isn't specified".format(apply_step['id'],
                                                    apply_step['type']))
        assert_true(len(apply_step['upload_path'] or '') > 0,
                    "Step #{0} in apply patch scenario performs '{1}', but "
                    "upload path isn't specified".format(apply_step['id'],
                                                         apply_step['type']))
        command = ('UPLOAD', apply_step['script'], apply_step['upload_path'])
    elif apply_step['type'] == 'run_tasks':
        assert_true(len(apply_step['tasks'] or '') > 0,
                    "Step #{0} in apply patch scenario performs '{1}', but "
                    "tasks aren't specified".format(apply_step['id'],
                                                    apply_step['type']))
        tasks_timeout = apply_step['tasks_timeout'] if 'tasks_timeout' in \
            apply_step.keys() else 60 * 30
        command = (
            'RUN_TASKS',
            nodes_ids,
            apply_step['tasks'],
            tasks_timeout
        )
    else:
        assert_true(len(apply_step['command'] or '') > 0,
                    "Step #{0} in apply patch scenario performs '{1}', but "
                    "command isn't specified".format(apply_step['id'],
                                                     apply_step['type']))
        command = apply_step['command']
    # remote sessions are cleared via .clear() in run_actions()
    remotes = [environment.d_env.get_ssh_to_remote(ip) for ip in remotes_ips] \
        if command else []
    devops_nodes = devops_nodes if devops_action else []
    return command, remotes, devops_action, devops_nodes
|
||||
|
||||
|
||||
def get_errata(path, bug_id):
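    """Load the erratum YAML for the given bug from the scenarios path."""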
    scenario_path = '{0}/bugs/{1}/erratum.yaml'.format(path, bug_id)
    assert_true(os.path.exists(scenario_path),
                "Erratum for bug #{0} is not found in '{1}' "
                "directory".format(bug_id, path))
    with open(scenario_path) as f:
        # safe_load avoids executing arbitrary YAML tags from the erratum
        return yaml.safe_load(f)


def verify_errata(errata):
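    """Check that every target in the erratum has both scenarios for the
    current distro, and validate each scenario step.
    """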
    actions_types = ('patch-scenario', 'verify-scenario')
    distro = settings.OPENSTACK_RELEASE.lower()
    for target in errata['targets']:
        for action_type in actions_types:
            assert_true(distro in target[action_type].keys(),
                        "Steps for '{0}' not found for '{1}' distro!".format(
                            action_type, distro))
            scenario = sorted(target[action_type][distro],
                              key=lambda k: k['id'])
            for step in scenario:
                verify_fix_apply_step(step)


def run_actions(environment, target, slaves, action_type='patch-scenario'):
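    """Execute the erratum scenario of the given type step by step.

    Steps are validated and resolved by validate_fix_apply_step();
    'UPLOAD' and 'RUN_TASKS' pseudo-commands are handled here, plain
    commands are run over SSH, and devops power actions are applied to
    the corresponding nodes.
    """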
    errata = get_errata(path=settings.PATCHING_APPLY_TESTS,
                        bug_id=settings.PATCHING_BUG_ID)
    distro = settings.OPENSTACK_RELEASE.lower()
    target_scenarios = [e_target for e_target in errata['targets']
                        if target == e_target['type']]
    assert_true(len(target_scenarios) > 0,
                "Can't find patch scenario for '{0}' target in erratum "
                "for bug #{1}!".format(target, settings.PATCHING_BUG_ID))
    scenario = sorted(target_scenarios[0][action_type][distro],
                      key=lambda k: k['id'])

    for step in scenario:
        command, remotes, devops_action, devops_nodes = \
            validate_fix_apply_step(step, environment, slaves)
        if 'UPLOAD' in command:
            file_name = command[1]
            upload_path = command[2]
            source_path = '{0}/bugs/{1}/tests/{2}'.format(
                settings.PATCHING_APPLY_TESTS,
                settings.PATCHING_BUG_ID,
                file_name)
            assert_true(os.path.exists(source_path),
                        'File for uploading "{0}" doesn\'t exist!'.format(
                            source_path))
            for remote in remotes:
                remote.upload(source_path, upload_path)
            continue
        elif 'RUN_TASKS' in command:
            nodes_ids = command[1]
            tasks = command[2]
            timeout = command[3]
            nodes = [node for node in environment.fuel_web.client.list_nodes()
                     if node['id'] in nodes_ids]
            assert_true(len(nodes_ids) == len(nodes),
                        'Requested nodes with ids {0} for deployment tasks, '
                        'but found {1}!'.format(nodes_ids,
                                                [n['id'] for n in nodes]))
            assert_true(len(set([node['cluster'] for node in nodes])) == 1,
                        'Slaves for patching actions belong to different '
                        'environments, can\'t run deployment tasks!')
            cluster_id = nodes[0]['cluster']
            environment.fuel_web.wait_deployment_tasks(cluster_id, nodes_ids,
                                                       tasks, timeout)
            continue
        for remote in remotes:
            remote.check_call(command)
        if devops_action == 'down':
            environment.fuel_web.warm_shutdown_nodes(devops_nodes)
        elif devops_action == 'up':
            environment.fuel_web.warm_start_nodes(devops_nodes)
        elif devops_action == 'reboot':
            environment.fuel_web.warm_restart_nodes(devops_nodes)

        # clear connections
        for remote in remotes:
            remote.clear()


def apply_patches(environment, target, slaves=None):
    run_actions(environment, target, slaves, action_type='patch-scenario')


def verify_fix(environment, target, slaves=None):
    run_actions(environment, target, slaves, action_type='verify-scenario')


def skip_patching_test(target, errata_target):
    # TODO(apanchenko):
    # If 'target' from the erratum doesn't match 'target' from the tests, we
    # need to skip the tests and return a special exit code, so Jenkins can
    # recognize that the tests were skipped and won't vote on CRs (it will
    # just leave a comment)
    logger.error('Tests for "{0}" were started, but patches are targeted to '
                 '"{1}" according to erratum.'.format(target, errata_target))
    sys.exit(123)
@ -1,427 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import division

import json
import os

from devops.helpers.helpers import wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true

from fuelweb_test import logger


class RallyEngine(object):
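    """Manage a Dockerized Rally installation on the Fuel admin node.

    Pulls the Rally image, prepares it (database, utils) and commits it
    with the 'ready' tag; commands are executed inside one-off containers.
    """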
    def __init__(self,
                 admin_remote,
                 container_repo,
                 proxy_url=None,
                 user_id=0,
                 dir_for_home='/var/rally_home',
                 home_bind_path='/home/rally'):
        self.admin_remote = admin_remote
        self.container_repo = container_repo
        self.repository_tag = 'latest'
        self.proxy_url = proxy_url or ""
        self.user_id = user_id
        self.dir_for_home = dir_for_home
        self.home_bind_path = home_bind_path
        self.setup()

    def image_exists(self, tag='latest'):
        cmd = "docker images | awk 'NR > 1{print $1\" \"$2}'"
        logger.debug('Checking Docker images...')
        result = self.admin_remote.execute(cmd)
        logger.debug(result)
        existing_images = [line.strip().split() for line in result['stdout']]
        return [self.container_repo, tag] in existing_images

    def pull_image(self):
        # TODO(apanchenko): add possibility to load image from local path or
        # remote link provided in settings, in order to speed up downloading
        cmd = 'docker pull {0}'.format(self.container_repo)
        logger.debug('Downloading Rally repository/image from registry...')
        result = self.admin_remote.execute(cmd)
        logger.debug(result)
        return self.image_exists()

    def run_container_command(self, command, in_background=False):
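        """Run a shell command inside a fresh Rally container.

        The command is single-quote escaped and passed to 'docker run'
        with the host network, proxy environment and home bind mount;
        returns the execute() result dict.
        """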
        command = str(command).replace(r"'", r"'\''")
        options = ''
        if in_background:
            options = '{0} -d'.format(options)
        cmd = ("docker run {options} --user {user_id} --net=\"host\" -e "
               "\"http_proxy={proxy_url}\" -e \"https_proxy={proxy_url}\" "
               "-v {dir_for_home}:{home_bind_path} {container_repo}:{tag} "
               "/bin/bash -c '{command}'".format(
                   options=options,
                   user_id=self.user_id,
                   proxy_url=self.proxy_url,
                   dir_for_home=self.dir_for_home,
                   home_bind_path=self.home_bind_path,
                   container_repo=self.container_repo,
                   tag=self.repository_tag,
                   command=command))
        logger.debug('Executing command "{0}" in Rally container {1}..'.format(
            cmd, self.container_repo))
        result = self.admin_remote.execute(cmd)
        logger.debug(result)
        return result

    def setup_utils(self):
        utils = ['gawk', 'vim', 'curl']
        cmd = ('unset http_proxy https_proxy; apt-get update; '
               'apt-get install -y {0}'.format(' '.join(utils)))
        logger.debug('Installing utils "{0}" to the Rally container...'.format(
            utils))
        result = self.run_container_command(cmd)
        assert_equal(result['exit_code'], 0,
                     'Utils installation failed in Rally container: '
                     '{0}'.format(result))

    def create_database(self):
        check_rally_db_cmd = 'test -s .rally.sqlite'
        result = self.run_container_command(check_rally_db_cmd)
        if result['exit_code'] == 0:
            return
        logger.debug('Recreating Database for Rally...')
        create_rally_db_cmd = 'rally-manage db recreate'
        result = self.run_container_command(create_rally_db_cmd)
        assert_equal(result['exit_code'], 0,
                     'Rally Database creation failed: {0}!'.format(result))
        result = self.run_container_command(check_rally_db_cmd)
        assert_equal(result['exit_code'], 0, 'Failed to create Database for '
                                             'Rally: {0}!'.format(result))

    def prepare_image(self):
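        """Initialize the Rally DB and utils inside a container, then
        commit the last container as '<container_repo>:ready'.
        """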
        self.create_database()
        self.setup_utils()
        last_container_cmd = "docker ps -lq"
        result = self.admin_remote.execute(last_container_cmd)
        assert_equal(result['exit_code'], 0,
                     "Unable to get last container ID: {0}!".format(result))
        last_container = ''.join([line.strip() for line in result['stdout']])
        commit_cmd = 'docker commit {0} {1}:ready'.format(last_container,
                                                          self.container_repo)
        result = self.admin_remote.execute(commit_cmd)
        assert_equal(result['exit_code'], 0,
                     'Commit to Docker image "{0}" failed: {1}.'.format(
                         self.container_repo, result))
        return self.image_exists(tag='ready')

    def setup_bash_alias(self):
        alias_name = 'rally_docker'
        check_alias_cmd = '. /root/.bashrc && alias {0}'.format(alias_name)
        result = self.admin_remote.execute(check_alias_cmd)
        if result['exit_code'] == 0:
            return
        logger.debug('Creating bash alias for Rally inside container...')
        create_alias_cmd = ("alias {alias_name}='docker run --user {user_id} "
                            "--net=\"host\" -e \"http_proxy={proxy_url}\" -t "
                            "-i -v {dir_for_home}:{home_bind_path} "
                            "{container_repo}:{tag} rally'".format(
                                alias_name=alias_name,
                                user_id=self.user_id,
                                proxy_url=self.proxy_url,
                                dir_for_home=self.dir_for_home,
                                home_bind_path=self.home_bind_path,
                                container_repo=self.container_repo,
                                tag=self.repository_tag))
        result = self.admin_remote.execute('echo "{0}">> /root/.bashrc'.format(
            create_alias_cmd))
        assert_equal(result['exit_code'], 0,
                     "Alias creation for running Rally from container failed: "
                     "{0}.".format(result))
        result = self.admin_remote.execute(check_alias_cmd)
        assert_equal(result['exit_code'], 0,
                     "Alias creation for running Rally from container failed: "
                     "{0}.".format(result))

    def setup(self):
        if not self.image_exists():
            assert_true(self.pull_image(),
                        "Docker image for Rally not found!")
        if not self.image_exists(tag='ready'):
            assert_true(self.prepare_image(),
                        "Docker image for Rally is not ready!")
        self.repository_tag = 'ready'
        self.setup_bash_alias()

    def list_deployments(self):
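        """Return deployment UUIDs parsed from 'rally deployment list'."""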
cmd = (r"rally deployment list | awk -F "
|
||||
r"'[[:space:]]*\\\\|[[:space:]]*' '/\ydeploy\y/{print $2}'")
|
||||
result = self.run_container_command(cmd)
|
||||
logger.debug('Rally deployments list: {0}'.format(result))
|
||||
return [line.strip() for line in result['stdout']]
|
||||
|
||||
def show_deployment(self, deployment_uuid):
|
||||
cmd = ("rally deployment show {0} | awk -F "
|
||||
"'[[:space:]]*\\\\|[[:space:]]*' '/\w/{{print $2\",\"$3\",\"$4"
|
||||
"\",\"$5\",\"$6\",\"$7\",\"$8}}'").format(deployment_uuid)
|
||||
result = self.run_container_command(cmd)
|
||||
assert_equal(len(result['stdout']), 2,
|
||||
"Command 'rally deployment show' returned unexpected "
|
||||
"value: expected 2 lines, got {0}: ".format(result))
|
||||
keys = [k for k in result['stdout'][0].strip().split(',') if k != '']
|
||||
values = [v for v in result['stdout'][1].strip().split(',') if v != '']
|
||||
return {keys[i]: values[i] for i in range(0, len(keys))}
|
||||
|
||||
def list_tasks(self):
|
||||
cmd = "rally task list --uuids-only"
|
||||
result = self.run_container_command(cmd)
|
||||
logger.debug('Rally tasks list: {0}'.format(result))
|
||||
return [line.strip() for line in result['stdout']]
|
||||
|
||||
def get_task_status(self, task_uuid):
|
||||
cmd = "rally task status {0}".format(task_uuid)
|
||||
result = self.run_container_command(cmd)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Getting Rally task status failed: {0}".format(result))
|
||||
task_status = ''.join(result['stdout']).strip().split()[-1]
|
||||
logger.debug('Rally task "{0}" has status "{1}".'.format(task_uuid,
|
||||
task_status))
|
||||
return task_status
|
||||
|
||||
|
||||
class RallyDeployment(object):
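    """Represent a Rally deployment for an existing OpenStack cluster.

    Looks up an existing deployment by auth_url/username/tenant and
    creates one via 'rally deployment create' if none is found.
    """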
    def __init__(self, rally_engine, cluster_vip, username, password, tenant,
                 key_port=5000, proxy_url=''):
        self.rally_engine = rally_engine
        self.cluster_vip = cluster_vip
        self.username = username
        self.password = password
        self.tenant_name = tenant
        self.keystone_port = str(key_port)
        self.proxy_url = proxy_url
        self.auth_url = "http://{0}:{1}/v2.0/".format(self.cluster_vip,
                                                      self.keystone_port)
        self.set_proxy = not self.is_proxy_set
        self._uuid = None
        self.create_deployment()

    @property
    def uuid(self):
        if self._uuid is None:
            for d_uuid in self.rally_engine.list_deployments():
                deployment = self.rally_engine.show_deployment(d_uuid)
                logger.debug("Deployment info: {0}".format(deployment))
                if self.auth_url in deployment['auth_url'] and \
                        self.username == deployment['username'] and \
                        self.tenant_name == deployment['tenant_name']:
                    self._uuid = d_uuid
                    break
        return self._uuid

    @property
    def is_proxy_set(self):
        cmd = '[ "${{http_proxy}}" == "{0}" ]'.format(self.proxy_url)
        return self.rally_engine.run_container_command(cmd)['exit_code'] == 0

    @property
    def is_deployment_exist(self):
        return self.uuid is not None

    def create_deployment(self):
        if self.is_deployment_exist:
            return
        cmd = ('rally deployment create --name "{0}" --filename '
               '<(echo \'{{ "admin": {{ "password": "{1}", "tenant_name": "{2}'
               '", "username": "{3}" }}, "auth_url": "{4}", "endpoint": null, '
               '"type": "ExistingCloud", "https_insecure": true }}\')').format(
            self.cluster_vip, self.password, self.tenant_name, self.username,
            self.auth_url)
        result = self.rally_engine.run_container_command(cmd)
        assert_true(self.is_deployment_exist,
                    'Rally deployment creation failed: {0}'.format(result))
        logger.debug('Rally deployment created: {0}'.format(result))
        assert_true(self.check_deployment(),
                    "Rally deployment check failed.")

    def check_deployment(self, deployment_uuid=''):
        cmd = 'rally deployment check {0}'.format(deployment_uuid)
        result = self.rally_engine.run_container_command(cmd)
        if result['exit_code'] == 0:
            return True
        else:
            logger.error('Rally deployment check failed: {0}'.format(result))
            return False


class RallyTask(object):
    def __init__(self, rally_deployment, test_type):
        self.deployment = rally_deployment
        self.engine = self.deployment.rally_engine
        self.test_type = test_type
        self.uuid = None
        self._status = None

    @property
    def status(self):
        if self.uuid is None:
            self._status = None
        else:
            self._status = self.engine.get_task_status(self.uuid)
        return self._status

    def prepare_scenario(self):
        scenario_file = '{0}/fuelweb_test/rally/scenarios/{1}.json'.format(
            os.environ.get("WORKSPACE", "./"), self.test_type)
        remote_path = '{0}/{1}.json'.format(self.engine.dir_for_home,
                                            self.test_type)
        self.engine.admin_remote.upload(scenario_file, remote_path)
        result = self.engine.admin_remote.execute('test -f {0}'.format(
            remote_path))
        assert_equal(result['exit_code'], 0,
                     "Scenario upload failed: {0}".format(result))
        return '{0}.json'.format(self.test_type)

    def start(self):
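        """Start the Rally task in a background container.

        Output is redirected to a temp file; an awk probe polls it for
        the 'Using task: <uuid>' line, and the parsed UUID is verified
        against 'rally task list' before being stored in self.uuid.
        """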
        scenario = self.prepare_scenario()
        temp_file = '{0}_results.tmp.txt'.format(scenario)
        cmd = 'rally task start {0} &> {1}'.format(scenario, temp_file)
        result = self.engine.run_container_command(cmd, in_background=True)
        logger.debug('Started Rally task: {0}'.format(result))
        cmd = ("awk 'BEGIN{{retval=1}};/^Using task:/{{print $NF; retval=0}};"
               "END {{exit retval}}' {0}").format(temp_file)
        wait(lambda: self.engine.run_container_command(cmd)['exit_code'] == 0,
             timeout=30, timeout_msg='Rally task {!r} creation timeout'
                                     ''.format(result))
        result = self.engine.run_container_command(cmd)
        task_uuid = ''.join(result['stdout']).strip()
        assert_true(task_uuid in self.engine.list_tasks(),
                    "Rally task creation failed: {0}".format(result))
        self.uuid = task_uuid

    def abort(self, task_id):
        logger.debug('Stopping Rally task {0}'.format(task_id))
        cmd = 'rally task abort {0}'.format(task_id)
        self.engine.run_container_command(cmd)
        assert_true(
            self.status in ('finished', 'aborted'),
            "Rally task {0} was not aborted; current task status "
            "is {1}".format(task_id, self.status))

    def get_results(self):
        if self.status == 'finished':
            cmd = 'rally task results {0}'.format(self.uuid)
            result = self.engine.run_container_command(cmd)
            assert_equal(result['exit_code'], 0,
                         "Getting task results failed: {0}".format(result))
            logger.debug("Rally task {0} result: {1}".format(self.uuid,
                                                             result))
            return ''.join(result['stdout'])


class RallyResult(object):
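    """Parsed Rally task results: full/load durations and error count."""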
    def __init__(self, json_results):
        self.values = {
            'full_duration': 0.00,
            'load_duration': 0.00,
            'errors': 0
        }
        self.raw_data = []
        self.parse_raw_results(json_results)

    def parse_raw_results(self, raw_results):
        data = json.loads(raw_results)
        assert_equal(len(data), 1,
                     "Current implementation of RallyResult class doesn't "
                     "support results with length greater than '1'!")
        self.raw_data = data[0]
        self.values['full_duration'] = data[0]['full_duration']
        self.values['load_duration'] = data[0]['load_duration']
        self.values['errors'] = sum([len(result['error'])
                                     for result in data[0]['result']])

    @staticmethod
    def compare(first_result, second_result, deviation=0.1):
        """Compare benchmark results

        :param first_result: RallyResult
        :param second_result: RallyResult
        :param deviation: float
        :return: bool
        """
        message = ''
        equal = True
        for val in first_result.values.keys():
            logger.debug('Comparing {2}: {0} and {1}'.format(
                first_result.values[val], second_result.values[val],
                val
            ))
            if first_result.values[val] == 0 or second_result.values[val] == 0:
                if first_result.values[val] != second_result.values[val]:
                    message += "Values of '{0}' are: {1} and {2}. ".format(
                        val,
                        first_result.values[val],
                        second_result.values[val])
                    equal = False
                continue
            diff = abs(
                first_result.values[val] / second_result.values[val] - 1)
            if diff > deviation:
                message += "Values of '{0}' are: {1} and {2}. ".format(
                    val, first_result.values[val], second_result.values[val])
                equal = False
        if not equal:
            logger.info("Rally benchmark results aren't equal: {0}".format(
                message))
        return equal

    def show(self):
        return json.dumps(self.raw_data)


class RallyBenchmarkTest(object):
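    """Run a Rally benchmark scenario against a deployed cluster.

    Wires together RallyEngine, RallyDeployment and RallyTask using the
    cluster's management VIP, credentials and proxy.
    """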
    def __init__(self, container_repo, environment, cluster_id,
                 test_type):
        self.admin_remote = environment.d_env.get_admin_remote()
        self.cluster_vip = environment.fuel_web.get_mgmt_vip(cluster_id)
        self.cluster_credentials = \
            environment.fuel_web.get_cluster_credentials(cluster_id)
        self.proxy_url = environment.fuel_web.get_alive_proxy(cluster_id)
        logger.debug('Rally proxy URL is: {0}'.format(self.proxy_url))
        self.container_repo = container_repo
        self.home_dir = 'rally-{0}'.format(cluster_id)
        self.test_type = test_type
        self.engine = RallyEngine(
            admin_remote=self.admin_remote,
            container_repo=self.container_repo,
            proxy_url=self.proxy_url,
            dir_for_home='/var/{0}/'.format(self.home_dir)
        )
        self.deployment = RallyDeployment(
            rally_engine=self.engine,
            cluster_vip=self.cluster_vip,
            username=self.cluster_credentials['username'],
            password=self.cluster_credentials['password'],
            tenant=self.cluster_credentials['tenant'],
            proxy_url=self.proxy_url
        )
        self.current_task = None

    def run(self, timeout=60 * 10, result=True):
        self.current_task = RallyTask(self.deployment, self.test_type)
        logger.info('Starting Rally benchmark test...')
        self.current_task.start()
        assert_equal(self.current_task.status, 'running',
                     'Rally task was started, but it is not running, status: '
                     '{0}'.format(self.current_task.status))
        if result:
            wait(lambda: self.current_task.status == 'finished',
                 timeout=timeout, timeout_msg='Rally benchmark test timeout')
            logger.info('Rally benchmark test is finished.')
            return RallyResult(json_results=self.current_task.get_results())