New StarlingX Automation Framework

Fresh start for the StarlingX automation framework.

Change-Id: Ie265e0791024f45f71faad6315c2b91b022934d1
This commit is contained in:
croy 2024-11-28 13:33:38 -05:00
parent cf200be48d
commit 82d417b9e6
826 changed files with 50998 additions and 92966 deletions

30
.gitignore vendored
View File

@ -27,38 +27,26 @@ wheels/
# Log files
*.log
# Eclipse / PyCharm files
# Eclipse / PyCharm files / VSCode files
.project
.pydevproject
.settings/
.idea/
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / static analysis / coverage reports
htmlcov/
coverage_html_report/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
flake8-output.txt
shellcheck-output.xml
.vscode/launch.json
.vscode/settings.json
# Environments
.env
.env.local
.env.*.local
.venv
env/
venv/
ENV/
# Configuration files
# config.ini
# Dashboard UI Files
/portal/ui/.angular
/portal/ui/node_modules
# Mac files
.DS_Store

32
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,32 @@
---
default_stages: [commit]
default_language_version:
python: python3.11
repos:
- repo: local
hooks:
- id: flake8
name: flake8 - code lint and style checks
entry: flake8
language: python
types: [python]
args: [--config, pre-commit/.flake8]
- id: isort
name: isort - import sorting
entry: isort
language: python
types: [python]
args: [--settings-path, pre-commit/pyproject.toml]
- id: black
name: black - check formatting (show diff on FAIL)
entry: black
language: python
types: [python]
args: [--config, pre-commit/pyproject.toml, --check, --diff, --color, --quiet]
- id: black
name: black - auto-format code on FAIL
entry: black
language: python
types: [python]
args: [--config, pre-commit/pyproject.toml]

View File

@ -7,15 +7,11 @@
- build-openstack-releasenotes:
vars:
sphinx_python: python3
- openstack-tox-pep8
- openstack-tox-pylint
gate:
jobs:
- build-openstack-releasenotes:
vars:
sphinx_python: python3
- openstack-tox-pep8
- openstack-tox-pylint
post:
jobs:
- stx-test-upload-git-mirror

17
DB_SETUP.txt Normal file
View File

@ -0,0 +1,17 @@
These instructions are for setting up the DB for the test automation results dashboard.
1) Install postgres 16.2 database
2) Install pgadmin4 version 8.4
3) Right-click on 'Servers', then select Create -> Server Group. Create a new server group called 'Automation'
4) Expand Automation folder and right click the Login/Group Roles folder and select Create -> Login/Group Role
- Name the user automation_user
- From Privileges tab, enable all permissions
5) From pgadmin4, connect to the postgres db
- From Databases folder, Right click -> Create -> Database with name automation_central
- Right click on created db (automation_central) and select Restore
- Set format to Custom or tar
- Select the <repo_root>/framework/resource/db/automation_central_backup.sql file
- Select automation_user for Role name
- From Data Options -> Select Pre-data, Data, and Post-data
- Click Restore button
6) -- config steps once created go here --

22
Pipfile Normal file
View File

@ -0,0 +1,22 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
[requires]
python_version = ">=3.11"
[packages]
# Linting and Static Analysis
pre-commit = "==3.7.0"
black = "==24.3.0"
isort = "==5.13.2"
flake8 = "==7.0.0"
# Tools Packages
pytest = "==8.1.1"
paramiko = "==3.4.0"
json5 = "==0.9.24"
selenium = "==4.20.0"
django = "==5.0.6"
psycopg2-binary = "==2.9.9"
jinja2 = "*"
requests = "*"

868
Pipfile.lock generated Normal file
View File

@ -0,0 +1,868 @@
{
"_meta": {
"hash": {
"sha256": "ea9321978b73f4a63700306112ba707e262be757bf530165e75602d54804e979"
},
"pipfile-spec": 6,
"requires": {
"python_version": ">=3.11"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"asgiref": {
"hashes": [
"sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47",
"sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"
],
"markers": "python_version >= '3.8'",
"version": "==3.8.1"
},
"attrs": {
"hashes": [
"sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346",
"sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"
],
"markers": "python_version >= '3.7'",
"version": "==24.2.0"
},
"bcrypt": {
"hashes": [
"sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb",
"sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399",
"sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291",
"sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d",
"sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7",
"sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170",
"sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d",
"sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe",
"sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060",
"sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184",
"sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a",
"sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68",
"sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c",
"sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458",
"sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9",
"sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328",
"sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7",
"sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34",
"sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e",
"sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2",
"sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5",
"sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae",
"sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00",
"sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841",
"sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8",
"sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221",
"sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"
],
"markers": "python_version >= '3.7'",
"version": "==4.2.0"
},
"black": {
"hashes": [
"sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f",
"sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93",
"sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11",
"sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0",
"sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9",
"sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5",
"sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213",
"sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d",
"sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7",
"sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837",
"sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f",
"sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395",
"sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995",
"sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f",
"sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597",
"sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959",
"sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5",
"sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb",
"sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4",
"sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7",
"sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd",
"sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==24.3.0"
},
"certifi": {
"hashes": [
"sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8",
"sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"
],
"markers": "python_version >= '3.6'",
"version": "==2024.8.30"
},
"cffi": {
"hashes": [
"sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8",
"sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2",
"sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1",
"sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15",
"sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36",
"sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824",
"sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8",
"sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36",
"sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17",
"sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf",
"sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc",
"sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3",
"sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed",
"sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702",
"sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1",
"sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8",
"sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903",
"sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6",
"sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d",
"sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b",
"sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e",
"sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be",
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c",
"sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683",
"sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9",
"sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c",
"sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8",
"sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1",
"sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4",
"sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655",
"sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67",
"sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595",
"sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0",
"sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65",
"sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41",
"sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6",
"sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401",
"sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6",
"sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3",
"sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16",
"sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93",
"sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e",
"sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4",
"sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964",
"sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c",
"sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576",
"sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0",
"sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3",
"sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662",
"sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3",
"sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff",
"sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5",
"sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd",
"sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f",
"sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5",
"sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14",
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d",
"sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9",
"sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7",
"sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382",
"sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a",
"sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e",
"sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a",
"sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4",
"sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99",
"sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87",
"sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"
],
"markers": "platform_python_implementation != 'PyPy'",
"version": "==1.17.1"
},
"cfgv": {
"hashes": [
"sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9",
"sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"
],
"markers": "python_version >= '3.8'",
"version": "==3.4.0"
},
"charset-normalizer": {
"hashes": [
"sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621",
"sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6",
"sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8",
"sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912",
"sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c",
"sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b",
"sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d",
"sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d",
"sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95",
"sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e",
"sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565",
"sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64",
"sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab",
"sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be",
"sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e",
"sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907",
"sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0",
"sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2",
"sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62",
"sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62",
"sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23",
"sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc",
"sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284",
"sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca",
"sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455",
"sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858",
"sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b",
"sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594",
"sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc",
"sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db",
"sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b",
"sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea",
"sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6",
"sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920",
"sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749",
"sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7",
"sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd",
"sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99",
"sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242",
"sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee",
"sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129",
"sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2",
"sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51",
"sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee",
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8",
"sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b",
"sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613",
"sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742",
"sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe",
"sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3",
"sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5",
"sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631",
"sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7",
"sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15",
"sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c",
"sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea",
"sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417",
"sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250",
"sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88",
"sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca",
"sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa",
"sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99",
"sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149",
"sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41",
"sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574",
"sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0",
"sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f",
"sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d",
"sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654",
"sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3",
"sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19",
"sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90",
"sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578",
"sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9",
"sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1",
"sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51",
"sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719",
"sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236",
"sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a",
"sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c",
"sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade",
"sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944",
"sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc",
"sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6",
"sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6",
"sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27",
"sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6",
"sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2",
"sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12",
"sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf",
"sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114",
"sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7",
"sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf",
"sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d",
"sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b",
"sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed",
"sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03",
"sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4",
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67",
"sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365",
"sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a",
"sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748",
"sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b",
"sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079",
"sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"
],
"markers": "python_full_version >= '3.7.0'",
"version": "==3.4.0"
},
"click": {
"hashes": [
"sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
"sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
],
"markers": "python_version >= '3.7'",
"version": "==8.1.7"
},
"cryptography": {
"hashes": [
"sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362",
"sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4",
"sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa",
"sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83",
"sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff",
"sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805",
"sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6",
"sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664",
"sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08",
"sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e",
"sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18",
"sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f",
"sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73",
"sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5",
"sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984",
"sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd",
"sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3",
"sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e",
"sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405",
"sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2",
"sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c",
"sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995",
"sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73",
"sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16",
"sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7",
"sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd",
"sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"
],
"markers": "python_version >= '3.7'",
"version": "==43.0.3"
},
"distlib": {
"hashes": [
"sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87",
"sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"
],
"version": "==0.3.9"
},
"django": {
"hashes": [
"sha256:8363ac062bb4ef7c3f12d078f6fa5d154031d129a15170a1066412af49d30905",
"sha256:ff1b61005004e476e0aeea47c7f79b85864c70124030e95146315396f1e7951f"
],
"index": "pypi",
"markers": "python_version >= '3.10'",
"version": "==5.0.6"
},
"filelock": {
"hashes": [
"sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0",
"sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"
],
"markers": "python_version >= '3.8'",
"version": "==3.16.1"
},
"flake8": {
"hashes": [
"sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132",
"sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"
],
"index": "pypi",
"markers": "python_full_version >= '3.8.1'",
"version": "==7.0.0"
},
"h11": {
"hashes": [
"sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d",
"sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"
],
"markers": "python_version >= '3.7'",
"version": "==0.14.0"
},
"identify": {
"hashes": [
"sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0",
"sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"
],
"markers": "python_version >= '3.8'",
"version": "==2.6.1"
},
"idna": {
"hashes": [
"sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9",
"sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"
],
"markers": "python_version >= '3.6'",
"version": "==3.10"
},
"iniconfig": {
"hashes": [
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
"sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"
],
"markers": "python_version >= '3.7'",
"version": "==2.0.0"
},
"isort": {
"hashes": [
"sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109",
"sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"
],
"index": "pypi",
"markers": "python_full_version >= '3.8.0'",
"version": "==5.13.2"
},
"jinja2": {
"hashes": [
"sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369",
"sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==3.1.4"
},
"json5": {
"hashes": [
"sha256:0c638399421da959a20952782800e5c1a78c14e08e1dc9738fa10d8ec14d58c8",
"sha256:4ca101fd5c7cb47960c055ef8f4d0e31e15a7c6c48c3b6f1473fc83b6c462a13"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==0.9.24"
},
"markupsafe": {
"hashes": [
"sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4",
"sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30",
"sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0",
"sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9",
"sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396",
"sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13",
"sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028",
"sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca",
"sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557",
"sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832",
"sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0",
"sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b",
"sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579",
"sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a",
"sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c",
"sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff",
"sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c",
"sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22",
"sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094",
"sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb",
"sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e",
"sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5",
"sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a",
"sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d",
"sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a",
"sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b",
"sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8",
"sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225",
"sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c",
"sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144",
"sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f",
"sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87",
"sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d",
"sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93",
"sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf",
"sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158",
"sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84",
"sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb",
"sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48",
"sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171",
"sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c",
"sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6",
"sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd",
"sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d",
"sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1",
"sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d",
"sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca",
"sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a",
"sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29",
"sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe",
"sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798",
"sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c",
"sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8",
"sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f",
"sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f",
"sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a",
"sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178",
"sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0",
"sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79",
"sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430",
"sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"
],
"markers": "python_version >= '3.9'",
"version": "==3.0.2"
},
"mccabe": {
"hashes": [
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
"sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
],
"markers": "python_version >= '3.6'",
"version": "==0.7.0"
},
"mypy-extensions": {
"hashes": [
"sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
"sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.0"
},
"nodeenv": {
"hashes": [
"sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f",
"sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'",
"version": "==1.9.1"
},
"outcome": {
"hashes": [
"sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8",
"sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"
],
"markers": "python_version >= '3.7'",
"version": "==1.3.0.post0"
},
"packaging": {
"hashes": [
"sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002",
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"
],
"markers": "python_version >= '3.8'",
"version": "==24.1"
},
"paramiko": {
"hashes": [
"sha256:43f0b51115a896f9c00f59618023484cb3a14b98bbceab43394a39c6739b7ee7",
"sha256:aac08f26a31dc4dffd92821527d1682d99d52f9ef6851968114a8728f3c274d3"
],
"index": "pypi",
"markers": "python_version >= '3.6'",
"version": "==3.4.0"
},
"pathspec": {
"hashes": [
"sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
"sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
],
"markers": "python_version >= '3.8'",
"version": "==0.12.1"
},
"platformdirs": {
"hashes": [
"sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907",
"sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"
],
"markers": "python_version >= '3.8'",
"version": "==4.3.6"
},
"pluggy": {
"hashes": [
"sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1",
"sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"
],
"markers": "python_version >= '3.8'",
"version": "==1.5.0"
},
"pre-commit": {
"hashes": [
"sha256:5eae9e10c2b5ac51577c3452ec0a490455c45a0533f7960f993a0d01e59decab",
"sha256:e209d61b8acdcf742404408531f0c37d49d2c734fd7cff2d6076083d191cb060"
],
"index": "pypi",
"markers": "python_version >= '3.9'",
"version": "==3.7.0"
},
"psycopg2-binary": {
"hashes": [
"sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9",
"sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77",
"sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e",
"sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84",
"sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3",
"sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2",
"sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67",
"sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876",
"sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152",
"sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f",
"sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a",
"sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6",
"sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503",
"sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f",
"sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493",
"sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996",
"sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f",
"sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e",
"sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59",
"sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94",
"sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7",
"sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682",
"sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420",
"sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae",
"sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291",
"sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe",
"sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980",
"sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93",
"sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692",
"sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119",
"sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716",
"sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472",
"sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b",
"sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2",
"sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc",
"sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c",
"sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5",
"sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab",
"sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984",
"sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9",
"sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf",
"sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0",
"sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f",
"sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212",
"sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb",
"sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be",
"sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90",
"sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041",
"sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7",
"sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860",
"sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d",
"sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245",
"sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27",
"sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417",
"sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359",
"sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202",
"sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0",
"sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7",
"sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba",
"sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1",
"sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd",
"sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07",
"sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98",
"sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55",
"sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d",
"sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972",
"sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f",
"sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e",
"sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26",
"sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957",
"sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53",
"sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==2.9.9"
},
"pycodestyle": {
"hashes": [
"sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f",
"sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"
],
"markers": "python_version >= '3.8'",
"version": "==2.11.1"
},
"pycparser": {
"hashes": [
"sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6",
"sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"
],
"markers": "python_version >= '3.8'",
"version": "==2.22"
},
"pyflakes": {
"hashes": [
"sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f",
"sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"
],
"markers": "python_version >= '3.8'",
"version": "==3.2.0"
},
"pynacl": {
"hashes": [
"sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858",
"sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d",
"sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93",
"sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1",
"sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92",
"sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff",
"sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba",
"sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394",
"sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b",
"sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"
],
"markers": "python_version >= '3.6'",
"version": "==1.5.0"
},
"pysocks": {
"hashes": [
"sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299",
"sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5",
"sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"
],
"version": "==1.7.1"
},
"pytest": {
"hashes": [
"sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7",
"sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==8.1.1"
},
"pyyaml": {
"hashes": [
"sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff",
"sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48",
"sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086",
"sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e",
"sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133",
"sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5",
"sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484",
"sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee",
"sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5",
"sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68",
"sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a",
"sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf",
"sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99",
"sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8",
"sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85",
"sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19",
"sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc",
"sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a",
"sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1",
"sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317",
"sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c",
"sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631",
"sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d",
"sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652",
"sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5",
"sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e",
"sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b",
"sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8",
"sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476",
"sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706",
"sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563",
"sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237",
"sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b",
"sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083",
"sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180",
"sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425",
"sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e",
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f",
"sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725",
"sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183",
"sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab",
"sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774",
"sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725",
"sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e",
"sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5",
"sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d",
"sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290",
"sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44",
"sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed",
"sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4",
"sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba",
"sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12",
"sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"
],
"markers": "python_version >= '3.8'",
"version": "==6.0.2"
},
"requests": {
"hashes": [
"sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760",
"sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==2.32.3"
},
"selenium": {
"hashes": [
"sha256:0bd564ee166980d419a8aaf4ac00289bc152afcf2eadca5efe8c8e36711853fd",
"sha256:b1d0c33b38ca27d0499183e48e1dd09ff26973481f5d3ef2983073813ae6588d"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==4.20.0"
},
"sniffio": {
"hashes": [
"sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2",
"sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"
],
"markers": "python_version >= '3.7'",
"version": "==1.3.1"
},
"sortedcontainers": {
"hashes": [
"sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88",
"sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"
],
"version": "==2.4.0"
},
"sqlparse": {
"hashes": [
"sha256:773dcbf9a5ab44a090f3441e2180efe2560220203dc2f8c0b0fa141e18b505e4",
"sha256:bb6b4df465655ef332548e24f08e205afc81b9ab86cb1c45657a7ff173a3a00e"
],
"markers": "python_version >= '3.8'",
"version": "==0.5.1"
},
"trio": {
"hashes": [
"sha256:1dcc95ab1726b2da054afea8fd761af74bad79bd52381b84eae408e983c76831",
"sha256:68eabbcf8f457d925df62da780eff15ff5dc68fd6b367e2dde59f7aaf2a0b884"
],
"markers": "python_version >= '3.8'",
"version": "==0.27.0"
},
"trio-websocket": {
"hashes": [
"sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f",
"sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638"
],
"markers": "python_version >= '3.7'",
"version": "==0.11.1"
},
"typing-extensions": {
"hashes": [
"sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d",
"sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"
],
"markers": "python_version >= '3.8'",
"version": "==4.12.2"
},
"urllib3": {
"extras": [
"socks"
],
"hashes": [
"sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac",
"sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"
],
"markers": "python_version >= '3.8'",
"version": "==2.2.3"
},
"virtualenv": {
"hashes": [
"sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba",
"sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4"
],
"markers": "python_version >= '3.8'",
"version": "==20.27.1"
},
"wsproto": {
"hashes": [
"sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065",
"sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"
],
"markers": "python_full_version >= '3.7.0'",
"version": "==1.2.0"
}
},
"develop": {}
}

View File

@ -2,8 +2,18 @@
stx-test
========
StarlingX Test repository for manual and automated test cases.
StarlingX Test repository for automated test cases.
Pre-Requisites
--------------
.. code-block:: bash
You must have a machine/VM running Ubuntu 22.04 or later
The RunAgent must be able to connect to the internet to pull images and dependencies.
The RunAgent must be able to connect to your labs via SSH.
Download and install Python 3.11, pip and pipenv.
Download and install git on the RunAgent
Contribute
----------
@ -17,9 +27,69 @@ Contribute
ssh-keygen -t rsa -C "<your email address>"
ssh-add $private_keyfile_path
# add ssh key to settings https://review.opendev.org/#/q/project:starlingx/test
# Add ssh key to settings https://review.opendev.org/#/q/project:starlingx/test
cd <stx-test repo>
git remote add gerrit ssh://<your gerrit username>@review.opendev.org/starlingx/test.git
git review -s
- When you are ready, create your commit with detailed commit message, and submit for review.
# Create/activate a virtual python environment and pull the project dependencies.
pipenv shell
pipenv sync
- When you are ready, create your commit with detailed commit message, and submit for review.
Configuration
-------------
The framework contains multiple configuration files found under the config folder. There are configurations for docker,
hosts, kubernetes, labs and logger. By default, the runner will choose the default config file for each (default.json5)
when running. These files can be found under config/<config_type>/files. However, using command line overrides a user
can use a custom file. Command line options are --lab_config_file, --k8s_config_file, --logger_config_file, and --docker_config_file.
There are a couple of files that will need to be updated when first setting up.
1) config/lab/files/default.json5
This file is responsible for holding information such as floating ip, lab type, lab capabilities etc. Adjust the
contents of default.json5 to match the information of the lab where you want to execute the test cases. Based on your
system type, you can use one of the template files (such as template_simplex.json5) as a starting point. If using a
jump server, update the values under config/host/files/jump_host.json5 to use the connection information of the
jump server. Then in the lab configuration file, set "use_jump_host: true", and the "jump_server_config:<jump_host_location>"
(ex. jump_server_config: "config/host/files/jump_host.json5")
2) config/docker/files/default.json5
This file is responsible for holding information for docker registries used in testing. Adjust the local registry
credentials to match those of the lab where you want to execute the tests.
Update Lab Capabilities
Using the lab capability scanner, we can identify common lab capabilities and automatically add them to the configuration.
This script will create a backup of the original file and create a new one with the lab capabilities added. These
capabilities will help identify which tests are applicable for a given lab setup.
// Run script from the root location of the repo
cd <repo_location_root>
python scripts/lab_capability_scanner.py --lab_config_file=<lab_config_file>
.. code-block:: bash
# (Optional) Install Chrome for Webdriver UI tests
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo dpkg -i google-chrome-stable_current_amd64.deb
sudo apt -f install [If you encounter errors during the install]
google-chrome --version [Verify that the install was successful]
Execution
----------
You are now ready to run some tests!
// From the root repo location we can now run tests
cd <repo_location_root>
python framework/runner/scripts/test_executor.py --tests_location=<testcase_location>
// Note non-default config locations and filenames are also supported on the commandline as --lab_config_file, --k8s_config_file, --logger_config_file, --docker_config_file
python framework/runner/scripts/test_executor.py --tests_location=<testcase_location> --lab_config_file=<config_location>
// Ex. python framework/runner/scripts/test_executor.py --tests_location=testcases/cloud_platform/sanity --lab_config_file=/dev/configs/my_config.json

View File

@ -1,76 +0,0 @@
====================================
StarlingX Integration Test Framework
====================================
The project contains integration test cases that can be executed on an
installed and configured StarlingX system.
Supported test cases:
- CLI tests over SSH connection to StarlingX system via OAM floating IP
- Platform RestAPI test cases via external endpoints
- Horizon test cases
Packages Required
-----------------
- python >='3.4.3,<3.7'
- pytest>='3.1.0,<4.0'
- pexpect
- pyyaml
- requests (used by RestAPI test cases only)
- selenium (used by Horizon test cases only)
- Firefox (used by Horizon test cases only)
- pyvirtualdisplay (used by Horizon test cases only)
- ffmpeg (used by Horizon test cases only)
- Xvfb or Xephyr or Xvnc (used by pyvirtualdisplay for Horizon test cases only)
Setup Test Tool
---------------
This is an off-box test tool that needs to be set up once on a Linux server
that can reach the StarlingX system under test (such as SSH to STX
system, send/receive RestAPI requests, open Horizon page).
- Install above packages
- Clone stx-test repo
- Add absolute path for automated-pytest-suite to PYTHONPATH environment variable
Execute Test Cases
------------------
Precondition: STX system under test should be installed and configured.
- | Customized config can be provided via --testcase-config <config_file>.
| Config template can be found at ${project_root}/stx-test_template.conf.
- Test cases can be selected by specifying via -m <markers>
- | If stx-openstack is not deployed, platform specific marker should be specified,
| e.g., -m "platform_sanity or platform"
- | Automation logs will be created at ${HOME}/AUTOMATION_LOGS directory by default.
| Log directory can also be specified with --resultlog=${LOG_DIR} commandline option
- Examples:
.. code-block:: bash
export project_root=<automated-pytest-suite dir>
# Include $project_root to PYTHONPATH if not already done
export PYTHONPATH=${PYTHONPATH}:${project_root}
cd $project_root
# Example 1: Run all platform_sanity test cases under testcases/
pytest -m platform_sanity --testcase-config=~/my_config.conf testcases/
# Example 2: Run platform_sanity or sanity (requires stx-openstack) test cases,
# on a StarlingX virtual box system that is already saved in consts/lab.py
# and save automation logs to /tmp/AUTOMATION_LOGS
pytest --resultlog=/tmp/ -m sanity --lab=vbox --natbox=localhost testcases/
# Example 3: List (not execute) the test cases with "migrate" in the name
pytest --collect-only -k "migrate" --lab=<stx_oam_fip> testcases/
Contribute
----------
- In order to contribute, python3.4 is required to avoid producing code that is incompatible with python3.4.

View File

@ -1,716 +0,0 @@
#
# Copyright (c) 2019, 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
import os
from time import strftime, gmtime
# import threading # Used for formatting logger
import pytest # Don't remove. Used in eval
import setups
from consts.proj_vars import ProjVar
from utils.tis_log import LOG
from utils import parse_log
tc_start_time = None
tc_end_time = None
has_fail = False
repeat_count = -1
stress_count = -1
count = -1
no_teardown = False
tracebacks = []
region = None
test_count = 0
console_log = True
################################
# Process and log test results #
################################
class MakeReport:
nodeid = None
instances = {}
def __init__(self, item):
MakeReport.nodeid = item.nodeid
self.test_pass = None
self.test_results = {}
MakeReport.instances[item.nodeid] = self
def update_results(self, call, report):
if report.failed:
global has_fail
has_fail = True
msg = "***Failure at test {}: {}".format(call.when, call.excinfo)
print(msg)
LOG.debug(msg + "\n***Details: {}".format(report.longrepr))
tracebacks.append(str(report.longrepr))
self.test_results[call.when] = ['Failed', call.excinfo]
elif report.skipped:
sep = 'Skipped: '
skipreason_list = str(call.excinfo).split(sep=sep)[1:]
skipreason_str = sep.join(skipreason_list)
self.test_results[call.when] = ['Skipped', skipreason_str]
elif report.passed:
self.test_results[call.when] = ['Passed', '']
def get_results(self):
return self.test_results
@classmethod
def get_report(cls, item):
if item.nodeid == cls.nodeid:
return cls.instances[cls.nodeid]
else:
return cls(item)
class TestRes:
PASSNUM = 0
FAILNUM = 0
SKIPNUM = 0
TOTALNUM = 0
def _write_results(res_in_tests, test_name):
global tc_start_time
with open(ProjVar.get_var("TCLIST_PATH"), mode='a', encoding='utf8') as f:
f.write('\n{}\t{}\t{}'.format(res_in_tests, tc_start_time, test_name))
global test_count
test_count += 1
# reset tc_start and end time for next test case
tc_start_time = None
def pytest_runtest_makereport(item, call, __multicall__):
report = __multicall__.execute()
my_rep = MakeReport.get_report(item)
my_rep.update_results(call, report)
test_name = item.nodeid.replace('::()::',
'::') # .replace('testcases/', '')
res_in_tests = ''
res = my_rep.get_results()
# Write final result to test_results.log
if report.when == 'teardown':
res_in_log = 'Test Passed'
fail_at = []
for key, val in res.items():
if val[0] == 'Failed':
fail_at.append('test ' + key)
elif val[0] == 'Skipped':
res_in_log = 'Test Skipped\nReason: {}'.format(val[1])
res_in_tests = 'SKIP'
break
if fail_at:
fail_at = ', '.join(fail_at)
res_in_log = 'Test Failed at {}'.format(fail_at)
# Log test result
testcase_log(msg=res_in_log, nodeid=test_name, log_type='tc_res')
if 'Test Passed' in res_in_log:
res_in_tests = 'PASS'
elif 'Test Failed' in res_in_log:
res_in_tests = 'FAIL'
if ProjVar.get_var('PING_FAILURE'):
setups.add_ping_failure(test_name=test_name)
if not res_in_tests:
res_in_tests = 'UNKNOWN'
# count testcases by status
TestRes.TOTALNUM += 1
if res_in_tests == 'PASS':
TestRes.PASSNUM += 1
elif res_in_tests == 'FAIL':
TestRes.FAILNUM += 1
elif res_in_tests == 'SKIP':
TestRes.SKIPNUM += 1
_write_results(res_in_tests=res_in_tests, test_name=test_name)
if repeat_count > 0:
for key, val in res.items():
if val[0] == 'Failed':
global tc_end_time
tc_end_time = strftime("%Y%m%d %H:%M:%S", gmtime())
_write_results(res_in_tests='FAIL', test_name=test_name)
TestRes.FAILNUM += 1
if ProjVar.get_var('PING_FAILURE'):
setups.add_ping_failure(test_name=test_name)
try:
parse_log.parse_test_steps(ProjVar.get_var('LOG_DIR'))
except Exception as e:
LOG.warning(
"Unable to parse test steps. \nDetails: {}".format(
e.__str__()))
pytest.exit(
"Skip rest of the iterations upon stress test failure")
if no_teardown and report.when == 'call':
for key, val in res.items():
if val[0] == 'Skipped':
break
else:
pytest.exit("No teardown and skip rest of the tests if any")
return report
def pytest_runtest_setup(item):
global tc_start_time
# tc_start_time = setups.get_tis_timestamp(con_ssh)
tc_start_time = strftime("%Y%m%d %H:%M:%S", gmtime())
print('')
message = "Setup started:"
testcase_log(message, item.nodeid, log_type='tc_setup')
# set test name for ping vm failure
test_name = 'test_{}'.format(
item.nodeid.rsplit('::test_', 1)[-1].replace('/', '_'))
ProjVar.set_var(TEST_NAME=test_name)
ProjVar.set_var(PING_FAILURE=False)
def pytest_runtest_call(item):
separator = \
'++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
message = "Test steps started:"
testcase_log(message, item.nodeid, separator=separator, log_type='tc_start')
def pytest_runtest_teardown(item):
print('')
message = 'Teardown started:'
testcase_log(message, item.nodeid, log_type='tc_teardown')
def testcase_log(msg, nodeid, separator=None, log_type=None):
if separator is None:
separator = '-----------'
print_msg = separator + '\n' + msg
logging_msg = '\n{}{} {}'.format(separator, msg, nodeid)
if console_log:
print(print_msg)
if log_type == 'tc_res':
global tc_end_time
tc_end_time = strftime("%Y%m%d %H:%M:%S", gmtime())
LOG.tc_result(msg=msg, tc_name=nodeid)
elif log_type == 'tc_start':
LOG.tc_func_start(nodeid)
elif log_type == 'tc_setup':
LOG.tc_setup_start(nodeid)
elif log_type == 'tc_teardown':
LOG.tc_teardown_start(nodeid)
else:
LOG.debug(logging_msg)
########################
# Command line options #
########################
@pytest.mark.tryfirst
def pytest_configure(config):
config.addinivalue_line("markers",
"features(feature_name1, feature_name2, "
"...): mark impacted feature(s) for a test case.")
config.addinivalue_line("markers",
"priorities(, cpe_sanity, p2, ...): mark "
"priorities for a test case.")
config.addinivalue_line("markers",
"known_issue(LP-xxxx): mark known issue with "
"LP ID or description if no LP needed.")
if config.getoption('help'):
return
# Common reporting params
collect_all = config.getoption('collectall')
always_collect = config.getoption('alwayscollect')
session_log_dir = config.getoption('sessiondir')
resultlog = config.getoption('resultlog')
# Test case params on installed system
testcase_config = config.getoption('testcase_config')
lab_arg = config.getoption('lab')
natbox_arg = config.getoption('natbox')
tenant_arg = config.getoption('tenant')
horizon_visible = config.getoption('horizon_visible')
is_vbox = config.getoption('is_vbox')
global repeat_count
repeat_count = config.getoption('repeat')
global stress_count
stress_count = config.getoption('stress')
global count
if repeat_count > 0:
count = repeat_count
elif stress_count > 0:
count = stress_count
global no_teardown
no_teardown = config.getoption('noteardown')
if repeat_count > 0 or no_teardown:
ProjVar.set_var(NO_TEARDOWN=True)
collect_netinfo = config.getoption('netinfo')
# Determine lab value.
lab = natbox = None
if lab_arg:
lab = setups.get_lab_dict(lab_arg)
if natbox_arg:
natbox = setups.get_natbox_dict(natbox_arg)
lab, natbox = setups.setup_testcase_config(testcase_config, lab=lab,
natbox=natbox)
tenant = tenant_arg.upper() if tenant_arg else 'TENANT1'
# Log collection params
collect_all = True if collect_all else False
always_collect = True if always_collect else False
# If floating ip cannot be reached, whether to try to ping/ssh
# controller-0 unit IP, etc.
if collect_netinfo:
ProjVar.set_var(COLLECT_SYS_NET_INFO=True)
horizon_visible = True if horizon_visible else False
if session_log_dir:
log_dir = session_log_dir
else:
# compute directory for all logs based on resultlog arg, lab,
# and timestamp on local machine
resultlog = resultlog if resultlog else os.path.expanduser("~")
if '/AUTOMATION_LOGS' in resultlog:
resultlog = resultlog.split(sep='/AUTOMATION_LOGS')[0]
resultlog = os.path.join(resultlog, 'AUTOMATION_LOGS')
lab_name = lab['short_name']
time_stamp = strftime('%Y%m%d%H%M')
log_dir = '{}/{}/{}'.format(resultlog, lab_name, time_stamp)
os.makedirs(log_dir, exist_ok=True)
# set global constants, which will be used for the entire test session, etc
ProjVar.init_vars(lab=lab, natbox=natbox, logdir=log_dir, tenant=tenant,
collect_all=collect_all,
always_collect=always_collect,
horizon_visible=horizon_visible)
if lab.get('central_region'):
default_subloud = config.getoption('subcloud')
subcloud_list = config.getoption('subcloud_list')
if subcloud_list:
if default_subloud not in subcloud_list:
msg = ("default subcloud --subcloud=%s not in --subcloud_list=%s" %
(default_subloud, subcloud_list))
LOG.error(msg)
pytest.exit(msg)
ProjVar.set_var(IS_DC=True, PRIMARY_SUBCLOUD=default_subloud, SUBCLOUD_LIST=subcloud_list)
if is_vbox:
ProjVar.set_var(IS_VBOX=True)
config_logger(log_dir, console=console_log)
# set resultlog save location
config.option.resultlog = ProjVar.get_var("PYTESTLOG_PATH")
# Repeat test params
file_or_dir = config.getoption('file_or_dir')
origin_file_dir = list(file_or_dir)
if count > 1:
print("Repeat following tests {} times: {}".format(count, file_or_dir))
del file_or_dir[:]
for f_or_d in origin_file_dir:
for i in range(count):
file_or_dir.append(f_or_d)
def pytest_addoption(parser):
    """Register the STX-specific command line options with pytest.

    Option groups: test-session/lab selection, multi-region / distributed
    cloud, and debugging / log collection.
    """
    testconf_help = ("Absolute path for testcase config file. Template can be "
                     "found at automated-pytest-suite/stx-test_template.conf")
    lab_help = ("STX system to connect to. Valid value: 1) short_name or name "
                "of an existing dict entry in consts.Labs; Or 2) OAM floating "
                "ip of the STX system under test")
    tenant_help = ("Default tenant to use when unspecified. Valid values: "
                   "tenant1, tenant2, or admin")
    natbox_help = ("NatBox IP or name. If automated tests are executed from "
                   "NatBox, --natbox=localhost can be used. "
                   "If username/password are required to SSH to NatBox, "
                   "please specify them in test config file.")
    vbox_help = ("Specify if StarlingX system is installed in virtual "
                 "environment.")
    collect_all_help = ("Run collect all on STX system at the end of test "
                        "session if any test fails.")
    logdir_help = ("Directory to store test session logs. If this is "
                   "specified, then --resultlog will be ignored.")
    stress_help = ("Number of iterations to run specified testcase(s). Abort "
                   "rest of the test session on first failure")
    count_help = "Repeat tests x times - NO stop on failure"
    horizon_visible_help = "Display horizon on screen"
    no_console_log = 'Print minimal console logs'
    region_help = ("Multi-region parameter. Use when connected region is "
                   "different than region to test. "
                   "e.g., creating vm on RegionTwo from RegionOne")
    subcloud_help = ("Default subcloud used for automated test when boot vm, "
                     "etc. 'subcloud1' if unspecified.")
    subcloud_list_help = ("Specifies subclouds for DC labs, e.g. --subcloud_list=subcloud1,"
                          "subcloud2. If unspecified the lab's subclouds from lab.py will "
                          "be used.")

    # Test session options on installed and configured STX system:
    parser.addoption('--testcase-config', action='store',
                     metavar='testcase_config', default=None,
                     help=testconf_help)
    parser.addoption('--lab', action='store', metavar='lab', default=None,
                     help=lab_help)
    parser.addoption('--tenant', action='store', metavar='tenantname',
                     default=None, help=tenant_help)
    parser.addoption('--natbox', action='store', metavar='natbox', default=None,
                     help=natbox_help)
    parser.addoption('--vm', '--vbox', action='store_true', dest='is_vbox',
                     help=vbox_help)

    # Multi-region or distributed cloud options
    parser.addoption('--region', action='store', metavar='region',
                     default=None, help=region_help)
    parser.addoption('--subcloud', action='store', metavar='subcloud',
                     default='subcloud1', help=subcloud_help)
    parser.addoption("--subcloud_list", action="store", default=None,
                     help=subcloud_list_help)

    # Debugging/Log collection options:
    parser.addoption('--sessiondir', '--session_dir', '--session-dir',
                     action='store', dest='sessiondir',
                     metavar='sessiondir', default=None, help=logdir_help)
    parser.addoption('--collectall', '--collect_all', '--collect-all',
                     dest='collectall', action='store_true',
                     help=collect_all_help)
    # NOTE(review): reuses collect_all_help; presumably should describe
    # "collect regardless of test result" — confirm intent.
    parser.addoption('--alwayscollect', '--always-collect', '--always_collect',
                     dest='alwayscollect',
                     action='store_true', help=collect_all_help)
    parser.addoption('--repeat', action='store', metavar='repeat', type=int,
                     default=-1, help=stress_help)
    parser.addoption('--stress', metavar='stress', action='store', type=int,
                     default=-1, help=count_help)
    parser.addoption('--no-teardown', '--no_teardown', '--noteardown',
                     dest='noteardown', action='store_true')
    parser.addoption('--netinfo', '--net-info', dest='netinfo',
                     action='store_true',
                     help="Collect system networking info if scp keyfile fails")
    parser.addoption('--horizon-visible', '--horizon_visible',
                     action='store_true', dest='horizon_visible',
                     help=horizon_visible_help)
    parser.addoption('--noconsolelog', '--noconsole', '--no-console-log',
                     '--no_console_log', '--no-console',
                     '--no_console', action='store_true', dest='noconsolelog',
                     help=no_console_log)
def config_logger(log_dir, console=True):
    """Configure session logging: file handler, tmp basicConfig and console.

    Args:
        log_dir (str): directory where TIS_AUTOMATION.log is created
        console (bool): when False only CRITICAL messages reach stdout
    """
    # logger for log saved in file
    file_name = os.path.join(log_dir, 'TIS_AUTOMATION.log')
    # timestamps in UTC so logs from different machines line up
    logging.Formatter.converter = gmtime
    log_format = '[%(asctime)s] %(lineno)-5d%(levelname)-5s %(threadName)-8s ' \
                 '%(module)s.%(funcName)-8s:: %(message)s'
    tis_formatter = logging.Formatter(log_format)
    LOG.setLevel(logging.NOTSET)

    tmp_path = os.path.join(os.path.expanduser('~'), '.tmp_log')
    # clear the tmp log with best effort so it wont keep growing; only
    # filesystem errors are expected here, so don't use a bare except
    try:
        os.remove(tmp_path)
    except OSError:
        pass

    logging.basicConfig(level=logging.NOTSET, format=log_format,
                        filename=tmp_path, filemode='w')

    # file handler:
    file_handler = logging.FileHandler(file_name)
    file_handler.setFormatter(tis_formatter)
    file_handler.setLevel(logging.DEBUG)
    LOG.addHandler(file_handler)

    # logger for stream output
    console_level = logging.INFO if console else logging.CRITICAL
    stream_hdler = logging.StreamHandler()
    stream_hdler.setFormatter(tis_formatter)
    stream_hdler.setLevel(console_level)
    LOG.addHandler(stream_hdler)

    print("LOG DIR: {}".format(log_dir))
def pytest_unconfigure(config):
    """Session teardown hook: write the result summary, parse test steps,
    optionally run 'collect all' on the system, and close ssh sessions.
    """
    # collect all if needed
    if config.getoption('help'):
        return

    # best-effort close of the natbox connection; NATBOX_SSH may be unset
    try:
        natbox_ssh = ProjVar.get_var('NATBOX_SSH')
        natbox_ssh.close()
    except Exception:
        pass

    version_and_patch = ''
    try:
        version_and_patch = setups.get_version_and_patch_info()
    except Exception as e:
        LOG.debug(e)

    log_dir = ProjVar.get_var('LOG_DIR')
    if not log_dir:
        # Session setup never completed; just close any ssh sessions.
        # (This check used to be duplicated verbatim - second copy removed.)
        try:
            from utils.clients.ssh import ControllerClient
            ssh_list = ControllerClient.get_active_controllers(fail_ok=True)
            for con_ssh_ in ssh_list:
                con_ssh_.close()
        except Exception:
            pass
        return

    try:
        tc_res_path = log_dir + '/test_results.log'
        build_info = ProjVar.get_var('BUILD_INFO')
        build_id = build_info.get('BUILD_ID', '')
        build_job = build_info.get('JOB', '')
        build_server = build_info.get('BUILD_HOST', '')
        system_config = ProjVar.get_var('SYS_TYPE')
        session_str = ''
        total_exec = TestRes.PASSNUM + TestRes.FAILNUM
        # default rates so the summary write below cannot hit a NameError
        # when no test was executed
        pass_rate = fail_rate = '0'
        if total_exec > 0:
            pass_rate = "{}%".format(
                round(TestRes.PASSNUM * 100 / total_exec, 2))
            fail_rate = "{}%".format(
                round(TestRes.FAILNUM * 100 / total_exec, 2))
        with open(tc_res_path, mode='a', encoding='utf8') as f:
            # Append general info to result log
            f.write('\n\nLab: {}\n'
                    'Build ID: {}\n'
                    'Job: {}\n'
                    'Build Server: {}\n'
                    'System Type: {}\n'
                    'Automation LOGs DIR: {}\n'
                    'Ends at: {}\n'
                    '{}'  # test session id and tag
                    '{}'.format(ProjVar.get_var('LAB_NAME'), build_id,
                                build_job, build_server, system_config,
                                ProjVar.get_var('LOG_DIR'), tc_end_time,
                                session_str, version_and_patch))
            # Add result summary to beginning of the file
            f.write(
                '\nSummary:\nPassed: {} ({})\nFailed: {} ({})\nTotal '
                'Executed: {}\n'.
                format(TestRes.PASSNUM, pass_rate, TestRes.FAILNUM,
                       fail_rate, total_exec))
            if TestRes.SKIPNUM > 0:
                f.write('------------\nSkipped: {}'.format(TestRes.SKIPNUM))

        LOG.info("Test Results saved to: {}".format(tc_res_path))
        with open(tc_res_path, 'r', encoding='utf8') as fin:
            print(fin.read())
    except Exception as e:
        LOG.exception(
            "Failed to add session summary to test_results.log. "
            "\nDetails: {}".format(e.__str__()))

    # Below needs con_ssh to be initialized
    try:
        from utils.clients.ssh import ControllerClient
        con_ssh = ControllerClient.get_active_controller()
    except Exception:
        LOG.warning("No con_ssh found")
        return

    try:
        parse_log.parse_test_steps(ProjVar.get_var('LOG_DIR'))
    except Exception as e:
        LOG.warning(
            "Unable to parse test steps. \nDetails: {}".format(e.__str__()))

    if test_count > 0 and (ProjVar.get_var('ALWAYS_COLLECT') or (
            has_fail and ProjVar.get_var('COLLECT_ALL'))):
        # Collect tis logs if collect all required upon test(s) failure
        # Failure on collect all would not change the result of the last test
        # case.
        try:
            setups.collect_tis_logs(con_ssh)
        except Exception as e:
            LOG.warning("'collect all' failed. {}".format(e.__str__()))

    ssh_list = ControllerClient.get_active_controllers(fail_ok=True,
                                                       current_thread_only=True)
    for con_ssh_ in ssh_list:
        try:
            con_ssh_.close()
        except Exception:
            pass
def pytest_collection_modifyitems(items):
    """Re-order collected tests and expand custom markers.

    - 'trylast' items move to the end; 'abslast' items move after those
    - each arg of a 'priorities' / 'features' marker becomes its own mark
    - 'known_issue' logs the workaround and adds a known_issue mark
    - items whose nodeid contains 'test_dc_' get the 'dc' mark
    """
    move_to_last = []
    absolute_last = []

    for item in items:
        # re-order tests:
        trylast_marker = item.get_closest_marker('trylast')
        abslast_marker = item.get_closest_marker('abslast')

        if abslast_marker:
            absolute_last.append(item)
        elif trylast_marker:
            move_to_last.append(item)

        priority_marker = item.get_closest_marker('priorities')
        if priority_marker is not None:
            for priority in priority_marker.args:
                # getattr instead of eval: same mark, no code execution
                item.add_marker(getattr(pytest.mark, priority))

        feature_marker = item.get_closest_marker('features')
        if feature_marker is not None:
            for feature in feature_marker.args:
                item.add_marker(getattr(pytest.mark, feature))

        # known issue marker
        known_issue_mark = item.get_closest_marker('known_issue')
        if known_issue_mark is not None:
            issue = known_issue_mark.args[0]
            msg = "{} has a workaround due to {}".format(item.nodeid, issue)
            print(msg)
            LOG.debug(msg=msg)
            item.add_marker(pytest.mark.known_issue)

        # add dc marker to all tests start with test_dc_
        # (get_closest_marker: item.get_marker() was removed in pytest 4.x)
        dc_maker = item.get_closest_marker('dc')
        if not dc_maker and 'test_dc_' in item.nodeid:
            item.add_marker(pytest.mark.dc)

    # add trylast tests to the end
    for item in move_to_last:
        items.remove(item)
        items.append(item)

    for i in absolute_last:
        items.remove(i)
        items.append(i)
def pytest_generate_tests(metafunc):
    """Prefix 'remote_cli' to test names so they are reported as a
    different testcase when remote CLI mode is enabled.
    """
    if not ProjVar.get_var('REMOTE_CLI'):
        return
    metafunc.parametrize('prefix_remote_cli', ['remote_cli'])
##############################################################
# Manipulating fixture orders based on following pytest rules
# session > module > class > function
# autouse > non-autouse
# alphabetic after full-filling above criteria
#
# Orders we want on fixtures of same scope:
# check_alarms > delete_resources > config_host
#############################################################
@pytest.fixture(scope='session')
def check_alarms():
    """No-op session fixture; exists so ordering-dependent fixtures can
    depend on it (see fixture-order notes above)."""
    LOG.debug("Empty check alarms")
@pytest.fixture(scope='session')
def config_host_class():
    """No-op session fixture used as a class-scope config-host anchor."""
    LOG.debug("Empty config host class")
@pytest.fixture(scope='session')
def config_host_module():
    """No-op session fixture used as a module-scope config-host anchor."""
    LOG.debug("Empty config host module")
@pytest.fixture(autouse=True)
def a1_fixture(check_alarms):
    """Autouse no-op that pulls in check_alarms to influence fixture order."""
@pytest.fixture(scope='module', autouse=True)
def c1_fixture(config_host_module):
    """Autouse no-op pulling in config_host_module for ordering."""
@pytest.fixture(scope='class', autouse=True)
def c2_fixture(config_host_class):
    """Autouse no-op pulling in config_host_class for ordering."""
@pytest.fixture(scope='session', autouse=True)
def prefix_remote_cli():
    """No-op placeholder; pytest_generate_tests parametrizes it when
    REMOTE_CLI is enabled."""
def __params_gen(index):
    """Return the parametrize id for repeat iteration *index*, e.g. 'iter3'."""
    return 'iter' + format(index)
@pytest.fixture(scope='session')
def global_setup():
    """Create the session working directories and apply the region override
    if one was requested on the command line."""
    for dir_var in ('TEMP_DIR', 'PING_FAILURE_DIR', 'GUEST_LOGS_DIR'):
        os.makedirs(ProjVar.get_var(dir_var), exist_ok=True)

    if region:
        setups.set_region(region=region)
#####################################
# End of fixture order manipulation #
#####################################
def pytest_sessionfinish():
    """End-of-session hook: stop telnet logger threads, then abort without
    teardown when --repeat hit a failure or --no-teardown was given."""
    telnet_info = ProjVar.get_var('TELNET_THREADS')
    if telnet_info:
        telnet_threads, stop_event = telnet_info
        stop_event.set()
        for telnet_thread in telnet_threads:
            telnet_thread.join()

    if repeat_count > 0 and has_fail:
        # _thread.interrupt_main()
        print('Printing traceback: \n' + '\n'.join(tracebacks))
        pytest.exit("\n========== Test failed - "
                    "Test session aborted without teardown to leave the "
                    "system in state ==========")

    if no_teardown:
        pytest.exit(
            "\n========== Test session stopped without teardown after first "
            "test executed ==========")

View File

@ -1,356 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
class Tenant:
    """Registry of keystone user/tenant auth dicts for the system under test.

    Holds the platform and containerized-openstack credentials plus the
    distributed-cloud (DC) region map.  Dicts returned by ``get()`` without
    a dc_region are the stored objects on purpose: mutating them updates
    the registry.
    """
    __PASSWORD = 'St8rlingX*'
    __REGION = 'RegionOne'
    __URL_PLATFORM = 'http://192.168.204.2:5000/v3/'
    __URL_CONTAINERS = 'http://keystone.openstack.svc.cluster.local/v3'
    # DC region name -> {'region': ..., 'auth_url': ...}
    __DC_MAP = {'SystemController': {'region': 'SystemController',
                                     'auth_url': __URL_PLATFORM},
                'RegionOne': {'region': 'RegionOne',
                              'auth_url': __URL_PLATFORM}}

    # Platform openstack user - admin
    __ADMIN_PLATFORM = {
        'user': 'admin',
        'password': __PASSWORD,
        'tenant': 'admin',
        'domain': 'Default',
        'platform': True,
    }

    # Containerized openstack users - admin, and two test users/tenants
    __ADMIN = {
        'user': 'admin',
        'password': __PASSWORD,
        'tenant': 'admin',
        'domain': 'Default'
    }

    __TENANT1 = {
        'user': 'tenant1',
        'password': __PASSWORD,
        'tenant': 'tenant1',
        'domain': 'Default',
        'nova_keypair': 'keypair-tenant1'
    }

    __TENANT2 = {
        'user': 'tenant2',
        'password': __PASSWORD,
        'tenant': 'tenant2',
        'domain': 'Default',
        'nova_keypair': 'keypair-tenant2'
    }

    # registry of all known tenant dicts, keyed by normalized dict name
    __tenants = {
        'ADMIN_PLATFORM': __ADMIN_PLATFORM,
        'ADMIN': __ADMIN,
        'TENANT1': __TENANT1,
        'TENANT2': __TENANT2}

    # dict name used by get_primary(); changed via set_primary()
    __primary = 'TENANT1'

    @classmethod
    def add_dc_region(cls, region_info):
        """Merge extra distributed-cloud regions into the DC map."""
        cls.__DC_MAP.update(region_info)

    @classmethod
    def set_platform_url(cls, url, central_region=False):
        """
        Set auth_url for platform keystone

        Args:
            url (str):
            central_region (bool)
        """
        if central_region:
            cls.__DC_MAP.get('SystemController')['auth_url'] = url
            cls.__DC_MAP.get('RegionOne')['auth_url'] = url
        else:
            # NOTE(review): this does not refresh the auth_url values already
            # captured in __DC_MAP at class-definition time - confirm intent.
            cls.__URL_PLATFORM = url

    @classmethod
    def set_region(cls, region):
        """
        Set default region for all tenants

        Args:
            region (str): e.g., SystemController, subcloud-2
        """
        cls.__REGION = region

    @classmethod
    def add(cls, username, tenantname=None, dictname=None, password=None,
            region=None, auth_url=None, domain='Default', **kwargs):
        """Register a new tenant dict and return it.

        Args:
            username (str): keystone user name
            tenantname (str|None): project/tenant name
            dictname (str|None): registry key; defaults to upper-cased
                username with '-' replaced by '_'
            password (str|None): defaults to the class default password
            region (str|None): optional region override
            auth_url (str|None): optional auth_url override
            **kwargs: any extra key/values to store

        Returns (dict): the dict stored in the registry
        """
        user_dict = dict(user=username)
        user_dict['tenant'] = tenantname
        user_dict['password'] = password if password else cls.__PASSWORD
        user_dict['domain'] = domain
        if region:
            user_dict['region'] = region
        if auth_url:
            user_dict['auth_url'] = auth_url
        if kwargs:
            user_dict.update(kwargs)

        dictname = dictname.upper() if dictname else username.upper(). \
            replace('-', '_')
        cls.__tenants[dictname] = user_dict
        return user_dict

    @classmethod
    def get(cls, tenant_dictname, dc_region=None):
        """
        Get tenant auth dict that can be passed to auth_info in cli cmd

        Args:
            tenant_dictname (str): e.g., tenant1, TENANT2, system_controller
            dc_region (None|str): key for dc_region added via add_dc_region.
                Used to update auth_url and region
                e.g., SystemController, RegionOne, subcloud-2

        Returns (dict): mutable dictionary. If changed, DC map or tenant dict
            will update as well.

        Raises:
            ValueError: if dc_region is given but not in the DC map
        """
        tenant_dictname = tenant_dictname.upper().replace('-', '_')
        tenant_dict = cls.__tenants.get(tenant_dictname)
        if not tenant_dict:
            return tenant_dict

        if dc_region:
            region_dict = cls.__DC_MAP.get(dc_region, None)
            if not region_dict:
                raise ValueError(
                    'Distributed cloud region {} is not added to '
                    'DC_MAP yet. DC_MAP: {}'.format(dc_region, cls.__DC_MAP))
            # return a copy so the stored dict keeps its original region
            tenant_dict = dict(tenant_dict)
            tenant_dict.update({'region': region_dict['region']})
        else:
            tenant_dict.pop('region', None)

        return tenant_dict

    @classmethod
    def get_region_and_url(cls, platform=False, dc_region=None):
        """Return {'auth_url': ..., 'region': ...} for the requested scope.

        Args:
            platform (bool): use the platform keystone url instead of the
                containerized one
            dc_region (None|str): DC region to resolve via the DC map

        Raises:
            ValueError: if dc_region is given but not in the DC map
        """
        auth_region_and_url = {
            'auth_url':
                cls.__URL_PLATFORM if platform else cls.__URL_CONTAINERS,
            'region': cls.__REGION
        }

        if dc_region:
            region_dict = cls.__DC_MAP.get(dc_region, None)
            if not region_dict:
                raise ValueError(
                    'Distributed cloud region {} is not added to DC_MAP yet. '
                    'DC_MAP: {}'.format(dc_region, cls.__DC_MAP))
            auth_region_and_url['region'] = region_dict.get('region')
            if platform:
                auth_region_and_url['auth_url'] = region_dict.get('auth_url')

        return auth_region_and_url

    @classmethod
    def set_primary(cls, tenant_dictname):
        """
        should be called after _set_region and _set_url

        Args:
            tenant_dictname (str): Tenant dict name

        Returns:

        """
        cls.__primary = tenant_dictname.upper()

    @classmethod
    def get_primary(cls):
        """Return the auth dict of the primary tenant."""
        return cls.get(tenant_dictname=cls.__primary)

    @classmethod
    def get_secondary(cls):
        """Return the auth dict of the non-primary test tenant."""
        secondary = 'TENANT1' if cls.__primary != 'TENANT1' else 'TENANT2'
        return cls.get(tenant_dictname=secondary)

    @classmethod
    def update(cls, tenant_dictname, username=None, password=None, tenant=None,
               **kwargs):
        """Update fields of an existing tenant dict in place.

        Args:
            tenant_dictname (str): name of the dict to update
                (case/'-' insensitive)
            username (str|None): new 'user' value
            password (str|None): new 'password' value
            tenant (str|None): new 'tenant' value
            **kwargs: other key/values to update

        Raises:
            ValueError: if the dict does not exist or nothing to update
        """
        # normalize the same way get() does, so the registry is updated
        # under its canonical key instead of adding a duplicate entry
        tenant_dictname = tenant_dictname.upper().replace('-', '_')
        tenant_dict = cls.get(tenant_dictname)

        if not isinstance(tenant_dict, dict):
            raise ValueError("{} dictionary does not exist in "
                             "consts/auth.py".format(tenant_dictname))

        if not username and not password and not tenant and not kwargs:
            raise ValueError("Please specify username, password, tenant, "
                             "and/or domain to update for {} dict".
                             format(tenant_dictname))

        if username:
            kwargs['user'] = username
        if password:
            kwargs['password'] = password
        if tenant:
            kwargs['tenant'] = tenant
        tenant_dict.update(kwargs)
        cls.__tenants[tenant_dictname] = tenant_dict

    @classmethod
    def get_dc_map(cls):
        """Return the distributed-cloud region map."""
        return cls.__DC_MAP
class HostLinuxUser:
    """Credentials and home directory of the Linux user on STX hosts."""

    # single mutable record so setters take effect process-wide
    __CREDS = {
        'user': 'sysadmin',
        'password': 'St8rlingX*'
    }

    @classmethod
    def get_user(cls):
        """Return the host Linux username."""
        return cls.__CREDS['user']

    @classmethod
    def get_password(cls):
        """Return the host Linux password."""
        return cls.__CREDS['password']

    @classmethod
    def get_home(cls):
        """Return the home dir; defaults to /home/<user> unless overridden."""
        return cls.__CREDS.get('home', '/home/{}'.format(cls.get_user()))

    @classmethod
    def set_user(cls, username):
        """Override the host Linux username."""
        cls.__CREDS['user'] = username

    @classmethod
    def set_password(cls, password):
        """Override the host Linux password."""
        cls.__CREDS['password'] = password

    @classmethod
    def set_home(cls, home):
        """Override the home dir; falsy values are ignored."""
        if home:
            cls.__CREDS['home'] = home
class Guest:
    """Default login credentials for guest VM images, keyed by image name.

    A password of None means no password is set for that image.
    """

    # Build one independent {'user', 'password'} dict per image from a
    # (user, password) table, so the setters below only touch one image.
    # Notes carried over from the original table:
    #  - opensuse_13 image has some issue where it usually fails to boot
    #  - rhel (OPV image) has root/root enabled
    CREDS = {
        image: {'user': user, 'password': password}
        for image, (user, password) in {
            'tis-centos-guest': ('root', 'root'),
            'cgcs-guest': ('root', 'root'),
            'ubuntu': ('ubuntu', None),
            'centos_6': ('centos', None),
            'centos_7': ('centos', None),
            'opensuse_13': ('root', None),
            'rhel': ('root', 'root'),
            'cirros': ('cirros', 'cubswin:)'),
            'win_2012': ('Administrator', 'Li69nux*'),
            'win_2016': ('Administrator', 'Li69nux*'),
            'ge_edge': ('root', 'root'),
            'vxworks': ('root', 'root'),
        }.items()
    }

    @classmethod
    def set_user(cls, image_name, username):
        """Override the login user for *image_name*."""
        cls.CREDS[image_name]['user'] = username

    @classmethod
    def set_password(cls, image_name, password):
        """Override the login password for *image_name*."""
        cls.CREDS[image_name]['password'] = password
class TestFileServer:
    """Connection details for a shared test file server.

    Place holder for shared file server in future - all values below are
    dummy placeholders to be replaced with a real server's details.
    """
    # Place holder for shared file server in future.
    SERVER = 'server_name_or_ip_that_can_ssh_to'
    USER = 'username'
    PASSWORD = 'password'
    HOME = 'my_home'
    HOSTNAME = 'hostname'
    # regex matching the server's shell prompt
    PROMPT = r'[\[]?.*@.*\$[ ]?'
class CliAuth:
    """Snapshot of openstack CLI auth environment variables.

    Keys are stored upper-cased; lookups are case-insensitive.
    """

    __auth_vars = {
        'OS_AUTH_URL': 'http://192.168.204.2:5000/v3',
        'OS_ENDPOINT_TYPE': 'internalURL',
        'CINDER_ENDPOINT_TYPE': 'internalURL',
        'OS_USER_DOMAIN_NAME': 'Default',
        'OS_PROJECT_DOMAIN_NAME': 'Default',
        'OS_IDENTITY_API_VERSION': '3',
        'OS_REGION_NAME': 'RegionOne',
        'OS_INTERFACE': 'internal',
        'HTTPS': False,
        'OS_KEYSTONE_REGION_NAME': None,
    }

    @classmethod
    def set_vars(cls, **kwargs):
        """Store the given vars; keyword names are upper-cased."""
        for key, value in kwargs.items():
            cls.__auth_vars[key.upper()] = value

    @classmethod
    def get_var(cls, var_name):
        """Return the value of *var_name* (case-insensitive).

        Raises:
            ValueError: if the name is not a known auth var
        """
        var_name = var_name.upper()
        if var_name not in cls.__auth_vars:
            raise ValueError("Invalid var_name. Valid vars: {}".
                             format(cls.__auth_vars.keys()))
        return cls.__auth_vars[var_name]

View File

@ -1,192 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
class VCPUSchedulerErr:
    """Expected error-message patterns for vcpu scheduler validation."""
    CANNOT_SET_VCPU0 = "vcpu 0 cannot be specified"
    VCPU_VAL_OUT_OF_RANGE = "vcpu value out of range"
    INVALID_PRIORITY = "priority must be between 1-99"
    PRIORITY_NOT_INTEGER = "priority must be an integer"
    INVALID_FORMAT = "invalid format"
    UNSUPPORTED_POLICY = "not a supported policy"
    POLICY_MUST_SPECIFIED_LAST = "policy/priority for all vcpus must be " \
                                 "specified last"
    MISSING_PARAMETER = "missing required parameter"
    TOO_MANY_PARAMETERS = "too many parameters"
    VCPU_MULTIPLE_ASSIGNMENT = "specified multiple times, specification is " \
                               "ambiguous"
    CPU_MODEL_UNAVAIL = "No valid host was found.*Host VCPU model.*required.*"
    CPU_MODEL_CONFLICT = "Image vCPU model is not permitted to override " \
                         "configuration set against the flavor"
class NumaErr:
    """Expected error-message patterns for NUMA topology scheduling."""
    GENERAL_ERR_PIKE = 'Requested instance NUMA topology cannot fit the ' \
                       'given host NUMA topology'
    # NUMA_AFFINITY_MISMATCH = " not match requested NUMA: {}"
    NUMA_VSWITCH_MISMATCH = 'vswitch not configured.* does not match ' \
                            'requested NUMA'
    NUMA_NODE_EXCLUDED = "NUMA: {} excluded"
    # UNINITIALIZED = '(NUMATopologyFilter) Uninitialized'
    TWO_NUMA_ONE_VSWITCH = 'vswitch not configured'
    # NOTE(review): 'UNDEVISIBLE' typo is part of the public name; renaming
    # would break existing callers.
    FLV_UNDEVISIBLE = 'ERROR (Conflict): flavor vcpus not evenly divisible ' \
                      'by the specified hw:numa_nodes value'
    FLV_CPU_OR_MEM_UNSPECIFIED = 'ERROR (Conflict): CPU and memory ' \
                                 'allocation must be provided for all ' \
                                 'NUMA nodes'
    INSUFFICIENT_CORES = 'Not enough free cores to schedule the instance'
class MinCPUErr:
    """Expected error-message patterns for hw:wrs:min_vcpus validation."""
    VAL_LARGER_THAN_VCPUS = "min_vcpus must be less than or equal to " \
                            "the flavor vcpus value"
    VAL_LESS_THAN_1 = "min_vcpus must be greater than or equal to 1"
    CPU_POLICY_NOT_DEDICATED = "min_vcpus is only valid when hw:cpu_policy " \
                               "is dedicated"
class ScaleErr:
    """Expected error-message patterns for vcpu scaling."""
    SCALE_LIMIT_HIT = "When scaling, cannot scale beyond limits"
class CpuAssignment:
    """Expected error-message patterns for host CPU function assignment."""
    VSWITCH_TOO_MANY_CORES = "The vswitch function can only be assigned up to" \
                             " 8 core"
    TOTAL_TOO_MANY_CORES = "More total logical cores requested than present " \
                           "on 'Processor {}'"
    NO_VM_CORE = "There must be at least one unused core for VMs."
    VSWITCH_INSUFFICIENT_CORES = "The vswitch function must have at least {} " \
                                 "core(s)"
class CPUThreadErr:
    """Expected error-message patterns for hw:cpu_thread_policy handling."""
    INVALID_POLICY = "invalid hw:cpu_thread_policy '{}', must be one of " \
                     "prefer, isolate, require"
    DEDICATED_CPU_REQUIRED_FLAVOR = 'ERROR (Conflict): hw:cpu_thread_policy ' \
                                    'is only valid when hw:cpu_policy is ' \
                                    'dedicated. Either unset ' \
                                    'hw:cpu_thread_policy or set ' \
                                    'hw:cpu_policy to dedicated.'
    DEDICATED_CPU_REQUIRED_BOOT_VM = 'ERROR (BadRequest): Cannot set cpu ' \
                                     'thread pinning policy in a non ' \
                                     'dedicated ' \
                                     'cpu pinning policy'
    VCPU_NUM_UNDIVISIBLE = "(NUMATopologyFilter) Cannot use 'require' cpu " \
                           "threads policy as requested #VCPUs: {}, " \
                           "is not divisible by number of threads: 2"
    INSUFFICIENT_CORES_FOR_ISOLATE = "{}: (NUMATopologyFilter) Cannot use " \
                                     "isolate cpu thread policy as requested " \
                                     "VCPUS: {} is greater than available " \
                                     "CPUs with all siblings free"
    HT_HOST_UNAVAIL = "(NUMATopologyFilter) Host not useable. Requested " \
                      "threads policy: '{}'; from flavor or image " \
                      "is not allowed on non-hyperthreaded host"
    UNSET_SHARED_VCPU = "Cannot set hw:cpu_thread_policy to {} if " \
                        "hw:wrs:shared_vcpu is set. Either unset " \
                        "hw:cpu_thread_policy, set it to prefer, or unset " \
                        "hw:wrs:shared_vcpu"
    UNSET_MIN_VCPUS = "Cannot set hw:cpu_thread_policy to {} if " \
                      "hw:wrs:min_vcpus is set. Either unset " \
                      "hw:cpu_thread_policy, set it to another policy, " \
                      "or unset hw:wrs:min_vcpus"
    CONFLICT_FLV_IMG = "Image property 'hw_cpu_thread_policy' is not " \
                       "permitted to override CPU thread pinning policy " \
                       "set against the flavor"
class CPUPolicyErr:
    """Expected error-message patterns for hw:cpu_policy conflicts."""
    CONFLICT_FLV_IMG = "Image property 'hw_cpu_policy' is not permitted to " \
                       "override CPU pinning policy set against " \
                       "the flavor "
class SharedCPUErr:
    """Expected error-message patterns for hw:wrs:shared_vcpu validation."""
    DEDICATED_CPU_REQUIRED = "hw:wrs:shared_vcpu is only valid when " \
                             "hw:cpu_policy is dedicated"
    INVALID_VCPU_ID = "hw:wrs:shared_vcpu must be greater than or equal to 0"
    MORE_THAN_FLAVOR = "hw:wrs:shared_vcpu value ({}) must be less than " \
                       "flavor vcpus ({})"
class ResizeVMErr:
    """Expected error-message patterns for VM resize operations."""
    RESIZE_ERR = "Error resizing server"
    SHARED_NOT_ENABLED = 'Shared vCPU not enabled .*, required by instance ' \
                         'cell {}'
class ColdMigErr:
    """Expected error-message patterns for cold migration."""
    HT_HOST_REQUIRED = "(NUMATopologyFilter) Host not useable. Requested " \
                       "threads policy: '[{}, {}]'; from flavor or " \
                       "image is not allowed on non-hyperthreaded host"
class LiveMigErr:
    """Expected error-message patterns for live migration."""
    BLOCK_MIG_UNSUPPORTED = "is not on local storage: Block migration can " \
                            "not be used with shared storage"
    GENERAL_NO_HOST = "No valid host was found. There are not enough hosts " \
                      "available."
    BLOCK_MIG_UNSUPPORTED_LVM = 'Block live migration is not supported for ' \
                                'hosts with LVM backed storage'
    LVM_PRECHECK_ERROR = 'Live migration can not be used with LVM backed ' \
                         'storage except a booted from volume VM ' \
                         'which does not have a local disk'
class NetworkingErr:
    """Expected error-message patterns for networking configuration."""
    INVALID_VXLAN_VNI_RANGE = "exceeds 16777215"
    INVALID_MULTICAST_IP_ADDRESS = "is not a valid multicast IP address."
    INVALID_VXLAN_PROVISION_PORTS = "Invalid input for port"
    VXLAN_TTL_RANGE_MISSING = "VXLAN time-to-live attribute missing"
    VXLAN_TTL_RANGE_TOO_LARGE = "is too large - must be no larger than '255'."
    VXLAN_TTL_RANGE_TOO_SMALL = "is too small - must be at least '1'."
    OVERLAP_SEGMENTATION_RANGE = "segmentation id range overlaps with"
    INVALID_MTU_VALUE = "requires an interface MTU value of at least"
    VXLAN_MISSING_IP_ON_INTERFACE = "requires an IP address"
    WRONG_IF_ADDR_MODE = "interface address mode must be 'static'"
    # NOTE(review): truncated 'interfac' looks like a deliberate substring
    # match for interface/interfaces - confirm before "fixing".
    SET_IF_ADDR_MODE_WHEN_IP_EXIST = "addresses still exist on interfac"
    NULL_IP_ADDR = "Address must not be null"
    NULL_NETWORK_ADDR = "Network must not be null"
    NULL_GATEWAY_ADDR = "Gateway address must not be null"
    NULL_HOST_PARTION_ADDR = "Host bits must not be zero"
    NOT_UNICAST_ADDR = "Address must be a unicast address"
    NOT_BROADCAST_ADDR = "Address cannot be the network broadcast address"
    DUPLICATE_IP_ADDR = "already exists"
    INVALID_IP_OR_PREFIX = "Invalid IP address and prefix"
    INVALID_IP_NETWORK = "Invalid IP network"
    ROUTE_GATEWAY_UNREACHABLE = "not reachable"
    IP_VERSION_NOT_MATCH = "Network and gateway IP versions must match"
    GATEWAY_IP_IN_SUBNET = "Gateway address must not be within destination " \
                           "subnet"
    NETWORK_IP_EQUAL_TO_GATEWAY = "Network and gateway IP addresses must be " \
                                  "different"
class PciAddrErr:
    """Expected error-message patterns for PCI address validation."""
    NONE_ZERO_DOMAIN = 'Only domain 0000 is supported'
    LARGER_THAN_MAX_BUS = 'PCI bus maximum value is 8'
    NONE_ZERO_FUNCTION = 'Only function 0 is supported'
    RESERVED_SLOTS_BUS0 = 'Slots 0,1 are reserved for PCI bus 0'
    RESERVED_SLOT_ANY_BUS = 'Slots 0 is reserved for any PCI bus'
    LARGER_THAN_MAX_SLOT = 'PCI slot maximum value is 31'
    BAD_FORMAT = 'Bad PCI address format'
    WRONG_BUS_VAL = 'Wrong bus value for PCI address'
class SrvGrpErr:
    """Expected error-message patterns for server group scheduling."""
    EXCEEDS_GRP_SIZE = 'Action would result in server group {} exceeding the ' \
                       'group size of {}'
    HOST_UNAVAIL_ANTI_AFFINITY = '(ServerGroupAntiAffinityFilter) ' \
                                 'Anti-affinity server group specified, ' \
                                 'but this host is already used by that group'
class CpuRtErr:
    """Expected error-message patterns for realtime CPU policy validation."""
    RT_AND_ORD_REQUIRED = 'Realtime policy needs vCPU.* mask configured with ' \
                          'at least 1 RT vCPU and 1 ordinary vCPU'
    DED_CPU_POL_REQUIRED = 'Cannot set realtime policy in a non dedicated cpu' \
                           ' pinning policy'
    RT_MASK_SHARED_VCPU_CONFLICT = 'hw:wrs:shared_vcpu .* is not a subset of ' \
                                   'non-realtime vCPUs'

View File

@ -1,55 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
class StxPath:
    """Well-known file/directory paths on the StarlingX controllers."""
    TIS_UBUNTU_PATH = '~/userdata/ubuntu_if_config.sh'
    TIS_CENTOS_PATH = '~/userdata/centos_if_config.sh'
    USERDATA = '~/userdata/'
    IMAGES = '~/images/'
    HEAT = '~/heat/'
    BACKUPS = '/opt/backups'
    CUSTOM_HEAT_TEMPLATES = '~/custom_heat_templates/'
    HELM_CHARTS_DIR = '/var/www/pages/helm_charts/'
    DOCKER_CONF = '/etc/docker-distribution/registry/config.yml'
    DOCKER_REPO = '/var/lib/docker-distribution/docker/registry/v2/repositories'
class VMPath:
    """Network-config file paths inside guest VMs, by guest OS family."""
    VM_IF_PATH_UBUNTU = '/etc/network/interfaces.d/'
    ETH_PATH_UBUNTU = '/etc/network/interfaces.d/{}.cfg'
    # Below two paths are common for CentOS, OpenSUSE, and RHEL
    VM_IF_PATH_CENTOS = '/etc/sysconfig/network-scripts/'
    ETH_PATH_CENTOS = '/etc/sysconfig/network-scripts/ifcfg-{}'

    # Centos paths for ipv4:
    RT_TABLES = '/etc/iproute2/rt_tables'
    ETH_RT_SCRIPT = '/etc/sysconfig/network-scripts/route-{}'
    ETH_RULE_SCRIPT = '/etc/sysconfig/network-scripts/rule-{}'
    ETH_ARP_ANNOUNCE = '/proc/sys/net/ipv4/conf/{}/arp_announce'
    ETH_ARP_FILTER = '/proc/sys/net/ipv4/conf/{}/arp_filter'
class UserData:
    """Cloud-init userdata file names used when booting guest VMs."""
    ADDUSER_TO_GUEST = 'cloud_config_adduser.txt'
    DPDK_USER_DATA = 'dpdk_user_data.txt'
class TestServerPath:
    """Paths on the remote test file server."""
    USER_DATA = '/home/svc-cgcsauto/userdata/'
    TEST_SCRIPT = '/home/svc-cgcsauto/test_scripts/'
    CUSTOM_HEAT_TEMPLATES = '/sandbox/custom_heat_templates/'
    CUSTOM_APPS = '/sandbox/custom_apps/'
class PrivKeyPath:
    """Locations of the private ssh key on the system under test."""
    OPT_PLATFORM = '/opt/platform/id_rsa'
    SYS_HOME = '~/.ssh/id_rsa'
class SysLogPath:
    """Distributed-cloud service log file locations."""
    DC_MANAGER = '/var/log/dcmanager/dcmanager.log'
    DC_ORCH = '/var/log/dcorch/dcorch.log'

View File

@ -1,8 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Module-level result flag, initialized to False.
# NOTE(review): presumably flipped by the test framework when a result is
# recorded - confirm against the code that imports this module.
test_result = False

View File

@ -1,167 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
class Labs(object):
    """Placeholder container for existing StarlingX system definitions.

    Each lab is a dict class-attribute; see the commented template below.
    Entries can also be added at runtime via add_lab_entry()/update_lab().
    """
    # Place for existing stx systems for convenience.
    # --lab <short_name> can be used in cmdline to specify an existing system

    # Each lab should be a dictionary such as following. The short_name and name are free style
    # strings while the floating IP and controller-0/1 IPs should map to what is shown in system
    # oam-show

    # STX_SYS1 = {
    #     'short_name': 'my_server1',
    #     'name': 'my_server1.com',
    #     'floating ip': '10.10.10.2',
    #     'controller-0 ip': '10.10.10.3',
    #     'controller-1 ip': '10.10.10.4',
    # }
    pass
def update_lab(lab_dict_name=None, lab_name=None, floating_ip=None, **kwargs):
    """
    Update/Add lab dict params for specified lab

    Args:
        lab_dict_name (str|None):
        lab_name (str|None): lab short_name. This is used only if
            lab_dict_name is not specified
        floating_ip (str|None):
        **kwargs: Some possible keys: subcloud-1, name, etc

    Returns (dict): updated lab dict

    Raises:
        ValueError: if no lab identifier given, nothing to update, or no
            matching lab is found
    """
    if not lab_name and not lab_dict_name:
        from consts.proj_vars import ProjVar
        lab_name = ProjVar.get_var('LAB').get('short_name', None)
        if not lab_name:
            raise ValueError("lab_dict_name or lab_name needs to be specified")

    if floating_ip:
        kwargs.update(**{'floating ip': floating_ip})

    if not kwargs:
        raise ValueError("Please specify floating_ip and/or kwargs")

    if not lab_dict_name:
        # Resolve the Labs attribute name from the short_name. Only dict
        # attributes are considered, and the comparison is case/'-'/'_'
        # insensitive - consistent with get_lab_dict().
        target = lab_name.lower().strip().replace('-', '_')
        for attr in dir(Labs):
            if attr.startswith('__'):
                continue
            candidate = getattr(Labs, attr)
            if not isinstance(candidate, dict):
                continue
            short_name = candidate.get('short_name')
            if short_name and \
                    short_name.lower().strip().replace('-', '_') == target:
                lab_dict_name = attr
                break
        if not lab_dict_name:
            raise ValueError(
                "Could not find a lab with short_name {} in Labs "
                "class".format(lab_name))
    else:
        lab_dict_name = lab_dict_name.upper().replace('-', '_')

    lab_dict = getattr(Labs, lab_dict_name)
    lab_dict.update(kwargs)
    return lab_dict
def get_lab_dict(lab, key='short_name'):
    """
    Look up an existing lab dict from the Labs class.

    Args:
        lab (str): lab name or fip
        key (str): unique identifier to locate a lab. Valid values:
            short_name, name, floating ip

    Returns (dict|None): lab dict or None if no matching lab found
    """
    def _normalize(value):
        # compare case-insensitively, treating '-' and '_' as equivalent
        return value.lower().replace('-', '_')

    lab_attrs = [attr for attr in dir(Labs) if not attr.startswith('__')]
    lab_dicts = [getattr(Labs, attr) for attr in lab_attrs]
    lab_dicts = [lab_dict for lab_dict in lab_dicts
                 if isinstance(lab_dict, dict)]

    target = _normalize(lab)
    for lab_dict in lab_dicts:
        # skip entries that don't define the lookup key instead of
        # crashing on None.lower()
        candidate = lab_dict.get(key)
        if candidate and _normalize(candidate) == target:
            return lab_dict

    return None
def add_lab_entry(floating_ip, dict_name=None, short_name=None, name=None,
                  **kwargs):
    """
    Add a new lab dictionary to Labs class

    Args:
        floating_ip (str): floating ip of a lab to be added
        dict_name: name of the entry, such as 'PV0'
        short_name: short name of the TiS system, such as ip_1_4
        name: name of the STX system, such as 'yow-cgcs-pv-0'
        **kwargs: other information of the lab such as controllers' ips, etc

    Returns:
        dict: lab dict added to Labs class

    Raises:
        ValueError: if an entry with the same floating ip or dict_name
            already exists
    """
    for attr in dir(Labs):
        lab = getattr(Labs, attr)
        if isinstance(lab, dict):
            # use .get(): a lab dict is not guaranteed to define
            # 'floating ip'
            if lab.get('floating ip') == floating_ip:
                raise ValueError(
                    "Entry for {} already exists in Labs class!".format(
                        floating_ip))

    if dict_name and dict_name in dir(Labs):
        raise ValueError(
            "Entry for {} already exists in Labs class!".format(dict_name))

    # all naming fields fall back to the floating ip
    if not short_name:
        short_name = floating_ip
    if not name:
        name = floating_ip
    if not dict_name:
        dict_name = floating_ip

    lab_dict = {'name': name,
                'short_name': short_name,
                'floating ip': floating_ip,
                }
    lab_dict.update(kwargs)
    setattr(Labs, dict_name, lab_dict)
    return lab_dict
class NatBoxes(object):
    """Known NatBox host definitions used to reach guest VMs."""

    # Place for existing NatBox that are already configured
    NAT_BOX_HW_EXAMPLE = {
        'name': 'nat_hw',
        'ip': '10.10.10.10',
        'user': 'natbox_user',
        'password': 'natbox_password'
    }

    # Following example when localhost is configured as natbox, and test cases
    # are also ran from same localhost
    NAT_BOX_VBOX_EXAMPLE = {
        'name': 'localhost',
        'ip': 'localhost',
        'user': None,
        'password': None,
    }

    @staticmethod
    def add_natbox(ip, user=None, password=None, prompt=None):
        """Register a natbox at runtime; stored as NatBoxes.NAT_NEW.

        Args:
            ip (str): natbox IP; also used as its name
            user (str|None): ssh user, defaults to 'svc-cgcsauto'
            password (str|None): ssh password, defaults to ')OKM0okm'
            prompt (str|None): optional shell prompt pattern

        Returns (dict): the natbox dict that was stored
        """
        nat_dict = {
            'ip': ip,
            'name': ip,
            'user': user or 'svc-cgcsauto',
            'password': password or ')OKM0okm',
        }
        if prompt:
            nat_dict['prompt'] = prompt
        NatBoxes.NAT_NEW = nat_dict
        return nat_dict

View File

@ -1,89 +0,0 @@
#
# Copyright (c) 2019, 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Please DO NOT import any modules
class ProjVar:
    """Session-wide project variables kept in one private class-level dict."""

    __var_dict = {'BUILD_PATH': None,
                  'LOG_DIR': None,
                  'SOURCE_OPENRC': False,
                  'SW_VERSION': [],
                  'PATCH': None,
                  'SESSION_ID': None,
                  'CGCS_DB': True,
                  'IS_SIMPLEX': False,
                  'KEYSTONE_DEBUG': False,
                  'TEST_NAME': None,
                  'PING_FAILURE': False,
                  'LAB': None,
                  'ALWAYS_COLLECT': False,
                  'REGION': 'RegionOne',
                  'COLLECT_TELNET': False,
                  'TELNET_THREADS': None,
                  'SYS_TYPE': None,
                  'COLLECT_SYS_NET_INFO': False,
                  'IS_VBOX': False,
                  'RELEASE': 'R6',
                  'REMOTE_CLI': False,
                  'USER_FILE_DIR': '~/',
                  'NO_TEARDOWN': False,
                  'VSWITCH_TYPE': None,
                  'IS_DC': False,
                  'PRIMARY_SUBCLOUD': None,
                  'SUBCLOUD_LIST': None,
                  'BUILD_INFO': {},
                  'TEMP_DIR': '',
                  'INSTANCE_BACKING': {},
                  'OPENSTACK_DEPLOYED': None,
                  'DEFAULT_INSTANCE_BACKING': None,
                  'STX_KEYFILE_PATH': '~/.ssh/id_rsa',
                  'IPV6_OAM': None,
                  }

    @classmethod
    def init_vars(cls, lab, natbox, logdir, tenant, collect_all, always_collect,
                  horizon_visible):
        """Derive and store the per-session variables for the selected lab.

        Args:
            lab (dict): lab dict; 'short_name' is used to name key files
            natbox (dict): natbox dict for reaching guest VMs
            logdir (str): base directory for all session logs
            tenant: primary tenant for the session
            collect_all (bool): whether to always collect logs
            always_collect (bool): whether to collect even on success
            horizon_visible (bool): whether horizon browser is visible
        """
        labname = lab['short_name']
        session_vars = {
            'NATBOX_KEYFILE_PATH': '~/priv_keys/keyfile_{}.pem'.format(labname),
            'STX_KEYFILE_SYS_HOME': '~/keyfile_{}.pem'.format(labname),
            'LOG_DIR': logdir,
            'TCLIST_PATH': logdir + '/test_results.log',
            'PYTESTLOG_PATH': logdir + '/pytestlog.log',
            'LAB_NAME': lab['short_name'],
            'TEMP_DIR': logdir + '/tmp_files/',
            'PING_FAILURE_DIR': logdir + '/ping_failures/',
            'GUEST_LOGS_DIR': logdir + '/guest_logs/',
            'PRIMARY_TENANT': tenant,
            'LAB': lab,
            'NATBOX': natbox,
            'COLLECT_ALL': collect_all,
            'ALWAYS_COLLECT': always_collect,
            'HORIZON_VISIBLE': horizon_visible,
        }
        cls.__var_dict.update(session_vars)

    @classmethod
    def set_var(cls, append=False, **kwargs):
        """Set variables by keyword (case-insensitive names).

        With append=True, the value is appended to the existing list var
        instead of replacing it.
        """
        for var, value in kwargs.items():
            if append:
                cls.__var_dict[var.upper()].append(value)
            else:
                cls.__var_dict[var.upper()] = value

    @classmethod
    def get_var(cls, var_name):
        """Return the value of a variable (case-insensitive).

        Raises:
            ValueError: if var_name is not a known variable.
        """
        var_name = var_name.upper()
        if var_name in cls.__var_dict:
            return cls.__var_dict[var_name]
        raise ValueError(
            "Invalid var_name: {}. Valid vars: {}".format(
                var_name, cls.__var_dict.keys()))

View File

@ -1,42 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
class SkipStorageSpace:
    """Skip-reason messages for insufficient storage space preconditions."""
    SMALL_CINDER_VOLUMES_POOL = "Cinder Volumes Pool is less than 30G"
    INSUFFICIENT_IMG_CONV = 'Insufficient image-conversion space to convert ' \
                            '{} image to raw format'
class SkipStorageBacking:
    """Skip-reason messages for missing instance storage backings."""
    LESS_THAN_TWO_HOSTS_WITH_BACKING = "Less than two hosts with {} instance " \
                                       "storage backing exist on system"
    NO_HOST_WITH_BACKING = "No host with {} instance storage backing exists " \
                           "on system"
class SkipHypervisor:
    """Skip-reason messages for insufficient hypervisor count."""
    LESS_THAN_TWO_HYPERVISORS = "Less than two hypervisors available"
class SkipHyperthreading:
    """Skip-reason messages based on hyperthreaded host availability."""
    LESS_THAN_TWO_HT_HOSTS = "Less than two hyperthreaded hosts available"
    MORE_THAN_ONE_HT_HOSTS = "More than one hyperthreaded hosts available"
class SkipHostIf:
    """Skip-reason messages for unavailable host network interfaces."""
    PCI_IF_UNAVAIL = "SRIOV or PCI-passthrough interface unavailable"
    PCIPT_IF_UNAVAIL = "PCI-passthrough interface unavailable"
    SRIOV_IF_UNAVAIL = "SRIOV interface unavailable"
    MGMT_INFRA_UNAVAIL = 'traffic control class is not defined in this lab'
class SkipSysType:
    """Skip-reason messages based on the system type (simplex/duplex/etc.)."""
    SMALL_FOOTPRINT = "Skip for small footprint lab"
    LESS_THAN_TWO_CONTROLLERS = "Less than two controllers on system"
    SIMPLEX_SYSTEM = 'Not applicable to Simplex system'
    DUPLEX_SYSTEM = 'Not applicable to Duplex system'
    SIMPLEX_ONLY = 'Only applicable to Simplex system'

View File

@ -1,683 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from consts.proj_vars import ProjVar
# output of date. such as: Tue Mar 1 18:20:29 UTC 2016
DATE_OUTPUT = r'[0-2]\d:[0-5]\d:[0-5]\d\s[A-Z]{3,}\s\d{4}$'
# Well-known public IP (Google DNS) used as an external ping target
EXT_IP = '8.8.8.8'
# such as in string '5 packets transmitted, 0 received, 100% packet loss,
# time 4031ms', number 100 will be found
PING_LOSS_RATE = r'\, (\d{1,3})\% packet loss\,'
# vshell ping loss rate pattern. 3 packets transmitted, 0 received, 0 total,
# 100.00%% loss
VSHELL_PING_LOSS_RATE = r'\, (\d{1,3}).\d{1,2}[%]% loss'
# Matches 8-4-4-4-12 hexadecimal digits. Lower case only
UUID = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
# Match name and uuid.
# Such as: 'ubuntu_14 (a764c205-eb82-4f18-bda6-6c8434223eb5)'
NAME_UUID = r'(.*) \((' + UUID + r')\)'
# Message to indicate boot from volume from nova show
BOOT_FROM_VOLUME = 'Attempt to boot from volume - no image supplied'
# Link-local address of the cloud instance metadata service
METADATA_SERVER = '169.254.169.254'
# Heat template path
HEAT_PATH = 'heat/hot/simple/'
HEAT_SCENARIO_PATH = 'heat/hot/scenarios/'
HEAT_FLAVORS = ['small_ded', 'small_float']
HEAT_CUSTOM_TEMPLATES = 'custom_heat_templates'
# special NIC patterns (Mellanox device model strings)
MELLANOX_DEVICE = 'MT27500|MT27710'
MELLANOX4 = 'MT.*ConnectX-4'
# Flag file present while platform task affining is still in progress
PLATFORM_AFFINE_INCOMPLETE = '/etc/platform/.task_affining_incomplete'
PLATFORM_CONF_PATH = '/etc/platform/platform.conf'
SUBCLOUD_PATTERN = 'subcloud'
PLATFORM_NET_TYPES = ('mgmt', 'oam', 'infra', 'pxeboot')
TIMEZONES = [
    "Asia/Hong_Kong",  # UTC+8
    "America/Los_Angeles",  # UTC-8, DST:UTC-7
    "Canada/Eastern",  # UTC-5, DST:UTC-4
    "Canada/Central",  # UTC-6, DST:UTC-5
    # "Europe/London",  # UTC, DST:UTC+1
    "Europe/Berlin",  # UTC+1, DST:UTC+2
    "UTC"
]
# Maps storage backing to the corresponding host aggregate name
STORAGE_AGGREGATE = {
    # 'local_lvm' : 'local_storage_lvm_hosts',
    'local_image': 'local_storage_image_hosts',
    'remote': 'remote_storage_hosts',
}
class NtpPool:
    """NTP server pool strings, including deliberately invalid ones."""
    NTP_POOL_1 = '2.pool.ntp.org,1.pool.ntp.org,0.pool.ntp.org'
    NTP_POOL_2 = '1.pool.ntp.org,2.pool.ntp.org,2.pool.ntp.org'
    NTP_POOL_3 = '3.ca.pool.ntp.org,2.ca.pool.ntp.org,1.ca.pool.ntp.org'
    # Negative-test values: too many servers / over-long server name
    NTP_POOL_TOO_LONG = '3.ca.pool.ntp.org,2.ca.pool.ntp.org,' \
                        '1.ca.pool.ntp.org,1.com,2.com,3.com'
    NTP_NAME_TOO_LONG = 'garbage_' * 30
class GuestImages:
    """Guest image locations and per-image file/size/format metadata."""
    # Directory with enough space for temporary image copies
    TMP_IMG_DIR = '/opt/backups'
    DEFAULT = {
        'image_dir': '{}/images'.format(ProjVar.get_var('USER_FILE_DIR')),
        'image_dir_file_server': '/sandbox/images',
        'guest': 'tis-centos-guest'
    }
    TIS_GUEST_PATTERN = 'cgcs-guest|tis-centos-guest'
    # Images that must never be removed during cleanup
    GUESTS_NO_RM = ['ubuntu_14', 'tis-centos-guest', 'cgcs-guest']
    # Image files name and size from TestFileServer
    # <glance_image_name>: <source_file_name>, <root disk size>,
    # <dest_file_name>, <disk_format>, <container_format>
    IMAGE_FILES = {
        'ubuntu_14': (
            'ubuntu-14.04-server-cloudimg-amd64-disk1.img', 3,
            'ubuntu_14.qcow2', 'qcow2', 'bare'),
        'ubuntu_12': (
            'ubuntu-12.04-server-cloudimg-amd64-disk1.img', 8,
            'ubuntu_12.qcow2', 'qcow2', 'bare'),
        'ubuntu_16': (
            'ubuntu-16.04-xenial-server-cloudimg-amd64-disk1.img', 8,
            'ubuntu_16.qcow2', 'qcow2', 'bare'),
        'centos_6': (
            'CentOS-6.8-x86_64-GenericCloud-1608.qcow2', 8,
            'centos_6.qcow2', 'qcow2', 'bare'),
        'centos_7': (
            'CentOS-7-x86_64-GenericCloud.qcow2', 8,
            'centos_7.qcow2', 'qcow2', 'bare'),
        'rhel_6': (
            'rhel-6.5-x86_64.qcow2', 11, 'rhel_6.qcow2', 'qcow2', 'bare'),
        'rhel_7': (
            'rhel-7.2-x86_64.qcow2', 11, 'rhel_7.qcow2', 'qcow2', 'bare'),
        'opensuse_11': (
            'openSUSE-11.3-x86_64.qcow2', 11,
            'opensuse_11.qcow2', 'qcow2', 'bare'),
        'opensuse_12': (
            'openSUSE-12.3-x86_64.qcow2', 21,
            'opensuse_12.qcow2', 'qcow2', 'bare'),
        'opensuse_13': (
            'openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.94.qcow2', 16,
            'opensuse_13.qcow2', 'qcow2', 'bare'),
        'win_2012': (
            'win2012r2_cygwin_compressed.qcow2', 13,
            'win2012r2.qcow2', 'qcow2', 'bare'),
        'win_2016': (
            'win2016_cygwin_compressed.qcow2', 29,
            'win2016.qcow2', 'qcow2', 'bare'),
        'ge_edge': (
            'edgeOS.hddirect.qcow2', 5,
            'ge_edge.qcow2', 'qcow2', 'bare'),
        'cgcs-guest': (
            'cgcs-guest.img', 1, 'cgcs-guest.img', 'raw', 'bare'),
        'vxworks': (
            'vxworks-tis.img', 1, 'vxworks.img', 'raw', 'bare'),
        # Entries with a None source file have no file on TestFileServer
        'tis-centos-guest': (
            None, 2, 'tis-centos-guest.img', 'raw', 'bare'),
        'tis-centos-guest-rt': (
            None, 2, 'tis-centos-guest-rt.img', 'raw', 'bare'),
        'tis-centos-guest-qcow2': (
            None, 2, 'tis-centos-guest.qcow2', 'qcow2', 'bare'),
        'centos_gpu': (
            'centos-67-cloud-gpu.img', 8,
            'centos_6_gpu.qcow2', 'qcow2', 'bare'),
        'debian-8-m-agent': (
            'debian-8-m-agent.qcow2', 1.8,
            'debian-8-m-agent.qcow2', 'qcow2', 'bare'),
        'trusty_uefi': (
            'trusty-server-cloudimg-amd64-uefi1.img', 2.2,
            'trusty-uefi.qcow2', 'qcow2', 'bare'),
        'uefi_shell': (
            'uefi_shell.iso', 2, 'uefi_shell.iso', 'raw', 'bare'),
    }
class Networks:
    """Neutron network name/IP regex patterns keyed by network type."""
    INFRA_NETWORK_CIDR = "192.168.205.0/24"
    IPV4_IP = r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}'

    __NEUTRON_NET_NAME_PATTERN = {
        'mgmt': r'tenant\d-mgmt-net',
        'data': r'tenant\d-net',
        'internal': 'internal',
        'external': 'external',
    }
    __NEUTRON_NET_IP_PATTERN = {
        'data': r'172.\d{1,3}.\d{1,3}.\d{1,3}',
        'mgmt': r'192.168.\d{3}\.\d{1,3}|192.168.[8|9]\d\.\d{1,3}',
        'internal': r'10.\d{1,3}.\d{1,3}.\d{1,3}',
        'external': r'192.168.\d\.\d{1,3}|192.168.[1-5]\d\.\d{1,3}|10.10.\d{'
                    r'1,3}\.\d{1,3}'
    }

    @classmethod
    def get_nenutron_net_patterns(cls, net_type='mgmt'):
        """Return the (name_pattern, ip_pattern) pair for a network type.

        Unknown net_type values yield (None, None).
        """
        name_pattern = cls.__NEUTRON_NET_NAME_PATTERN.get(net_type)
        ip_pattern = cls.__NEUTRON_NET_IP_PATTERN.get(net_type)
        return name_pattern, ip_pattern

    @classmethod
    def set_neutron_net_patterns(cls, net_type, net_name_pattern=None,
                                 net_ip_pattern=None):
        """Override stored patterns for an existing network type.

        Raises:
            ValueError: if net_type is not one of the known types.
        """
        if net_type not in cls.__NEUTRON_NET_NAME_PATTERN:
            raise ValueError("Unknown net_type {}. Select from: {}".format(
                net_type, list(cls.__NEUTRON_NET_NAME_PATTERN.keys())))
        if net_name_pattern is not None:
            cls.__NEUTRON_NET_NAME_PATTERN[net_type] = net_name_pattern
        if net_ip_pattern is not None:
            cls.__NEUTRON_NET_IP_PATTERN[net_type] = net_ip_pattern
class SystemType:
    """System deployment type labels."""
    CPE = 'All-in-one'
    STANDARD = 'Standard'
class StorageAggregate:
    """Host aggregate names for each instance storage backing."""
    LOCAL_LVM = 'local_storage_lvm_hosts'
    LOCAL_IMAGE = 'local_storage_image_hosts'
    REMOTE = 'remote_storage_hosts'
class VMStatus:
    """Nova VM status strings."""
    # under http://docs.openstack.org/developer/nova/vmstates.html
    ACTIVE = 'ACTIVE'
    BUILD = 'BUILDING'
    REBUILD = 'REBUILD'
    VERIFY_RESIZE = 'VERIFY_RESIZE'
    RESIZE = 'RESIZED'
    ERROR = 'ERROR'
    SUSPENDED = 'SUSPENDED'
    PAUSED = 'PAUSED'
    NO_STATE = 'NO STATE'
    HARD_REBOOT = 'HARD REBOOT'
    SOFT_REBOOT = 'REBOOT'
    STOPPED = "SHUTOFF"
    MIGRATING = 'MIGRATING'
class ImageStatus:
    """Glance image status strings."""
    QUEUED = 'queued'
    ACTIVE = 'active'
    SAVING = 'saving'
class HostAdminState:
    """Host administrative states."""
    UNLOCKED = 'unlocked'
    LOCKED = 'locked'
class HostOperState:
    """Host operational states."""
    ENABLED = 'enabled'
    DISABLED = 'disabled'
class HostAvailState:
    """Host availability states."""
    DEGRADED = 'degraded'
    OFFLINE = 'offline'
    ONLINE = 'online'
    AVAILABLE = 'available'
    FAILED = 'failed'
    POWER_OFF = 'power-off'
class HostTask:
    """Host task strings shown during host state transitions."""
    BOOTING = 'Booting'
    REBOOTING = 'Rebooting'
    POWERING_ON = 'Powering-on'
    POWER_CYCLE = 'Critical Event Power-Cycle'
    POWER_DOWN = 'Critical Event Power-Down'
class Prompt:
    """Regex patterns for the shell prompts encountered over ssh/telnet."""
    CONTROLLER_0 = r'.*controller\-0[:| ].*\$'
    CONTROLLER_1 = r'.*controller\-1[:| ].*\$'
    CONTROLLER_PROMPT = r'.*controller\-[01][:| ].*\$ '
    VXWORKS_PROMPT = '-> '
    ADMIN_PROMPT = r'\[.*@controller\-[01].*\(keystone_admin\)\]\$'
    TENANT1_PROMPT = r'\[.*@controller\-[01] .*\(keystone_tenant1\)\]\$ '
    TENANT2_PROMPT = r'\[.*@controller\-[01] .*\(keystone_tenant2\)\]\$ '
    # general prompt. Need to fill in tenant name
    TENANT_PROMPT = r'\[.*@controller\-[01] .*\(keystone_{}\)\]\$ '
    REMOTE_CLI_PROMPT = r'\(keystone_{}\)\]\$ '  # remote cli prompt
    COMPUTE_PROMPT = r'.*compute\-([0-9]){1,}\:~\$'
    STORAGE_PROMPT = r'.*storage\-([0-9]){1,}\:~\$'
    PASSWORD_PROMPT = r'.*assword\:[ ]?$|assword for .*:[ ]?$'
    LOGIN_PROMPT = "ogin:"
    SUDO_PASSWORD_PROMPT = 'Password: '
    BUILD_SERVER_PROMPT_BASE = r'{}@{}\:~.*'
    TEST_SERVER_PROMPT_BASE = r'\[{}@.*\]\$ '
    # TIS_NODE_PROMPT_BASE = r'{}\:~\$ '
    TIS_NODE_PROMPT_BASE = r'{}[: ]?~.*$'
    ADD_HOST = r'.*\(yes/no\).*'
    ROOT_PROMPT = '.*root@.*'
    Y_N_PROMPT = r'.*\(y/n\)\?.*'
    YES_N_PROMPT = r'.*\[yes/N\]\: ?'
    CONFIRM_PROMPT = '.*confirm: ?'
class NovaCLIOutput:
    """Expected nova CLI output message patterns."""
    VM_ACTION_ACCEPTED = "Request to {} server (.*) has been accepted."
    VM_START_ACCEPTED = "Request to start server (.*) has been accepted."
    VM_STOP_ACCEPTED = "Request to stop server (.*) has been accepted."
    VM_DELETE_REJECTED_NOT_EXIST = "No server with a name or ID of '(.*)' " \
                                   "exists."
    VM_DELETE_ACCEPTED = "Request to delete server (.*) has been accepted."
    VM_BOOT_REJECT_MEM_PAGE_SIZE_FORBIDDEN = "Page size .* forbidden against .*"
    SRV_GRP_DEL_REJ_NOT_EXIST = "Delete for server group (.*) failed"
    SRV_GRP_DEL_SUCC = "Server group (.*) has been successfully deleted."
class FlavorSpec:
    """Nova flavor extra-spec keys."""
    CPU_POLICY = 'hw:cpu_policy'
    VCPU_MODEL = 'hw:cpu_model'
    SHARED_VCPU = 'hw:wrs:shared_vcpu'
    CPU_THREAD_POLICY = 'hw:cpu_thread_policy'
    VCPU_SCHEDULER = 'hw:wrs:vcpu:scheduler'
    MIN_VCPUS = "hw:wrs:min_vcpus"
    STORAGE_BACKING = 'aggregate_instance_extra_specs:stx_storage'
    DISK_READ_BYTES = 'quota:disk_read_bytes_sec'
    DISK_READ_IOPS = 'quota:disk_read_iops_sec'
    DISK_WRITE_BYTES = 'quota:disk_write_bytes_sec'
    DISK_WRITE_IOPS = 'quota:disk_write_iops_sec'
    DISK_TOTAL_BYTES = 'quota:disk_total_bytes_sec'
    DISK_TOTAL_IOPS = 'quota:disk_total_iops_sec'
    NUMA_NODES = 'hw:numa_nodes'
    NUMA_0 = 'hw:numa_node.0'
    NUMA_1 = 'hw:numa_node.1'
    NUMA0_CPUS = 'hw:numa_cpus.0'
    NUMA1_CPUS = 'hw:numa_cpus.1'
    NUMA0_MEM = 'hw:numa_mem.0'
    NUMA1_MEM = 'hw:numa_mem.1'
    VSWITCH_NUMA_AFFINITY = 'hw:wrs:vswitch_numa_affinity'
    MEM_PAGE_SIZE = 'hw:mem_page_size'
    AUTO_RECOVERY = 'sw:wrs:auto_recovery'
    GUEST_HEARTBEAT = 'sw:wrs:guest:heartbeat'
    SRV_GRP_MSG = "sw:wrs:srv_grp_messaging"
    NIC_ISOLATION = "hw:wrs:nic_isolation"
    PCI_NUMA_AFFINITY = "hw:pci_numa_affinity_policy"
    PCI_PASSTHROUGH_ALIAS = "pci_passthrough:alias"
    PCI_IRQ_AFFINITY_MASK = "hw:pci_irq_affinity_mask"
    CPU_REALTIME = 'hw:cpu_realtime'
    CPU_REALTIME_MASK = 'hw:cpu_realtime_mask'
    HPET_TIMER = 'sw:wrs:guest:hpet'
    NESTED_VMX = 'hw:wrs:nested_vmx'
    NUMA0_CACHE_CPUS = 'hw:cache_vcpus.0'
    NUMA1_CACHE_CPUS = 'hw:cache_vcpus.1'
    NUMA0_L3_CACHE = 'hw:cache_l3.0'
    NUMA1_L3_CACHE = 'hw:cache_l3.1'
    LIVE_MIG_TIME_OUT = 'hw:wrs:live_migration_timeout'
    LIVE_MIG_MAX_DOWNTIME = 'hw:wrs:live_migration_max_downtime'
class ImageMetadata:
    """Glance image metadata property keys."""
    MEM_PAGE_SIZE = 'hw_mem_page_size'
    AUTO_RECOVERY = 'sw_wrs_auto_recovery'
    VIF_MODEL = 'hw_vif_model'
    CPU_THREAD_POLICY = 'hw_cpu_thread_policy'
    CPU_POLICY = 'hw_cpu_policy'
    CPU_RT_MASK = 'hw_cpu_realtime_mask'
    CPU_RT = 'hw_cpu_realtime'
    CPU_MODEL = 'hw_cpu_model'
    FIRMWARE_TYPE = 'hw_firmware_type'
class VMMetaData:
    """VM metadata keys."""
    EVACUATION_PRIORITY = 'sw:wrs:recovery_priority'
class InstanceTopology:
    """Regex patterns for parsing instance topology output."""
    NODE = r'node:(\d),'
    PGSIZE = r'pgsize:(\d{1,3}),'
    VCPUS = r'vcpus:(\d{1,2}),'
    PCPUS = r'pcpus:(\d{1,2}),\s'  # find a string separated by ',
    # ' if multiple numa nodes
    CPU_POLICY = 'pol:(.*),'
    SIBLINGS = 'siblings:(.*),'
    THREAD_POLICY = 'thr:(.*)$|thr:(.*),'
    TOPOLOGY = r'\d{1,2}s,\d{1,2}c,\d{1,2}t'
class RouterStatus:
    """Neutron router status strings."""
    ACTIVE = 'ACTIVE'
    DOWN = 'DOWN'
class EventLogID:
    """Fault-management event/alarm log IDs."""
    PATCH_INSTALL_FAIL = '900.002'
    PATCH_IN_PROGRESS = '900.001'
    CINDER_IO_CONGEST = '800.101'
    STORAGE_LOR = '800.011'
    STORAGE_POOLQUOTA = '800.003'
    STORAGE_ALARM_COND = '800.001'
    HEARTBEAT_CHECK_FAILED = '700.215'
    HEARTBEAT_ENABLED = '700.211'
    REBOOT_VM_COMPLETE = '700.186'
    REBOOT_VM_INPROGRESS = '700.182'
    REBOOT_VM_ISSUED = '700.181'  # soft-reboot or hard-reboot in reason text
    VM_DELETED = '700.114'
    VM_DELETING = '700.110'
    VM_CREATED = '700.108'
    MULTI_NODE_RECOVERY = '700.016'
    HEARTBEAT_DISABLED = '700.015'
    VM_REBOOTING = '700.005'
    VM_FAILED = '700.001'
    IMA = '500.500'
    SERVICE_GROUP_STATE_CHANGE = '401.001'
    LOSS_OF_REDUNDANCY = '400.002'
    CON_DRBD_SYNC = '400.001'
    PROVIDER_NETWORK_FAILURE = '300.005'
    NETWORK_AGENT_NOT_RESPOND = '300.003'
    CONFIG_OUT_OF_DATE = '250.001'
    INFRA_NET_FAIL = '200.009'
    BMC_SENSOR_ACTION = '200.007'
    STORAGE_DEGRADE = '200.006'
    # 200.004 compute-0 experienced a service-affecting failure.
    # Auto-recovery in progress.
    # host=compute-0 critical April 7, 2017, 2:34 p.m.
    HOST_RECOVERY_IN_PROGRESS = '200.004'
    HOST_LOCK = '200.001'
    NTP_ALARM = '100.114'
    INFRA_PORT_FAIL = '100.110'
    FS_THRESHOLD_EXCEEDED = '100.104'
    CPU_USAGE_HIGH = '100.101'
    MNFA_MODE = '200.020'
class NetworkingVmMapping:
    """vif/flavor pairings for networking test VMs."""
    VSWITCH = {
        'vif': 'avp',
        'flavor': 'medium.dpdk',
    }
    AVP = {
        'vif': 'avp',
        'flavor': 'small',
    }
    VIRTIO = {
        'vif': 'avp',
        'flavor': 'small',
    }
class VifMapping:
    """Maps vif model names to their app label strings."""
    VIF_MAP = {'vswitch': 'DPDKAPPS',
               'avp': 'AVPAPPS',
               'virtio': 'VIRTIOAPPS',
               'vhost': 'VHOSTAPPS',
               'sriov': 'SRIOVAPPS',
               'pcipt': 'PCIPTAPPS'
               }
class LocalStorage:
    """Local storage profile names and types."""
    DIR_PROFILE = 'storage_profiles'
    TYPE_STORAGE_PROFILE = ['storageProfile', 'localstorageProfile']
class VMNetwork:
    """Guest-side network config templates (Debian/RHEL style ifcfg)."""
    NET_IF = r"auto {}\niface {} inet dhcp\n"
    IFCFG_DHCP = """
DEVICE={}
BOOTPROTO=dhcp
ONBOOT=yes
TYPE=Ethernet
USERCTL=yes
PEERDNS=yes
IPV6INIT={}
PERSISTENT_DHCLIENT=1
"""
    IFCFG_STATIC = """
DEVICE={}
BOOTPROTO=static
ONBOOT=yes
TYPE=Ethernet
USERCTL=yes
PEERDNS=yes
IPV6INIT={}
PERSISTENT_DHCLIENT=1
IPADDR={}
"""
class HTTPPort:
    """REST API ports and versions for the OpenStack/platform services."""
    NEUTRON_PORT = 9696
    NEUTRON_VER = "v2.0"
    CEIL_PORT = 8777
    CEIL_VER = "v2"
    GNOCCHI_PORT = 8041
    GNOCCHI_VER = 'v1'
    SYS_PORT = 6385
    SYS_VER = "v1"
    CINDER_PORT = 8776
    CINDER_VER = "v3"  # v1 and v2 are also supported
    GLANCE_PORT = 9292
    GLANCE_VER = "v2"
    HEAT_PORT = 8004
    HEAT_VER = "v1"
    HEAT_CFN_PORT = 8000
    HEAT_CFN_VER = "v1"
    NOVA_PORT = 8774
    NOVA_VER = "v2.1"  # v3 also supported
    NOVA_EC2_PORT = 8773
    NOVA_EC2_VER = "v2"
    PATCHING_PORT = 15491
    PATCHING_VER = "v1"
class QoSSpec:
    """Cinder volume QoS spec keys."""
    READ_BYTES = 'read_bytes_sec'
    WRITE_BYTES = 'write_bytes_sec'
    TOTAL_BYTES = 'total_bytes_sec'
    READ_IOPS = 'read_iops_sec'
    WRITE_IOPS = 'write_iops_sec'
    TOTAL_IOPS = 'total_iops_sec'
class DevClassID:
    """PCI device class ID patterns."""
    QAT_VF = '0b4000'
    GPU = '030000'
    USB = '0c0320|0c0330'
class MaxVmsSupported:
    """Max number of VMs supported per system type."""
    SX = 10
    XEON_D = 4
    DX = 10
    VBOX = 2
class CpuModel:
    """Known guest CPU model names."""
    CPU_MODELS = (
        'Skylake-Server', 'Skylake-Client',
        'Broadwell', 'Broadwell-noTSX',
        'Haswell-noTSX-IBRS', 'Haswell',
        'IvyBridge', 'SandyBridge',
        'Westmere', 'Nehalem', 'Penryn', 'Conroe')
class BackendState:
    """Storage backend state strings."""
    CONFIGURED = 'configured'
    CONFIGURING = 'configuring'
class BackendTask:
    """Storage backend task strings."""
    RECONFIG_CONTROLLER = 'reconfig-controller'
    APPLY_MANIFEST = 'applying-manifests'
class PartitionStatus:
    """Disk partition status strings."""
    READY = 'Ready'
    MODIFYING = 'Modifying'
    DELETING = 'Deleting'
    CREATING = 'Creating'
    IN_USE = 'In-Use'
class SysType:
    """System configuration type labels."""
    AIO_DX = 'AIO-DX'
    AIO_SX = 'AIO-SX'
    STORAGE = 'Storage'
    REGULAR = 'Regular'
    MULTI_REGION = 'Multi-Region'
    DISTRIBUTED_CLOUD = 'Distributed_Cloud'
class HeatStackStatus:
    """Heat stack status strings."""
    CREATE_FAILED = 'CREATE_FAILED'
    CREATE_COMPLETE = 'CREATE_COMPLETE'
    UPDATE_COMPLETE = 'UPDATE_COMPLETE'
    UPDATE_FAILED = 'UPDATE_FAILED'
    DELETE_FAILED = 'DELETE_FAILED'
class VimEventID:
    """VIM event log identifiers for VM migration phases."""
    LIVE_MIG_BEGIN = 'instance-live-migrate-begin'
    LIVE_MIG_END = 'instance-live-migrated'
    COLD_MIG_BEGIN = 'instance-cold-migrate-begin'
    COLD_MIG_END = 'instance-cold-migrated'
    COLD_MIG_CONFIRM_BEGIN = 'instance-cold-migrate-confirm-begin'
    COLD_MIG_CONFIRMED = 'instance-cold-migrate-confirmed'
class MigStatus:
    """VM migration status strings."""
    COMPLETED = 'completed'
    RUNNING = 'running'
    PREPARING = 'preparing'
    PRE_MIG = 'pre-migrating'
    POST_MIG = 'post-migrating'
class TrafficControl:
    """Expected tc (traffic control) classes and bandwidth ratios.

    The per-config dicts map traffic class name to a (rate, ceil) pair
    expressed as a fraction of the interface speed.
    """
    CLASSES = {'1:40': 'default', '1:1': 'root', '1:10': 'hiprio',
               '1:20': 'storage', '1:30': 'migration',
               '1:50': 'drbd'}

    # Patterns for parsing 'tc class show' output
    RATE_PATTERN_ROOT = r'class htb 1:1 root rate (\d+)([GMK])bit ceil (\d+)(' \
                        r'[GMK])bit burst \d+b cburst \d+b'
    RATE_PATTERN = r'class htb (1:\d+) parent 1:1 leaf \d+: prio \d+ rate (' \
                   r'\d+)([GMK])bit ceil (\d+)([GMK])bit ' \
                   r'burst \d+b cburst \d+b'

    # no infra
    MGMT_NO_INFRA = {
        'config': 'no infra',
        'root': (1, 1),
        'default': (0.1, 0.2),
        'hiprio': (0.1, 0.2),
        'storage': (0.5, 1),
        'migration': (0.3, 1),
        'drbd': (0.8, 1)}

    # infra must be sep
    MGMT_SEP = {
        'config': 'separate mgmt',
        'root': (1, 1),
        'default': (0.1, 1),
        'hiprio': (0.1, 1)}

    # infra could be sep or over pxe
    MGMT_USES_PXE = {
        'config': 'mgmt consolidated over pxeboot',
        'root': (1, 1),
        'default': (0.1, 0.2),
        'hiprio': (0.1, 0.2)}

    # infra over mgmt
    MGMT_USED_BY_INFRA = {
        'config': 'infra consolidated over mgmt',
        'root': (1, 1),
        'default': (0.1, 0.2),
        'hiprio': (0.1, 0.2),
        'storage': (0.5, 1),
        'migration': (0.3, 1),
        'drbd': (0.8, 1)}

    # infra over mgmt
    INFRA_USES_MGMT = {
        'config': 'infra consolidated over mgmt',
        'root': (0.99, 0.99),
        'default': (0.99 * 0.1, 0.99 * 0.2),
        'hiprio': (0.99 * 0.1, 0.99 * 0.2),
        'storage': (0.99 * 0.5, 0.99 * 1),
        'migration': (0.99 * 0.3, 0.99 * 1),
        'drbd': (0.99 * 0.8, 0.99 * 1)}

    # mgmt could be sep or over pxe
    INFRA_SEP = {
        'config': 'separate infra',
        'root': (1, 1),
        'default': (0.1, 0.2),
        'hiprio': (0.1, 0.2),
        'storage': (0.5, 1),
        'migration': (0.3, 1),
        'drbd': (0.8, 1)}

    # mgmt must be over pxe
    INFRA_USES_PXE = {
        'config': 'infra and mgmt consolidated over pxeboot',
        'root': (1, 1),
        'default': (0.99 * 0.1, 0.99 * 0.2),  # 0.1, 0.2 is the ratio for mgmt
        'hiprio': (0.99 * 0.1, 0.99 * 0.2),  # 0.1, 0.2 is the ratio for mgmt
        'storage': (0.99 * 0.5, 0.99),
        'migration': (0.99 * 0.3, 0.99),
        'drbd': (0.99 * 0.8, 0.99)}
class SubcloudStatus:
    """Distributed cloud subcloud availability/management/sync statuses."""
    AVAIL_ONLINE = "online"
    AVAIL_OFFLINE = "offline"
    MGMT_MANAGED = "managed"
    MGMT_UNMANAGED = "unmanaged"
    SYNCED = 'in-sync'
    UNSYNCED = 'out-of-sync'
class PodStatus:
    """Kubernetes pod status strings."""
    RUNNING = 'Running'
    COMPLETED = 'Completed'
    CRASH = 'CrashLoopBackOff'
    POD_INIT = 'PodInitializing'
    INIT = 'Init:0/1'
    PENDING = 'Pending'
    TERMINATING = 'Terminating'
class AppStatus:
    """system application status strings."""
    UPLOADING = 'uploading'
    UPLOADED = 'uploaded'
    UPLOAD_FAILED = 'upload-failed'
    APPLIED = 'applied'
    APPLY_FAILED = 'apply-failed'
    REMOVE_FAILED = 'remove-failed'
    DELETE_FAILED = 'delete-failed'
class VSwitchType:
    """Virtual switch type labels."""
    OVS_DPDK = 'ovs-dpdk'
    AVS = 'avs'
    NONE = 'none'
class Container:
    """Container registry constants."""
    LOCAL_DOCKER_REG = 'registry.local:9001'

View File

@ -1,160 +0,0 @@
#
# Copyright (c) 2019, 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
CLI_TIMEOUT = 600
class HostTimeout:
    """Timeouts for host state transitions (values presumably in seconds)."""
    # Host in online state after locked
    ONLINE_AFTER_LOCK = 1200
    # Compute host reaches enabled/available state after system host-unlock
    # returned
    COMPUTE_UNLOCK = 840
    # Host reaches enabled/available state after system host-unlock returned
    CONTROLLER_UNLOCK = 1360
    # Host reaches enabled/available state after sudo reboot -f from host
    REBOOT = 2400
    # Active controller switched and being able to run openstack CLI after
    # system host-swact returned
    SWACT = 600
    # Host in locked state after system host-lock cli returned
    LOCK = 900
    # Task clears in system host-show after host reaches enabled/available state
    TASK_CLEAR = 600
    # Host in offline or failed state via system host-show after sudo reboot
    # -f returned
    FAIL_AFTER_REBOOT = 120
    # Hypervisor in enabled/up state after host in available state and task
    # clears
    HYPERVISOR_UP = 300
    # Web service up in sudo sm-dump after host in available state and task
    # clears
    WEB_SERVICE_UP = 180
    PING_TIMEOUT = 60
    TIMEOUT_BUFFER = 2
    # subfunction go enabled/available after host admin/avail states go
    # enabled/available
    SUBFUNC_READY = 300
    SYSTEM_RESTORE = 3600  # System restore complete
    SYSTEM_BACKUP = 1800  # system backup complete
    BACKUP_COPY_USB = 600
    INSTALL_CLONE = 3600
    INSTALL_CLONE_STATUS = 60
    INSTALL_CONTROLLER = 2400
    INSTALL_LOAD = 3600
    POST_INSTALL_SCRIPTS = 3600
    CONFIG_CONTROLLER_TIMEOUT = 1800
    CEPH_MON_ADD_CONFIG = 300
    NODES_STATUS_READY = 7200
class InstallTimeout:
    """Timeouts for system install/upgrade/backup operations."""
    # Host reaches enabled/available state after system host-unlock returned
    CONTROLLER_UNLOCK = 9000
    CONFIG_CONTROLLER_TIMEOUT = 1800
    # REBOOT = 2000 # Host reaches enabled/available state after sudo
    # reboot -f from host
    UPGRADE = 7200
    WIPE_DISK_TIMEOUT = 30
    SYSTEM_RESTORE = 3600  # System restore complete
    SYSTEM_BACKUP = 1800  # system backup complete
    BACKUP_COPY_USB = 600
    INSTALL_CLONE = 3600
    INSTALL_CLONE_STATUS = 60
    INSTALL_CONTROLLER = 2400
    INSTALL_LOAD = 3600
    POST_INSTALL_SCRIPTS = 3600
class VMTimeout:
    """Timeouts for VM lifecycle operations."""
    STATUS_CHANGE = 300
    STATUS_VERIFY_RESIZE = 30
    LIVE_MIGRATE_COMPLETE = 240
    COLD_MIGRATE_CONFIRM = 600
    BOOT_VM = 1800
    DELETE = 180
    VOL_ATTACH = 60
    SSH_LOGIN = 90
    AUTO_RECOVERY = 600
    REBOOT = 180
    PAUSE = 180
    IF_ADD = 30
    REBUILD = 300
    DHCP_IP_ASSIGN = 30
    DHCP_RETRY = 500
    PING_VM = 200
class VolumeTimeout:
    """Timeouts for cinder volume operations."""
    STATUS_CHANGE = 2700  # Windows guest takes a long time
    DELETE = 90
class SysInvTimeout:
    """Timeouts for sysinv configuration changes."""
    RETENTION_PERIOD_SAVED = 30
    RETENTION_PERIOD_MODIFY = 60
    DNS_SERVERS_SAVED = 30
    DNS_MODIFY = 60
    PARTITION_CREATE = 120
    PARTITION_DELETE = 120
    PARTITION_MODIFY = 120
class CMDTimeout:
    """Timeouts for miscellaneous CLI commands."""
    HOST_CPU_MODIFY = 600
    RESOURCE_LIST = 60
    REBOOT_VM = 60
    CPU_PROFILE_APPLY = 30
class ImageTimeout:
    """Timeouts for glance image operations."""
    CREATE = 1800
    STATUS_CHANGE = 60
    DELETE = 120
class EventLogTimeout:
    """Timeouts for expected fm event log transitions."""
    HEARTBEAT_ESTABLISH = 300
    HEALTH_CHECK_FAIL = 60
    VM_REBOOT = 60
    NET_AGENT_NOT_RESPOND_CLEAR = 120
class MTCTimeout:
    """Timeouts for maintenance process-kill scenarios."""
    KILL_PROCESS_HOST_CHANGE_STATUS = 40
    KILL_PROCESS_HOST_KEEP_STATUS = 20
    KILL_PROCESS_SWACT_NOT_START = 20
    KILL_PROCESS_SWACT_START = 40
    KILL_PROCESS_SWACT_COMPLETE = 40
class CeilTimeout:
    """Timeouts for ceilometer operations."""
    EXPIRE = 300
class OrchestrationPhaseTimeout:
    """Timeouts for each orchestration strategy phase."""
    INITIAL = 20
    BUILD = 60
    ABORT = 7200
    APPLY = 86400
class DCTimeout:
    """Timeouts for distributed cloud sync/audit cycles."""
    SYNC = 3600  # 60 minutes
    SUBCLOUD_AUDIT = 600  # 4 minutes + 1
    PATCH_AUDIT = 240  # 3 minutes + 1
class MiscTimeout:
    """Miscellaneous timeouts."""
    # timeout for two audits. 'sudo ntpq' got pulled every 10 minutes in
    # /var/log/user.log
    NTPQ_UPDATE = 1260
class K8sTimeout:
    """Timeouts for kubernetes application upload/apply."""
    APP_UPLOAD = 300
    APP_APPLY = 600

View File

@ -1,10 +0,0 @@
#!/bin/bash
# Ubuntu cloud-init user data script to be executed after ubuntu vm
# initialization

# Bring up the secondary NICs (eth1/eth2) with DHCP.
# BUG FIX: 'sudo echo ... >> file' does not elevate the redirection -- the
# append is performed by the *unprivileged* shell and fails on a root-owned
# file. Pipe through 'sudo tee -a' so the write itself runs as root.
echo -e "auto eth1\niface eth1 inet dhcp\n\nauto eth2\niface eth2 inet dhcp" | sudo tee -a "/etc/network/interfaces" > /dev/null
sudo ifup eth1
sudo ifup eth2

# Log resulting interface addresses for debugging
ip addr

View File

@ -1,67 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from consts.auth import Tenant
from utils import table_parser, cli
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
def get_alarms(header='alarm_id', name=None, strict=False,
               auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get values from the 'openstack alarm list' table.

    Args:
        header (str): column to return values from, e.g., 'alarm_id'
        name (str|None): alarm Name to filter on; None returns the whole
            column
        strict (bool): whether the Name filter requires an exact match
        auth_info (dict): auth info used to run the openstack command
        con_ssh: ssh client for the active controller

    Returns (list): values under <header> for the (matching) alarms
    """
    output = cli.openstack('alarm list', ssh_client=con_ssh,
                           auth_info=auth_info)[1]
    # Alarm reasons can wrap over multiple lines in the table output
    table_ = table_parser.table(output, combine_multiline_entry=True)

    if name is None:
        return table_parser.get_column(table_, header)

    return table_parser.get_values(table_, header, Name=name, strict=strict)
def get_events(event_type, limit=None, header='message_id', con_ssh=None,
               auth_info=None, **filters):
    """
    Get values from the 'openstack event list' table.

    Args:
        event_type (str): value for the event_type filter
        limit (int|None): max number of events to list
        header (str): column to return values from
        con_ssh: ssh client for the active controller
        auth_info (dict|None): auth info used to run the openstack command
        **filters: additional <key>=<value> pairs appended to --filter

    Returns (list): values under <header> for the listed events
    """
    args = ''
    if limit:
        args = '--limit {}'.format(limit)

    if event_type:
        filters['event_type'] = event_type
    if filters:
        # Filters are passed as a single ';'-separated key=value string
        joined = ';'.join('{}={}'.format(k, v) for k, v in filters.items())
        args += ' --filter {}'.format(joined)

    output = cli.openstack('event list', args, ssh_client=con_ssh,
                           auth_info=auth_info)[1]
    table_ = table_parser.table(output)
    return table_parser.get_values(table_, header)

View File

@ -1,635 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
###############################################################
# Intended for check functions for test result verifications
# assert is used to fail the check
# LOG.tc_step is used log the info
# Should be called by test function directly
###############################################################
import re
import time
import copy
from utils.tis_log import LOG
from utils.rest import Rest
from consts.auth import Tenant
from consts.stx import GuestImages, EventLogID
from keywords import host_helper, system_helper, vm_helper, common, \
glance_helper, storage_helper
SEP = '\n------------------------------------ '
def check_topology_of_vm(vm_id, vcpus, prev_total_cpus=None, numa_num=None,
                         vm_host=None, cpu_pol=None,
                         cpu_thr_pol=None, expt_increase=None, min_vcpus=None,
                         current_vcpus=None,
                         prev_siblings=None, shared_vcpu=None, con_ssh=None,
                         guest=None):
    """
    Check vm has the correct topology based on the number of vcpus,
    cpu policy, cpu threads policy, number of numa nodes

    Check is done via vm-topology, nova host-describe, virsh vcpupin (on vm
    host), nova-compute.log (on vm host),
    /sys/devices/system/cpu/<cpu#>/topology/thread_siblings_list (on vm)

    Args:
        vm_id (str):
        vcpus (int): number of vcpus specified in flavor
        prev_total_cpus (float): such as 37.0000, 37.0625
        numa_num (int): number of numa nodes vm vcpus are on. Default is 1 if
            unset in flavor. NOTE(review): accepted but not referenced in
            this body.
        vm_host (str):
        cpu_pol (str): dedicated or shared
        cpu_thr_pol (str): isolate, require, or prefer
        expt_increase (int): expected total vcpu increase on vm host compared
            to prev_total_cpus
        min_vcpus (None|int): min vcpu flavor spec. vcpu scaling specific.
            NOTE(review): accepted but not referenced in this body.
        current_vcpus (None|int): current number of vcpus. vcpu scaling specific
        prev_siblings (list): list of siblings total. Usually used when
            checking vm topology after live migration
        con_ssh (SSHClient)
        shared_vcpu (int): which vcpu is shared
        guest (str|None): guest os. e.g., ubuntu_14. Default guest is assumed
            when None.

    Returns (tuple): (vm_host_cpus, vm_siblings) as computed on the vm host
    """
    LOG.info(
        "------ Check topology of vm {} on controller, hypervisor and "
        "vm".format(
            vm_id))
    # Unset cpu policy is treated as 'shared' (floating vm)
    cpu_pol = cpu_pol if cpu_pol else 'shared'

    if vm_host is None:
        vm_host = vm_helper.get_vm_host(vm_id, con_ssh=con_ssh)

    log_cores_siblings = host_helper.get_logcore_siblings(host=vm_host,
                                                          con_ssh=con_ssh)

    if prev_total_cpus is not None:
        if expt_increase is None:
            # By default the host's used vcpus should grow by the vm's vcpus
            expt_increase = vcpus

        LOG.info(
            "{}Check total vcpus for vm host is increased by {} via "
            "'openstack hypervisor show'".format(
                SEP, expt_increase))
        expt_used_vcpus = prev_total_cpus + expt_increase
        # Poll for up to ~70s for the hypervisor's used-vcpu count to settle
        end_time = time.time() + 70
        while time.time() < end_time:
            post_hosts_cpus = host_helper.get_vcpus_for_computes(
                hosts=vm_host, field='vcpus_used')
            if expt_used_vcpus == post_hosts_cpus[vm_host]:
                break
            time.sleep(10)
        else:
            # while/else: runs only when the poll above timed out without a
            # match; re-query with the 'used_now' field before asserting.
            post_hosts_cpus = host_helper.get_vcpus_for_computes(
                hosts=vm_host, field='used_now')
            assert expt_used_vcpus == post_hosts_cpus[
                vm_host], "Used vcpus on host {} is not as expected. " \
                          "Expected: {}; Actual: {}".format(vm_host,
                                                            expt_used_vcpus,
                                                            post_hosts_cpus[
                                                                vm_host])

    LOG.info(
        "{}Check vm vcpus, pcpus on vm host via nova-compute.log and virsh "
        "vcpupin".format(SEP))
    # Note: floating vm pcpus will not be checked via virsh vcpupin
    vm_host_cpus, vm_siblings = _check_vm_topology_on_host(
        vm_id, vcpus=vcpus, vm_host=vm_host, cpu_pol=cpu_pol,
        cpu_thr_pol=cpu_thr_pol,
        host_log_core_siblings=log_cores_siblings,
        shared_vcpu=shared_vcpu)

    LOG.info(
        "{}Check vm vcpus, siblings on vm via "
        "/sys/devices/system/cpu/<cpu>/topology/thread_siblings_list".
        format(SEP))
    # Sibling check is skipped when a shared vcpu is configured
    check_sibling = True if shared_vcpu is None else False
    _check_vm_topology_on_vm(vm_id, vcpus=vcpus, siblings_total=vm_siblings,
                             current_vcpus=current_vcpus,
                             prev_siblings=prev_siblings, guest=guest,
                             check_sibling=check_sibling)

    return vm_host_cpus, vm_siblings
def _check_vm_topology_on_host(vm_id, vcpus, vm_host, cpu_pol, cpu_thr_pol,
                               host_log_core_siblings=None, shared_vcpu=None,
                               shared_host_cpus=None):
    """
    Check the vm's cpu pinning from the hypervisor host's point of view,
    via virsh vcpupin and taskset.

    Args:
        vm_id (str):
        vcpus (int): number of vcpus in the flavor
        vm_host (str): hypervisor hosting the vm
        cpu_pol (str): cpu policy; 'ded'-containing value means dedicated
        cpu_thr_pol (str): cpu thread policy (isolate/require/prefer)
        host_log_core_siblings (list|None): host logical core sibling pairs;
            queried from the host when not provided
        shared_vcpu (int|None): index of the shared vcpu, if any
        shared_host_cpus (None|list): host cpus with 'Shared' function;
            queried from the host when needed and not provided

    Returns (tuple): (vm_host_cpus, vm_siblings); vm_siblings is None for
        policies other than dedicated isolate/require
    """
    if not host_log_core_siblings:
        host_log_core_siblings = host_helper.get_logcore_siblings(host=vm_host)

    if shared_vcpu and not shared_host_cpus:
        shared_cpus_ = host_helper.get_host_cpu_cores_for_function(
            func='Shared', hostname=vm_host, thread=None)
        # Flatten per-processor shared cores into one list
        shared_host_cpus = []
        for proc, shared_cores in shared_cpus_.items():
            shared_host_cpus += shared_cores

    LOG.info(
        '======= Check vm topology from vm_host via: virsh vcpupin, taskset')
    instance_name = vm_helper.get_vm_instance_name(vm_id)

    with host_helper.ssh_to_host(vm_host) as host_ssh:
        vcpu_cpu_map = vm_helper.get_vcpu_cpu_map(host_ssh=host_ssh)
        used_host_cpus = []
        vm_host_cpus = []
        vcpus_list = list(range(vcpus))
        # Collect host cpus used by all instances; remember this vm's cpus
        for instance_name_, instance_map in vcpu_cpu_map.items():
            used_host_cpus += list(instance_map.values())
            if instance_name_ == instance_name:
                for vcpu in vcpus_list:
                    vm_host_cpus.append(instance_map[vcpu])
        used_host_cpus = list(set(used_host_cpus))
        vm_siblings = None
        # Check vm sibling pairs
        if 'ded' in cpu_pol and cpu_thr_pol in ('isolate', 'require'):
            if len(host_log_core_siblings[0]) == 1:
                # Non-hyperthreaded host: every vcpu is its own sibling set
                assert cpu_thr_pol != 'require', \
                    "cpu_thread_policy 'require' must be used on a HT host"
                vm_siblings = [[vcpu_] for vcpu_ in vcpus_list]
            else:
                vm_siblings = []
                for vcpu_index in vcpus_list:
                    vm_host_cpu = vm_host_cpus[vcpu_index]
                    for host_sibling in host_log_core_siblings:
                        if vm_host_cpu in host_sibling:
                            # The other logical core in this sibling pair
                            other_cpu = host_sibling[0] if \
                                vm_host_cpu == host_sibling[1] else \
                                host_sibling[1]
                            if cpu_thr_pol == 'require':
                                # 'require': vm must consume both siblings
                                assert other_cpu in vm_host_cpus, \
                                    "'require' vm uses only 1 of the sibling " \
                                    "cores"
                                vm_siblings.append(sorted([vcpu_index,
                                                           vm_host_cpus.index(
                                                               other_cpu)]))
                            else:
                                # 'isolate': sibling must stay unused by vms
                                assert other_cpu not in used_host_cpus, \
                                    "sibling core was not reserved for " \
                                    "'isolate' vm"
                                vm_siblings.append([vcpu_index])

        LOG.info("{}Check vcpus for vm via sudo virsh vcpupin".format(SEP))
        vcpu_pins = host_helper.get_vcpu_pins_for_instance_via_virsh(
            host_ssh=host_ssh,
            instance_name=instance_name)
        assert vcpus == len(vcpu_pins), \
            'Actual vm cpus number - {} is not as expected - {} in sudo ' \
            'virsh vcpupin'.format(len(vcpu_pins), vcpus)

        virsh_cpus_sets = []
        for vcpu_pin in vcpu_pins:
            vcpu = int(vcpu_pin['vcpu'])
            cpu_set = common.parse_cpus_list(vcpu_pin['cpuset'])
            virsh_cpus_sets += cpu_set
            if shared_vcpu is not None and vcpu == shared_vcpu:
                # Shared vcpu must be pinned to exactly one shared host cpu
                assert len(cpu_set) == 1, \
                    "shared vcpu is pinned to more than 1 host cpu"
                assert cpu_set[0] in shared_host_cpus, \
                    "shared vcpu is not pinned to shared host cpu"

        if 'ded' in cpu_pol:
            # Dedicated vm: virsh pinning must match the vcpu->cpu map exactly
            assert set(vm_host_cpus) == set(
                virsh_cpus_sets), "pinned cpus in virsh cpupin is not the " \
                                  "same as ps"
        else:
            # Floating vm: pinned set must be a strict superset of used cpus
            assert set(vm_host_cpus) < set(
                virsh_cpus_sets), "floating vm should be affined to all " \
                                  "available host cpus"

        LOG.info("{}Get cpu affinity list for vm via taskset -pc".format(SEP))
        ps_affined_cpus = \
            vm_helper.get_affined_cpus_for_vm(vm_id,
                                              host_ssh=host_ssh,
                                              vm_host=vm_host,
                                              instance_name=instance_name)
        assert set(ps_affined_cpus) == set(
            virsh_cpus_sets), "Actual affined cpu in taskset is different " \
                              "than virsh"
    return vm_host_cpus, vm_siblings
def _check_vm_topology_on_vm(vm_id, vcpus, siblings_total, current_vcpus=None,
                             prev_siblings=None, guest=None,
                             check_sibling=True):
    """
    Verify cpu topology as seen from inside the guest.

    Checks present/online/offline core counts via /sys/devices/system/cpu
    (or wmic for Windows guests), and optionally the thread-sibling lists.

    Args:
        vm_id (str): vm to ssh into (via natbox)
        vcpus (int): expected number of present cores
        siblings_total (list|None): expected sibling groups, e.g. [[0, 1], [2, 3]]
        current_vcpus (int|None): expected online cores; defaults to vcpus
        prev_siblings (list|None): sibling groups before a topology-changing
            action; accepted as an alternative expected value
        guest (str|None): guest os name; 'win' guests use wmic instead of sysfs
        check_sibling (bool): whether to validate sibling lists

    Raises:
        AssertionError: if counts or sibling lists don't match expectations
    """
    siblings_total_ = None
    if siblings_total:
        # deep copy so callers' sibling lists are never mutated here
        siblings_total_ = copy.deepcopy(siblings_total)
    # Check from vm in /proc/cpuinfo and
    # /sys/devices/.../cpu#/topology/thread_siblings_list
    if not guest:
        guest = ''
    if not current_vcpus:
        current_vcpus = int(vcpus)
    LOG.info(
        '=== Check vm topology from within the vm via: /sys/devices/system/cpu')
    actual_sibs = []
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        win_expt_cores_per_sib = win_log_count_per_sibling = None
        if 'win' in guest:
            LOG.info(
                "{}Check windows guest cores via wmic cpu get cmds".format(SEP))
            # wmic does not report offline cores; treat all as online/present
            offline_cores_count = 0
            log_cores_count, win_log_count_per_sibling = \
                get_procs_and_siblings_on_windows(vm_ssh)
            online_cores_count = present_cores_count = log_cores_count
        else:
            LOG.info(
                "{}Check vm present|online|offline cores from inside vm via "
                "/sys/devices/system/cpu/".format(SEP))
            present_cores, online_cores, offline_cores = \
                vm_helper.get_proc_nums_from_vm(vm_ssh)
            present_cores_count = len(present_cores)
            online_cores_count = len(online_cores)
            offline_cores_count = len(offline_cores)
        assert vcpus == present_cores_count, \
            "Number of vcpus: {}, present cores: {}".format(
                vcpus, present_cores_count)
        assert current_vcpus == online_cores_count, \
            "Current vcpus for vm: {}, online cores: {}".format(
                current_vcpus, online_cores_count)
        expt_total_cores = online_cores_count + offline_cores_count
        # 512 appears to be an accepted platform max here — TODO confirm.
        # NOTE(review): message formats vcpus where present_cores_count seems
        # intended.
        assert expt_total_cores in [present_cores_count, 512], \
            "Number of present cores: {}. online+offline cores: {}".format(
                vcpus, expt_total_cores)
        # Sibling check only valid when no cores are offline
        if check_sibling and siblings_total_ and online_cores_count == \
                present_cores_count:
            # NOTE(review): first arm is unreachable — this branch is only
            # entered when siblings_total_ is truthy.
            expt_sibs_list = [[vcpu] for vcpu in
                              range(present_cores_count)] if not \
                siblings_total_ \
                else siblings_total_
            expt_sibs_list = [sorted(expt_sibs_list)]
            if prev_siblings:
                # siblings_total may get modified here
                expt_sibs_list.append(sorted(prev_siblings))
            if 'win' in guest:
                LOG.info("{}Check windows guest siblings via wmic cpu get "
                         "cmds".format(SEP))
                expt_cores_list = []
                for sib_list in expt_sibs_list:
                    # NOTE(review): loop var shadows the vcpus parameter
                    win_expt_cores_per_sib = [len(vcpus) for vcpus in sib_list]
                    expt_cores_list.append(win_expt_cores_per_sib)
                assert win_log_count_per_sibling in expt_cores_list, \
                    "Expected log cores count per sibling: {}, actual: {}".\
                    format(win_expt_cores_per_sib, win_log_count_per_sibling)
            else:
                LOG.info(
                    "{}Check vm /sys/devices/system/cpu/["
                    "cpu#]/topology/thread_siblings_list".format(
                        SEP))
                for cpu in ['cpu{}'.format(i) for i in
                            range(online_cores_count)]:
                    actual_sibs_for_cpu = \
                        vm_ssh.exec_cmd(
                            'cat /sys/devices/system/cpu/{}/topology/thread_'
                            'siblings_list'.format(cpu), fail_ok=False)[1]
                    sib_for_cpu = common.parse_cpus_list(actual_sibs_for_cpu)
                    # collect unique sibling groups only
                    if sib_for_cpu not in actual_sibs:
                        actual_sibs.append(sib_for_cpu)
                assert sorted(
                    actual_sibs) in expt_sibs_list, "Expt sib lists: {}, " \
                                                    "actual sib list: {}". \
                    format(expt_sibs_list, sorted(actual_sibs))
def get_procs_and_siblings_on_windows(vm_ssh):
    """
    Query a Windows guest via wmic for its logical core layout.

    Args:
        vm_ssh: ssh client connected to the Windows guest

    Returns (tuple): (<total logical cores (int)>,
        <logical cores per physical core (list of int)>)
    """
    per_param_counts = []
    for param in ('NumberOfCores', 'NumberOfLogicalProcessors'):
        raw_output = vm_ssh.exec_cmd('wmic cpu get {}'.format(param),
                                     fail_ok=False)[1].strip()
        # Keep only numeric data rows; drop the header and junk lines
        header_regex = '{}|x'.format(param)
        values = []
        for line in raw_output.splitlines():
            stripped = line.strip()
            if stripped and not re.search(header_regex, line):
                values.append(int(stripped))
        per_param_counts.append(values)
    # Logical processors per physical core = cores * logical-per-core
    log_procs_per_phy = [cores * logical for cores, logical in
                         zip(per_param_counts[0], per_param_counts[1])]
    total_log_procs = sum(log_procs_per_phy)
    LOG.info(
        "Windows guest total logical cores: {}, logical_cores_per_phy_core: {}".
        format(total_log_procs, log_procs_per_phy))
    return total_log_procs, log_procs_per_phy
def check_vm_vswitch_affinity(vm_id, on_vswitch_nodes=True):
    """
    Verify the vm's numa node placement relative to its host's vSwitch nodes.

    Args:
        vm_id (str): vm to check
        on_vswitch_nodes (bool): True to require the vm on vswitch numa
            nodes only; False to require it off all vswitch numa nodes

    Raises:
        AssertionError: if the placement requirement is not met
    """
    vm_host, vm_numa_nodes = vm_helper.get_vm_host_and_numa_nodes(vm_id)
    vswitch_cores_dict = host_helper.get_host_cpu_cores_for_function(
        vm_host, func='vSwitch')
    vswitch_procs = [proc for proc, cores in vswitch_cores_dict.items()
                     if cores]
    # Nothing to verify on hosts without vswitch cores
    if not vswitch_procs:
        return
    vm_nodes = set(vm_numa_nodes)
    vswitch_nodes = set(vswitch_procs)
    if on_vswitch_nodes:
        assert vm_nodes <= vswitch_nodes, \
            "VM {} is on numa nodes {} instead of vswitch " \
            "numa nodes {}".format(vm_id, vm_numa_nodes, vswitch_procs)
    else:
        assert not (vm_nodes & vswitch_nodes), \
            "VM {} is on vswitch numa node(s). VM numa " \
            "nodes: {}, vSwitch numa nodes: {}".format(
                vm_id, vm_numa_nodes, vswitch_procs)
def check_fs_sufficient(guest_os, boot_source='volume'):
    """
    Check if volume pool, image storage, and/or image conversion space is
    sufficient to launch vm

    Args:
        guest_os (str): e.g., tis-centos-guest, win_2016
        boot_source (str): volume or image

    Returns (str): image id
    """
    LOG.info("Check if storage fs is sufficient to launch boot-from-{} vm "
             "with {}".format(boot_source, guest_os))
    # Windows images are large, so have glance_helper verify disk space first
    check_disk = 'win' in guest_os
    # Default guests (ubuntu_14 / TIS guest pattern) are kept cached; any
    # other image is cleaned up at function-scope teardown
    cleanup = None if re.search(
        'ubuntu_14|{}'.format(GuestImages.TIS_GUEST_PATTERN),
        guest_os) else 'function'
    img_id = glance_helper.get_guest_image(guest_os, check_disk=check_disk,
                                           cleanup=cleanup)
    return img_id
def check_vm_files(vm_id, storage_backing, ephemeral, swap, vm_type, file_paths,
                   content, root=None, vm_action=None,
                   prev_host=None, post_host=None, disks=None, post_disks=None,
                   guest_os=None,
                   check_volume_root=False):
    """
    Check the files on vm after specified action. This is to check the disks
    in the basic nova matrix table.
    Args:
        vm_id (str):
        storage_backing (str): local_image, local_lvm, or remote
        root (int): root disk size in flavor. e.g., 2, 5
        ephemeral (int): e.g., 0, 1
        swap (int): e.g., 0, 512
        vm_type (str): image, volume, image_with_vol, vol_with_vol
        file_paths (list): list of file paths to check
        content (str): content of the files (assume all files have the same
            content)
        vm_action (str|None): live_migrate, cold_migrate, resize, evacuate,
            None (expect no data loss)
        prev_host (None|str): vm host prior to vm_action. This is used to
            check if vm host has changed when needed.
        post_host (None|str): vm host after vm_action.
        disks (dict): disks that are returned from
            vm_helper.get_vm_devices_via_virsh()
        post_disks (dict): only used in resize case
        guest_os (str|None): default guest assumed for None. e,g., ubuntu_16
        check_volume_root (bool): whether to check root disk size even if vm
            is booted from image
    Returns:
    """
    # post_disks (resize) takes precedence for size checks later on
    final_disks = post_disks if post_disks else disks
    final_paths = list(file_paths)
    if not disks:
        disks = vm_helper.get_vm_devices_via_virsh(vm_id=vm_id)
    eph_disk = disks.get('eph', {})
    if not eph_disk:
        if post_disks:
            eph_disk = post_disks.get('eph', {})
    swap_disk = disks.get('swap', {})
    if not swap_disk:
        if post_disks:
            swap_disk = post_disks.get('swap', {})
    # Classify the expected data-loss mode for this backing/action combo:
    #   no_loss       - all files survive
    #   eph_swap_loss - ephemeral/swap disk contents lost
    #   local_loss    - all local disks (incl. image-backed root) lost
    disk_check = 'no_loss'
    if vm_action in [None, 'live_migrate']:
        disk_check = 'no_loss'
    elif vm_type == 'volume':
        # boot-from-vol, non-live migrate actions
        disk_check = 'no_loss'
        if storage_backing == 'local_lvm' and (eph_disk or swap_disk):
            disk_check = 'eph_swap_loss'
        elif storage_backing == 'local_image' and vm_action == 'evacuate' and (
                eph_disk or swap_disk):
            disk_check = 'eph_swap_loss'
    elif storage_backing == 'local_image':
        # local_image, boot-from-image, non-live migrate actions
        disk_check = 'no_loss'
        if vm_action == 'evacuate':
            disk_check = 'local_loss'
    elif storage_backing == 'local_lvm':
        # local_lvm, boot-from-image, non-live migrate actions
        disk_check = 'local_loss'
        if vm_action == 'resize':
            post_host = post_host if post_host else vm_helper.get_vm_host(vm_id)
            # same-host resize keeps the root disk; only eph/swap are lost
            if post_host == prev_host:
                disk_check = 'eph_swap_loss'
    LOG.info("disk check type: {}".format(disk_check))
    loss_paths = []
    if disk_check == 'no_loss':
        no_loss_paths = final_paths
    else:
        # If there's any loss, we must not have remote storage. And any
        # ephemeral/swap disks will be local.
        disks_to_check = disks.get('eph', {})
        # skip swap type checking for data loss since it's not a regular
        # filesystem
        # swap_disks = disks.get('swap', {})
        # disks_to_check.update(swap_disks)
        for path_ in final_paths:
            # For tis-centos-guest, ephemeral disk is mounted to /mnt after
            # vm launch.
            if str(path_).rsplit('/', 1)[0] == '/mnt':
                loss_paths.append(path_)
                break
        for disk in disks_to_check:
            for path in final_paths:
                if disk in path:
                    # We mount disk vdb to /mnt/vdb, so this is looking for
                    # vdb in the mount path
                    loss_paths.append(path)
                    break
        if disk_check == 'local_loss':
            # if vm booted from image, then the root disk is also local disk
            root_img = disks.get('root_img', {})
            if root_img:
                LOG.info(
                    "Auto mount vm disks again since root disk was local with "
                    "data loss expected")
                vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=final_disks)
                file_name = final_paths[0].rsplit('/')[-1]
                root_path = '/{}'.format(file_name)
                loss_paths.append(root_path)
                assert root_path in final_paths, \
                    "root_path:{}, file_paths:{}".format(root_path, final_paths)
        no_loss_paths = list(set(final_paths) - set(loss_paths))
    LOG.info("loss_paths: {}, no_loss_paths: {}, total_file_pahts: {}".format(
        loss_paths, no_loss_paths, final_paths))
    res_files = {}
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id,
                                         vm_image_name=guest_os) as vm_ssh:
        # Dump mount state to the log for debugging
        vm_ssh.exec_sudo_cmd('cat /etc/fstab')
        vm_ssh.exec_sudo_cmd("mount | grep --color=never '/dev'")
        # Lost files: recreate as <path>2 so later runs can detect the disk
        for file_path in loss_paths:
            vm_ssh.exec_sudo_cmd('touch {}2'.format(file_path), fail_ok=False)
            vm_ssh.exec_sudo_cmd('echo "{}" >> {}2'.format(content, file_path),
                                 fail_ok=False)
        # Surviving files: content must still match
        for file_path in no_loss_paths:
            output = vm_ssh.exec_sudo_cmd('cat {}'.format(file_path),
                                          fail_ok=False)[1]
            res = '' if content in output else 'content mismatch'
            res_files[file_path] = res
        for file, error in res_files.items():
            assert not error, "Check {} failed: {}".format(file, error)
        swap_disk = final_disks.get('swap', {})
        if swap_disk:
            disk_name = list(swap_disk.keys())[0]
            partition = '/dev/{}'.format(disk_name)
            # Newly-added swap disk (e.g. after resize) needs to be mounted
            if disk_check != 'local_loss' and not disks.get('swap', {}):
                mount_on, fs_type = storage_helper.mount_partition(
                    ssh_client=vm_ssh, disk=disk_name,
                    partition=partition, fs_type='swap')
                storage_helper.auto_mount_fs(ssh_client=vm_ssh, fs=partition,
                                             mount_on=mount_on, fs_type=fs_type)
            LOG.info("Check swap disk is on")
            swap_output = vm_ssh.exec_sudo_cmd(
                'cat /proc/swaps | grep --color=never {}'.format(partition))[1]
            assert swap_output, "Expect swapon for {}. Actual output: {}". \
                format(partition, vm_ssh.exec_sudo_cmd('cat /proc/swaps')[1])
            LOG.info("Check swap disk size")
            _check_disk_size(vm_ssh, disk_name=disk_name, expt_size=swap)
        eph_disk = final_disks.get('eph', {})
        if eph_disk:
            LOG.info("Check ephemeral disk size")
            eph_name = list(eph_disk.keys())[0]
            # flavor ephemeral size is in GiB; disk sizes compared in MiB
            _check_disk_size(vm_ssh, eph_name, expt_size=ephemeral * 1024)
        if root:
            image_root = final_disks.get('root_img', {})
            root_name = ''
            if image_root:
                root_name = list(image_root.keys())[0]
            elif check_volume_root:
                root_name = list(final_disks.get('root_vol').keys())[0]
            if root_name:
                LOG.info("Check root disk size")
                _check_disk_size(vm_ssh, disk_name=root_name,
                                 expt_size=root * 1024)
def _check_disk_size(vm_ssh, disk_name, expt_size):
    """
    Assert that the given disk's size inside the vm matches expt_size (MiB).

    Args:
        vm_ssh: ssh client connected to the vm
        disk_name (str): device name as listed in /proc/partitions, e.g. vdb
        expt_size (int|str): expected size in MiB
    """
    partition = vm_ssh.exec_sudo_cmd(
        'cat /proc/partitions | grep --color=never "{}$"'.format(disk_name))[1]
    # /proc/partitions reports 1K blocks; convert to MiB. Missing disk -> 0.
    if partition:
        actual_size = int(int(partition.split()[-2].strip()) / 1024)
    else:
        actual_size = 0
    expt_size = int(expt_size)
    assert actual_size == expt_size, "Expected disk size: {}M. Actual: {}M".\
        format(expt_size, actual_size)
def check_alarms(before_alarms, timeout=300,
                 auth_info=Tenant.get('admin_platform'), con_ssh=None,
                 fail_ok=False):
    """
    Compare current alarms against a pre-action snapshot and wait for any
    newly-raised alarms to clear.

    Args:
        before_alarms (list): snapshot entries formatted '<alarm_id>::::<entity_id>'
        timeout (int): max seconds to wait for new alarms to clear
        auth_info (dict): NOTE: default is evaluated once at import time
        con_ssh: controller ssh client
        fail_ok (bool): if True, return instead of raising on uncleared alarms

    Returns (tuple): (<all_cleared (bool)>, <remaining alarms (list|None)>)
    """
    after_alarms = system_helper.get_alarms(auth_info=auth_info,
                                            con_ssh=con_ssh)
    new_alarms = []
    check_interval = 5
    for entry in after_alarms:
        if entry in before_alarms:
            continue
        alarm_id, entity_id = entry.split('::::')
        if alarm_id == EventLogID.CPU_USAGE_HIGH:
            # CPU usage alarms clear slowly; poll less aggressively
            check_interval = 45
        elif alarm_id == EventLogID.NTP_ALARM:
            # NTP alarm handling
            LOG.info("NTP alarm found, checking ntpq stats")
            host = entity_id.split('host=')[1].split('.ntp')[0]
            system_helper.wait_for_ntp_sync(host=host, fail_ok=False,
                                            auth_info=auth_info,
                                            con_ssh=con_ssh)
            continue
        new_alarms.append((alarm_id, entity_id))
    if not new_alarms:
        return True, None
    LOG.info("New alarms detected. Waiting for new alarms to clear.")
    res, remaining_alarms = system_helper.wait_for_alarms_gone(
        new_alarms,
        fail_ok=True,
        timeout=timeout,
        check_interval=check_interval,
        auth_info=auth_info,
        con_ssh=con_ssh)
    if not res:
        msg = "New alarm(s) found and did not clear within {} seconds. " \
              "Alarm IDs and Entity IDs: {}".format(timeout, remaining_alarms)
        LOG.warning(msg)
        if not fail_ok:
            assert res, msg
    return res, remaining_alarms
def check_rest_api():
    """
    Sanity-check the sysinv REST API by fetching /controller_fs and
    asserting an HTTP 200 response.
    """
    LOG.info("Check sysinv REST API")
    sysinv_rest = Rest('sysinv', platform=True)
    status_code, text = sysinv_rest.get(resource='/controller_fs', auth=True)
    LOG.debug("Retrieved: status_code: {} message: {}".format(status_code,
                                                              text))
    LOG.info("Check status_code of 200 is received")
    assert status_code == 200, \
        "Expected status_code of 200 - received {} and message {}".format(
            status_code, text)

File diff suppressed because it is too large Load Diff

View File

@ -1,859 +0,0 @@
#
# Copyright (c) 2019, 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#############################################################
# DO NOT import anything from helper modules to this module #
#############################################################
import socket
import os
import re
import time
from contextlib import contextmanager
from datetime import datetime
import pexpect
import yaml
from pytest import skip
from consts.auth import Tenant, TestFileServer, HostLinuxUser
from consts.stx import Prompt
from consts.proj_vars import ProjVar
from utils import exceptions
from utils.clients.ssh import ControllerClient, NATBoxClient, SSHClient, \
get_cli_client
from utils.tis_log import LOG
def scp_from_test_server_to_user_file_dir(source_path, dest_dir, dest_name=None,
                                          timeout=900, con_ssh=None,
                                          central_region=False):
    """
    Copy a file from the test file server to the user file dir, which is
    either localhost (when USER_FILE_DIR == TEMP_DIR) or the active
    controller.

    Args:
        source_path (str): file path on the test server
        dest_dir (str): destination directory
        dest_name (str|None): destination file name; defaults to source name
        timeout (int): scp timeout in seconds
        con_ssh: cli client to use; auto-selected when None
        central_region (bool): select central region cli client

    Returns (str): destination file path
    """
    if con_ssh is None:
        con_ssh = get_cli_client(central_region=central_region)
    if dest_name is None:
        dest_name = source_path.split(sep='/')[-1]
    # When user files don't live in the temp dir, they belong on the
    # active controller instead of localhost
    if ProjVar.get_var('USER_FILE_DIR') != ProjVar.get_var('TEMP_DIR'):
        LOG.info("Copy file from test server to active controller")
        return scp_from_test_server_to_active_controller(
            source_path=source_path, dest_dir=dest_dir,
            dest_name=dest_name, timeout=timeout, con_ssh=con_ssh)
    LOG.info("Copy file from test server to localhost")
    dest_path = dest_dir if not dest_name else os.path.join(dest_dir,
                                                            dest_name)
    LOG.info('Check if file already exists on TiS')
    if con_ssh.file_exists(file_path=dest_path):
        LOG.info('dest path {} already exists. Return existing path'.format(
            dest_path))
        return dest_path
    os.makedirs(dest_dir, exist_ok=True)
    con_ssh.scp_on_dest(source_user=TestFileServer.USER,
                        source_ip=TestFileServer.SERVER,
                        source_path=source_path,
                        dest_path=dest_path,
                        source_pswd=TestFileServer.PASSWORD,
                        timeout=timeout)
    return dest_path
def _scp_from_remote_to_active_controller(source_server, source_path,
                                          dest_dir, dest_name=None,
                                          source_user=None,
                                          source_password=None,
                                          timeout=900, con_ssh=None,
                                          is_dir=False):
    """
    SCP file or files under a directory from remote server to TiS server

    Args:
        source_server (str): remote server hostname or ip
        source_path (str): remote server file path or directory path
        dest_dir (str): destination directory. should end with '/'
        dest_name (str): destination file name if not dir
        source_user (str|None): defaults to TestFileServer.USER
        source_password (str|None): defaults to TestFileServer.PASSWORD
        timeout (int):
        con_ssh:
        is_dir

    Returns (str|None): destination file/dir path if scp successful else None
    """
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()
    if not source_user:
        source_user = TestFileServer.USER
    if not source_password:
        source_password = TestFileServer.PASSWORD
    if dest_name is None and not is_dir:
        dest_name = source_path.split(sep='/')[-1]
    dest_path = dest_dir if not dest_name else os.path.join(dest_dir, dest_name)
    LOG.info('Check if file already exists on TiS')
    # Existing single files are reused as-is (no checksum comparison)
    if not is_dir and con_ssh.file_exists(file_path=dest_path):
        LOG.info('dest path {} already exists. Return existing path'.format(
            dest_path))
        return dest_path
    LOG.info('Create destination directory on tis server if not already exists')
    cmd = 'mkdir -p {}'.format(dest_dir)
    con_ssh.exec_cmd(cmd, fail_ok=False)
    nat_name = ProjVar.get_var('NATBOX')
    if nat_name:
        nat_name = nat_name.get('name')
    # VBox labs are not directly reachable from the remote server, so the
    # file is staged on the NATBox first, then pushed to the controller.
    if nat_name and ProjVar.get_var('IS_VBOX'):
        LOG.info('VBox detected, performing intermediate scp')
        nat_dest_path = '/tmp/{}'.format(dest_name)
        nat_ssh = NATBoxClient.get_natbox_client()
        # Skip the first hop if the file is already staged on the NATBox
        if not nat_ssh.file_exists(nat_dest_path):
            LOG.info("scp file from {} to NatBox: {}".format(nat_name,
                                                             source_server))
            nat_ssh.scp_on_dest(source_user=source_user,
                                source_ip=source_server,
                                source_path=source_path,
                                dest_path=nat_dest_path,
                                source_pswd=source_password, timeout=timeout,
                                is_dir=is_dir)
        LOG.info(
            'scp file from natbox {} to active controller'.format(nat_name))
        dest_user = HostLinuxUser.get_user()
        dest_pswd = HostLinuxUser.get_password()
        dest_ip = ProjVar.get_var('LAB').get('floating ip')
        nat_ssh.scp_on_source(source_path=nat_dest_path, dest_user=dest_user,
                              dest_ip=dest_ip, dest_path=dest_path,
                              dest_password=dest_pswd, timeout=timeout,
                              is_dir=is_dir)
    else:  # if not a VBox lab, scp from remote server directly to TiS server
        LOG.info("scp file(s) from {} to tis".format(source_server))
        con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server,
                            source_path=source_path,
                            dest_path=dest_path, source_pswd=source_password,
                            timeout=timeout, is_dir=is_dir)
    return dest_path
def scp_from_test_server_to_active_controller(source_path, dest_dir,
                                              dest_name=None, timeout=900,
                                              con_ssh=None,
                                              is_dir=False):
    """
    SCP file or files under a directory from test server to TiS server

    Args:
        source_path (str): test server file path or directory path
        dest_dir (str): destination directory. should end with '/'
        dest_name (str): destination file name if not dir
        timeout (int):
        con_ssh:
        is_dir (bool)

    Returns (str|None): destination file/dir path if scp successful else None
    """
    # Shared test file server is currently unavailable; skip tests that
    # depend on it. Everything below is unreachable until this is removed.
    skip('Shared Test File Server is not ready')
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()
    return _scp_from_remote_to_active_controller(
        source_server=TestFileServer.SERVER,
        source_path=source_path,
        dest_dir=dest_dir,
        dest_name=dest_name,
        source_user=TestFileServer.USER,
        source_password=TestFileServer.PASSWORD,
        timeout=timeout,
        con_ssh=con_ssh,
        is_dir=is_dir)
def scp_from_active_controller_to_test_server(source_path, dest_dir,
                                              dest_name=None, timeout=900,
                                              is_dir=False,
                                              con_ssh=None):
    """
    SCP file or files under a directory from test server to TiS server

    Args:
        source_path (str): test server file path or directory path
        dest_dir (str): destination directory. should end with '/'
        dest_name (str): destination file name if not dir
        timeout (int):
        is_dir (bool):
        con_ssh:

    Returns (str|None): destination file/dir path if scp successful else None
    """
    # Shared test file server is currently unavailable; everything below is
    # unreachable until this skip is removed.
    skip('Shared Test File Server is not ready')
    if con_ssh is None:
        con_ssh = ControllerClient.get_active_controller()
    dir_option = '-r ' if is_dir else ''
    dest_server = TestFileServer.SERVER
    dest_user = TestFileServer.USER
    dest_password = TestFileServer.PASSWORD
    dest_path = dest_dir if not dest_name else os.path.join(dest_dir, dest_name)
    scp_cmd = 'scp -oStrictHostKeyChecking=no -o ' \
              'UserKnownHostsFile=/dev/null ' \
              '{}{} {}@{}:{}'.\
        format(dir_option, source_path, dest_user, dest_server, dest_path)
    LOG.info("scp file(s) from tis server to test server")
    # Drive scp interactively from the controller shell: answer the host-key
    # prompt first (index 2), then the password prompt (index 1), and expect
    # the shell prompt (index 0) on completion.
    con_ssh.send(scp_cmd)
    index = con_ssh.expect(
        [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST],
        timeout=timeout)
    if index == 2:
        con_ssh.send('yes')
        index = con_ssh.expect([con_ssh.prompt, Prompt.PASSWORD_PROMPT],
                               timeout=timeout)
    if index == 1:
        con_ssh.send(dest_password)
        index = con_ssh.expect(timeout=timeout)
    assert index == 0, "Failed to scp files"
    # scp may print a prompt yet still have failed; verify the exit code too
    exit_code = con_ssh.get_exit_code()
    assert 0 == exit_code, "scp not fully succeeded"
    return dest_path
def scp_from_localhost_to_active_controller(
        source_path, dest_path=None,
        dest_user=None,
        dest_password=None,
        timeout=900, is_dir=False):
    """
    Copy file(s) from localhost to the active controller via scp.

    Args:
        source_path (str): local file/directory path
        dest_path (str|None): destination path; defaults to the sysadmin home
        dest_user (str|None): defaults to the configured host linux user
        dest_password (str|None): defaults to the configured host password
        timeout (int): max seconds for the scp to finish
        is_dir (bool): copy a directory recursively when True
    """
    active_cont_ip = ControllerClient.get_active_controller().host
    dest_path = dest_path if dest_path else HostLinuxUser.get_home()
    dest_user = dest_user if dest_user else HostLinuxUser.get_user()
    dest_password = dest_password if dest_password \
        else HostLinuxUser.get_password()
    return scp_from_local(source_path, active_cont_ip, dest_path=dest_path,
                          dest_user=dest_user, dest_password=dest_password,
                          timeout=timeout, is_dir=is_dir)
def scp_from_active_controller_to_localhost(
        source_path, dest_path='',
        src_user=None,
        src_password=None,
        timeout=900, is_dir=False):
    """
    Copy file(s) from the active controller to localhost via scp.

    Args:
        source_path (str): path on the active controller
        dest_path (str): local destination path
        src_user (str|None): defaults to the configured host linux user
        src_password (str|None): defaults to the configured host password
        timeout (int): max seconds for the scp to finish
        is_dir (bool): copy a directory recursively when True
    """
    active_cont_ip = ControllerClient.get_active_controller().host
    src_user = src_user if src_user else HostLinuxUser.get_user()
    src_password = src_password if src_password \
        else HostLinuxUser.get_password()
    return scp_to_local(source_path=source_path, source_ip=active_cont_ip,
                        source_user=src_user, source_password=src_password,
                        dest_path=dest_path, timeout=timeout, is_dir=is_dir)
def scp_from_local(source_path, dest_ip, dest_path,
                   dest_user,
                   dest_password,
                   timeout=900, is_dir=False):
    """
    Scp file(s) from localhost (i.e., from where the automated tests are
    executed).

    Args:
        source_path (str): source file/directory path
        dest_ip (str): ip of the destination host
        dest_user (str): username of destination host.
        dest_password (str): password of destination host
        dest_path (str): destination directory path to copy the file(s) to
        timeout (int): max time to wait for scp finish in seconds
        is_dir (bool): whether to copy a single file or a directory
    """
    recurse_flag = '-r ' if is_dir else ''
    scp_cmd = (
        'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '
        '{}{} {}@{}:{}'.format(recurse_flag, source_path, dest_user, dest_ip,
                               dest_path))
    _scp_on_local(scp_cmd, remote_password=dest_password, timeout=timeout)
def scp_to_local(source_path, source_ip, source_user, source_password,
                 dest_path, timeout=900, is_dir=False):
    """
    Scp file(s) to localhost (i.e., to where the automated tests are executed).

    Args:
        source_path (str): source file/directory path
        source_ip (str): ip of the source host.
        source_user (str): username of source host.
        source_password (str): password of source host
        dest_path (str): destination directory path to copy the file(s) to
        timeout (int): max time to wait for scp finish in seconds
        is_dir (bool): whether to copy a single file or a directory
    """
    recurse_flag = '-r ' if is_dir else ''
    scp_cmd = (
        'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '
        '{}{}@{}:{} {}'.format(recurse_flag, source_user, source_ip,
                               source_path, dest_path))
    _scp_on_local(scp_cmd, remote_password=source_password, timeout=timeout)
def _scp_on_local(cmd, remote_password, logdir=None, timeout=900):
    """
    Run an scp command from localhost via pexpect, answering the host-key
    and password prompts automatically.

    Args:
        cmd (str): full scp command line to spawn
        remote_password (str): password to supply when prompted
        logdir (str|None): directory for the scp transcript log; defaults
            to the project LOG_DIR
        timeout (int): max seconds per expect step
    """
    LOG.debug('scp cmd: {}'.format(cmd))
    logdir = logdir or ProjVar.get_var('LOG_DIR')
    logfile = os.path.join(logdir, 'scp_files.log')
    # The full scp session is appended to scp_files.log for debugging
    with open(logfile, mode='a', encoding='utf8') as f:
        local_child = pexpect.spawn(command=cmd, encoding='utf-8', logfile=f)
        # index 2: unknown host-key prompt; index 1: password prompt;
        # index 0 (EOF): scp finished without prompting
        index = local_child.expect([pexpect.EOF, 'assword:', 'yes/no'],
                                   timeout=timeout)
        if index == 2:
            local_child.sendline('yes')
            index = local_child.expect([pexpect.EOF, 'assword:'],
                                       timeout=timeout)
        if index == 1:
            local_child.sendline(remote_password)
            local_child.expect(pexpect.EOF, timeout=timeout)
def get_tenant_name(auth_info=None):
    """
    Get name of given tenant. If None is given, primary tenant name will be
    returned.

    Args:
        auth_info (dict|None): Tenant dict

    Returns:
        str: name of the tenant
    """
    chosen = Tenant.get_primary() if auth_info is None else auth_info
    return chosen['tenant']
class Count:
    """Monotonic per-resource counters used when naming test resources."""
    # One private counter per resource type; incremented only via accessors.
    __vm_count = 0
    __flavor_count = 0
    __volume_count = 0
    __image_count = 0
    __server_group = 0
    __router = 0
    __subnet = 0
    __other = 0

    @classmethod
    def get_vm_count(cls):
        """Increment and return the vm counter."""
        cls.__vm_count = cls.__vm_count + 1
        return cls.__vm_count

    @classmethod
    def get_flavor_count(cls):
        """Increment and return the flavor counter."""
        cls.__flavor_count = cls.__flavor_count + 1
        return cls.__flavor_count

    @classmethod
    def get_volume_count(cls):
        """Increment and return the volume counter."""
        cls.__volume_count = cls.__volume_count + 1
        return cls.__volume_count

    @classmethod
    def get_image_count(cls):
        """Increment and return the image counter."""
        cls.__image_count = cls.__image_count + 1
        return cls.__image_count

    @classmethod
    def get_sever_group_count(cls):
        """Increment and return the server group counter."""
        cls.__server_group = cls.__server_group + 1
        return cls.__server_group

    @classmethod
    def get_router_count(cls):
        """Increment and return the router counter."""
        cls.__router = cls.__router + 1
        return cls.__router

    @classmethod
    def get_subnet_count(cls):
        """Increment and return the subnet counter."""
        cls.__subnet = cls.__subnet + 1
        return cls.__subnet

    @classmethod
    def get_other_count(cls):
        """Increment and return the catch-all counter."""
        cls.__other = cls.__other + 1
        return cls.__other
class NameCount:
    """Per-resource-type counters backing get_unique_name()."""
    # resource type -> number of names handed out so far
    __names_count = {
        'vm': 0,
        'flavor': 0,
        'volume': 0,
        'image': 0,
        'server_group': 0,
        'subnet': 0,
        'heat_stack': 0,
        'qos': 0,
        'other': 0,
    }

    @classmethod
    def get_number(cls, resource_type='other'):
        """Increment and return the counter for the given resource type."""
        next_val = cls.__names_count[resource_type] + 1
        cls.__names_count[resource_type] = next_val
        return next_val

    @classmethod
    def get_valid_types(cls):
        """Return the known resource types as a list."""
        return list(cls.__names_count)
def get_unique_name(name_str, existing_names=None, resource_type='other'):
    """
    Get a unique name string by appending a number to given name_str

    Args:
        name_str (str): partial name string
        existing_names (list): names to avoid
        resource_type (str): type of resource. valid values: 'vm'

    Returns:
        str: a name not present in existing_names

    Raises:
        ValueError: if resource_type is unknown
        LookupError: if no unique name is found within 50 attempts
    """
    valid_types = NameCount.get_valid_types()
    if resource_type not in valid_types:
        raise ValueError(
            "Invalid resource_type provided. Valid types: {}".format(
                valid_types))

    def _next_candidate():
        # Appends the next per-type counter value to the base name
        return "{}-{}".format(
            name_str, NameCount.get_number(resource_type=resource_type))

    if not existing_names:
        return _next_candidate()
    # image/volume/flavor names may be reused as-is, so try the bare name first
    candidate = name_str if resource_type in ('image', 'volume', 'flavor') \
        else _next_candidate()
    for _ in range(50):
        if candidate not in existing_names:
            return candidate
        candidate = _next_candidate()
    raise LookupError("Cannot find unique name.")
def parse_cpus_list(cpus):
    """
    Expand a human-friendly cpu list into a sorted list of integers.
    e.g., '5-7,41-43, 43, 45' >> [5, 6, 7, 41, 42, 43, 43, 45]

    Args:
        cpus (str|list): comma-separated string, or an iterable of
            string tokens

    Returns (list): sorted list of integers (duplicates preserved)
    """
    if isinstance(cpus, str):
        if not cpus.strip():
            return []
        cpus = cpus.split(sep=',')
    result = []
    for token in cpus:
        if '-' not in token:
            result.append(int(token))
            continue
        # expand '3-6' to 3, 4, 5, 6
        start, end = token.split(sep='-')
        # unpinned:20; pinned_cpulist:-, unpinned_cpulist:10-19,30-39
        if start != '':
            result.extend(range(int(start), int(end) + 1))
    return sorted(result)
def get_timedelta_for_isotimes(time1, time2):
    """
    Compute time2 - time1 for two ISO-ish timestamp strings.

    Args:
        time1 (str): such as "2016-08-16T12:59:45.440697+00:00"
        time2 (str): same accepted formats as time1

    Returns (datetime.timedelta): time2 - time1

    Raises:
        ValueError: if a timestamp matches neither supported format
    """
    def _to_datetime(raw):
        # Drop fractional seconds and any '+hh:mm' utc offset before parsing
        trimmed = raw.strip().split(sep='.')[0].split(sep='+')[0]
        if 'T' in trimmed:
            fmt = "%Y-%m-%dT%H:%M:%S"
        elif ' ' in trimmed:
            fmt = "%Y-%m-%d %H:%M:%S"
        else:
            raise ValueError("Unknown format for time1: {}".format(trimmed))
        return datetime.strptime(trimmed, fmt)

    return _to_datetime(time2) - _to_datetime(time1)
def _execute_with_openstack_cli():
    """
    DO NOT USE THIS IN TEST FUNCTIONS!

    Returns the OPENSTACK_CLI project variable, i.e. whether commands
    should be executed via the openstack cli client.
    """
    return ProjVar.get_var('OPENSTACK_CLI')
def get_date_in_format(ssh_client=None, date_format="%Y%m%d %T"):
    """
    Get date in given format.

    Args:
        ssh_client (SSHClient): host to run `date` on; active controller
            when None
        date_format (str): Please see date --help for valid format strings

    Returns (str): date output in given format
    """
    client = ssh_client
    if client is None:
        client = ControllerClient.get_active_controller()
    return client.exec_cmd("date +'{}'".format(date_format), fail_ok=False)[1]
def write_to_file(file_path, content, mode='a'):
    """
    Write content to specified local file, preceded by a UTC timestamp banner.

    Args:
        file_path (str): file path on localhost
        content (str): content to write to file
        mode (str): file operation mode. Default is 'a' (append to end of file).

    Returns: None
    """
    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
    banner = '\n-----------------[{}]-----------------\n{}\n'.format(
        time_stamp, content)
    with open(file_path, mode=mode, encoding='utf8') as out_file:
        out_file.write(banner)
def collect_software_logs(con_ssh=None):
    """
    Run `collect all` on the active controller and copy the resulting
    /scratch/ALL_NODES tarball to the local LOG_DIR.

    Args:
        con_ssh: controller ssh client; active controller when None

    Returns: None. Failures are logged rather than raised.
    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    LOG.info("Collecting all hosts logs...")
    con_ssh.exec_cmd('source /etc/platform/openrc', get_exit_code=False)
    # `collect all` is interactive: it may ask for the sysadmin password
    # before reporting 'collecting data'
    con_ssh.send('collect all')
    expect_list = ['.*password for sysadmin:', 'collecting data.',
                   con_ssh.prompt]
    index_1 = con_ssh.expect(expect_list, timeout=20)
    if index_1 == 2:
        # Back at the shell prompt without collecting: the command failed
        LOG.error(
            "Something is wrong with collect all. Check ssh console log for "
            "detail.")
        return
    elif index_1 == 0:
        con_ssh.send(con_ssh.password)
        con_ssh.expect('collecting data')
    # Collection can take a long time; wait up to 20 minutes for the tarball
    index_2 = con_ssh.expect(['/scratch/ALL_NODES.*', con_ssh.prompt],
                             timeout=1200)
    if index_2 == 0:
        output = con_ssh.cmd_output
        con_ssh.expect()
        logpath = re.findall('.*(/scratch/ALL_NODES_.*.tar).*', output)[0]
        LOG.info(
            "\n################### TiS server log path: {}".format(logpath))
    else:
        LOG.error("Collecting logs failed. No ALL_NODES logs found.")
        return
    dest_path = ProjVar.get_var('LOG_DIR')
    # Best-effort copy to localhost; failures are logged, not raised
    try:
        LOG.info("Copying log file from active controller to local {}".format(
            dest_path))
        scp_from_active_controller_to_localhost(
            source_path=logpath, dest_path=dest_path, timeout=300)
        LOG.info("{} is successfully copied to local directory: {}".format(
            logpath, dest_path))
    except Exception as e:
        LOG.warning("Failed to copy log file to localhost.")
        LOG.error(e, exc_info=True)
def parse_args(args_dict, repeat_arg=False, vals_sep=' '):
    """
    parse args dictionary and convert it to string

    Args:
        args_dict (dict): key/value pairs
        repeat_arg: if value is tuple, list, dict, should the arg be repeated.
            e.g., True for --nic in nova boot. False for -m in gnocchi
            measures aggregation
        vals_sep (str): separator to join multiple vals. Only applicable when
            repeat_arg=False.

    Returns (str): command-line argument string, e.g. '--name=vm1 --force'

    Raises:
        ValueError: if a value has an unsupported type
    """
    def convert_val_dict(key__, vals_dict, repeat_key):
        # Render a dict value as 'k=v' tokens, quoting values with spaces.
        vals_ = []
        for k, v in vals_dict.items():
            # Fix: guard the space check - non-str values (e.g. ints) used
            # to raise TypeError on `' ' in v`
            if isinstance(v, str) and ' ' in v:
                v = '"{}"'.format(v)
            vals_.append('{}={}'.format(k, v))
        if repeat_key:
            # e.g. '--nic a=1 --nic b=2'
            args_str = ' ' + ' '.join(
                ['{} {}'.format(key__, v_) for v_ in vals_])
        else:
            # e.g. '--prop a=1,b=2' (joined with vals_sep)
            args_str = ' {} {}'.format(key__, vals_sep.join(vals_))
        return args_str

    args = ''
    for key, val in args_dict.items():
        if val is None:
            # None means "omit this argument"
            continue
        key = key if key.startswith('-') else '--{}'.format(key)
        if isinstance(val, str):
            if ' ' in val:
                val = '"{}"'.format(val)
            args += ' {}={}'.format(key, val)
        elif isinstance(val, bool):
            # Boolean flags are present/absent, never '=True'
            if val:
                args += ' {}'.format(key)
        elif isinstance(val, (int, float)):
            args += ' {}={}'.format(key, val)
        elif isinstance(val, dict):
            args += convert_val_dict(key__=key, vals_dict=val,
                                     repeat_key=repeat_arg)
        elif isinstance(val, (list, tuple)):
            if repeat_arg:
                for val_ in val:
                    if isinstance(val_, dict):
                        args += convert_val_dict(key__=key, vals_dict=val_,
                                                 repeat_key=False)
                    else:
                        args += ' {}={}'.format(key, val_)
            else:
                args += ' {}={}'.format(key, vals_sep.join(val))
        else:
            raise ValueError(
                "Unrecognized value type. Key: {}; value: {}".format(key, val))
    return args.strip()
def get_symlink(ssh_client, file_path):
    """
    Return the symlink target of file_path on the remote host, or None.

    Args:
        ssh_client: ssh client to run the check on
        file_path (str): path to inspect

    Returns (str|None): link target, or None if the path is missing or is
        not a symlink
    """
    code, output = ssh_client.exec_cmd(
        'ls -l {} | grep --color=never ""'.format(file_path))
    if code != 0:
        LOG.warning('{} not found!'.format(file_path))
        return None
    # `ls -l` renders symlinks as 'name -> target'
    matches = re.findall('> (.*)', output)
    if not matches:
        LOG.warning('No symlink found for {}'.format(file_path))
        return None
    return matches[0].strip()
def is_file(filename, ssh_client):
    """Return True if filename exists as a regular file on the remote host."""
    exit_code = ssh_client.exec_cmd('test -f {}'.format(filename),
                                    fail_ok=True)[0]
    return exit_code == 0
def is_directory(dirname, ssh_client):
    """Return True if dirname exists as a directory on the remote host."""
    exit_code = ssh_client.exec_cmd('test -d {}'.format(dirname),
                                    fail_ok=True)[0]
    return exit_code == 0
def lab_time_now(con_ssh=None, date_format='%Y-%m-%dT%H:%M:%S'):
    """
    Get the current lab time from the active controller.

    Args:
        con_ssh: controller ssh client; active controller when None
        date_format (str): strftime format for the seconds portion

    Returns (tuple): (<timestamp string without sub-seconds>,
        <parsed datetime with millisecond precision>)
    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    # Ask `date` for nanoseconds, then truncate to milliseconds below
    timestamp = get_date_in_format(ssh_client=con_ssh,
                                   date_format=date_format + '.%N')
    parts = timestamp.split('.')
    with_milliseconds = '{}.{}'.format(parts[0], int(int(parts[1]) / 1000))
    parsed = datetime.strptime(with_milliseconds, date_format + '.%f')
    return parts[0], parsed
@contextmanager
def ssh_to_remote_node(host, username=None, password=None, prompt=None,
                       ssh_client=None, use_telnet=False,
                       telnet_session=None):
    """
    ssh to a external node from sshclient.

    Args:
        host (str|None): hostname or ip address of remote node to ssh to.
        username (str):
        password (str):
        prompt (str):
        ssh_client (SSHClient): client to ssh from
        use_telnet:
        telnet_session:

    Returns (SSHClient): ssh client of the host

    Examples: with ssh_to_remote_node('128.224.150.92) as remote_ssh:
                  remote_ssh.exec_cmd(cmd)
    """
    if not host:
        raise exceptions.SSHException(
            "Remote node hostname or ip address must be provided")
    if use_telnet and not telnet_session:
        raise exceptions.SSHException(
            "Telnet session cannot be none if using telnet.")
    if not ssh_client and not use_telnet:
        ssh_client = ControllerClient.get_active_controller()
    # Default credentials differ per transport: current linux user for ssh,
    # configured host user for telnet
    if not use_telnet:
        # local import to avoid a circular dependency with keywords modules
        from keywords.security_helper import LinuxUser
        default_user, default_password = LinuxUser.get_current_user_password()
    else:
        default_user = HostLinuxUser.get_user()
        default_password = HostLinuxUser.get_password()
    user = username if username else default_user
    password = password if password else default_password
    # Remember where we started so we only close the new connection on exit
    if use_telnet:
        original_host = telnet_session.exec_cmd('hostname')[1]
    else:
        original_host = ssh_client.host
    if not prompt:
        prompt = '.*' + host + r'\:~\$'
    remote_ssh = SSHClient(host, user=user, password=password,
                           initial_prompt=prompt)
    remote_ssh.connect()
    current_host = remote_ssh.host
    if not current_host == host:
        raise exceptions.SSHException(
            "Current host is {} instead of {}".format(current_host, host))
    try:
        yield remote_ssh
    finally:
        # Close only if we actually moved to a different host
        if current_host != original_host:
            remote_ssh.close()
def ssh_to_stx(lab=None, set_client=False):
    """Open an SSH connection to the lab's floating IP.

    Args:
        lab (dict|None): lab dict with a 'floating ip' entry; defaults to the
            ProjVar 'LAB' value.
        set_client (bool): when True, register the connection as the active
            controller client.

    Returns (SSHClient): the connected client
    """
    lab = lab or ProjVar.get_var('LAB')

    stx_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(),
                        password=HostLinuxUser.get_password(),
                        initial_prompt=Prompt.CONTROLLER_PROMPT)
    stx_ssh.connect(retry=True, retry_timeout=30, use_current=False)
    if set_client:
        ControllerClient.set_active_controller(stx_ssh)

    return stx_ssh
def get_yaml_data(filepath):
    """Load a YAML file and return the parsed data.

    Args:
        filepath (str): location of the yaml file to load

    Returns: parsed content (dict/list/scalar depending on the file)
    """
    with open(filepath, 'r', encoding='utf8') as yaml_file:
        return yaml.safe_load(yaml_file)
def write_yaml_data_to_file(data, filename, directory=None):
    """Serialize data to a YAML file and return its path.

    Args:
        data: data structure to dump
        filename (str): name of the file to create
        directory (str|None): target directory; defaults to the ProjVar
            'LOG_DIR' value.

    Returns (str): full path of the written yaml file
    """
    target_dir = ProjVar.get_var('LOG_DIR') if directory is None else directory

    dest_path = "{}/{}".format(target_dir, filename)
    with open(dest_path, 'w', encoding='utf8') as yaml_file:
        yaml.dump(data, yaml_file)
    return dest_path
def get_lab_fip(region=None):
    """Return the system OAM floating ip for the lab.

    Args:
        region (str|None): central_region or subcloud name; only applicable
            to distributed cloud. 'RegionOne' is mapped to 'central_region';
            None defaults to the primary subcloud.

    Returns (str): floating ip of the lab
    """
    if not ProjVar.get_var('IS_DC'):
        return ProjVar.get_var('lab')["floating ip"]

    if not region:
        region = ProjVar.get_var('PRIMARY_SUBCLOUD')
    elif region == 'RegionOne':
        region = 'central_region'
    return ProjVar.get_var('lab')[region]["floating ip"]
def get_dnsname(region='RegionOne'):
    """Stub: DNS name lookup is not implemented.

    Args:
        region (str): unused; kept for interface compatibility.

    Returns: None, meaning the dns name is unreachable.
    """
    return None

View File

@ -1,879 +0,0 @@
#
# Copyright (c) 2019, 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Container/Application related helper functions for non-kubectl commands.
For example:
- docker commands
- system application-xxx commands
- helm commands
"""
import os
import time
import yaml
from utils import cli, exceptions, table_parser
from utils.tis_log import LOG
from utils.clients.ssh import ControllerClient
from consts.auth import Tenant
from consts.proj_vars import ProjVar
from consts.stx import AppStatus, Prompt, EventLogID, Container
from consts.filepaths import StxPath
from keywords import system_helper, host_helper
def exec_helm_upload_cmd(tarball, repo=None, timeout=120, con_ssh=None,
                         fail_ok=False):
    """Run 'helm-upload <repo> <tarball>' on the active controller.

    The command may prompt for a password; when it does, the ssh user's
    password is re-sent before waiting for the shell prompt again.

    Args:
        tarball (str): path to the helm chart tarball on the controller
        repo (str|None): helm repo name; defaults to 'starlingx'
        timeout (int): seconds to wait for each prompt
        con_ssh (SSHClient|None): defaults to the active controller
        fail_ok (bool): when False, raise on non-zero exit code

    Returns (tuple): (exit_code(int), output(str))

    Raises:
        SSHExecCommandFailed: non-zero exit code with fail_ok=False
    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    if not repo:
        repo = 'starlingx'
    cmd = 'helm-upload {} {}'.format(repo, tarball)
    con_ssh.send(cmd)
    pw_prompt = Prompt.PASSWORD_PROMPT
    prompts = [con_ssh.prompt, pw_prompt]
    # Wait for either the shell prompt (command done) or a password prompt.
    index = con_ssh.expect(prompts, timeout=timeout, searchwindowsize=100,
                           fail_ok=fail_ok)
    if index == 1:
        # Password requested: answer it, then wait for the shell prompt only.
        con_ssh.send(con_ssh.password)
        prompts.remove(pw_prompt)
        con_ssh.expect(prompts, timeout=timeout, searchwindowsize=100,
                       fail_ok=fail_ok)
    # NOTE(review): relies on the ssh client's private helper to collect the
    # exit code of the interactive command.
    code, output = con_ssh._process_exec_result(rm_date=True,
                                                get_exit_code=True)
    if code != 0 and not fail_ok:
        raise exceptions.SSHExecCommandFailed(
            "Non-zero return code for cmd: {}. Output: {}".
            format(cmd, output))

    return code, output
def exec_docker_cmd(sub_cmd, args, timeout=120, con_ssh=None, fail_ok=False):
    """Execute a docker command with sudo on the controller.

    Args:
        sub_cmd (str): docker sub-command, e.g. 'image ls'
        args (str): arguments for the sub-command
        timeout (int): command timeout in seconds
        con_ssh: ssh client; defaults to the active controller
        fail_ok (bool): whether a non-zero exit code is tolerated

    Returns (tuple): (exit_code, output)
    """
    ssh = con_ssh if con_ssh else ControllerClient.get_active_controller()
    docker_cmd = 'docker {} {}'.format(sub_cmd, args)
    return ssh.exec_sudo_cmd(docker_cmd, expect_timeout=timeout,
                             fail_ok=fail_ok)
def upload_helm_charts(tar_file, repo=None, delete_first=False, con_ssh=None,
                       timeout=120, fail_ok=False):
    """
    Upload helm charts via helm-upload cmd

    Args:
        tar_file (str): path of the charts tarball on the controller
        repo (str|None): helm repo name; defaults to 'starlingx'
        delete_first (bool): remove any pre-existing copy of the chart file
            from each controller before uploading
        con_ssh:
        timeout:
        fail_ok:

    Returns (tuple):
        (0, <path_to_charts>)
        (1, <std_err>)
        (2, <hostname for host that does not have helm charts in expected dir>)
        NOTE(review): the (2, <hostname>) return documented above is not
        produced by the current implementation; a missing file raises
        ContainerError instead.

    Raises:
        ContainerError: chart file missing after a successful upload
    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()

    helm_dir = os.path.normpath(StxPath.HELM_CHARTS_DIR)
    if not repo:
        repo = 'starlingx'
    file_path = os.path.join(helm_dir, repo, os.path.basename(tar_file))
    current_host = con_ssh.get_hostname()
    controllers = [current_host]
    if not system_helper.is_aio_simplex(con_ssh=con_ssh):
        # Non-simplex: include the peer controller so the chart file is
        # cleaned on both nodes when delete_first is set.
        con_name = 'controller-1' if controllers[
            0] == 'controller-0' else 'controller-0'
        controllers.append(con_name)

    if delete_first:
        for host in controllers:
            with host_helper.ssh_to_host(hostname=host,
                                         con_ssh=con_ssh) as host_ssh:
                if host_ssh.file_exists(file_path):
                    host_ssh.exec_sudo_cmd('rm -f {}'.format(file_path))

    code, output = exec_helm_upload_cmd(tarball=tar_file, repo=repo,
                                        timeout=timeout, con_ssh=con_ssh,
                                        fail_ok=fail_ok)
    if code != 0:
        return 1, output

    file_exist = con_ssh.file_exists(file_path)
    if not file_exist:
        raise exceptions.ContainerError(
            "{} not found on {} after helm-upload".format(file_path,
                                                          current_host))

    LOG.info("Helm charts {} uploaded successfully".format(file_path))
    return 0, file_path
def upload_app(tar_file, app_name=None, app_version=None, check_first=True,
               fail_ok=False, uploaded_timeout=300,
               con_ssh=None, auth_info=Tenant.get('admin_platform')):
    """
    Upload an application via 'system application-upload'

    Args:
        tar_file (str): path to the application tarball
        app_name (str|None): passed as -n when given
        app_version (str|None): passed as -v when given
        check_first (bool): skip the upload when the app is already listed.
            NOTE(review): with app_name=None the existence check matches ANY
            listed application - confirm callers always pass app_name when
            check_first is True.
        fail_ok (bool):
        uploaded_timeout (int): seconds to wait for 'uploaded' status
        con_ssh:
        auth_info:

    Returns (tuple):
        (-1, <msg>)     # app already exists, nothing done
        (0, <msg>)      # uploaded successfully
        (1, <std_err>)  # cli rejected
        (2, <msg>)      # cli accepted but 'uploaded' status not reached
    """
    if check_first and get_apps(application=app_name, con_ssh=con_ssh,
                                auth_info=auth_info):
        msg = '{} already exists. Do nothing.'.format(app_name)
        LOG.info(msg)
        return -1, msg

    args = ''
    if app_name:
        args += '-n {} '.format(app_name)
    if app_version:
        args += '-v {} '.format(app_version)
    args = '{}{}'.format(args, tar_file)
    code, output = cli.system('application-upload', args, ssh_client=con_ssh,
                              fail_ok=fail_ok, auth_info=auth_info)

    if code > 0:
        return 1, output

    res = wait_for_apps_status(apps=app_name, status=AppStatus.UPLOADED,
                               timeout=uploaded_timeout,
                               con_ssh=con_ssh, auth_info=auth_info,
                               fail_ok=fail_ok)[0]
    if not res:
        return 2, "{} failed to upload".format(app_name)

    msg = '{} uploaded successfully'.format(app_name)
    LOG.info(msg)
    return 0, msg
def get_apps(field='status', application=None, con_ssh=None,
             auth_info=Tenant.get('admin_platform'),
             rtn_dict=False, **kwargs):
    """Get application values via 'system application-list'.

    Args:
        application (str|list|tuple): app name(s) to filter on
        field (str|list|tuple): field(s) to return per app
        con_ssh:
        auth_info:
        rtn_dict (bool): return a dict keyed by app name instead of a list
        **kwargs: extra table filters other than application

    Returns (list|dict):
        list of list, or dict with app name(str) as key and values(list)
        for given fields for each app as value
    """
    cli_output = cli.system('application-list', ssh_client=con_ssh,
                            auth_info=auth_info)[1]
    app_table = table_parser.table(cli_output)

    filters = dict(kwargs)
    if application:
        filters['application'] = application

    return table_parser.get_multi_values(app_table, fields=field,
                                         rtn_dict=rtn_dict, zip_values=True,
                                         **filters)
def get_app_values(app_name, fields, con_ssh=None,
                   auth_info=Tenant.get('admin_platform')):
    """Get field values from 'system application-show'.

    Args:
        app_name (str): application name
        fields (str|list|tuple): field name(s) to fetch
        con_ssh:
        auth_info:

    Returns (list): one value per requested field
    """
    if isinstance(fields, str):
        fields = [fields]

    cli_output = cli.system('application-show', app_name, ssh_client=con_ssh,
                            auth_info=auth_info)[1]
    show_table = table_parser.table(cli_output, combine_multiline_entry=True)
    return table_parser.get_multi_values_two_col_table(show_table,
                                                       fields=fields)
def wait_for_apps_status(apps, status, timeout=360, check_interval=5,
                         fail_ok=False, con_ssh=None,
                         auth_info=Tenant.get('admin_platform')):
    """
    Wait for applications to reach expected status via system application-list

    Args:
        apps (str|list|tuple): app name(s) to monitor
        status (str|None): expected status. None/'' waits for the app to no
            longer be listed (a missing app reads back as '').
        timeout (int): max seconds to wait
        check_interval (int): seconds between polls
        fail_ok (bool): return instead of raising on failure/timeout
        con_ssh:
        auth_info:

    Returns (tuple): (<all_reached>(bool), <failed_apps>(list|None))

    Raises:
        ContainerError: some app failed or timed out, with fail_ok=False
    """
    status = '' if not status else status
    if isinstance(apps, str):
        apps = [apps]
    apps_to_check = list(apps)
    check_failed = []
    end_time = time.time() + timeout
    LOG.info(
        "Wait for {} application(s) to reach status: {}".format(apps, status))
    while time.time() < end_time:
        apps_status = get_apps(application=apps_to_check,
                               field=('application', 'status'),
                               con_ssh=con_ssh,
                               auth_info=auth_info)
        # Map app name -> current status; apps not listed are absent here
        # and read back as '' via .get() below.
        apps_status = {item[0]: item[1] for item in apps_status if item}

        checked = []
        for app in apps_to_check:
            current_app_status = apps_status.get(app, '')
            if current_app_status == status:
                checked.append(app)
            elif current_app_status.endswith('ed'):
                # A status string ending in 'ed' (e.g. 'applied',
                # 'upload-failed') is treated as settled; since it is not
                # the expected status, stop waiting on this app.
                check_failed.append(app)
                checked.append(app)

        apps_to_check = list(set(apps_to_check) - set(checked))
        if not apps_to_check:
            if check_failed:
                msg = '{} failed to reach status - {}'.format(check_failed,
                                                              status)
                LOG.warning(msg)
                if fail_ok:
                    return False, check_failed
                else:
                    raise exceptions.ContainerError(msg)

            LOG.info("{} reached expected status {}".format(apps, status))
            return True, None

        time.sleep(check_interval)

    # Timed out: everything still pending counts as failed.
    check_failed += apps_to_check
    msg = '{} did not reach status {} within {}s'.format(check_failed, status,
                                                         timeout)
    LOG.warning(msg)
    if fail_ok:
        return False, check_failed
    raise exceptions.ContainerError(msg)
def apply_app(app_name, check_first=False, fail_ok=False, applied_timeout=300,
              check_interval=10,
              wait_for_alarm_gone=True, con_ssh=None,
              auth_info=Tenant.get('admin_platform')):
    """
    Apply/Re-apply application via system application-apply. Check for status
    reaches 'applied'.

    Args:
        app_name (str):
        check_first (bool): skip when the app is already applied
        fail_ok:
        applied_timeout (int): seconds to wait for 'applied' status
        check_interval (int): poll interval in seconds
        con_ssh:
        wait_for_alarm_gone (bool): also wait for a config-out-of-date alarm
            (if one appears after the apply) to clear
        auth_info:

    Returns (tuple):
        (-1, "<app_name> is already applied. Do nothing.") # only returns
        if check_first=True.
        (0, "<app_name> (re)applied successfully")
        (1, <std_err>)  # cli rejected
        (2, "<app_name> failed to apply")   # did not reach applied status
        after apply.
    """
    if check_first:
        app_status = get_apps(application=app_name, field='status',
                              con_ssh=con_ssh, auth_info=auth_info)
        if app_status and app_status[0] == AppStatus.APPLIED:
            msg = '{} is already applied. Do nothing.'.format(app_name)
            LOG.info(msg)
            return -1, msg

    LOG.info("Apply application: {}".format(app_name))
    code, output = cli.system('application-apply', app_name,
                              ssh_client=con_ssh,
                              fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return 1, output

    res = wait_for_apps_status(apps=app_name, status=AppStatus.APPLIED,
                               timeout=applied_timeout,
                               check_interval=check_interval, con_ssh=con_ssh,
                               auth_info=auth_info, fail_ok=fail_ok)[0]
    if not res:
        return 2, "{} failed to apply".format(app_name)

    if wait_for_alarm_gone:
        alarm_id = EventLogID.CONFIG_OUT_OF_DATE
        # Only wait for the alarm to clear if it actually appeared within
        # 15s of the apply; otherwise skip the wait entirely.
        if system_helper.wait_for_alarm(alarm_id=alarm_id,
                                        entity_id='controller',
                                        timeout=15, fail_ok=True,
                                        auth_info=auth_info,
                                        con_ssh=con_ssh)[0]:
            system_helper.wait_for_alarm_gone(alarm_id=alarm_id,
                                              entity_id='controller',
                                              timeout=120,
                                              check_interval=10,
                                              con_ssh=con_ssh,
                                              auth_info=auth_info)

    msg = '{} (re)applied successfully'.format(app_name)
    LOG.info(msg)
    return 0, msg
def delete_app(app_name, check_first=True, fail_ok=False, applied_timeout=300,
               con_ssh=None,
               auth_info=Tenant.get('admin_platform')):
    """Delete an application and verify it is no longer listed.

    Args:
        app_name (str): application name
        check_first (bool): skip when the app does not exist
        fail_ok:
        applied_timeout (int): seconds to wait for the app to disappear
        con_ssh:
        auth_info:

    Returns (tuple):
        (-1, "<app_name> does not exist. Do nothing.")
        (0, "<app_name> deleted successfully")
        (1, <std_err>)
        (2, "<app_name> failed to delete")
    """
    if check_first:
        status_vals = get_apps(application=app_name, field='status',
                               con_ssh=con_ssh, auth_info=auth_info)
        if not status_vals:
            msg = '{} does not exist. Do nothing.'.format(app_name)
            LOG.info(msg)
            return -1, msg

    code, output = cli.system('application-delete', app_name,
                              ssh_client=con_ssh, fail_ok=fail_ok,
                              auth_info=auth_info)
    if code > 0:
        return 1, output

    # status=None waits until the app is no longer listed at all.
    deleted = wait_for_apps_status(apps=app_name, status=None,
                                   timeout=applied_timeout, con_ssh=con_ssh,
                                   auth_info=auth_info, fail_ok=fail_ok)[0]
    if not deleted:
        return 2, "{} failed to delete".format(app_name)

    msg = '{} deleted successfully'.format(app_name)
    LOG.info(msg)
    return 0, msg
def remove_app(app_name, check_first=True, fail_ok=False, applied_timeout=300,
               con_ssh=None,
               auth_info=Tenant.get('admin_platform')):
    """Remove an applied application and wait for 'uploaded' status.

    Args:
        app_name (str): application name
        check_first (bool): skip when the app is not applied
        fail_ok:
        applied_timeout (int): seconds to wait for 'uploaded' status
        con_ssh:
        auth_info:

    Returns (tuple):
        (-1, "<app_name> is not applied. Do nothing.")
        (0, "<app_name> removed successfully")
        (1, <std_err>)
        (2, "<app_name> failed to remove")  # did not reach uploaded status
    """
    if check_first:
        status_vals = get_apps(application=app_name, field='status',
                               con_ssh=con_ssh, auth_info=auth_info)
        not_applied = not status_vals or status_vals[0] in (
            AppStatus.UPLOADED, AppStatus.UPLOAD_FAILED)
        if not_applied:
            msg = '{} is not applied. Do nothing.'.format(app_name)
            LOG.info(msg)
            return -1, msg

    code, output = cli.system('application-remove', app_name,
                              ssh_client=con_ssh, fail_ok=fail_ok,
                              auth_info=auth_info)
    if code > 0:
        return 1, output

    removed = wait_for_apps_status(apps=app_name, status=AppStatus.UPLOADED,
                                   timeout=applied_timeout, con_ssh=con_ssh,
                                   auth_info=auth_info, fail_ok=fail_ok)[0]
    if not removed:
        return 2, "{} failed to remove".format(app_name)

    msg = '{} removed successfully'.format(app_name)
    LOG.info(msg)
    return 0, msg
def get_docker_reg_addr(con_ssh=None):
    """Return the local docker registry address from the docker conf file.

    Greps the 'addr: ' line out of StxPath.DOCKER_CONF on the controller.

    Args:
        con_ssh: ssh client; defaults to the active controller

    Returns (str): registry address
    """
    ssh = con_ssh if con_ssh else ControllerClient.get_active_controller()
    conf_line = ssh.exec_cmd(
        'grep --color=never "addr: " {}'.format(StxPath.DOCKER_CONF),
        fail_ok=False)[1]
    return conf_line.split('addr: ')[1].strip()
def pull_docker_image(name, tag=None, digest=None, con_ssh=None, timeout=300,
                      fail_ok=False):
    """Pull a docker image and verify it is listed in 'docker image ls'.

    Args:
        name (str): image repository name
        tag (str|None): image tag; takes precedence over digest
        digest (str|None): image digest; used only when tag is not given
        con_ssh:
        timeout:
        fail_ok:

    Returns (tuple):
        (0, <docker image ID>)
        (1, <std_err>)
    """
    image_ref = name.strip()
    if tag:
        image_ref += ':{}'.format(tag)
    elif digest:
        image_ref += '@{}'.format(digest)

    LOG.info("Pull docker image {}".format(image_ref))
    code, out = exec_docker_cmd('image pull', image_ref, timeout=timeout,
                                fail_ok=fail_ok, con_ssh=con_ssh)
    if code != 0:
        return 1, out

    image_id = get_docker_images(repo=name, tag=tag, field='IMAGE ID',
                                 con_ssh=con_ssh, fail_ok=False)[0]
    LOG.info(
        'docker image {} successfully pulled. ID: {}'.format(image_ref,
                                                             image_id))
    return 0, image_id
def login_to_docker(registry=None, user=None, password=None, con_ssh=None,
                    fail_ok=False):
    """
    Login to docker registry

    Args:
        registry (str|None): default docker registry will be used when None
        user (str|None): admin user will be used when None
        password (str|None): admin password will be used when None
        con_ssh (SSHClient|None):
        fail_ok (bool):

    Returns (tuple):
        (0, <cmd_args>(str))    # login succeeded
        (1, <std_err>(str))     # login failed

    SECURITY NOTE: the password is passed via 'docker login -p', which
    exposes it on the command line of the controller (process list, shell
    history). Consider --password-stdin if this ever handles non-lab
    credentials.
    """
    if not user:
        user = 'admin'
    if not password:
        password = Tenant.get('admin_platform').get('password')
    if not registry:
        registry = Container.LOCAL_DOCKER_REG

    args = '-u {} -p {} {}'.format(user, password, registry)
    LOG.info("Login to docker registry {}".format(registry))
    code, out = exec_docker_cmd('login', args, timeout=60, fail_ok=fail_ok,
                                con_ssh=con_ssh)
    if code != 0:
        return 1, out
    LOG.info('Logged into docker registry successfully: {}'.format(registry))
    return 0, args
def push_docker_image(name, tag=None, login_registry=None, con_ssh=None,
                      timeout=300, fail_ok=False):
    """Push a docker image via 'docker image push'.

    Args:
        name (str): image name
        tag (str|None): image tag
        login_registry (str|None): when set, login to this registry first
        con_ssh:
        timeout:
        fail_ok:

    Returns (tuple):
        (0, <args_used>)
        (1, <std_err>)
    """
    image_ref = name.strip()
    if tag:
        image_ref += ':{}'.format(tag)

    if login_registry:
        login_to_docker(registry=login_registry, con_ssh=con_ssh)

    LOG.info("Push docker image: {}".format(image_ref))
    code, out = exec_docker_cmd('image push', image_ref, timeout=timeout,
                                fail_ok=fail_ok, con_ssh=con_ssh)
    if code != 0:
        return 1, out

    LOG.info('docker image {} successfully pushed.'.format(image_ref))
    return 0, image_ref
def tag_docker_image(source_image, target_name, source_tag=None,
                     target_tag=None, con_ssh=None, timeout=300,
                     fail_ok=False):
    """
    Tag docker image via docker image tag. Verify image is tagged via docker
    image list.

    Args:
        source_image (str): existing image name
        target_name (str): new image name
        source_tag (str|None): tag of the source image
        target_tag (str|None): tag for the target image
        con_ssh:
        timeout:
        fail_ok:

    Returns:
        (0, <target_args>)
        (1, <std_err>)

    Raises:
        ContainerError: target image not listed after tagging
    """
    source_args = source_image.strip()
    if source_tag:
        source_args += ':{}'.format(source_tag)

    target_args = target_name.strip()
    if target_tag:
        target_args += ':{}'.format(target_tag)

    LOG.info("Tag docker image {} as {}".format(source_args, target_args))
    args = '{} {}'.format(source_args, target_args)
    code, out = exec_docker_cmd('image tag', args, timeout=timeout,
                                fail_ok=fail_ok, con_ssh=con_ssh)
    if code != 0:
        return 1, out

    # Verify the new tag actually shows up in 'docker image ls'.
    if not get_docker_images(repo=target_name, tag=target_tag,
                             con_ssh=con_ssh,
                             fail_ok=False):
        raise exceptions.ContainerError(
            "Docker image {} is not listed after tagging {}".format(
                target_name, source_image))

    LOG.info('docker image {} successfully tagged as {}.'.format(source_args,
                                                                 target_args))
    return 0, target_args
def remove_docker_images_with_pattern(pattern, con_ssh=None, timeout=300):
    """Remove docker image(s) whose 'docker images' row matches pattern.

    Args:
        pattern (str): grep pattern matched against 'docker images' output
        con_ssh: ssh client; defaults to the active controller
        timeout (int): timeout for the docker commands

    Returns (tuple): (code, output) of the last docker command executed
    """
    LOG.info("Remove docker images matching pattern: {}".format(pattern))
    # Quote the pattern so spaces/shell metacharacters reach grep intact
    # instead of being interpreted by the shell.
    args = " | grep '" + pattern + "' | awk '{print $3}' "
    code, out = exec_docker_cmd("images", args, timeout=timeout, fail_ok=True,
                                con_ssh=con_ssh)
    # Only proceed on success: with a non-zero code, 'out' may be error text
    # that would previously have been parsed as image ids.
    if code == 0 and out:
        # Drop blank lines so no empty args are passed to 'docker image rm'.
        image_ids = [line.strip() for line in out.splitlines()
                     if line.strip()]
        if image_ids:
            code, out = remove_docker_images(image_ids, force=True,
                                             con_ssh=con_ssh)
    return code, out
def remove_docker_images(images, force=False, con_ssh=None, timeout=300,
                         fail_ok=False):
    """Remove docker image(s) via 'docker image rm'.

    Args:
        images (str|tuple|list): image name(s) or id(s)
        force (bool): add --force to the rm command
        con_ssh:
        timeout:
        fail_ok:

    Returns (tuple):
        (0, <std_out>)
        (1, <std_err>)
    """
    if isinstance(images, str):
        images = (images,)

    LOG.info("Remove docker images: {}".format(images))
    rm_args = ' '.join(images)
    if force:
        rm_args = '--force {}'.format(rm_args)

    return exec_docker_cmd('image rm', rm_args, timeout=timeout,
                           fail_ok=fail_ok, con_ssh=con_ssh)
def get_docker_images(repo=None, tag=None, field='IMAGE ID', con_ssh=None,
                      fail_ok=False):
    """
    get values for given docker image via 'docker image ls <repo>'

    Args:
        repo (str|None): repository name; when None, list all images
        tag (str|None): appended as ':<tag>'; only used when repo is given
        field (str|tuple|list): column(s) to return
        con_ssh:
        fail_ok (bool): return None instead of raising when the cmd fails
            or no image is listed

    Returns (list|None): return None if no docker images returned at all due
        to cmd failure
    """
    # Bug fix: default to '' rather than None - a None args would render
    # literally as 'docker image ls None' in the executed command.
    args = ''
    if repo:
        args = repo
        if tag:
            args += ':{}'.format(tag)
    code, output = exec_docker_cmd(sub_cmd='image ls', args=args,
                                   fail_ok=fail_ok, con_ssh=con_ssh)
    if code != 0:
        return None

    table_ = table_parser.table_kube(output)
    if not table_['values']:
        if fail_ok:
            return None
        raise exceptions.ContainerError(
            "docker image {} does not exist".format(args))

    return table_parser.get_multi_values(table_, fields=field,
                                         zip_values=True)
def get_helm_overrides(field='overrides namespaces', app_name='stx-openstack',
                       charts=None,
                       auth_info=Tenant.get('admin_platform'), con_ssh=None):
    """Get helm override values via 'system helm-override-list'.

    Args:
        field (str): field(s) to return
        app_name (str): application name
        charts (None|str|list|tuple): chart name(s) to filter on
        auth_info:
        con_ssh:

    Returns (list):
    """
    cli_output = cli.system('helm-override-list', app_name,
                            ssh_client=con_ssh, auth_info=auth_info)[1]
    overrides_table = table_parser.table(cli_output)
    if charts:
        overrides_table = table_parser.filter_table(
            overrides_table, **{'chart name': charts})

    return table_parser.get_multi_values(overrides_table, fields=field,
                                         evaluate=True)
def get_helm_override_values(chart, namespace, app_name='stx-openstack',
                             fields=('combined_overrides',),
                             auth_info=Tenant.get('admin_platform'),
                             con_ssh=None):
    """
    Get helm-override values for given chart via system helm-override-show

    Args:
        chart (str): chart name
        namespace (str): chart namespace
        app_name (str): application the chart belongs to
        fields (str|tuple|list): field(s) to parse from the show table
        auth_info:
        con_ssh:

    Returns (list): list of parsed yaml formatted output. e.g., list of dict,
        list of list, list of str
    """
    args = '{} {} {}'.format(app_name, chart, namespace)
    table_ = table_parser.table(
        cli.system('helm-override-show', args, ssh_client=con_ssh,
                   auth_info=auth_info)[1],
        rstrip_value=True)

    if isinstance(fields, str):
        fields = (fields,)

    values = []
    for field in fields:
        value = table_parser.get_value_two_col_table(table_, field=field,
                                                     merge_lines=False)
        # Bug fix: yaml.load() without an explicit Loader is deprecated in
        # PyYAML 5.x and a TypeError in 6.x; safe_load parses the same cli
        # output without allowing arbitrary python object construction.
        values.append(yaml.safe_load('\n'.join(value)))
    return values
def __convert_kv(k, v):
    """Expand a dotted key into nested dicts.

    e.g. ('a.b.c', 1) -> {'a': {'b': {'c': 1}}}
    """
    if '.' not in k:
        return {k: v}
    prefix, leaf = k.rsplit('.', maxsplit=1)
    return __convert_kv(prefix, {leaf: v})
def update_helm_override(chart, namespace, app_name='stx-openstack',
                         yaml_file=None, kv_pairs=None,
                         reset_vals=False, reuse_vals=False,
                         auth_info=Tenant.get('admin_platform'),
                         con_ssh=None, fail_ok=False):
    """
    Update helm_override values for given chart via
    'system helm-override-update'.

    Args:
        chart (str): chart name
        namespace (str): chart namespace
        app_name (str): application the chart belongs to
        yaml_file (str|None): passed as --values
        kv_pairs (dict|None): passed as --set key=value[,...]
        reset_vals (bool): add --reset-values
        reuse_vals (bool): add --reuse-values
        fail_ok (bool):
        con_ssh:
        auth_info:

    Returns (tuple):
        (0, <overrides>(str|list|dict))     # cmd accepted.
        (1, <std_err>)  # system helm-override-update cmd rejected
    """
    args = '{} {} {}'.format(app_name, chart, namespace)
    if reset_vals:
        args = '--reset-values {}'.format(args)
    if reuse_vals:
        args = '--reuse-values {}'.format(args)
    if yaml_file:
        args = '--values {} {}'.format(yaml_file, args)
    if kv_pairs:
        cmd_overrides = ','.join(
            ['{}={}'.format(k, v) for k, v in kv_pairs.items()])
        args = '--set {} {}'.format(cmd_overrides, args)

    code, output = cli.system('helm-override-update', args, ssh_client=con_ssh,
                              fail_ok=fail_ok, auth_info=auth_info)
    if code != 0:
        return 1, output

    table_ = table_parser.table(output, rstrip_value=True)
    overrides = table_parser.get_value_two_col_table(table_, 'user_overrides')
    # Bug fix: yaml.load() without an explicit Loader is deprecated in
    # PyYAML 5.x and a TypeError in 6.x; safe_load converts str to
    # bool/int/float the same way, but does not convert None type.
    # Updates are not verified here since it is rather complicated to
    # verify properly.
    overrides = yaml.safe_load('\n'.join(overrides))

    LOG.info("Helm-override updated : {}".format(overrides))
    return 0, overrides
def is_stx_openstack_deployed(applied_only=False, con_ssh=None,
                              auth_info=Tenant.get('admin_platform'),
                              force_check=False):
    """Whether the stx-openstack application is deployed.

    Args:
        applied_only (bool): when True, only an 'applied' status counts
        con_ssh:
        auth_info:
        force_check (bool): bypass the cached OPENSTACK_DEPLOYED project var

    Returns (bool):
    """
    cached = ProjVar.get_var('OPENSTACK_DEPLOYED')
    if cached is not None and not applied_only and not force_check:
        return cached

    status_vals = get_apps(application='stx-openstack', field='status',
                           con_ssh=con_ssh, auth_info=auth_info)
    LOG.info("{}".format(status_vals))

    if not status_vals:
        return False

    current_status = status_vals[0]
    # Any status containing 'appl' (applying/applied/apply-failed...) counts
    # as deployed unless applied_only demands the exact applied state.
    deployed = 'appl' in current_status.lower()
    if deployed and applied_only and current_status != AppStatus.APPLIED:
        deployed = False
    return deployed

View File

@ -1,434 +0,0 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
import copy
from utils import cli, exceptions, table_parser
from utils.tis_log import LOG
from utils.clients.ssh import ControllerClient
from consts.auth import Tenant, HostLinuxUser
from consts.proj_vars import ProjVar
from consts.timeout import DCTimeout
from consts.filepaths import SysLogPath
from keywords import system_helper, nova_helper
def get_subclouds(field='name', name=None, avail=None, sync=None, mgmt=None, deploy=None,
                  auth_info=Tenant.get('admin_platform', 'RegionOne'), con_ssh=None,
                  source_openrc=None, rtn_dict=False, evaluate=False, strict=True, regex=False,
                  filter_subclouds=True):
    """
    Get subclouds values via 'dcmanager subcloud list'

    Args:
        field (str | tuple): fields of value to get
        name (str): subcloud name
        avail (str): subcloud availability status
        sync (str): subcloud sync status
        mgmt (str): subcloud management status
        deploy (str): subcloud deploy status
        auth_info (dict):
        con_ssh (SSHClient):
        source_openrc (None|bool):
        rtn_dict (bool): whether to return dict of field/value pairs
        evaluate (bool): whether to convert value to python data type
        strict (bool): True to use re.match, False to use re.search
        regex (bool): whether to use regex to find value(s)
        filter_subclouds (bool): whether to filter out the subclouds that are
            not in the --subcloud_list arg; note rtn_dict/evaluate/strict/
            regex only apply when this is False

    Returns (list | dict):
        when rtn_dict is False, list of values
        when rtn_dict is True, dict of field/values pairs
    """
    table_ = table_parser.table(cli.dcmanager('subcloud list', ssh_client=con_ssh,
                                              auth_info=auth_info, source_openrc=source_openrc)[1])

    # Map cli table headers to the given filters; only non-empty ones apply.
    arg_map = {'name': name,
               'availability': avail,
               'sync': sync,
               'management': mgmt,
               'deploy status': deploy}
    kwargs = {key: val for key, val in arg_map.items() if val}
    if filter_subclouds:
        filtered_subclouds = table_parser.get_values(table_, target_header=field, **kwargs)
        subcloud_list = ProjVar.get_var('SUBCLOUD_LIST')
        if subcloud_list:
            # Restrict results to the subclouds selected for this run.
            filtered_subclouds = [subcloud for subcloud in filtered_subclouds
                                  if subcloud in subcloud_list]
        LOG.info('filtered_subclouds: {}'.format(filtered_subclouds))
        return filtered_subclouds
    else:
        return table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict, evaluate=evaluate,
                                             strict=strict, regex=regex, **kwargs)
def wait_for_subcloud_status(subcloud, avail=None, sync=None, mgmt=None, deploy=None,
                             timeout=DCTimeout.SUBCLOUD_AUDIT, check_interval=30,
                             auth_info=Tenant.get('admin_platform', 'RegionOne'),
                             con_ssh=None, source_openrc=None, fail_ok=False):
    """
    Wait for subcloud to reach the given status(es)

    Args:
        subcloud (str): subcloud name (required)
        avail (str|None): expected availability status
        sync (str|None): expected sync status
        mgmt (str|None): expected management status
        deploy (str|None): expected deploy status
        timeout (int): max seconds to wait
        check_interval (int): seconds between polls
        auth_info:
        con_ssh:
        source_openrc:
        fail_ok (bool): return instead of raising on timeout

    Returns (tuple):
        (0, <subcloud>)  # reached all expected statuses
        (1, <msg>)       # timed out, fail_ok=True

    Raises:
        ValueError: subcloud missing or no expected status given
        DCError: timed out with fail_ok=False
    """
    if not subcloud:
        raise ValueError("Subcloud name must be specified")

    expt_status = {}
    if avail:
        expt_status['avail'] = avail
    if sync:
        expt_status['sync'] = sync
    if mgmt:
        expt_status['mgmt'] = mgmt
    if deploy:
        expt_status['deploy'] = deploy
    if not expt_status:
        raise ValueError("At least one expected status of the subcloud must be specified.")

    LOG.info("Wait for {} status: {}".format(subcloud, expt_status))
    end_time = time.time() + timeout + check_interval
    while time.time() < end_time:
        # get_subclouds applies every expected status as a table filter, so a
        # non-empty result means the subcloud is in all expected states.
        if get_subclouds(field='name', name=subcloud, con_ssh=con_ssh, source_openrc=source_openrc,
                         auth_info=auth_info, **expt_status):
            return 0, subcloud
        LOG.info("Not in expected states yet...")
        time.sleep(check_interval)

    msg = '{} status did not reach {} within {} seconds'.format(subcloud, expt_status, timeout)
    LOG.warning(msg)
    if fail_ok:
        return 1, msg
    else:
        raise exceptions.DCError(msg)
def _manage_unmanage_subcloud(subcloud=None, manage=False, check_first=True, fail_ok=False,
                              con_ssh=None, auth_info=Tenant.get('admin_platform', 'RegionOne'),
                              source_openrc=False):
    """
    Manage/Unmanage given subcloud(s) via 'dcmanager subcloud manage|unmanage'

    Args:
        subcloud (str|list|tuple|None): subcloud name(s); defaults to the
            primary subcloud
        manage (bool): True to manage, False to unmanage
        check_first (bool): skip subclouds already in the target state
        fail_ok (bool): return instead of raising when the cli fails
        con_ssh:
        auth_info:
        source_openrc:

    Returns (tuple):
        (-1, [])  # all given subclouds already in target state, nothing done
        (0, [<updated subclouds>])   # success
        (1, [<cli_rejected_subclouds>])  # cli failed, fail_ok=True

    Raises:
        DCError: cli failed with fail_ok=False, or post-check found a
            subcloud not in the expected management state
    """
    operation = 'manage' if manage else 'unmanage'
    # Target management state shown by 'dcmanager subcloud list':
    # 'managed' / 'unmanaged'.
    expt_state = '{}d'.format(operation)
    if not subcloud:
        subcloud = [ProjVar.get_var('PRIMARY_SUBCLOUD')]
    elif isinstance(subcloud, str):
        subcloud = [subcloud]

    subclouds_to_update = list(subcloud)
    if check_first:
        subclouds_in_state = get_subclouds(mgmt=expt_state, con_ssh=con_ssh, auth_info=auth_info)
        subclouds_to_update = list(set(subclouds_to_update) - set(subclouds_in_state))
        if not subclouds_to_update:
            LOG.info("{} already {}. Do nothing.".format(subcloud, expt_state))
            return -1, []

    LOG.info("Attempt to {}: {}".format(operation, subclouds_to_update))
    failed_subclouds = []
    for subcloud_ in subclouds_to_update:
        code, out = cli.dcmanager('subcloud ' + operation, subcloud_, ssh_client=con_ssh,
                                  fail_ok=True, auth_info=auth_info, source_openrc=source_openrc)
        if code > 0:
            failed_subclouds.append(subcloud_)

    if failed_subclouds:
        err = "Failed to {} {}".format(operation, failed_subclouds)
        if fail_ok:
            LOG.info(err)
            return 1, failed_subclouds
        raise exceptions.DCError(err)

    # Post-check: confirm every updated subcloud now reports the expected
    # management state.
    LOG.info("Check management status for {} after dcmanager subcloud {}".format(
        subclouds_to_update, operation))
    mgmt_states = get_subclouds(field='management', name=subclouds_to_update, auth_info=auth_info,
                                con_ssh=con_ssh)
    failed_subclouds = \
        [subclouds_to_update[i] for i in range(len(mgmt_states)) if mgmt_states[i] != expt_state]
    if failed_subclouds:
        raise exceptions.DCError("{} not {} after dcmanger subcloud {}".format(
            failed_subclouds, expt_state, operation))

    return 0, subclouds_to_update
def manage_subcloud(subcloud=None, check_first=True, fail_ok=False, con_ssh=None):
    """Manage subcloud(s).

    Thin wrapper over _manage_unmanage_subcloud with manage=True.

    Args:
        subcloud (str|tuple|list): subcloud name(s); defaults to the primary
        check_first (bool): skip subclouds already managed
        fail_ok (bool):
        con_ssh (SSHClient):

    Returns (tuple):
        (-1, []) All given subcloud(s) already managed. Do nothing.
        (0, [<updated subclouds>]) Successfully managed the given subcloud(s)
        (1, [<cli_rejected_subclouds>]) dcmanager manage cli failed on these
    """
    return _manage_unmanage_subcloud(subcloud=subcloud, manage=True,
                                     check_first=check_first,
                                     fail_ok=fail_ok, con_ssh=con_ssh)
def unmanage_subcloud(subcloud=None, check_first=True, fail_ok=False, con_ssh=None,
                      source_openrc=False):
    """Unmanage subcloud(s).

    Thin wrapper over _manage_unmanage_subcloud with manage=False.

    Args:
        subcloud (str|tuple|list): subcloud name(s); defaults to the primary
        check_first (bool): skip subclouds already unmanaged
        fail_ok (bool):
        con_ssh (SSHClient):
        source_openrc (bool):

    Returns (tuple):
        (-1, []) All given subcloud(s) already unmanaged. Do nothing.
        (0, [<updated subclouds>]) Successfully unmanaged the given subcloud(s)
        (1, [<cli_rejected_subclouds>]) dcmanager unmanage cli failed on these
    """
    return _manage_unmanage_subcloud(subcloud=subcloud, manage=False,
                                     check_first=check_first,
                                     fail_ok=fail_ok, con_ssh=con_ssh,
                                     source_openrc=source_openrc)
def wait_for_subcloud_config(func, *func_args, subcloud=None, config_name=None,
                             expected_value=None, auth_name='admin_platform', fail_ok=False,
                             timeout=DCTimeout.SYNC, check_interval=30, strict_order=True,
                             **func_kwargs):
    """
    Wait for subcloud configuration to reach expected value

    Args:
        subcloud (str|None): defaults to the primary subcloud
        func: function defined to get current value, which has to has parameter con_ssh and auth_info
        *func_args: positional args for above func. Should NOT include auth_info or con_ssh.
        config_name (str): such as dns, keypair, etc
        expected_value (None|str|list): when None, the central region's value
            (queried via func) is used as the expectation
        auth_name (str): auth dict name. e.g., admin_platform, admin, tenant1, TENANT2, etc
        fail_ok (bool):
        timeout (int):
        check_interval (int):
        strict_order (bool): when False, values are compared order-insensitively
        **func_kwargs: kwargs for defined func. auth_info and con_ssh has to be provided here

    Returns (tuple):
        (0, <subcloud_config>)  # same as expected
        (1, <subcloud_config>)  # did not update within timeout
        (2, <subcloud_config>)  # updated to unexpected value
    """
    def _normalize(val):
        # Split comma-separated strings into lists and optionally sort, so
        # values from different sources compare consistently.
        if isinstance(val, str):
            val = val.split(sep=',')
        if not strict_order:
            val = sorted(list(val))
        return val

    if not subcloud:
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    config_name = ' ' + config_name if config_name else ''
    if expected_value is None:
        # Use the central region's current setting as the expectation.
        central_ssh = ControllerClient.get_active_controller(name='RegionOne')
        expected_value = func(con_ssh=central_ssh,
                              auth_info=Tenant.get(auth_name, dc_region='RegionOne'))
    # Bug fix: normalize the expectation uniformly. Previously a string
    # fetched from the central region was never split, while the subcloud
    # value was - so the two could never compare equal.
    expected_value = _normalize(expected_value)

    LOG.info("Wait for {}{} to be {}".format(subcloud, config_name, expected_value))
    if not func_kwargs.get('con_ssh', None):
        func_kwargs['con_ssh'] = ControllerClient.get_active_controller(name=subcloud)
    if not func_kwargs.get('auth_info', None):
        func_kwargs['auth_info'] = Tenant.get(auth_name, dc_region=subcloud)

    origin_subcloud_val = _normalize(func(*func_args, **func_kwargs))
    subcloud_val = origin_subcloud_val

    end_time = time.time() + timeout + check_interval
    while time.time() < end_time:
        if subcloud_val == expected_value:
            LOG.info("{}{} setting is same as central region".format(subcloud, config_name))
            return 0, subcloud_val
        elif subcloud_val != origin_subcloud_val:
            # Value changed, but not to what was expected.
            msg = '{}{} config changed to unexpected value. Expected: {}; Actual: {}'.\
                format(subcloud, config_name, expected_value, subcloud_val)
            if fail_ok:
                LOG.info(msg)
                return 2, subcloud_val
            else:
                raise exceptions.DCError(msg)
        time.sleep(check_interval)
        # Bug fix: re-apply normalization on every poll. The raw re-queried
        # value was previously compared against the normalized expectation,
        # so iterations after the first could never match.
        subcloud_val = _normalize(func(*func_args, **func_kwargs))

    msg = '{}{} config did not reach: {} within {} seconds; actual: {}'.format(
        subcloud, config_name, expected_value, timeout, subcloud_val)
    if fail_ok:
        LOG.info(msg)
        return 1, subcloud_val
    else:
        raise exceptions.DCError(msg)
def wait_for_sync_audit(subclouds, con_ssh=None, fail_ok=False, filters_regex=None,
                        timeout=DCTimeout.SYNC):
    """
    Wait for sync audit log messages in dcmanager orchestrator log for given subcloud(s)

    Args:
        subclouds (list|tuple|str): subcloud name(s) to wait for
        con_ssh: central region ssh client; defaults to RegionOne active controller
        fail_ok (bool): if True, return (False, res) on timeout instead of raising
        filters_regex: e.g., ['audit_action.*keypair', 'Clean audit.*ntp'], '\/compute'
        timeout (int): max seconds to wait for all expected log lines

    Returns (tuple):
        (True, <res_dict>)   # all expected audit lines observed per subcloud
        (False, <res_dict>)  # some subclouds timed out (only when fail_ok=True)

    """
    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller('RegionOne')

    if isinstance(subclouds, str):
        subclouds = [subclouds]

    LOG.info("Waiting for sync audit in dcmanager.log for: {}".format(subclouds))
    if not filters_regex:
        # Default to the endpoint types audited for every subcloud
        filters_regex = ['platform', 'patching', 'identity']
    elif isinstance(filters_regex, str):
        filters_regex = [filters_regex]

    # Outstanding (subcloud -> [service regex, ...]) pairs still to be matched
    subclouds_dict = {subcloud: list(filters_regex) for subcloud in subclouds}
    res = {subcloud: False for subcloud in subclouds}
    subclouds_to_wait = list(subclouds)
    end_time = time.time() + timeout
    # Each expect pattern is '<subcloud>.*<service_regex>'
    expt_list = []
    for subcloud in subclouds_dict:
        expt_list += ['{}.*{}'.format(subcloud, service) for service in subclouds_dict[subcloud]]

    # Live-tail the orchestrator log and match expected patterns as they appear
    con_ssh.send('tail -n 0 -f {}'.format(SysLogPath.DC_ORCH))
    try:
        while time.time() < end_time:
            index = con_ssh.expect(expt_list, timeout=timeout, fail_ok=True)
            if index >= 0:
                # Matched pattern was built as '<subcloud>.*<service>'; split it back
                subcloud_, service_ = expt_list[index].split('.*', maxsplit=1)
                subclouds_dict[subcloud_].remove(service_)
                expt_list.pop(index)
                if not subclouds_dict[subcloud_]:
                    # All expected services seen for this subcloud
                    subclouds_to_wait.remove(subcloud_)
                    subclouds_dict.pop(subcloud_)
                    res[subcloud_] = True
                if not subclouds_to_wait:
                    LOG.info("sync request logged for: {}".format(subclouds))
                    return True, res
            else:
                # expect() timed out without a match; give up on the remainder
                msg = 'sync audit for {} not shown in {} in {}s: {}'.format(
                    subclouds_to_wait, SysLogPath.DC_ORCH, timeout, subclouds_dict)
                if fail_ok:
                    LOG.info(msg)
                    for subcloud in subclouds_to_wait:
                        res[subcloud] = False
                    return False, res
                else:
                    raise exceptions.DCError(msg)
    finally:
        # Terminate the 'tail -f' (NOTE(review): send_control() presumably
        # sends Ctrl-C -- confirm) and consume the prompt so the ssh
        # session remains usable for subsequent commands
        con_ssh.send_control()
        con_ssh.expect()
def wait_for_subcloud_dns_config(subcloud=None, subcloud_ssh=None, expected_dns=None,
                                 fail_ok=False, timeout=DCTimeout.SYNC, check_interval=30):
    """
    Wait for the subcloud DNS servers to sync to the expected value.

    Args:
        subcloud (str|None): subcloud name; defaults to the primary subcloud
        subcloud_ssh (None|SSHClient): ssh client for the subcloud
        expected_dns (None|str|list): dns servers to wait for
        fail_ok (bool): if True, return instead of raising on mismatch/timeout
        timeout (int): max seconds to wait
        check_interval (int): seconds between checks

    Returns (tuple):
        (0, <subcloud_dns_servers>)    # same as expected
        (1, <subcloud_dns_servers>)    # did not update within timeout
        (2, <subcloud_dns_servers>)    # updated to unexpected value

    """
    extra_kwargs = {}
    if subcloud_ssh:
        extra_kwargs['con_ssh'] = subcloud_ssh

    return wait_for_subcloud_config(subcloud=subcloud,
                                    func=system_helper.get_dns_servers,
                                    config_name='DNS',
                                    expected_value=expected_dns,
                                    fail_ok=fail_ok, timeout=timeout,
                                    check_interval=check_interval,
                                    **extra_kwargs)
def wait_for_subcloud_ntp_config(subcloud=None, subcloud_ssh=None, expected_ntp=None,
                                 clear_alarm=True, fail_ok=False, timeout=DCTimeout.SYNC,
                                 check_interval=30):
    """
    Wait for the subcloud NTP servers to sync to the expected value.

    Args:
        subcloud (str|None): subcloud name; defaults to the primary subcloud
        subcloud_ssh (None|SSHClient): ssh client for the subcloud
        expected_ntp (None|str|list): ntp servers to wait for
        clear_alarm (bool): whether to wait for and clear the
            config-out-of-date alarms after a config change is observed
        fail_ok (bool): if True, return instead of raising on mismatch/timeout
        timeout (int): max seconds to wait
        check_interval (int): seconds between checks

    Returns (tuple):
        (0, <subcloud_ntp_servers>)    # same as expected
        (1, <subcloud_ntp_servers>)    # did not update within timeout
        (2, <subcloud_ntp_servers>)    # updated to unexpected value

    """
    subcloud = subcloud if subcloud else ProjVar.get_var('PRIMARY_SUBCLOUD')

    helper_kwargs = {'auth_info': Tenant.get('admin_platform', subcloud)}
    if subcloud_ssh:
        helper_kwargs['con_ssh'] = subcloud_ssh

    result = wait_for_subcloud_config(subcloud=subcloud,
                                      func=system_helper.get_ntp_servers,
                                      config_name='NTP',
                                      expected_value=expected_ntp,
                                      fail_ok=fail_ok, timeout=timeout,
                                      check_interval=check_interval,
                                      **helper_kwargs)

    # NTP changes raise config-out-of-date alarms on the controllers;
    # clear them once the config has actually changed (rc 0 or 2)
    if clear_alarm and result[0] in (0, 2):
        system_helper.wait_and_clear_config_out_of_date_alarms(
            host_type='controller', **helper_kwargs)

    return result

File diff suppressed because it is too large Load Diff

View File

@ -1,165 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from utils import cli
from utils import table_parser
from utils.tis_log import LOG
from consts.auth import Tenant
from keywords import common
def get_aggregated_measures(field='value', resource_type=None, metrics=None,
                            start=None, stop=None, overlap=None,
                            refresh=None, resource_ids=None, extra_query=None,
                            fail_ok=False, auth_info=Tenant.get('admin'),
                            con_ssh=None):
    """
    Get measurements via 'openstack metric measures aggregation'

    Args:
        field (str): header of a column
        resource_type (str|None): used in --resource-type <resource_type>
        metrics (str|list|tuple|None): used in --metric <metric1> [metric2 ...]
        start (str|None): used in --start <start>
        stop (str|None): used in --stop <stop>
        refresh (bool): used in --refresh
        overlap (str|None): overlap percentage. used in
            --needed-overlap <overlap>
        resource_ids (str|list|tuple|None): used in --query "id=<resource_id1>[
            or id=<resource_id2> ...]"
        extra_query (str|None): used in --query <extra_query>
        fail_ok:
        auth_info:
        con_ssh:

    Returns (tuple):
        (0, <values>)   # list of values from the requested column
        (1, <output>)   # cli rejected (fail_ok=True)

    """
    LOG.info("Getting aggregated measurements...")
    args = common.parse_args({
        'resource-type': resource_type,
        'metric': metrics,
        'start': start,
        'stop': stop,
        'needed-overlap': overlap,
        'refresh': refresh,
    }, vals_sep=' ')

    # Build the --query value: resource ids OR'ed together, AND'ed with
    # any extra query string
    query_parts = []
    if resource_ids:
        ids = [resource_ids] if isinstance(resource_ids, str) else resource_ids
        query_parts.append(' or '.join('id={}'.format(val) for val in ids))
    if extra_query:
        query_parts.append('{}'.format(extra_query))
    if query_parts:
        args += ' --query "{}"'.format(' and '.join(query_parts))

    code, out = cli.openstack('metric measures aggregation', args,
                              ssh_client=con_ssh, fail_ok=fail_ok,
                              auth_info=auth_info)
    if code > 0:
        return 1, out

    return 0, table_parser.get_values(table_parser.table(out), field)
def get_metric_values(metric_id=None, metric_name=None, resource_id=None,
                      fields='id', fail_ok=False,
                      auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get metric info via 'openstack metric show'

    Args:
        metric_id (str|None):
        metric_name (str|None): Only used if metric_id is not provided
        resource_id (str|None): Only used if metric_id is not provided
        fields (str|list|tuple): field name(s) to return
        fail_ok (bool):
        auth_info:
        con_ssh:

    Returns (list): values of the requested fields; raw cli output if the
        command was rejected (fail_ok=True)

    Raises:
        ValueError: if neither metric_id nor metric_name is given, or if
            metric_name is given without resource_id while fail_ok=False

    """
    if metric_id is None and metric_name is None:
        raise ValueError("metric_id or metric_name has to be provided.")

    if metric_id:
        arg = metric_id
    else:
        if resource_id:
            arg = '--resource-id {} "{}"'.format(resource_id, metric_name)
        else:
            # Metric names are not globally unique, so looking up by name
            # alone is expected to fail unless the caller opted in
            if not fail_ok:
                raise ValueError("resource_id needs to be provided when using "
                                 "metric_name")
            arg = '"{}"'.format(metric_name)

    # Bug fix: cli.openstack() already prepends 'openstack' to the command
    # (see get_metrics()/get_aggregated_measures()), so the previous value
    # 'openstack metric show' produced the invalid command
    # 'openstack openstack metric show'
    code, output = cli.openstack('metric show', arg,
                                 ssh_client=con_ssh, fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code > 0:
        return output

    table_ = table_parser.table(output)
    return table_parser.get_multi_values_two_col_table(table_, fields)
def get_metrics(field='id', metric_name=None, resource_id=None, fail_ok=True,
                auth_info=Tenant.get('admin'), con_ssh=None):
    """
    Get metrics values via 'openstack metric list'

    Args:
        field (str|list|tuple): header(s) of the metric list table
        metric_name (str|None): grep the cli output for this metric name
        resource_id (str|None): grep the cli output for this resource id
        fail_ok (bool):
        auth_info:
        con_ssh:

    Returns (list): list of strings when field is a str; otherwise a list
        of per-field value lists. Empty list if the cli command failed.

    """
    # Columns requested from the cli; a line's positional index maps back
    # to this list when parsing the '-f value' output below
    columns = ['id', 'archive_policy/name', 'name', 'unit', 'resource_id']
    arg = '-f value '
    arg += ' '.join(['-c {}'.format(column) for column in columns])

    # Filter rows via grep on the remote host instead of parsing everything
    grep_str = ''
    if resource_id:
        grep_str += ' | grep --color=never -E -i {}'.format(resource_id)
    if metric_name:
        grep_str += ' | grep --color=never -E -i {}'.format(metric_name)
    arg += grep_str

    code, output = cli.openstack('metric list', arg, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return []

    convert = False
    if isinstance(field, str):
        field = (field, )
        convert = True

    # Fix: splitting the output is loop-invariant -- hoisted out of the
    # per-field loop (previously re-split for every requested field)
    lines = output.splitlines()
    values = []
    for header in field:
        index = columns.index(header.lower())
        # '-f value' output is space-separated in the order of 'columns'
        values.append([line.split(sep=' ')[index] for line in lines])

    if convert:
        values = values[0]
    return values

View File

@ -1,398 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import time
from utils import table_parser, cli, exceptions
from utils.tis_log import LOG
from utils.clients.ssh import get_cli_client
from consts.stx import GuestImages, HeatStackStatus, HEAT_CUSTOM_TEMPLATES
from consts.filepaths import TestServerPath
from keywords import network_helper, common
from testfixtures.fixture_resources import ResourceCleanup
def _wait_for_heat_stack_deleted(stack_name=None, timeout=120,
                                 check_interval=3, con_ssh=None,
                                 auth_info=None):
    """
    Poll until the given heat stack no longer exists.

    Args:
        stack_name (str): Heat stack name to check for state
        timeout (int): max seconds to wait for the stack to disappear
        check_interval (int): seconds between status checks
        con_ssh (SSHClient): If None, active controller ssh will be used.
        auth_info (dict): Tenant dict. If None, primary tenant will be used.

    Returns (bool): True if the stack is gone; False if it entered
        DELETE_FAILED or the timeout expired.

    """
    LOG.info("Waiting for {} to be deleted...".format(stack_name))
    deadline = time.time() + timeout
    while time.time() < deadline:
        current = get_stack_status(stack=stack_name, auth_info=auth_info,
                                   con_ssh=con_ssh, fail_ok=True)
        if not current:
            # No status at all means the stack is no longer listed
            return True
        if current[0] == HeatStackStatus.DELETE_FAILED:
            LOG.warning('Heat stack in DELETE_FAILED state')
            return False
        time.sleep(check_interval)

    LOG.warning(
        "Heat stack {} did not get deleted within timeout".format(stack_name))
    return False
def wait_for_heat_status(stack_name=None,
                         status=HeatStackStatus.CREATE_COMPLETE,
                         timeout=300, check_interval=5,
                         fail_ok=False, con_ssh=None, auth_info=None):
    """
    Wait for the heat stack to reach the desired status or time out.

    Args:
        stack_name (str): Heat stack name to check for state
        status (str): Status to check for
        timeout (int): max seconds to wait
        check_interval (int): seconds between status checks
        fail_ok (bool): if True, return (False, msg) instead of raising
        con_ssh (SSHClient): If None, active controller ssh will be used.
        auth_info (dict): Tenant dict. If None, primary tenant will be used.

    Returns (tuple): <res_bool>, <msg>

    Raises:
        exceptions.HeatError: stack reached the matching failure status or
            timed out, and fail_ok is False

    """
    LOG.info("Waiting for {} to be shown in {} ...".format(stack_name, status))
    end_time = time.time() + timeout

    # Map the expected terminal status to its corresponding failure status
    # so a failed create/update is detected early instead of waiting for
    # the full timeout
    fail_status = current_status = None
    if status == HeatStackStatus.CREATE_COMPLETE:
        fail_status = HeatStackStatus.CREATE_FAILED
    elif status == HeatStackStatus.UPDATE_COMPLETE:
        fail_status = HeatStackStatus.UPDATE_FAILED

    while time.time() < end_time:
        current_status = get_stack_status(stack=stack_name, auth_info=auth_info,
                                          con_ssh=con_ssh)
        if status == current_status:
            return True, 'Heat stack {} has reached {} status'.format(
                stack_name, status)
        elif fail_status == current_status:
            # Dump the stack's resources into the log to aid debugging
            stack_id = get_stack_values(stack=stack_name, fields='id',
                                        auth_info=auth_info, con_ssh=con_ssh)[0]
            get_stack_resources(stack=stack_id, auth_info=auth_info,
                                con_ssh=con_ssh)

            err = "Heat stack {} failed to reach {}, actual status: {}".format(
                stack_name, status, fail_status)
            if fail_ok:
                LOG.warning(err)
                return False, err
            raise exceptions.HeatError(err)

        time.sleep(check_interval)

    # Timed out: log resources for debugging before failing
    stack_id = get_stack_values(stack=stack_name, fields='id',
                                auth_info=auth_info, con_ssh=con_ssh)[0]
    get_stack_resources(stack=stack_id, auth_info=auth_info, con_ssh=con_ssh)
    err_msg = "Heat stack {} did not reach {} within {}s. Actual " \
              "status: {}".format(stack_name, status, timeout, current_status)
    if fail_ok:
        LOG.warning(err_msg)
        return False, err_msg
    raise exceptions.HeatError(err_msg)
def get_stack_values(stack, fields='stack_status_reason', con_ssh=None,
                     auth_info=None, fail_ok=False):
    """
    Get value(s) of the given field(s) from 'openstack stack show'.

    Args:
        stack (str): name or id of the heat stack
        fields (str|list|tuple): field(s) to return
        con_ssh (SSHClient): If None, active controller ssh will be used.
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        fail_ok (bool):

    Returns (list|None): values of the given field(s); None if the cli
        command was rejected (only possible when fail_ok=True).

    """
    code, out = cli.openstack('stack show', stack, ssh_client=con_ssh,
                              auth_info=auth_info, fail_ok=fail_ok)
    if code > 0:
        return None

    table_ = table_parser.table(out)
    return table_parser.get_multi_values_two_col_table(table_=table_,
                                                       fields=fields)
def get_stacks(name=None, field='id', con_ssh=None, auth_info=None, all_=True):
    """
    List heat stacks, optionally filtered by stack name.

    Args:
        con_ssh (SSHClient): If None, active controller ssh will be used.
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        all_ (bool): whether to display all stacks for admin user
        name (str): Given name for the heat stack
        field (str|list|tuple): column(s) to return

    Returns (list): list of heat stacks.

    """
    args = ''
    # Admin can list stacks across all projects
    if auth_info is not None and auth_info['user'] == 'admin' and all_:
        args = '--a'

    output = cli.openstack('stack list', positional_args=args,
                           ssh_client=con_ssh, auth_info=auth_info)[1]
    table_ = table_parser.table(output)
    filters = {'Stack Name': name} if name else {}
    return table_parser.get_multi_values(table_, field, **filters)
def get_stack_status(stack, con_ssh=None, auth_info=None, fail_ok=False):
    """
    Return the status of a heat stack, or None if unavailable.

    Args:
        con_ssh (SSHClient): If None, active controller ssh will be used.
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        stack (str): Given name for the heat stack
        fail_ok (bool):

    Returns (str|None): heat stack status, or None when the stack show
        command fails or returns nothing.

    """
    statuses = get_stack_values(stack, fields='stack_status', con_ssh=con_ssh,
                                auth_info=auth_info, fail_ok=fail_ok)
    if not statuses:
        return None
    return statuses[0]
def get_stack_resources(stack, field='resource_name', auth_info=None,
                        con_ssh=None, **kwargs):
    """
    Get resources of a heat stack via 'openstack stack resource list --long'.

    Args:
        stack (str): id (or name) for the heat stack. ID is required if admin
            user is used to display tenant resource.
        field: values to return
        auth_info:
        con_ssh:
        kwargs: key/value pair to filer out the values to return

    Returns (list):

    """
    out = cli.openstack('stack resource list --long', stack,
                        ssh_client=con_ssh, auth_info=auth_info)[1]
    resource_table = table_parser.table(out)
    return table_parser.get_values(resource_table, target_header=field,
                                   **kwargs)
def delete_stack(stack, fail_ok=False, check_first=False, con_ssh=None,
                 auth_info=None):
    """
    Delete the given heat stack for a given tenant.

    Args:
        con_ssh (SSHClient): If None, active controller ssh will be used.
        fail_ok (bool):
        check_first (bool): whether or not to check the stack existence
            before attempt to delete
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        stack (str): Given name for the heat stack

    Returns (tuple): Status and msg of the heat deletion.
        (-1, msg)  # stack not found, nothing to do (check_first=True)
        (0, msg)   # deleted successfully
        (1, out)   # cli rejected (fail_ok=True)
        (2, msg)   # delete accepted but stack still present (fail_ok=True)

    """
    if not stack:
        raise ValueError("stack_name is not provided.")

    if check_first:
        if not get_stack_status(stack, con_ssh=con_ssh, auth_info=auth_info,
                                fail_ok=True):
            msg = "Heat stack {} doesn't exist on the system. Do " \
                  "nothing.".format(stack)
            LOG.info(msg)
            return -1, msg

    LOG.info("Deleting Heat Stack %s", stack)
    exitcode, output = cli.openstack('stack delete -y', stack,
                                     ssh_client=con_ssh, fail_ok=fail_ok,
                                     auth_info=auth_info)
    # NOTE(review): exit code 1 is tolerated here -- only codes above 1 are
    # treated as rejection, unlike update_stack() which rejects on == 1;
    # confirm this asymmetry is intentional
    if exitcode > 1:
        LOG.warning("Delete heat stack request rejected.")
        return 1, output

    if not _wait_for_heat_stack_deleted(stack_name=stack, auth_info=auth_info):
        # Dump the stack's resources into the log to aid debugging
        stack_id = get_stack_values(stack=stack, fields='id',
                                    auth_info=auth_info, con_ssh=con_ssh)[0]
        get_stack_resources(stack=stack_id, auth_info=auth_info,
                            con_ssh=con_ssh)

        msg = "heat stack {} is not removed after stack-delete.".format(stack)
        if fail_ok:
            LOG.warning(msg)
            return 2, msg
        raise exceptions.HeatError(msg)

    succ_msg = "Heat stack {} is successfully deleted.".format(stack)
    LOG.info(succ_msg)
    return 0, succ_msg
def get_heat_params(param_name=None):
    """
    Resolve a heat template parameter value by keyword.

    Args:
        param_name (str): one of 'NETWORK', 'FLAVOR', 'IMAGE'

    Returns (str|None): value for the given parameter, or None for an
        unrecognized (or missing) parameter name.

    """
    if param_name == 'FLAVOR':
        return 'small_ded'
    if param_name == 'IMAGE':
        return GuestImages.DEFAULT['guest']
    if param_name == 'NETWORK':
        mgmt_net_id = network_helper.get_mgmt_net_id()
        return network_helper.get_net_name_from_id(net_id=mgmt_net_id)
    return None
def create_stack(stack_name, template, pre_creates=None, environments=None,
                 stack_timeout=None, parameters=None, param_files=None,
                 enable_rollback=None, dry_run=None, wait=None, tags=None,
                 fail_ok=False, con_ssh=None, auth_info=None,
                 cleanup='function', timeout=300):
    """
    Create the given heat stack for a given tenant.

    Args:
        stack_name (str): Given name for the heat stack
        template (str): path of heat template
        pre_creates (str|list|None)
        environments (str|list|None)
        stack_timeout (int|str|None): stack creating timeout in minutes
        parameters (str|dict|None)
        param_files (str|dict|None)
        enable_rollback (bool|None)
        dry_run (bool|None)
        wait (bool|None)
        tags (str|list|None)
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        con_ssh (SSHClient): If None, active controller ssh will be used.
        timeout (int): automation timeout in seconds
        fail_ok (bool):
        cleanup (str|None): test fixture scope at which to auto-delete the
            stack; None to skip registering cleanup.

    Returns (tuple):
        (0, <stack_name>)  # created and reached CREATE_COMPLETE
        (1, <output>)      # cli rejected (fail_ok=True)
        (2, <msg>)         # created but did not reach CREATE_COMPLETE
            (fail_ok=True)

    """
    # Map cli option names to their values; parse_args formats them into a
    # cli string (presumably skipping None values and repeating list-valued
    # options via repeat_arg=True -- behavior defined in common.parse_args)
    args_dict = {
        '--template': template,
        '--environment': environments,
        '--timeout': stack_timeout,
        '--pre-create': pre_creates,
        '--enable-rollback': enable_rollback,
        '--parameter': parameters,
        '--parameter-file': param_files,
        '--wait': wait,
        '--tags': ','.join(tags) if isinstance(tags, (list, tuple)) else tags,
        '--dry-run': dry_run,
    }

    args = common.parse_args(args_dict, repeat_arg=True)
    LOG.info("Create Heat Stack {} with args: {}".format(stack_name, args))
    exitcode, output = cli.openstack('stack create', '{} {}'.
                                     format(args, stack_name),
                                     ssh_client=con_ssh, fail_ok=fail_ok,
                                     auth_info=auth_info, timeout=timeout)
    if exitcode > 0:
        return 1, output

    # Register the stack for automatic deletion at the given fixture scope
    if cleanup:
        ResourceCleanup.add('heat_stack', resource_id=stack_name, scope=cleanup)

    LOG.info("Wait for Heat Stack Status to reach CREATE_COMPLETE for "
             "stack %s", stack_name)
    res, msg = wait_for_heat_status(stack_name=stack_name,
                                    status=HeatStackStatus.CREATE_COMPLETE,
                                    auth_info=auth_info, fail_ok=fail_ok)
    if not res:
        return 2, msg

    LOG.info("Stack {} created successfully".format(stack_name))
    return 0, stack_name
def update_stack(stack_name, params_string, fail_ok=False, con_ssh=None,
                 auth_info=None, timeout=300):
    """
    Update the given heat stack for a given tenant.

    Args:
        con_ssh (SSHClient): If None, active controller ssh will be used.
        fail_ok (bool):
        params_string: Parameters to pass to the heat create cmd.
            ex: -f <stack.yaml> -P IMAGE=tis <stack_name>
        auth_info (dict): Tenant dict. If None, primary tenant will be used.
        stack_name (str): Given name for the heat stack
        timeout (int)

    Returns (tuple): Status and msg of the heat stack update.
        (0, <stack_name>)  # updated and reached UPDATE_COMPLETE
        (1, <output>)      # cli rejected (fail_ok=True)
        (2, <msg>)         # update accepted but did not reach
            UPDATE_COMPLETE (fail_ok=True)

    """
    if not params_string:
        raise ValueError("Parameters not provided.")

    LOG.info("Create Heat Stack %s", params_string)
    exitcode, output = cli.heat('stack-update', params_string,
                                ssh_client=con_ssh, fail_ok=fail_ok,
                                auth_info=auth_info)
    # NOTE(review): only exit code 1 counts as rejection here, whereas
    # delete_stack() uses 'exitcode > 1' -- confirm intended
    if exitcode == 1:
        LOG.warning("Create heat stack request rejected.")
        return 1, output

    LOG.info("Wait for Heat Stack Status to reach UPDATE_COMPLETE for stack %s",
             stack_name)
    res, msg = wait_for_heat_status(stack_name=stack_name,
                                    status=HeatStackStatus.UPDATE_COMPLETE,
                                    auth_info=auth_info, fail_ok=fail_ok,
                                    timeout=timeout)
    if not res:
        return 2, msg

    LOG.info("Stack {} updated successfully".format(stack_name))
    return 0, stack_name
def get_custom_heat_files(file_name, file_dir=HEAT_CUSTOM_TEMPLATES,
                          cli_client=None):
    """
    Ensure a custom heat template exists locally, downloading it from the
    test server if needed.

    Args:
        file_name (str): heat template file name
        file_dir (str): directory where the template should live
        cli_client: cli client to use; defaults to the standard cli client

    Returns (str): full path of the heat template file.

    Raises:
        exceptions.CommonError: the file is still missing after download.

    """
    if cli_client is None:
        cli_client = get_cli_client()

    file_path = '{}/{}'.format(file_dir, file_name)
    if not cli_client.file_exists(file_path=file_path):
        LOG.debug('Create userdata directory if not already exists')
        cli_client.exec_cmd('mkdir -p {}'.format(file_dir), fail_ok=False)

        source_file = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name
        dest_path = common.scp_from_test_server_to_user_file_dir(
            source_path=source_file, dest_dir=file_dir,
            dest_name=file_name, timeout=300, con_ssh=cli_client)
        if dest_path is None:
            raise exceptions.CommonError(
                "Heat template file {} does not exist after download".format(
                    file_path))

    return file_path

View File

@ -1,62 +0,0 @@
#
# Copyright (c) 2019, 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from keywords import common
from utils.tis_log import LOG
from utils.horizon.helper import HorizonDriver
from consts.auth import Tenant, CliAuth
from consts.proj_vars import ProjVar
def download_openrc_files(quit_driver=True):
    """
    Download openrc files from Horizon to <LOG_DIR>/horizon/.

    Args:
        quit_driver (bool): whether to quit the Horizon web driver after
            the downloads complete.

    Returns (list): local paths of the downloaded openrc files.

    """
    LOG.info("Download openrc files from horizon")
    local_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon')

    # Deferred import so the horizon page objects are only loaded when needed
    from utils.horizon.pages import loginpage
    rc_files = []
    login_pg = loginpage.LoginPage()
    login_pg.go_to_target_page()
    try:
        # Download one openrc file per known tenant
        for auth_info in (Tenant.get('admin'), Tenant.get('tenant1'), Tenant.get('tenant2')):
            user = auth_info['user']
            password = auth_info['password']
            openrc_file = '{}-openrc.sh'.format(user)
            home_pg = login_pg.login(user, password=password)
            home_pg.download_rc_v3()
            home_pg.log_out()
            # NOTE(review): download_rc_v3() presumably saves into
            # local_dir -- verified by the existence check below
            openrc_path = os.path.join(local_dir, openrc_file)
            assert os.path.exists(openrc_path), "{} not found after download".format(openrc_file)
            rc_files.append(openrc_path)
    finally:
        if quit_driver:
            HorizonDriver.quit_driver()

    LOG.info("openrc files are successfully downloaded to: {}".format(local_dir))
    return rc_files
def get_url(dnsname=False):
    """
    Build the base url of the Horizon application.

    Args:
        dnsname (bool): True to use the dns name of the host instead of
            the IP

    Returns (str): the url on the active controller to access Horizon

    """
    if dnsname:
        domain = common.get_dnsname(region='RegionOne')
    else:
        domain = common.get_lab_fip(region='RegionOne')

    if CliAuth.get_var('https'):
        prefix, port = 'https', 8443
    else:
        prefix, port = 'http', 8080

    return '{}://{}:{}'.format(prefix, domain, port)

File diff suppressed because it is too large Load Diff

View File

@ -1,198 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import json
import requests
from consts.auth import Tenant
from utils import table_parser, cli
from utils.tis_log import LOG
from consts.proj_vars import ProjVar
from keywords import keystone_helper
def get_ip_addr():
    """Return the floating IP address of the current lab."""
    lab = ProjVar.get_var('lab')
    return lab['floating ip']
def create_url(ip=None, port=None, version=None, extension=None):
    """
    Create a url with the given parameters in the form:
    http(s)://<ip address>:<port>/<version>/<extension>

    Args:
        ip (str): the main ip address. If set to None will be set to the
            lab's ip address by default.
        port (int): the port number to connect to.
        version (str): for REST API. version number, e.g. "v1", "v2.0"
        extension (str): extensions to add to the url

    Returns (str): a url created with the given parameters

    """
    # Scheme follows the deployment's https setting
    scheme = 'https' if keystone_helper.is_https_enabled() is True else 'http'
    url = '{}://{}'.format(scheme, ip if ip else get_ip_addr())

    # Append each optional component with its separator when provided
    for part, sep in ((port, ':'), (version, '/'), (extension, '/')):
        if part:
            url += '{}{}'.format(sep, part)
    return url
def get_user_token(field='id', con_ssh=None, auth_info=Tenant.get('admin')):
    """
    Return an authentication token for the given user.

    Args:
        field (str): column of the 'token issue' table to return
        con_ssh (SSHClient):
        auth_info: auth info used to issue the token

    Returns: the requested field of the issued token

    """
    out = cli.openstack('token issue', ssh_client=con_ssh,
                        auth_info=auth_info)[1]
    token_table = table_parser.table(out)
    return table_parser.get_value_two_col_table(token_table, field)
def get_request(url, headers, verify=True):
    """
    Send a GET request to the url.

    Args:
        url (str): url to send request to
        headers (dict): header to add to the request
        verify: Verify SSL certificate

    Returns (dict|None): parsed response body, or None on a non-200 status

    """
    LOG.info("Sending GET request to {}. Headers: {}".format(url, headers))
    resp = requests.get(url, headers=headers, verify=verify)

    if resp.status_code != requests.codes.ok:
        LOG.info("Error {}".format(resp.status_code))
        return None

    data = json.loads(resp.text)
    LOG.info("The returned data is: {}".format(data))
    return data
def post_request(url, data, headers, verify=True):
    """
    Send a POST request to the url.

    Args:
        url (str): url to send request to
        data (dict): data to be sent in the request body
        headers (dict): header to add to the request
        verify: Verify SSL certificate

    Returns (dict|None): parsed response body, or None on a non-200 status

    """
    if not isinstance(data, str):
        data = json.dumps(data)

    LOG.info("Sending POST request to {}. Headers: {}. Data: "
             "{}".format(url, headers, data))
    resp = requests.post(url, headers=headers, data=data, verify=verify)

    if resp.status_code != requests.codes.ok:
        LOG.info("Error {}".format(resp.status_code))
        return None

    parsed = json.loads(resp.text)
    LOG.info("The returned data is: {}".format(parsed))
    return parsed
def put_request(url, data, headers, verify=True):
    """
    Sends a PUT request to the url

    Args:
        url (str): url to send request to
        data (dict): data to be sent in the request body
        headers (dict): header to add to the request
        verify: Verify SSL certificate

    Returns (dict): The response for the request; None on a non-200 status

    """
    if not isinstance(data, str):
        data = json.dumps(data)

    LOG.info("Sending PUT request to {}. Headers: {}. Data: "
             "{}".format(url, headers, data))
    resp = requests.put(url, headers=headers, data=data, verify=verify)

    if resp.status_code == requests.codes.ok:
        data = json.loads(resp.text)
        LOG.info("The returned data is: {}".format(data))
        return data

    LOG.info("Error {}".format(resp.status_code))
    return None
def delete_request(url, headers, verify=True):
    """
    Sends a DELETE request to the url

    Args:
        url (str): url to send request to
        headers (dict): header to add to the request
        verify: Verify SSL certificate

    Returns (dict): The response for the request; None on a non-200 status

    """
    LOG.info("Sending DELETE request to {}. Headers: {}".format(url, headers))
    resp = requests.delete(url, headers=headers, verify=verify)

    if resp.status_code == requests.codes.ok:
        data = json.loads(resp.text)
        LOG.info("The returned data is: {}".format(data))
        return data

    LOG.info("Error {}".format(resp.status_code))
    return None
def patch_request(url, data, headers, verify=True):
    """
    Send a PATCH request to the url.

    Args:
        url (str): url to send request to
        data (dict|str|list): data to be sent in the request body
        headers (dict): header to add to the request
        verify: Verify SSL certificate

    Returns (dict|None): parsed response body, or None on a non-200 status

    """
    if not isinstance(data, str):
        data = json.dumps(data)

    LOG.info("Sending PATCH request to {}. Headers: {}. Data: "
             "{}".format(url, headers, data))
    resp = requests.patch(url, headers=headers, data=data, verify=verify)

    if resp.status_code != requests.codes.ok:
        LOG.info("Error {}".format(resp.status_code))
        return None

    parsed = json.loads(resp.text)
    LOG.info("The returned data is: {}".format(parsed))
    return parsed

View File

@ -1,623 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import re
from consts.auth import Tenant, HostLinuxUser
from consts.proj_vars import ProjVar
from utils import cli, exceptions, table_parser
from utils.clients.ssh import ControllerClient
from utils.tis_log import LOG
from keywords import common
def get_roles(field='ID', con_ssh=None, auth_info=Tenant.get('admin'),
              **kwargs):
    """
    Get values from the 'openstack role list' table.

    Args:
        field (str|list|tuple): header(s) of the column(s) to return
        con_ssh (SSHClient):
        auth_info: auth info used to run the cli
        **kwargs: header/value pair(s) to filter the table rows

    Returns (list): values of the requested field(s)

    """
    table_ = table_parser.table(cli.openstack('role list', ssh_client=con_ssh,
                                              auth_info=auth_info)[1])
    return table_parser.get_multi_values(table_, field, **kwargs)
def get_users(field='ID', con_ssh=None, auth_info=Tenant.get('admin'),
              **kwargs):
    """
    Return a list of user id(s) with given user name.

    Args:
        field (str|list|tuple): header(s) of the column(s) to return
        con_ssh (SSHClient):
        auth_info: auth info used to run the cli
        **kwargs: header/value pair(s) to filter the table rows,
            e.g. Name='admin'

    Returns (list): list of user id(s)

    """
    table_ = table_parser.table(cli.openstack('user list', ssh_client=con_ssh,
                                              auth_info=auth_info)[1])
    return table_parser.get_multi_values(table_, field, **kwargs)
def add_or_remove_role(add_=True, role='admin', project=None, user=None,
domain=None, group=None, group_domain=None,
project_domain=None, user_domain=None, inherited=None,
check_first=True, fail_ok=False,
con_ssh=None, auth_info=Tenant.get('admin')):
"""
Add or remove given role for specified user and tenant. e.g., add admin
role to tenant2 user on tenant2 project
Args:
add_(bool): whether to add or remove
role (str): an existing role from openstack role list
project (str): tenant name. When unset, the primary tenant name
will be used
user (str): an existing user that belongs to given tenant
domain (str): Include <domain> (name or ID)
group (str): Include <group> (name or ID)
group_domain (str): Domain the group belongs to (name or ID).
This can be used in case collisions between group names exist.
project_domain (str): Domain the project belongs to (name or ID).
This can be used in case collisions between project names exist.
user_domain (str): Domain the user belongs to (name or ID).
This can be used in case collisions between user names exist.
inherited (bool): Specifies if the role grant is inheritable to the
sub projects
check_first (bool): whether to check if role already exists for given
user and tenant
fail_ok (bool): whether to throw exception on failure
con_ssh (SSHClient): active controller ssh session
auth_info (dict): auth info to use to executing the add role cli
Returns (tuple):
"""
tenant_dict = {}
if project is None:
if auth_info and auth_info.get('platform'):
project = auth_info['tenant']
else:
tenant_dict = Tenant.get_primary()
project = tenant_dict['tenant']
if user is None:
user = tenant_dict.get('user', project)
if check_first:
existing_roles = get_role_assignments(role=role, project=project,
user=user,
user_domain=user_domain,
group=group,
group_domain=group_domain,
domain=domain,
project_domain=project_domain,
inherited=inherited,
effective_only=False,
con_ssh=con_ssh,
auth_info=auth_info)
if existing_roles:
if add_:
msg = "Role already exists with given criteria: {}".format(
existing_roles)
LOG.info(msg)
return -1, msg
else:
if not add_:
msg = "Role with given criteria does not exist. Do nothing."
LOG.info(msg)
return -1, msg
msg_str = 'Add' if add_ else 'Remov'
LOG.info(
"{}ing {} role to {} user under {} project".format(msg_str, role, user,
project))
sub_cmd = "--user {} --project {}".format(user, project)
if inherited is True:
sub_cmd += ' --inherited'
optional_args = {
'domain': domain,
'group': group,
'group-domain': group_domain,
'project-domain': project_domain,
'user-domain': user_domain,
}
for key, val in optional_args.items():
if val is not None:
sub_cmd += ' --{} {}'.format(key, val)
sub_cmd += ' {}'.format(role)
cmd = 'role add' if add_ else 'role remove'