diff --git a/.ansible-lint b/.ansible-lint deleted file mode 100644 index e7e2694e2..000000000 --- a/.ansible-lint +++ /dev/null @@ -1,37 +0,0 @@ -exclude_paths: - - releasenotes/ - - ci/playbooks/ -parseable: true -quiet: false -rulesdir: - - .ansible-lint_rules/ - -# Mock modules or roles in order to pass ansible-playbook --syntax-check -mock_modules: - - hiera # Modules can only be installed by rpm - - validations_read_ini # Modules can only be installed by rpm - - warn # Modules can only be installed by rpm - - tripleo_overcloud_role_list # Modules can only be installed by rpm - - tripleo_overcloud_role_show # Modules can only be installed by rpm -mock_roles: - - check_latest_packages_version - -skip_list: - # Lines should be no longer than 120 chars. - - '204' - # Using command rather module we have where - # we need to use curl or rsync. - - '303' - # shell tasks uses pipeline without pipefail, - # this requires refactoring, skip for now. - - '306' - # Tasks that run when changed should likely be handlers - # this requires refactoring, skip for now. - - '503' - # meta/main.yml should contain relevant info - - '701' - # Tags must contain lowercase letters and digits only - - '702' - # meta/main.yml default values should be changed - - '703' -verbosity: 1 diff --git a/.ansible-lint_rules/ValidationHasMetadataRule.py b/.ansible-lint_rules/ValidationHasMetadataRule.py deleted file mode 100644 index 1a04543c4..000000000 --- a/.ansible-lint_rules/ValidationHasMetadataRule.py +++ /dev/null @@ -1,197 +0,0 @@ -import os -import yaml -from ansiblelint.errors import MatchError -from ansiblelint.rules import AnsibleLintRule - - -class ValidationHasMetadataRule(AnsibleLintRule): - id = '750' - shortdesc = 'Validation playbook must have mandatory metadata' - - info = """ ---- -- hosts: localhost - vars: - metadata: - name: Validation Name - description: > - A full description of the validation. - groups: - - group1 - - group2 - - group3 - categories: - - category1 - - category2 - - category3 - products: - - product1 - - product2 - - product3 -""" - - description = ( - "The Validation playbook must have mandatory metadata:\n" - "```{}```".format(info) - ) - - severity = 'HIGH' - tags = ['metadata'] - - no_vars_found = "The validation playbook must contain a 'vars' dictionary" - no_meta_found = ( - "The validation playbook must contain " - "a 'metadata' dictionary under vars" - ) - no_classification_found = \ - "*metadata* should contain a list of {classification}" - - unknown_classifications_found = ( - "Unkown {classification_key}(s) '{unknown_classification}' found! " - "The official list of {classification_key} are '{known_classification}'. " - ) - - how_to_add_classification = { - 'groups': ( - "To add a new validation group, please add it in the groups.yaml " - "file at the root of the tripleo-validations project." - ) - } - - def get_classifications(self, classification='groups'): - """Returns a list classification names - defined for tripleo-validations in the '{classification}.yaml' file - located in the base repo directory. 
- """ - file_path = os.path.abspath(classification + '.yaml') - - try: - with open(file_path, "r") as definitions: - contents = yaml.safe_load(definitions) - except (PermissionError, OSError): - raise RuntimeError( - "{}.yaml file at '{}' inacessible.".format( - classification, - file_path)) - - results = [name for name, _ in contents.items()] - - return results - - def check_classification(self, metadata, path, - classification_key, strict=False): - """Check validatity of validation classifications, - such as groups, categories and products. - This one is tricky. - Empty lists in python evaluate as false - So we can't just check for truth value of returned list. - Instead we have to compare the returned value with `None`. - """ - classification = metadata.get(classification_key, None) - - if classification is None: - return MatchError( - message=self.no_classification_found.format( - classification=classification_key - ), - filename=path, - details=str(metadata)) - else: - if not isinstance(classification, list): - return MatchError( - message="*{}* must be a list".format(classification_key), - filename=path, - details=str(metadata)) - elif strict: - classifications = self.get_classifications(classification_key) - unknown_classifications = list( - set(classification) - set(classifications)) - if unknown_classifications: - message = self.unknown_classifications_found.format( - unknown_classification=unknown_classifications, - known_classification=classifications, - classification_key=classification_key) - message += self.how_to_add_classification.get(classification_key, "") - return MatchError( - message=message, - filename=path, - details=str(metadata)) - - def matchplay(self, file, data): - results = [] - path = file['path'] - - if file['type'] == 'playbook': - if path.startswith("playbooks/") \ - or "tripleo-validations/playbooks/" in path: - - # *hosts* line check - hosts = data.get('hosts', None) - if not hosts: - results.append( - MatchError( - message="No *hosts* key found in the playbook", - filename=path, - details=str(data))) - - # *vars* lines check - vars = data.get('vars', None) - if not vars: - results.append( - MatchError( - message=self.no_vars_found, - filename=path, - details=str(data))) - else: - if not isinstance(vars, dict): - results.append( - MatchError( - message='*vars* must be a dictionary', - filename=path, - details=str(data))) - - # *metadata* lines check - metadata = data['vars'].get('metadata', None) - if metadata: - if not isinstance(metadata, dict): - results.append( - MatchError( - message='*metadata* must be a dictionary', - filename=path, - details=str(data))) - else: - results.append( - MatchError( - message=self.no_meta_found, - filename=path, - details=str(data))) - - # *metadata>[name|description] lines check - for info in ['name', 'description']: - if not metadata.get(info, None): - results.append( - MatchError( - message='*metadata* must contain a %s key' % info, - filename=path, - details=str(data))) - continue - if not isinstance(metadata.get(info), str): - results.append( - MatchError( - message='*%s* should be a string' % info, - filename=path, - details=str(data))) - - #Checks for metadata we use to classify validations. 
- #Groups, categories and products - for classification in ['categories', 'products', 'groups']: - classification_error = self.check_classification( - metadata, - path, - classification, - strict=(classification == 'groups')) - - if classification_error: - results.append(classification_error) - - return results diff --git a/.config/molecule/Dockerfile b/.config/molecule/Dockerfile deleted file mode 100644 index a25d5e0ce..000000000 --- a/.config/molecule/Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -# Molecule managed -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -{% if item.registry is defined %} -FROM {{ item.registry.url }}/{{ item.image }} -{% else %} -FROM {{ item.image }} -{% endif %} - -RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ - elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install sudo python*-devel python*-dnf bash epel-release {{ item.pkg_extras | default('') }} && dnf clean all; \ - elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl python-setuptools bash {{ item.pkg_extras | default('') }} && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ - elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml {{ item.pkg_extras | default('') }} && zypper clean -a; \ - elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates {{ item.pkg_extras | default('') }}; \ - elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates {{ item.pkg_extras | default('') }} && xbps-remove -O; fi - -{% for pkg in item.easy_install | default([]) %} -# install pip for centos where there is no python-pip rpm in default repos -RUN easy_install {{ pkg }} -{% endfor %} - - -CMD ["sh", "-c", "while true; do sleep 10000; done"] diff --git a/.config/molecule/config.yml b/.config/molecule/config.yml deleted file mode 100644 index 924810e44..000000000 --- a/.config/molecule/config.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -# Tripleo-validations uses a shared molecule configuration file to avoid -# repetition. That configuration file is located at the repository level -# ({REPO}/.config/molecule/config.yml) and defines all the default values for -# all the molecule.yml files across all the roles. By default, the role-addition -# process will produce an empty molecule.yml inheriting this config.yml file. -# -# Any key defined in the role molecule.yml file will override values from this -# config.yml file. -# -# IMPORTANT: if you want to override the default values set here in this file, -# you will have to redefine them completely in your molecule.yml (at the role -# level) and add your extra configuration! 
-# -# For instance, if you need to add an extra package in your CentOS 8 Stream -# container, you will have to add the entire "platforms" key into your -# molecule.yml file and add your package name in the pkg_extras key. -# -# No merge will happen between your molecule.yml and this config.yml -# files. That's why you will have to redefine them completely. - -driver: - name: podman - -log: true - -platforms: - - name: centos - hostname: centos - image: centos/centos:stream8 - registry: - url: quay.io - dockerfile: ../../../../.config/molecule/Dockerfile - pkg_extras: python*-setuptools python*-pyyaml - volumes: - - /etc/ci/mirror_info.sh:/etc/ci/mirror_info.sh:ro - privileged: true - environment: &env - http_proxy: "{{ lookup('env', 'http_proxy') }}" - https_proxy: "{{ lookup('env', 'https_proxy') }}" - ulimits: &ulimit - - host - -provisioner: - name: ansible - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - log: true - options: - vvv: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" - -scenario: - test_sequence: - - destroy - - create - - prepare - - converge - - verify - - destroy - -verifier: - name: ansible diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index a719dc4e9..000000000 --- a/.coveragerc +++ /dev/null @@ -1,13 +0,0 @@ -[run] -branch = True -source = - tripleo_validations - library - lookup_plugins -omit = tripleo-validations/openstack/* - -[report] -ignore_errors = True -omit = - tripleo_validations/tests/* - tests/* diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 7a8a6b797..000000000 --- a/.dockerignore +++ /dev/null @@ -1,55 +0,0 @@ -# Docker/Podman image doesn't need any files that git doesn't track. -#Therefore the .dockerignore largely follows the structure of .gitignore. -# C extensions -*.so -# Packages -*.egg* -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 -# Installer logs -pip-log.txt -# Unit test / coverage reports -cover/ -.coverage* -!.coveragerc -.tox -nosetests.xml -.testrepository -.venv -.stestr/* -# Translations -*.mo -# Mr Developer -.mr.developer.cfg -.project -.pydevproject -# Complexity -output/*.html -output/*/index.html -# Sphinx -doc/build -doc/source/reference/api/ -# pbr generates these -AUTHORS -ChangeLog -# Editors -*~ -.*.swp -.*sw? -# Files created by releasenotes build -releasenotes/build -# Ansible specific -hosts -*.retry -#Vagrantfiles, since we are using docker -Vagrantfile.* diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 478ee492f..000000000 --- a/.gitignore +++ /dev/null @@ -1,67 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -cover/ -.coverage* -!.coveragerc -.tox -nosetests.xml -.testrepository -.venv -.stestr/* - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? 
-doc/source/validations-*.rst - -# Files created by releasenotes build -releasenotes/build - -# Ansible specific -hosts -*.retry - -# Roles testing -roles/roles.galaxy diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe0..000000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 34d282b5a..000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,53 +0,0 @@ ---- -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 - hooks: - - id: end-of-file-fixer - - id: trailing-whitespace - - id: mixed-line-ending - - id: check-byte-order-marker - - id: check-executables-have-shebangs - - id: check-merge-conflict - - id: check-symlinks - - id: debug-statements - files: .*\.(yaml|yml)$ - - - repo: https://github.com/pycqa/flake8 - rev: 3.9.1 - hooks: - - id: flake8 - additional_dependencies: [flake8-typing-imports==1.12.0] - entry: flake8 --ignore=E24,E121,E122,E123,E124,E126,E226,E265,E305,E402,F401,F405,E501,E704,F403,F841,W503,W605 - - - repo: https://github.com/adrienverge/yamllint.git - rev: v1.33.0 - hooks: - - id: yamllint - files: \.(yaml|yml)$ - types: [file, yaml] - entry: yamllint --strict -f parsable - - - repo: https://github.com/ansible-community/ansible-lint - rev: v5.3.2 - hooks: - - id: ansible-lint - always_run: true - pass_filenames: false - additional_dependencies: - - 'ansible-core<2.12' - verbose: true - entry: ansible-lint --force-color -p -v - - - repo: https://github.com/openstack-dev/bashate.git - rev: 2.0.0 - hooks: - - id: bashate - entry: bashate --error . --verbose --ignore=E006,E040 - # Run bashate check for all bash scripts - # Ignores the following rules: - # E006: Line longer than 79 columns (as many scripts use jinja - # templating, this is very difficult) - # E040: Syntax error determined using `bash -n` (as many scripts - # use jinja templating, this will often fail and the syntax - # error will be discovered in execution anyway) diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 5ed95bafd..000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${TEST_PATH:-./tripleo_validations/tests} -top_dir=./ diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 6d83b3c4e..000000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/.yamllint b/.yamllint deleted file mode 100644 index 9f3db7e6a..000000000 --- a/.yamllint +++ /dev/null @@ -1,14 +0,0 @@ ---- -extends: default - -rules: - line-length: - # matches hardcoded 160 value from ansible-lint - max: 160 - indentation: - spaces: consistent - indent-sequences: true - check-multi-line-strings: false -ignore: | - zuul.d/molecule.yaml - releasenotes/notes/*.yaml diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index fb406eb21..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,56 +0,0 @@ -For general information on contributing to OpenStack, please check out the -`contributor guide `_ to get started. -It covers all the basics that are common to all OpenStack projects: the accounts -you need, the basics of interacting with our Gerrit review system, how we -communicate as a community, etc. 
- -The information below will cover the project specific information you need to get started with TripleO. - -Documentation -============= -Documentation for the TripleO project can be found `here `_ - -Communication -============= -* IRC channel ``#validation-framework`` at `Libera`_ (For all subject-matters) -* IRC channel ``#tripleo`` at `OFTC`_ (OpenStack and TripleO discussions) -* Mailing list (prefix subjects with ``[tripleo][validations]`` for faster responses) - http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss - -.. _Libera: https://libera.chat/ -.. _OFTC: https://www.oftc.net/ - -Contacting the Core Team -======================== -Please refer to the `TripleO Core Team -`_ contacts. - -Bug Tracking -============= -We track our tasks in `Launchpad `_ and in -`StoryBoard `_ - -Reporting a Bug -=============== -You found an issue and want to make sure we are aware of it? You can do so on -`Launchpad `__. Please, add the -validations tag to your bug. - -More info about Launchpad usage can be found on `OpenStack docs page -`_ - -Getting Your Patch Merged -========================= -All changes proposed to the TripleO requires two ``Code-Review +2`` votes from -TripleO core reviewers before one of the core reviewers can approve patch by -giving ``Workflow +1`` vote. - -Project Team Lead Duties -======================== -All common PTL duties are enumerated in the `PTL guide -`_. - -The Release Process for TripleO is documented in `Release Management -`_. - -Documentation for the TripleO project can be found `here `_ diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index f2eade2e3..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -tripleo-validations Style Commandments -=============================================== - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db85882..000000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index c978a52da..000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc diff --git a/README.rst b/README.rst index 13f60c300..4ee2c5f13 100644 --- a/README.rst +++ b/README.rst @@ -1,22 +1,10 @@ -.. image:: https://governance.openstack.org/tc/badges/tripleo-validations.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +This project is no longer maintained. -.. Change things from this point on +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -A collection of Ansible roles and playbooks to detect and report potential -issues during TripleO deployments. - -The validations will help detect issues early in the deployment process and -prevent field engineers from wasting time on misconfiguration or hardware -issues in their environments. - -All validations are written in Ansible and are written in a way that's -consumable by the `Validation Framework Command Line Interface (CLI) -`_ -or by Ansible directly. - -* Free software: Apache license -* Documentation: https://docs.openstack.org/tripleo-validations/latest/ -* Release notes: https://docs.openstack.org/releasenotes/tripleo-validations/ -* Source: https://opendev.org/openstack/tripleo-validations -* Bugs: https://storyboard.openstack.org/#!/project/openstack/tripleo-validations +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. 
diff --git a/ansible-collections-requirements.yml b/ansible-collections-requirements.yml deleted file mode 100644 index d1a63e2ec..000000000 --- a/ansible-collections-requirements.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -collections: - - containers.podman - - community.general - - community.crypto - - ansible.posix - - openstack.cloud diff --git a/ansible-test-env.rc b/ansible-test-env.rc deleted file mode 100644 index 50b948308..000000000 --- a/ansible-test-env.rc +++ /dev/null @@ -1,27 +0,0 @@ -export TRIPLEO_VALIDATIONS_WORKPATH="$(dirname $(readlink -f ${BASH_SOURCE[0]}))" -export ANSIBLE_STDOUT_CALLBACK=debug -export ANSIBLE_CALLBACK_PLUGINS="${TRIPLEO_VALIDATIONS_WORKPATH}/callback_plugins" - -ANSIBLE_LIBRARY="${TRIPLEO_VALIDATIONS_WORKPATH}/library" -export ANSIBLE_LIBRARY="${ANSIBLE_LIBRARY}:${TRIPLEO_VALIDATIONS_WORKPATH}/roles/roles.galaxy/validations-common/validations_common/library" -export ANSIBLE_LOOKUP_PLUGINS="${TRIPLEO_VALIDATIONS_WORKPATH}/lookup_plugins" - -export ANSIBLE_ROLES_PATH="${TRIPLEO_VALIDATIONS_WORKPATH}/roles" -export ANSIBLE_ROLES_PATH="${ANSIBLE_ROLES_PATH}:${TRIPLEO_VALIDATIONS_WORKPATH}/roles/roles.galaxy/tripleo-ansible/tripleo_ansible/roles" - -export ANSIBLE_INVENTORY="${TRIPLEO_VALIDATIONS_WORKPATH}/tests/hosts.ini" -export ANSIBLE_RETRY_FILES_ENABLED="0" -export ANSIBLE_LOAD_CALLBACK_PLUGINS="1" -export ANSIBLE_HOST_KEY_CHECKING=False - -function unset-ansible-test-env { - for i in $(env | grep ANSIBLE_ | awk -F'=' '{print $1}'); do - unset ${i} - done - unset TRIPLEO_VALIDATIONS_WORKPATH - echo -e "Ansible test environment deactivated.\n" - unset -f unset-ansible-test-env -} - -echo -e "Ansible test environment is now active" -echo -e "Run 'unset-ansible-test-env' to deactivate.\n" diff --git a/ansible.cfg b/ansible.cfg deleted file mode 100644 index 6fd4ac950..000000000 --- a/ansible.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[defaults] -retry_files_enabled = False -host_key_checking=False -stdout_callback = default diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab818..000000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 9e8b71c3f..000000000 --- a/bindep.txt +++ /dev/null @@ -1,41 +0,0 @@ -# This file facilitates OpenStack-CI package installation -# before the execution of any tests. -# -# See the following for details: -# - https://docs.openstack.org/infra/bindep/ -# - https://opendev.org/opendev/bindep/ -# -# Even if the role does not make use of this facility, it -# is better to have this file empty, otherwise OpenStack-CI -# will fall back to installing its default packages which -# will potentially be detrimental to the tests executed. 
- -# The gcc compiler -gcc - -# Base requirements for RPM distros -gcc-c++ [platform:rpm] -git [platform:rpm] -libffi-devel [platform:rpm] -openssl-devel [platform:rpm] -podman [platform:rpm] -python3-devel [platform:rpm !platform:rhel-7 !platform:centos-7] -PyYAML [platform:rpm !platform:rhel-8 !platform:centos-8 !platform:fedora] -python3-pyyaml [platform:rpm !platform:rhel-7 !platform:centos-7] -python3-dnf [platform:rpm !platform:rhel-7 !platform:centos-7] - -# RH Mechanisms -python-rhsm-certificates [platform:redhat] - -# SELinux cent7 -libselinux-python3 [platform:rpm !platform:rhel-8 !platform:centos-8] -libsemanage-python3 [platform:redhat !platform:rhel-8 !platform:centos-8] -# SELinux cent8 -python3-libselinux [platform:rpm !platform:rhel-7 !platform:centos-7] -python3-libsemanage [platform:redhat !platform:rhel-7 !platform:centos-7] - -# Required for compressing collected log files in CI -gzip - -# Required to build language docs -gettext diff --git a/callback_plugins/.keep b/callback_plugins/.keep deleted file mode 100644 index e69de29bb..000000000 diff --git a/ci/playbooks/pre.yml b/ci/playbooks/pre.yml deleted file mode 100644 index 5867011df..000000000 --- a/ci/playbooks/pre.yml +++ /dev/null @@ -1,111 +0,0 @@ ---- -- hosts: all - pre_tasks: - - name: Set project path fact - set_fact: - tripleo_validations_project_path: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/tripleo-validations'].src_dir }}" - - - name: Ensure output dirs - file: - path: "{{ ansible_user_dir }}/zuul-output/logs" - state: directory - - - name: Ensure pip is available - include_role: - name: ensure-pip - - - name: Ensure virtualenv is available - include_role: - name: ensure-virtualenv - - - name: Setup bindep - pip: - name: "bindep" - virtualenv: "{{ ansible_user_dir }}/test-python" - virtualenv_command: "{{ ensure_pip_virtualenv_command }}" - virtualenv_site_packages: true - - - name: Set containers module to 3.0 - become: true - shell: | - dnf module disable container-tools:rhel8 -y - dnf module enable container-tools:3.0 -y - dnf clean metadata - - - name: Run bindep - shell: |- - . {{ ansible_user_dir }}/test-python/bin/activate - {{ tripleo_validations_project_path }}/scripts/bindep-install - become: true - changed_when: false - - - name: Ensure a recent version of pip is installed in virtualenv - pip: - name: "pip>=19.1.1" - virtualenv: "{{ ansible_user_dir }}/test-python" - virtualenv_command: "{{ ensure_pip_virtualenv_command }}" - - - name: Setup test-python - pip: - requirements: "{{ tripleo_validations_project_path }}/molecule-requirements.txt" - virtualenv: "{{ ansible_user_dir }}/test-python" - virtualenv_command: "{{ ensure_pip_virtualenv_command }}" - virtualenv_site_packages: true - - - name: Set up collections - command: "{{ ansible_user_dir }}/test-python/bin/ansible-galaxy install -fr {{ tripleo_validations_project_path }}/ansible-collections-requirements.yml" - - - name: Display test-python virtualenv package versions - shell: |- - . 
{{ ansible_user_dir }}/test-python/bin/activate - pip freeze - - - name: Basic ci setup - become: true - block: - - name: Ensure ci directories - file: - path: "/etc/ci" - state: "directory" - - - name: Ensure ci mirror file - file: - path: "/etc/ci/mirror_info.sh" - state: "touch" - - - name: Set an appropriate fs.file-max - sysctl: - name: fs.file-max - value: 2048000 - sysctl_set: true - state: present - reload: true - - - name: Set container_manage_cgroup boolean - seboolean: - name: container_manage_cgroup - state: true - persistent: true - failed_when: false - - - name: Create limits file for containers - copy: - content: | - * soft nofile 102400 - * hard nofile 204800 - * soft nproc 2048 - * hard nproc 4096 - dest: /etc/security/limits.d/containers.conf - - - name: Reset ssh connection - meta: reset_connection - tasks: - - name: Get necessary git repos - git: - repo: https://opendev.org/openstack/{{ item }} - dest: "{{ tripleo_validations_project_path }}/roles/roles.galaxy/{{ item }}" - version: master - force: true - with_items: - - tripleo-ansible - - validations-common diff --git a/ci/playbooks/run-local.yml b/ci/playbooks/run-local.yml deleted file mode 100644 index af5295db3..000000000 --- a/ci/playbooks/run-local.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- hosts: all - tasks: - - name: set basic zuul fact - set_fact: - zuul: - projects: - "opendev.org/openstack/tripleo-validations": - src_dir: "{{ tripleo_src }}" - ansible_connection: ssh - -- import_playbook: pre.yml - -- import_playbook: run.yml diff --git a/ci/playbooks/run.yml b/ci/playbooks/run.yml deleted file mode 100644 index bfba7b924..000000000 --- a/ci/playbooks/run.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- - -- hosts: all - environment: - ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/zuul-output/logs/ansible-execution.log" - pre_tasks: - - - name: Set project path fact - set_fact: - tripleo_validations_project_path: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/tripleo-validations'].src_dir }}" - - - name: Set roles path fact - set_fact: - tripleo_validations_roles_paths: - - "{{ tripleo_validations_project_path }}/roles/roles.galaxy/tripleo-ansible/tripleo_ansible/roles" - - "{{ tripleo_validations_project_path }}/roles/roles.galaxy/validations-common/validations_common/roles" - - "{{ tripleo_validations_project_path }}/roles" - - "/usr/share/ansible/roles" - - - name: Set library path fact - set_fact: - tripleo_validations_library_paths: - - "{{ tripleo_validations_project_path }}/roles/roles.galaxy/validations-common/validations_common/library" - - "{{ tripleo_validations_project_path }}/library" - - "/usr/share/ansible/library" - - tasks: - - name: Run role test job - shell: |- - . {{ ansible_user_dir }}/test-python/bin/activate - . 
{{ tripleo_validations_project_path }}/ansible-test-env.rc - pytest --color=yes \ - --html={{ ansible_user_dir }}/zuul-output/logs/reports.html \ - --self-contained-html \ - --ansible-args='{{ tripleo_job_ansible_args | default("") }}' \ - {{ tripleo_validations_project_path }}/tests/test_molecule.py - args: - chdir: "{{ tripleo_validations_project_path }}/roles/{{ tripleo_validations_role_name }}" - executable: /bin/bash - environment: - ANSIBLE_ROLES_PATH: "{{ tripleo_validations_roles_paths | join(':') }}" - ANSIBLE_LIBRARY: "{{ tripleo_validations_library_paths | join(':') }}" diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index ba22205b1..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -# this is required for the docs build jobs -sphinx>=2.0.0,!=2.1.0 # BSD -openstackdocstheme>=2.2.2 # Apache-2.0 -reno>=3.1.0 # Apache-2.0 -doc8>=0.8.0 # Apache-2.0 -bashate>=0.6.0 # Apache-2.0 -ruamel.yaml>=0.15.5 # MIT diff --git a/doc/source/_exts/ansible-autodoc.py b/doc/source/_exts/ansible-autodoc.py deleted file mode 100644 index cc815677a..000000000 --- a/doc/source/_exts/ansible-autodoc.py +++ /dev/null @@ -1,402 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import imp -import os - -from docutils import core -from docutils import nodes -from docutils.parsers.rst import Directive -from docutils.parsers import rst -from docutils.writers.html4css1 import Writer - -from sphinx import addnodes - -import yaml -from ruamel.yaml import YAML as RYAML - -try: - import io - StringIO = io.StringIO -except ImportError: - import StringIO - - -class DocYaml(RYAML): - def _license_filter(self, data): - """This will filter out our boilerplate license heading in return data. - - The filter is used to allow documentation we're creating in variable - files to be rendered more beautifully. 
- """ - lines = list() - mark = True - for line in data.splitlines(): - if '# Copyright' in line: - mark = False - if mark: - lines.append(line) - if '# under the License' in line: - mark = True - return '\n'.join(lines) - - def dump(self, data, stream=None, **kw): - if not stream: - stream = StringIO() - try: - RYAML.dump(self, data, stream, **kw) - return self._license_filter(stream.getvalue().strip()) - finally: - stream.close() - - -DOCYAML = DocYaml() -DOCYAML.default_flow_style = False - - -class AnsibleAutoPluginDirective(Directive): - directive_name = "ansibleautoplugin" - has_content = True - option_spec = { - 'module': rst.directives.unchanged, - 'role': rst.directives.unchanged, - 'documentation': rst.directives.unchanged, - 'examples': rst.directives.unchanged - } - - @staticmethod - def _render_html(source): - return core.publish_parts( - source=source, - writer=Writer(), - writer_name='html', - settings_overrides={'no_system_messages': True} - ) - - def make_node(self, title, contents, content_type=None): - section = self._section_block(title=title) - if not content_type: - # Doc section - for content in contents['docs']: - for paragraph in content.split('\n'): - retnode = nodes.paragraph() - retnode.append(self._raw_html_block(data=paragraph)) - section.append(retnode) - - # Options Section - options_list = nodes.field_list() - options_section = self._section_block(title='Options') - for key, value in contents['options'].items(): - options_list.append( - self._raw_fields( - data=value['description'], - field_name=key - ) - ) - else: - options_section.append(options_list) - section.append(options_section) - - # Authors Section - authors_list = nodes.field_list() - authors_list.append( - self._raw_fields( - data=contents['author'] - ) - ) - authors_section = self._section_block(title='Authors') - authors_section.append(authors_list) - section.append(authors_section) - - elif content_type == 'yaml': - for content in contents: - section.append( - self._literal_block( - data=content, - dump_data=False - ) - ) - - return section - - @staticmethod - def load_module(filename): - return imp.load_source('__ansible_module__', filename) - - @staticmethod - def build_documentation(module): - docs = DOCYAML.load(module.DOCUMENTATION) - doc_data = dict() - doc_data['docs'] = docs['description'] - doc_data['author'] = docs.get('author', list()) - doc_data['options'] = docs.get('options', dict()) - return doc_data - - @staticmethod - def build_examples(module): - examples = DOCYAML.load(module.EXAMPLES) - return_examples = list() - for example in examples: - return_examples.append(DOCYAML.dump([example])) - return return_examples - - def _raw_html_block(self, data): - html = self._render_html(source=data) - return nodes.raw('', html['body'], format='html') - - def _raw_fields(self, data, field_name=''): - body = nodes.field_body() - if isinstance(data, list): - for item in data: - body.append(self._raw_html_block(data=item)) - else: - body.append(self._raw_html_block(data=data)) - - field = nodes.field() - field.append(nodes.field_name(text=field_name)) - field.append(body) - return field - - @staticmethod - def _literal_block(data, language='yaml', dump_data=True): - if dump_data: - literal = nodes.literal_block( - text=DOCYAML.dump(data) - ) - else: - literal = nodes.literal_block(text=data) - literal['language'] = 'yaml' - return literal - - @staticmethod - def _section_block(title, text=None): - section = nodes.section( - title, - nodes.title(text=title), - 
ids=[nodes.make_id('-'.join(title))], - ) - if text: - section_body = nodes.field_body() - section_body.append(nodes.paragraph(text=text)) - section.append(section_body) - - return section - - def _yaml_section(self, to_yaml_data, section_title, section_text=None): - yaml_section = self._section_block( - title=section_title, - text=section_text - ) - yaml_section.append(self._literal_block(data=to_yaml_data)) - return yaml_section - - def _run_role(self, role): - section = self._section_block( - title="Role Documentation", - text='Welcome to the "{}" role documentation.'.format( - os.path.basename(role) - ), - ) - - molecule_defaults = None - abspath_role = os.path.dirname(os.path.abspath(role)) - molecule_shared_file = os.path.join( - os.path.dirname(abspath_role), ".config/molecule/config.yml" - ) - - if os.path.exists(molecule_shared_file): - with open(molecule_shared_file) as msf: - molecule_defaults = DOCYAML.load(msf.read()) - - defaults_file = os.path.join(role, "defaults", "main.yml") - if os.path.exists(defaults_file): - with open(defaults_file) as f: - role_defaults = DOCYAML.load(f.read()) - section.append( - self._yaml_section( - to_yaml_data=role_defaults, - section_title="Role Defaults", - section_text="This section highlights all of the defaults" - ' and variables set within the "{}"' - " role.".format(os.path.basename(role)), - ) - ) - - vars_path = os.path.join(role, "vars") - if os.path.exists(vars_path): - for v_file in os.listdir(vars_path): - vars_file = os.path.join(vars_path, v_file) - with open(vars_file) as f: - vars_values = DOCYAML.load(f.read()) - section.append( - self._yaml_section( - to_yaml_data=vars_values, - section_title="Role Variables: {}".format(v_file), - ) - ) - - test_list = nodes.field_list() - test_section = self._section_block( - title="Molecule Scenarios", - text='Molecule is being used to test the "{}" role. The' - " following section highlights the drivers in service" - " and provides an example playbook showing how the role" - " is leveraged.".format(os.path.basename(role)), - ) - - molecule_path = os.path.join(role, "molecule") - if os.path.exists(molecule_path): - for test in os.listdir(molecule_path): - molecule_section = self._section_block( - title="Scenario: {}".format(test) - ) - molecule_file = os.path.join(molecule_path, test, "molecule.yml") - if not os.path.exists(molecule_file): - continue - - with open(molecule_file) as f: - molecule_conf = DOCYAML.load(f.read()) - - # if molecule.yml file from the scenarios, we get the - # information from the molecule shared configuration file. - if not molecule_conf: - molecule_conf = molecule_defaults - - # Now that we use a shared molecule configuration file, the - # molecule.yml file in the role scenarios could be empty or - # contains only overriding keys. 
- driver_data = molecule_conf.get('driver', - molecule_defaults.get('driver')) - - if driver_data: - molecule_section.append( - nodes.field_name(text="Driver: {}".format(driver_data["name"])) - ) - - options = driver_data.get("options") - if options: - molecule_section.append( - self._yaml_section( - to_yaml_data=options, section_title="Molecule Options" - ) - ) - - platforms_data = molecule_conf.get('platforms', - molecule_defaults.get('platforms')) - - if platforms_data: - molecule_section.append( - self._yaml_section( - to_yaml_data=platforms_data, - section_title="Molecule Platform(s)", - ) - ) - - default_playbook = [molecule_path, test, "converge.yml"] - - provisioner_data = molecule_conf.get('provisioner', - molecule_defaults.get('provisioner')) - - if provisioner_data: - inventory = provisioner_data.get('inventory') - if inventory: - molecule_section.append( - self._yaml_section( - to_yaml_data=inventory, - section_title="Molecule Inventory", - ) - ) - - try: - converge = provisioner_data['playbooks']['converge'] - default_playbook = default_playbook[:-1] + [converge] - except KeyError: - pass - - molecule_playbook_path = os.path.join(*default_playbook) - - with open(molecule_playbook_path) as f: - molecule_playbook = DOCYAML.load(f.read()) - molecule_section.append( - self._yaml_section( - to_yaml_data=molecule_playbook, - section_title="Example {} playbook".format(test), - ) - ) - test_list.append(molecule_section) - else: - test_section.append(test_list) - section.append(test_section) - - self.run_returns.append(section) - - # Document any libraries nested within the role - library_path = os.path.join(role, "library") - if os.path.exists(library_path): - self.options['documentation'] = True - self.options['examples'] = True - for lib in os.listdir(library_path): - if lib.endswith(".py"): - self._run_module( - module=self.load_module( - filename=os.path.join(library_path, lib) - ), - module_title="Embedded module: {}".format(lib), - example_title="Examples for embedded module", - ) - - def _run_module(self, module, module_title="Module Documentation", - example_title="Example Tasks"): - if self.options.get('documentation'): - docs = self.build_documentation(module=module) - self.run_returns.append( - self.make_node( - title=module_title, - contents=docs - ) - ) - - if self.options.get('examples'): - examples = self.build_examples(module=module) - self.run_returns.append( - self.make_node( - title=example_title, - contents=examples, - content_type='yaml' - ) - ) - - def run(self): - self.run_returns = list() - - if self.options.get('module'): - module = self.load_module(filename=self.options['module']) - self._run_module(module=module) - - if self.options.get('role'): - self._run_role(role=self.options['role']) - - return self.run_returns - - -def setup(app): - classes = [ - AnsibleAutoPluginDirective, - ] - for directive_class in classes: - app.add_directive(directive_class.directive_name, directive_class) - - return {'version': '0.2'} diff --git a/doc/source/_exts/generate_validations_doc.py b/doc/source/_exts/generate_validations_doc.py deleted file mode 100644 index b85ef11c0..000000000 --- a/doc/source/_exts/generate_validations_doc.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from glob import glob -import os -import yaml - -DEFAULT_METADATA = { - 'name': 'Unnamed', - 'description': 'No description', - 'groups': [], -} - - -def get_validation_metadata(validation, key): - """Returns metadata dictionary""" - try: - return validation['vars']['metadata'][key] - except KeyError: - return DEFAULT_METADATA.get(key) - - -def get_include_role(validation): - """Returns Included Role""" - try: - if 'tasks' in validation: - return validation['tasks'][0]['include_role']['name'] - return validation['roles'][0] - except KeyError: - return list() - - -def get_remaining_metadata(validation): - try: - return {k: v for k, v in validation['vars']['metadata'].items() - if k not in ['name', 'description', 'groups']} - except KeyError: - return dict() - - -def get_validation_parameters(validation): - """Returns parameters""" - try: - return {k: v for k, v in validation['vars'].items() - if k != 'metadata'} - except KeyError: - return dict() - - -def build_summary(group, validations): - """Creates validations documentation contents by group""" - entries = [ - "* :ref:`{}`: {}".format(group + '_' + validation['id'], - validation['name']) - for validation in validations - ] - with open('doc/source/validations-{}.rst'.format(group), 'w') as f: - f.write("\n".join(entries)) - f.write("\n") - - -def format_dict(my_dict): - return ''.join(['\n\n - **{}**: {}'.format(key, value) - for key, value in my_dict.items()]) - - -def role_doc_entry(role_name, local_roles): - """Generates Documentation entry - - If the included role isn't hosted on tripleo-validations, we point to the - validations-common role documentation. Otherwise, it generates a classical - local toctree. - """ - local_role_doc = (".. toctree::\n\n" - " roles/role-{}".format(role_name)) - doc_base_url = "https://docs.openstack.org/validations-common/latest/roles" - external_role = \ - ("- `{role} <{baseurl}/role-{role}.html>`_ " - "from `openstack/validations-common " - "`_" - "".format(role=role_name, - baseurl=doc_base_url)) - - if role_name not in local_roles: - return external_role - return local_role_doc - - -def build_detail(group, validations, local_roles): - entries = ['{}\n{}\n'.format(group, len(group) * '=')] - entries = entries + [ - """.. _{label}: - -{title} -{adornment} - -{name}. - -{desc} - -- **hosts**: {hosts} -- **groups**: {groups} -- **parameters**:{parameters} -- **roles**: {roles} - -Role documentation - -{roledoc} -""" - .format(label=(group + '_' + validation['id']), - title=validation['id'], - adornment=(len(validation['id']) * '-'), - name=validation['name'], - desc=validation['description'], - groups=', '.join(validation['groups']), - hosts=validation['hosts'], - parameters=format_dict(validation['parameters']), - roles=validation['roles'], - roledoc=role_doc_entry(validation['roles'], local_roles) - ) - for validation in validations] - with open('doc/source/validations-{}-details.rst'.format(group), 'w') as f: - f.write("\n".join(entries)) - - -def build_groups_detail(groups): - entries = [ - """ -**{group}**: - -*{desc}* - -.. 
include:: {link} -""" - .format(group=grp.capitalize(), - link="validations-{}.rst".format(grp), - desc=desc[0].get('description', None), - ) - for grp, desc in sorted(groups.items())] - with open('doc/source/validations-groups.rst', 'w') as f: - f.write("\n".join(entries)) - - -def parse_groups_file(): - contents = {} - groups_file_path = os.path.abspath('groups.yaml') - - if os.path.exists(groups_file_path): - with open(groups_file_path, "r") as grps: - contents = yaml.safe_load(grps) - - return contents - - -def get_groups(): - # Seed it with the known groups from groups.yaml file. - groups = set() - contents = parse_groups_file() - - for group_name in contents.keys(): - groups.add(group_name) - - return groups, contents - - -def get_local_roles(path): - """Returns a list of local Ansible Roles""" - return next(os.walk(path))[1] - - -def setup(app): - group_name, group_info = get_groups() - build_groups_detail(group_info) - - local_roles = get_local_roles(os.path.abspath('roles')) - - validations = [] - for validation_path in sorted(glob('playbooks/*.yaml')): - with open(validation_path) as f: - loaded_validation = yaml.safe_load(f.read())[0] - for group in get_validation_metadata(loaded_validation, 'groups'): - group_name.add(group) - validations.append({ - 'hosts': loaded_validation['hosts'], - 'parameters': get_validation_parameters(loaded_validation), - 'id': os.path.splitext( - os.path.basename(validation_path))[0], - 'name': get_validation_metadata(loaded_validation, 'name'), - 'groups': get_validation_metadata(loaded_validation, 'groups'), - 'description': get_validation_metadata(loaded_validation, - 'description'), - 'metadata': get_remaining_metadata(loaded_validation), - 'roles': get_include_role(loaded_validation) - }) - - for group in group_name: - validations_in_group = [validation for validation - in validations - if group in validation['groups']] - build_detail(group, validations_in_group, local_roles) - build_summary(group, validations_in_group) diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index dd6578d4b..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -from unittest import mock - -# Add the project -sys.path.insert(0, os.path.abspath('../..')) -# Add the extensions -sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - 'generate_validations_doc', - 'ansible-autodoc', - 'openstackdocstheme' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. 
-# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# Mocking imports that could cause build failure -autodoc_mock_imports = ['ansible'] -sys.modules['ansible.module_utils.basic'] = mock.Mock() - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '2019, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = 'tripleo-validationsdoc' -html_theme = 'openstackdocs' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - 'tripleo-validations.tex', - 'tripleo-validations Documentation', - 'OpenStack Foundation', 'manual'), -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/tripleo-validations' -openstackdocs_bug_project = 'tripleo' -openstackdocs_bug_tag = 'documentation' diff --git a/doc/source/contributing/contributing.rst b/doc/source/contributing/contributing.rst deleted file mode 100644 index bb4b35dc2..000000000 --- a/doc/source/contributing/contributing.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -So You Want to Contribute... -============================ - -.. include:: ../../../CONTRIBUTING.rst diff --git a/doc/source/contributing/developer_guide.rst b/doc/source/contributing/developer_guide.rst deleted file mode 100644 index 1ee9d03a0..000000000 --- a/doc/source/contributing/developer_guide.rst +++ /dev/null @@ -1,810 +0,0 @@ -Developer's Guide -================= - -Writing Validations -------------------- - -All validations are written in standard Ansible with a couple of extra -meta-data to provide information to the validation framework. - -For people not familiar with Ansible, get started with their `excellent -documentation `_. - -After the generic explanation on writing validations is a couple of concrete -examples. - -Directory Structure -~~~~~~~~~~~~~~~~~~~ - -All validations consist of an Ansible role located in the ``roles`` directory -and a playbook located in the ``playbooks`` directory. - -- the ``playbooks`` one contains all the validations playbooks you can run; -- the ``lookup_plugins`` one is for custom Ansible look up plugins available - to the validations; -- the ``library`` one is for custom Ansible modules available to the - validations; -- the ``roles`` one contains all the necessary Ansible roles to validate your - TripleO deployment; - -Here is what the tree looks like:: - - playbooks/ - ├── first_validation.yaml - ├── second_validation.yaml - ├── third_validation.yaml - └── etc... - library/ - ├── another_module.py - ├── some_module.py - └── etc... - lookup_plugins/ - ├── one_lookup_plugin.py - ├── another_lookup_plugin.py - └── etc... - roles - ├── first_role - ├── second_role - └── etc... 
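In practice, each validation pairs one thin playbook with one role carrying the actual tasks, typically named after the validation. A sketch of that pairing, using the ``undercloud-ram`` example developed later in this guide (files such as ``defaults`` and ``meta`` are omitted here)::

    playbooks/undercloud-ram.yaml    # hosts, metadata and the role include
    roles/undercloud-ram/
    └── tasks
        └── main.yml                 # the actual checks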
- - -Sample Validation -~~~~~~~~~~~~~~~~~ - -Each validation is an Ansible playbook located in the ``playbooks`` directory -calling his own Ansible role located in the ``roles`` directory. Each playbook -have some metadata. Here is what a minimal validation would look like: - -.. code-block:: yaml - - --- - - hosts: undercloud - vars: - metadata: - name: Hello World - description: This validation prints Hello World! - roles: - - hello-world - -It should be saved as ``playbooks/hello_world.yaml``. - -As shown here, the validation playbook requires three top-level directives: -``hosts``, ``vars -> metadata`` and ``roles``. - -``hosts`` specify which nodes to run the validation on. Based on the -``hosts.sample`` structure, the options can be ``all`` (run on all nodes), -``undercloud``, ``allovercloud`` (all overcloud nodes), ``controller`` and -``compute``. - -The ``vars`` section serves for storing variables that are going to be -available to the Ansible playbook. The validations API uses the ``metadata`` -section to read each validation's name and description. These values are then -reported by the API. - -The validations can be grouped together by specifying a ``groups`` metadata. -Groups function similar to tags and a validation can thus be part of many -groups. Here is, for example, how to have a validation be part of the -`pre-deployment` and `hardware` groups: - -.. code-block:: yaml - - metadata: - groups: - - pre-deployment - - hardware - -The validations can be categorized by technical domain and can belong to one or -multiple categories. The categorization is depending on what the validation is -checking on the hosts. For example, if a validation checks some networking -related configuration and needs to get configuration items from the -undercloud.conf file, you will have to put `networking` and `undercloud-config` in -the ``categories`` metadata key: - -.. code-block:: yaml - - metadata: - groups: - - pre-deployment - - hardware - categories: - - networking - - undercloud-config - -.. note:: - - The ``categories`` are not restricted to a list as for the ``groups`` - present in the ``groups.yaml`` file, but it could be for example: - - * ``networking`` - * ``compute`` - * ``baremetal`` - * ``provisioning`` - * ``database`` - * ``os`` - * ``system`` - * ``packaging`` - * ``kernel`` - * ``security`` - * ``tls-everywhere`` - * ``dns`` - * ``dhcp`` - * ``dnsmasq`` - * ``webserver`` - * ``storage`` - * ``ha`` - * ``clustering`` - * ``undercloud-config`` - * etc ... - -The validations should be linked to a product. Every validations hosted in -``tripleo-validations`` should get at least ``tripleo`` in the ``products`` -metadata key: - -.. code-block:: yaml - - metadata: - groups: - - pre-deployment - - hardware - categories: - - networking - - undercloud-config - products: - - tripleo - -``roles`` include the Ansible role, which contains all the tasks to run, -associated to this validation. Each task is a YAML dictionary that must at -minimum contain a name and a module to use. Module can be any module that ships -with Ansible or any of the custom ones in the ``library`` directory. - -The `Ansible documentation on playbooks -`__ provides more detailed -information. - -Ansible Inventory -~~~~~~~~~~~~~~~~~ - -Dynamic inventory -+++++++++++++++++ - -Tripleo-validations ships with a `dynamic inventory -`__, which -contacts the various OpenStack services to provide the addresses of the -deployed nodes as well as the undercloud. 
- -Just pass ``-i /usr/bin/tripleo-ansible-inventory`` to ``ansible-playbook`` -command. - -As the playbooks are located in their own directory and not at the same level as -the ``roles``, ``callback_plugins``, ``library`` and ``lookup_plugins`` -directories, you will have to export some Ansible variables first: - -.. code-block:: console - - $ cd tripleo-validations/ - $ export ANSIBLE_CALLBACK_PLUGINS="${PWD}/callback_plugins" - $ export ANSIBLE_ROLES_PATH="${PWD}/roles" - $ export ANSIBLE_LOOKUP_PLUGINS="${PWD}/lookup_plugins" - $ export ANSIBLE_LIBRARY="${PWD}/library" - - $ ansible-playbook -i /usr/bin/tripleo-ansible-inventory playbooks/hello_world.yaml - -Hosts file -++++++++++ - -When more flexibility than what the current dynamic inventory provides is -needed or when running validations against a host that hasn't been deployed via -heat (such as the ``prep`` validations), it is possible to write a custom hosts -inventory file. It should look something like this: - -.. code-block:: INI - - [undercloud] - undercloud.example.com - - [allovercloud:children] - controller - compute - - [controller] - controller.example.com - - [compute] - compute-1.example.com - compute-2.example.com - - [all:vars] - ansible_ssh_user=stack - ansible_sudo=true - -It will have a ``[group]`` section for each role (``undercloud``, -``controller``, ``compute``) listing all the nodes belonging to that group. It -is also possible to create a group from other groups as done with -``[allovercloud:children]`` in the above example. If a validation specifies -``hosts: overcloud``, it will be run on any node that belongs to the -``compute`` or ``controller`` groups. If a node happens to belong to both, the -validation will only be run once. - -Lastly, there is an ``[all:vars]`` section where to configure certain -Ansible-specific options. - -``ansible_ssh_user`` will specify the user Ansible should SSH as. If that user -does not have root privileges, it is possible to instruct it to use ``sudo`` by -setting ``ansible_sudo`` to ``true``. - -Learn more at the `Ansible documentation page for the Inventory -`__ - -Custom Modules -~~~~~~~~~~~~~~ - -In case the `available Ansible modules -`__ don't cover your -needs, it is possible to write your own. Modules belong to the -``library`` directory. - -Here is a sample module that will always fail - -.. code-block:: python - - #!/usr/bin/env python - - from ansible.module_utils.basic import AnsibleModule - - if __name__ == '__main__': - module = AnsibleModule(argument_spec={}) - module.fail_json(msg="This module always fails.") - -Save it as ``library/my_module.py`` and use it in a validation like -so: - -.. code-block:: yaml - - tasks: - ... # some tasks - - name: Running my custom module - my_module: - ... # some other tasks - -The name of the module in the validation ``my_module`` must match the file name -(without extension): ``my_module.py``. - -The custom modules can accept parameters and do more complex reporting. Please -refer to the guide on writing modules in the Ansible documentation. - -.. Warning:: - - Each custom module must be accompanied by the most complete unit tests - possible. - -Learn more at the `Ansible documentation page about writing custom modules -`__. - -Running a validation --------------------- - -Running the validations require ansible and a set of nodes to run them against. -These nodes need to be reachable from the operator's machine and need to have -an account it can ssh to and perform passwordless sudo. 
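Before running or debugging validations, it can be worth confirming that this access is actually in place. The following is a minimal pre-flight sketch (not shipped with tripleo-validations, shown here for illustration only) that checks SSH reachability and passwordless sudo on every node in the inventory:

.. code-block:: yaml

    ---
    # Hypothetical pre-flight playbook, for illustration only:
    # verifies SSH access and passwordless privilege escalation.
    - hosts: all
      gather_facts: false
      become: true
      tasks:
        - name: Verify SSH connectivity and passwordless sudo
          command: id -u
          register: uid_result
          changed_when: false
          failed_when: uid_result.stdout != "0"

Run it with the same ``-i /usr/bin/tripleo-ansible-inventory`` (or static hosts file) argument you would pass to any validation; if it fails, fix SSH keys or sudo configuration before looking at the validations themselves.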
- -The nodes need to be present in the static inventory file or available from the -dynamic inventory script depending on which one the operator chooses to use. -Check which nodes are available with: - -.. code-block:: console - - $ source stackrc - $ tripleo-ansible-inventory --list - -In general, Ansible and the validations will be located on the *undercloud*, -because it should have connectivity to all the *overcloud* nodes is already set -up to SSH to them. - -.. code-block:: console - - $ source ~/stackrc - $ tripleo-validation.py - usage: tripleo-validation.py [-h] [--inventory INVENTORY] - [--extra-vars EXTRA_VARS [EXTRA_VARS ...]] - [--validation [,,...]] - [--group [,,...]] [--quiet] - [--validation-dir VALIDATION_DIR] - [--ansible-base-dir ANSIBLE_BASE_DIR] - [--output-log OUTPUT_LOG] - {run,list,show} - - $ tripleo-validation.py run --validation - - -Example: Verify Undercloud RAM requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Undercloud has a requirement of 16GB RAM. Let's write a validation -that verifies this is indeed the case before deploying anything. - -Let's create ``playbooks/undercloud-ram.yaml`` and put some metadata -in there: - -.. code-block:: yaml - - --- - - hosts: undercloud - vars: - metadata: - name: Minimum RAM required on the undercloud - description: > - Make sure the undercloud has enough RAM. - groups: - - prep - - pre-introspection - categories: - - os - - system - - ram - products: - - tripleo - - -The ``hosts`` key will tell which server should the validation run on. The -common values are ``undercloud``, ``overcloud`` (i.e. all overcloud nodes), -``controller`` and ``compute`` (i.e. just the controller or the compute nodes). - -The ``name`` and ``description`` metadata will show up in the API and the -TripleO UI so make sure to put something meaningful there. The ``groups`` -metadata applies a tag to the validation and allows to group them together in -order to perform group operations, such are running them all in one call. - -Now let's include the Ansible role associated to this validation. Add this under -the same indentation as ``hosts`` and ``vars``: - -.. code-block:: yaml - - roles: - - undercloud-ram - -Now let's create the ``undercloud-ram`` Ansible role which will contain the -necessary task(s) for checking if the Undercloud has the mininum amount of RAM -required: - -.. code-block:: console - - $ cd tripleo-validations - $ ansible-galaxy init --init-path=roles/ undercloud-ram - - undercloud-ram was created successfully - -The tree of the new created role should look like:: - - undercloud-ram/ - ├── defaults - │   └── main.yml - ├── meta - │   └── main.yml - ├── tasks - │   └── main.yml - └── vars - └── main.yml - -Now let's add an Ansible task to test that it's all set up properly: - -.. code-block:: yaml - - $ cat <> roles/undercloud-ram/tasks/main.yml - - name: Test Output - debug: - msg: "Hello World!" - EOF - -When running it, it should output something like this: - -.. code-block:: console - - $ /bin/run-validations.sh --validation-name undercloud-ram.yaml --ansible-default-callback - - PLAY [undercloud] ********************************************************* - - TASK [Gathering Facts] **************************************************** - ok: [undercloud] - - TASK [undercloud-ram : Test Output] *************************************** - ok: [undercloud] => { - "msg": "Hello World!" 
- } - - PLAY RECAP **************************************************************** - undercloud : ok=2 changed=0 unreachable=0 failed=0 - - -If you run into an issue where the validation isn't found, it may be because the -run-validations.sh script is searching for it in the path where the packaging -installs validations. For development, export an environment variable named -VALIDATIONS_BASEDIR with the value of base path of your git repo: - -.. code-block:: console - - $ cd /path/to/git/repo - $ export VALIDATIONS_BASEDIR=$(pwd) - -Writing the full validation code is quite easy in this case because Ansible has -done all the hard work for us already. We can use the ``ansible_memtotal_mb`` -fact to get the amount of RAM (in megabytes) the tested server currently has. -For other useful values, run ``ansible -i /usr/bin/tripleo-ansible-inventory -undercloud -m setup``. - -So, let's replace the hello world task with a real one: - -.. code-block:: yaml - - tasks: - - name: Verify the RAM requirements - fail: msg="The RAM on the undercloud node is {{ ansible_memtotal_mb }} MB, the minimal recommended value is 16 GB." - failed_when: "({{ ansible_memtotal_mb }}) < 16000" - -Running this, we see: - -.. code-block:: console - - TASK: [Verify the RAM requirements] ******************************************* - failed: [localhost] => {"failed": true, "failed_when_result": true} - msg: The RAM on the undercloud node is 8778 MB, the minimal recommended value is 16 GB. - - -Because our Undercloud node really does not have enough RAM. Your mileage may -vary. - -Either way, the validation works and reports the lack of RAM properly! - -``failed_when`` is the real hero here: it evaluates an Ansible expression (e.g. -does the node have more than 16 GB of RAM) and fails when it's evaluated as -true. - -The ``fail`` line right above it lets us print a custom error in case of -a failure. If the task succeeds (because we do have enough RAM), nothing will -be printed out. - -Now, we're almost done, but there are a few things we can do to make this nicer -on everybody. - -First, let's hoist the minimum RAM requirement into a variable. That way we'll -have one place where to change it if we need to and we'll be able to test the -validation better as well! - -So, let's call the variable ``minimum_ram_gb`` and set it to ``16``. Do this in -the ``vars`` section: - -.. code-block:: yaml - - vars: - metadata: - name: ... - description: ... - groups: ... - categories: ... - products: ... - minimum_ram_gb: 16 - -Make sure it's on the same indentation level as ``metadata``. - -Then, update ``failed_when`` like this: - -.. code-block:: yaml - - failed_when: "({{ ansible_memtotal_mb }}) < {{ minimum_ram_gb|int * 1024 }}" - -And ``fail`` like so: - -.. code-block:: yaml - - fail: msg="The RAM on the undercloud node is {{ ansible_memtotal_mb }} MB, the minimal recommended value is {{ minimum_ram_gb|int * 1024 }} MB." - -And re-run it again to be sure it's still working. - -One benefit of using a variable instead of a hardcoded value is that we can now -change the value without editing the yaml file! - -Let's do that to test both success and failure cases. - -This should succeed but saying the RAM requirement is 1 GB: - -.. code-block:: console - - $ ansible-playbook -i /usr/bin/tripleo-ansible-inventory playbooks/undercloud-ram.yaml -e minimum_ram_gb=1 - -And this should fail by requiring much more RAM than is necessary: - -.. 
code-block:: console - - $ ansible-playbook -i /usr/bin/tripleo-ansible-inventory playbooks/undercloud-ram.yaml -e minimum_ram_gb=128 - -(the actual values may be different in your configuration -- just make sure one -is low enough and the other too high) - -And that's it! The validation is now finished and you can start using it in -earnest. - -Create a new role with automation ---------------------------------- - -The role addition process is also automated using ansible. If ansible is -available on the development workstation change directory to the root of -the `tripleo-validations` repository and run the the following command which -will perform the basic tasks noted above. - -.. code-block:: console - - $ cd tripleo-validations/ - $ export ANSIBLE_ROLES_PATH="${PWD}/roles" - $ ansible-playbook -i localhost, role-addition.yml -e validation_init_role_name=${NEWROLENAME} - -The new role will be created in `tripleo-validations/roles/` from a skeleton and one playbook -will be added in `tripleo-validations/playbooks/`. - -It will also add a new **job** entry into the `zuul.d/molecule.yaml`. - -.. code-block:: yaml - - - job: - files: - - ^roles/${NEWROLENAME}/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-${NEWROLENAME} - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: ${NEWROLENAME} - - -And the **job** name will be added into the check and gate section at the top -of the `molecule.yaml` file. - -.. code-block:: yaml - - - project: - check: - jobs: - - tripleo-validations-centos-8-molecule-${NEWROLENAME} - gate: - jobs: - - tripleo-validations-centos-8-molecule-${NEWROLENAME} - -.. note:: - - Adding `Molecule` test is highly recommended but remains **optional**. Some - validations might require a real OpenStack Infrastructure in order to run - them and this, by definition, will make the `Molecule` test very complex to - implement. - - If you are in this case when creating a new validation, please - add `-e validation_init_molecule=false` to the above `ansible-playbook` - command. No molecule directory and no CI Job will be created. - -Finally it will add a role documentation file at -`doc/source/roles/role-${NEWROLENAME}.rst`. This file will need to contain -a title, a literal include of the defaults yaml and a literal include of -the molecule playbook, or playbooks, used to test the role, which is noted -as an "example" playbook. - -You will now be able to develop your new validation! - -Developing your own molecule test(s) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The role addition process will create a default Molecule scenario from the -skeleton. By using Molecule, you will be able to test it locally and of course -it will be executed during the CI checks. - -In your role directory, you will notice a `molecule` folder which contains a -single `Scenario` called `default`. Scenarios are the starting point for a lot -of powerful functionality that Molecule offers. A scenario is a kind of a test -suite for your newly created role. - -The Scenario layout -+++++++++++++++++++ - -Within the `molecule/default` folder, you will find those files: - -.. code-block:: console - - $ ls - molecule.yml converge.yml prepare.yml verify.yml - -* ``molecule.yml`` is the central configuration entrypoint for `Molecule`. With this - file, you can configure each tool that `Molecule` will employ when testing - your role. - -.. 
note:: - - `Tripleo-validations` uses a global configuration file for `Molecule`. - This file is located at the repository level (``tripleo-validations/.config/molecule/.config.yml``). - and defines all the default values for all the ``molecule.yml``. By default, - the role addition process will produce an empty ``molecule.yml`` inheriting - this ``config.yml`` file. Any key defined in the role ``molecule.yml`` file - will override values from the ``config.yml`` file. - - But, if you want to override the default values set in the ``config.yml`` - file, you will have to redefine them completely in your ``molecule.yml`` - file. `Molecule` won't merge both configuration files and that's why you - will have to redefine them completely. - -* ``prepare.yml`` is the playbook file that contains everything you need to - include before your test. It could include packages installation, file - creation, whatever your need on the instance created by the driver. - -* ``converge.yml`` is the playbook file that contains the call for you - role. `Molecule` will invoke this playbook with ``ansible-playbook`` and run - it against and instance created by the driver. - -* ``verify.yml`` is the Ansible file used for testing as Ansible is the default - ``Verifier``. This allows you to write specific tests against the state of the - container after your role has finished executing. - -Inspecting the Global Molecule Configuration file -+++++++++++++++++++++++++++++++++++++++++++++++++ - -As mentioned above, ``tripleo-validations`` uses a global configuration for -Molecule. - -.. literalinclude:: ../../../.config/molecule/config.yml - :language: yaml - -* The ``Driver`` provider: ``podman`` is the default. Molecule will use the - driver to delegate the task of creating instances. -* The ``Platforms`` definitions: Molecule relies on this to know which instances - to create, name and to which group each instance - belongs. ``Tripleo-validations`` uses ``CentOS 8 Stream image``. -* The ``Provisioner``: Molecule only provides an Ansible provisioner. Ansible - manages the life cycle of the instance based on this configuration. -* The ``Scenario`` definition: Molecule relies on this configuration to control - the scenario sequence order. -* The ``Verifier`` framework. Molecule uses Ansible by default to provide a way - to write specific stat checking tests (such as deployment smoke tests) on the - target instance. - -Local testing of new roles --------------------------- - -Local testing of new roles can be done in two ways: - -* Via the script `scripts/run-local-test`, -* or manually by following the procedure described below. - -Running molecule tests with the script run-local-test -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This script will setup the local work environment to execute tests mimicking -what Zuul does on a *CentOS 8* machine. - -.. warning:: - - This script makes the assumption the executing user has the - ability to escalate privileges and will modify the local system. - -To use this script execute the following command. - -.. code-block:: console - - $ cd tripleo-validations - $ ./scripts/run-local-test ${NEWROLENAME} - -When using the `run-local-test` script, the TRIPLEO_JOB_ANSIBLE_ARGS -environment variable can be used to pass arbitrary Ansible arguments. -For example, the following shows how to use `--skip-tags` when testing -a role with tags. - -.. 
code-block:: console - - $ export TRIPLEO_JOB_ANSIBLE_ARGS="--skip-tags tag_one,tag_two" - $ ./scripts/run-local-test ${ROLENAME} - - -Running molecule tests manually -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Role based testing with `molecule`_ can be executed directly from within -the role directory. - -.. note:: - - All tests require `Podman`_ for container based testing. If `Podman`_ is not - available on the local workstation it will need to be installed prior to - executing most molecule based tests. - - -.. note:: - - The script `bindep-install`, in the **scripts** path, is available and will - install all system dependencies. - - -.. note:: - - Each molecule tests are configured to bind mount a read-only volume on the - container where the tests are running: - - .. code-block:: yaml - - volumes: - - /etc/ci/mirror_info.sh:/etc/ci/mirror_info.sh:ro - - It is an OpenStack Zuul requirement for detecting if we are on a CI node. Of - course, when running your `molecule`_ test on your workstation, it is going - to fail because you don't have the empty `mirror_info.sh` script in the - `/etc/ci/` directory. You can workaround this by creating it in your - workstation or removing the volume key in the global configuration file for - molecule. - - .. code-block:: console - - $ sudo mkdir -p /etc/ci - $ sudo touch /etc/ci/mirror_info.sh - - - -Before running basic `molecule`_ tests, it is recommended to install all -of the python dependencies in a virtual environment. - -.. code-block:: console - - $ sudo dnf install python3 python3-virtualenv - $ python3 -m virtualenv --system-site-packages "${HOME}/test-python" - $ source "${HOME}/test-python/bin/activate" - (test-python) $ python3 -m pip install "pip>=19.1.1" setuptools bindep --upgrade - (test-python) $ scripts/./bindep-install - (test-python) $ python3 -m pip install -r requirements.txt \ - -r test-requirements.txt \ - -r molecule-requirements.txt - (test-python) $ ansible-galaxy install -fr ansible-collections-requirements.yml - - -Now, it is important to install `validations-common` and `tripleo-ansible` as -dependencies. - -.. note:: - - `validation-common` contains Ansible Custom modules needed by - `tripleo-validations` roles. That's the reason why we will need to clone it - beforehand. - - Cloning `tripleo-ansible` project is only necessary in order to run the - `molecule` test(s) for the `image_serve` role. Otherwise, you probably won't - need it. - - -.. code-block:: console - - $ cd tripleo-validations/ - $ for REPO in validations-common tripleo-ansible; do git clone https://opendev.org/openstack/${REPO} roles/roles.galaxy/${REPO}; done - - -To run a basic `molecule`_ test, simply source the `ansible-test-env.rc` -file from the project root, and then execute the following commands. - -.. code-block:: console - - (test-python) $ source ansible-test-env.rc - (test-python) $ cd roles/${NEWROLENAME}/ - (test-python) $ molecule test --all - - -If a role has more than one scenario, a specific scenario can be -specified on the command line. Running specific scenarios will -help provide developer feedback faster. To pass-in a scenario use -the `--scenario-name` flag with the name of the desired scenario. - -.. code-block:: console - - (test-python) $ cd roles/${NEWROLENAME}/ - (test-python) $ molecule test --scenario-name ${EXTRA_SCENARIO_NAME} - - -When debugging `molecule`_ tests its sometimes useful to use the -`--debug` flag. This flag will provide extra verbose output about -test being executed and running the environment. - -.. 
code-block:: console - - (test-python) $ molecule --debug test - -.. _molecule: https://github.com/ansible-community/molecule -.. _podman: https://podman.io/ diff --git a/doc/source/groups.rst b/doc/source/groups.rst deleted file mode 100644 index 7bf09e40e..000000000 --- a/doc/source/groups.rst +++ /dev/null @@ -1,44 +0,0 @@ -About Group -=========== - -For now, the validations are grouped by the deployment stage they should be run -on. A validation can belong to multiple groups. - -Adding a new group ------------------- - -To add a new group, you will need to edit the ``groups.yaml`` file located in -the root of the TripleO Validations directory: - -.. code-block:: yaml - - ... - pre-update: - - description: >- - Validations which try to validate your OpenStack deployment before you - update it. - ... - -And a new entry in the sphinx documentation index ``doc/source/index.rst``: - -.. code-block:: RST - - Existing validations - ==================== - - .. toctree:: - :maxdepth: 2 - - validations-no-op-details - validations-prep-details - validations-pre-introspection-details - validations-pre-deployment-details - validations-post-deployment-details - ... - -Group list ----------- - -Here is a list of groups and their associated validations. - -.. include:: validations-groups.rst diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 93044990e..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,87 +0,0 @@ -.. tripleo-validations documentation master file, created by - sphinx-quickstart on Tue Jul 9 22:26:36 2013. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -=============================================== -Welcome to tripleo-validations's documentation! -=============================================== - -Introduction -============ - -.. include:: ../../README.rst - -Installation Guide -================== - -.. toctree:: - :maxdepth: 2 - - installation - -Contributing -============ - -.. toctree:: - :maxdepth: 2 - - contributing/contributing - contributing/developer_guide - -Existing Groups -=============== - -.. toctree:: - :maxdepth: 2 - - groups - -Existing Validations -==================== - -.. toctree:: - :maxdepth: 2 - - validations-no-op-details - validations-prep-details - validations-pre-introspection-details - validations-pre-deployment-details - validations-post-deployment-details - validations-openshift-on-openstack-details - validations-pre-upgrade-details - validations-post-upgrade-details - validations-pre-system-upgrade-details - validations-post-system-upgrade-details - validations-pre-undercloud-upgrade-details - validations-post-undercloud-upgrade-details - validations-pre-overcloud-prepare-details - validations-post-overcloud-prepare-details - validations-pre-overcloud-upgrade-details - validations-post-overcloud-upgrade-details - validations-pre-overcloud-converge-details - validations-post-overcloud-converge-details - validations-pre-ceph-details - validations-post-ceph-details - validations-pre-update-details - validations-pre-update-prepare-details - validations-pre-update-run-details - validations-pre-update-converge-details - validations-post-update-details - validations-backup-and-restore-details - -Existing Roles and Modules -========================== - -.. 
toctree:: - :maxdepth: 2 - - roles - modules - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/doc/source/installation.rst b/doc/source/installation.rst deleted file mode 100644 index 7e47a8084..000000000 --- a/doc/source/installation.rst +++ /dev/null @@ -1,13 +0,0 @@ -Prerequisites -============= - -The TripleO validations requires Ansible ">=2.8,<2.0.0":: - - $ sudo pip install 'ansible>=2.8,<2.10.0' - -Installation -============ - -At the command line:: - - $ python3 -m pip install tripleo-validations diff --git a/doc/source/modules.rst b/doc/source/modules.rst deleted file mode 100644 index a353ba099..000000000 --- a/doc/source/modules.rst +++ /dev/null @@ -1,9 +0,0 @@ -Documented modules in TripleO-Validations -========================================= - -Contents: - -.. toctree:: - :glob: - - modules/* diff --git a/doc/source/modules/modules-ceph_pools_pg_protection.rst b/doc/source/modules/modules-ceph_pools_pg_protection.rst deleted file mode 100644 index 71b64fa83..000000000 --- a/doc/source/modules/modules-ceph_pools_pg_protection.rst +++ /dev/null @@ -1,14 +0,0 @@ -================================= -Module - ceph_pools_pg_protection -================================= - - -This module provides for the following ansible plugin: - - * ceph_pools_pg_protection - - -.. ansibleautoplugin:: - :module: library/ceph_pools_pg_protection.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-check_cpus_aligned_with_dpdk_nics.rst b/doc/source/modules/modules-check_cpus_aligned_with_dpdk_nics.rst deleted file mode 100644 index 5eaae600e..000000000 --- a/doc/source/modules/modules-check_cpus_aligned_with_dpdk_nics.rst +++ /dev/null @@ -1,14 +0,0 @@ -========================================== -Module - check_cpus_aligned_with_dpdk_nics -========================================== - - -This module provides for the following ansible plugin: - - * check_cpus_aligned_with_dpdk_nics - - -.. ansibleautoplugin:: - :module: library/check_cpus_aligned_with_dpdk_nics.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-check_flavors.rst b/doc/source/modules/modules-check_flavors.rst deleted file mode 100644 index b6e57fcbc..000000000 --- a/doc/source/modules/modules-check_flavors.rst +++ /dev/null @@ -1,14 +0,0 @@ -====================== -Module - check_flavors -====================== - - -This module provides for the following ansible plugin: - - * check_flavors - - -.. ansibleautoplugin:: - :module: library/check_flavors.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-check_ironic_boot_config.rst b/doc/source/modules/modules-check_ironic_boot_config.rst deleted file mode 100644 index fed7cce3c..000000000 --- a/doc/source/modules/modules-check_ironic_boot_config.rst +++ /dev/null @@ -1,14 +0,0 @@ -================================= -Module - check_ironic_boot_config -================================= - - -This module provides for the following ansible plugin: - - * check_ironic_boot_config - - -.. 
ansibleautoplugin:: - :module: library/check_ironic_boot_config.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-check_other_processes_pmd_usage.rst b/doc/source/modules/modules-check_other_processes_pmd_usage.rst deleted file mode 100644 index 048568f29..000000000 --- a/doc/source/modules/modules-check_other_processes_pmd_usage.rst +++ /dev/null @@ -1,14 +0,0 @@ -======================================== -Module - check_other_processes_pmd_usage -======================================== - - -This module provides for the following ansible plugin: - - * check_other_processes_pmd_usage - - -.. ansibleautoplugin:: - :module: library/check_other_processes_pmd_usage.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-convert_range_to_numbers_list.rst b/doc/source/modules/modules-convert_range_to_numbers_list.rst deleted file mode 100644 index 4b458a996..000000000 --- a/doc/source/modules/modules-convert_range_to_numbers_list.rst +++ /dev/null @@ -1,14 +0,0 @@ -====================================== -Module - convert_range_to_numbers_list -====================================== - - -This module provides for the following ansible plugin: - - * convert_range_to_numbers_list - - -.. ansibleautoplugin:: - :module: library/convert_range_to_numbers_list.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-get_dpdk_nics_numa_info.rst b/doc/source/modules/modules-get_dpdk_nics_numa_info.rst deleted file mode 100644 index 11a053b48..000000000 --- a/doc/source/modules/modules-get_dpdk_nics_numa_info.rst +++ /dev/null @@ -1,14 +0,0 @@ -================================ -Module - get_dpdk_nics_numa_info -================================ - - -This module provides for the following ansible plugin: - - * get_dpdk_nics_numa_info - - -.. ansibleautoplugin:: - :module: library/get_dpdk_nics_numa_info.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-icmp_ping.rst b/doc/source/modules/modules-icmp_ping.rst deleted file mode 100644 index 3daad9fca..000000000 --- a/doc/source/modules/modules-icmp_ping.rst +++ /dev/null @@ -1,14 +0,0 @@ -================== -Module - icmp_ping -================== - - -This module provides for the following ansible plugin: - - * icmp_ping - - -.. ansibleautoplugin:: - :module: library/icmp_ping.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-ip_range.rst b/doc/source/modules/modules-ip_range.rst deleted file mode 100644 index de6314cb6..000000000 --- a/doc/source/modules/modules-ip_range.rst +++ /dev/null @@ -1,14 +0,0 @@ -================= -Module - ip_range -================= - - -This module provides for the following ansible plugin: - - * ip_range - - -.. ansibleautoplugin:: - :module: library/ip_range.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-network_environment.rst b/doc/source/modules/modules-network_environment.rst deleted file mode 100644 index db4fd8e7f..000000000 --- a/doc/source/modules/modules-network_environment.rst +++ /dev/null @@ -1,14 +0,0 @@ -============================ -Module - network_environment -============================ - - -This module provides for the following ansible plugin: - - * network_environment - - -.. 
ansibleautoplugin:: - :module: library/network_environment.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-ovs_dpdk_pmd_cpus_check.rst b/doc/source/modules/modules-ovs_dpdk_pmd_cpus_check.rst deleted file mode 100644 index b4e2376f9..000000000 --- a/doc/source/modules/modules-ovs_dpdk_pmd_cpus_check.rst +++ /dev/null @@ -1,14 +0,0 @@ -================================ -Module - ovs_dpdk_pmd_cpus_check -================================ - - -This module provides for the following ansible plugin: - - * ovs_dpdk_pmd_cpus_check - - -.. ansibleautoplugin:: - :module: library/ovs_dpdk_pmd_cpus_check.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-pacemaker.rst b/doc/source/modules/modules-pacemaker.rst deleted file mode 100644 index 2a5eea942..000000000 --- a/doc/source/modules/modules-pacemaker.rst +++ /dev/null @@ -1,14 +0,0 @@ -================== -Module - pacemaker -================== - - -This module provides for the following ansible plugin: - - * pacemaker - - -.. ansibleautoplugin:: - :module: library/pacemaker.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-pmd_threads_siblings_check.rst b/doc/source/modules/modules-pmd_threads_siblings_check.rst deleted file mode 100644 index aecd0ffdb..000000000 --- a/doc/source/modules/modules-pmd_threads_siblings_check.rst +++ /dev/null @@ -1,14 +0,0 @@ -=================================== -Module - pmd_threads_siblings_check -=================================== - - -This module provides for the following ansible plugin: - - * pmd_threads_siblings_check - - -.. ansibleautoplugin:: - :module: library/pmd_threads_siblings_check.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-switch_vlans.rst b/doc/source/modules/modules-switch_vlans.rst deleted file mode 100644 index 82bf0280b..000000000 --- a/doc/source/modules/modules-switch_vlans.rst +++ /dev/null @@ -1,14 +0,0 @@ -===================== -Module - switch_vlans -===================== - - -This module provides for the following ansible plugin: - - * switch_vlans - - -.. ansibleautoplugin:: - :module: library/switch_vlans.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-tripleo_haproxy_conf.rst b/doc/source/modules/modules-tripleo_haproxy_conf.rst deleted file mode 100644 index 23b1de6ed..000000000 --- a/doc/source/modules/modules-tripleo_haproxy_conf.rst +++ /dev/null @@ -1,14 +0,0 @@ -============================= -Module - tripleo_haproxy_conf -============================= - - -This module provides for the following ansible plugin: - - * tripleo_haproxy_conf - - -.. ansibleautoplugin:: - :module: library/tripleo_haproxy_conf.py - :documentation: true - :examples: true diff --git a/doc/source/modules/modules-verify_profiles.rst b/doc/source/modules/modules-verify_profiles.rst deleted file mode 100644 index 1c99e6a63..000000000 --- a/doc/source/modules/modules-verify_profiles.rst +++ /dev/null @@ -1,14 +0,0 @@ -======================== -Module - verify_profiles -======================== - - -This module provides for the following ansible plugin: - - * verify_profiles - - -.. 
ansibleautoplugin:: - :module: library/verify_profiles.py - :documentation: true - :examples: true diff --git a/doc/source/roles.rst b/doc/source/roles.rst deleted file mode 100644 index 37ab8a205..000000000 --- a/doc/source/roles.rst +++ /dev/null @@ -1,9 +0,0 @@ -Documented roles in TripleO-Validations -======================================= - -Contents: - -.. toctree:: - :glob: - - roles/* diff --git a/doc/source/roles/role-ceph.rst b/doc/source/roles/role-ceph.rst deleted file mode 100644 index 1f6796aa2..000000000 --- a/doc/source/roles/role-ceph.rst +++ /dev/null @@ -1,6 +0,0 @@ -==== -ceph -==== - -.. ansibleautoplugin:: - :role: roles/ceph diff --git a/doc/source/roles/role-check_for_dangling_images.rst b/doc/source/roles/role-check_for_dangling_images.rst deleted file mode 100644 index 9191b7882..000000000 --- a/doc/source/roles/role-check_for_dangling_images.rst +++ /dev/null @@ -1,45 +0,0 @@ -========================= -check_for_dangling_images -========================= - --------------- -About the role --------------- - -Ansible role to check for dangling images - -Requirements -============ - -This role will be executed pre Update. - -Dependencies -============ - -No Dependencies - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: servers - roles: - - { role: check_for_dangling_images, check_for_dangling_images_debug: true } - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Upgrades** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/check_for_dangling_images diff --git a/doc/source/roles/role-check_kernel_version.rst b/doc/source/roles/role-check_kernel_version.rst deleted file mode 100644 index b3c77a513..000000000 --- a/doc/source/roles/role-check_kernel_version.rst +++ /dev/null @@ -1,6 +0,0 @@ -==================== -check_kernel_version -==================== - -.. ansibleautoplugin:: - :role: roles/check_kernel_version diff --git a/doc/source/roles/role-check_manila_policy_file.rst b/doc/source/roles/role-check_manila_policy_file.rst deleted file mode 100644 index 82c1501d4..000000000 --- a/doc/source/roles/role-check_manila_policy_file.rst +++ /dev/null @@ -1,53 +0,0 @@ -============ -policy_file -============ - --------------- -About The Role --------------- - -This role will check if there is a file named Policy.yaml in the controlers. -The file should be located at the manila's configuration folder in the container. - -Requirements -============ - -No Requirements. - -Dependencies -============ - -No dependencies. - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Verify that keystone admin token is disabled - description: | - This validation checks that policy file of manilas configuration folder inside of the container,exists. - groups: - - post-deployment - categories: - - controller - products: - - tripleo - manilas_policy_file: "/var/lib/config-data/puppet-generated/manila/etc/manila/policy.yaml" - roles: - - check_manila_policy_file - -Author Information -================== - -**Red Hat Manila** - ----------------- -Full Description ----------------- - -.. 
ansibleautoplugin:: - :role: roles/check_manila_policy_file diff --git a/doc/source/roles/role-check_network_gateway.rst b/doc/source/roles/role-check_network_gateway.rst deleted file mode 100644 index bd2c91045..000000000 --- a/doc/source/roles/role-check_network_gateway.rst +++ /dev/null @@ -1,6 +0,0 @@ -===================== -check_network_gateway -===================== - -.. ansibleautoplugin:: - :role: roles/check_network_gateway diff --git a/doc/source/roles/role-check_nfv_ovsdpdk_zero_packet_loss.rst b/doc/source/roles/role-check_nfv_ovsdpdk_zero_packet_loss.rst deleted file mode 100644 index 34a4a4f0a..000000000 --- a/doc/source/roles/role-check_nfv_ovsdpdk_zero_packet_loss.rst +++ /dev/null @@ -1,50 +0,0 @@ -================================== -check_nfv_ovsdpdk_zero_packet_loss -================================== - --------------- -About the role --------------- - -This role validates the NFV OvS DPDK zero packet loss rules on OvS DPDK Compute nodes to find out the issues with NFV OvS Dpdk configuration. - -Requirements -============ - -- Validates PMD threads configuration. -- Validates PMD threads included as part of isolcpus. -- Checks any interrupts on Isolated CPU's. -- Validates all the data paths are same on the server if ovs user bridge is used. -- Validates bandwidth of the PCI slots. -- Validates hugepages, CPU pinning, emulatorpin threads and libvirt queue size configuration on NFV instances. - -Dependencies -============ - -- Expects all the configuration files that are passed. - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: servers - roles: - - - { role: check_nfv_ovsdpdk_zero_packet_loss } - -License -======= -Apache - -Author Information -================== - -**Red Hat TripleO DFG:NFV Integration** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/check_nfv_ovsdpdk_zero_packet_loss diff --git a/doc/source/roles/role-check_ntp_reachability.rst b/doc/source/roles/role-check_ntp_reachability.rst deleted file mode 100644 index af905be71..000000000 --- a/doc/source/roles/role-check_ntp_reachability.rst +++ /dev/null @@ -1,30 +0,0 @@ -====================== -check_ntp_reachability -====================== - --------------- -About The Role --------------- - -An Ansible role that will check if the time is synchronised with the NTP servers. -The role fails, if the time is not NTP synchronised and prints NTP servers which -chrony is trying to synchronise with. This role is recommended to run, if the -``Undercloud`` deployment fails on NTP synchronisation task. - -Requirements -============ - -This role runs on ``Undercloud``. - -License -======= - -Apache - -Author Information -================== - -Red Hat TripleO Validations Team - -.. ansibleautoplugin:: - :role: roles/check_ntp_reachability diff --git a/doc/source/roles/role-check_reboot.rst b/doc/source/roles/role-check_reboot.rst deleted file mode 100644 index 842183956..000000000 --- a/doc/source/roles/role-check_reboot.rst +++ /dev/null @@ -1,6 +0,0 @@ -============ -check_reboot -============ - -.. ansibleautoplugin:: - :role: roles/check_reboot diff --git a/doc/source/roles/role-check_rhsm_version.rst b/doc/source/roles/role-check_rhsm_version.rst deleted file mode 100644 index 9b77b1324..000000000 --- a/doc/source/roles/role-check_rhsm_version.rst +++ /dev/null @@ -1,6 +0,0 @@ -================== -check_rhsm_version -================== - -.. 
ansibleautoplugin:: - :role: roles/check_rhsm_version diff --git a/doc/source/roles/role-check_uc_hostname.rst b/doc/source/roles/role-check_uc_hostname.rst deleted file mode 100644 index 2feca2cb3..000000000 --- a/doc/source/roles/role-check_uc_hostname.rst +++ /dev/null @@ -1,72 +0,0 @@ -================= -check_uc_hostname -================= - --------------- -About the role --------------- - -Ansible role to check ``DockerInsecureRegistryAddress`` matches the UC hostname. - -The purpose of this validation is mostly target for the FFWD 13 to 16.X procedure. - -Customer is expected to follow the step `9.3. Configuring access to the -undercloud registry -`_ - -The customer needs to retrieve the control plane host name on the -undercloud and add it into the ``DockerInsecureRegistryAddress``. - -It might happen that the user misses this step or doesn't really add -the right control plan host name and then ``podman`` fails to retrieve the -containers. - -To summarize what customer is expected to do: - -- Run ``sudo hiera container_image_prepare_node_names`` to get host name(s) -- Edit the containers-prepare-parameter.yaml file and the ``DockerInsecureRegistryAddress`` parameter with - host name and IP of the undercloud. - -This validation will: - -- Pull ``DockerInsecureRegistryAddress`` (list) from the Openstack environment -- Run ``sudo hiera container_image_prepare_node_names`` -- Verify the container_image_prepare_node_names returned from ``hiera`` is contained in the ``DockerInsecureRegistryAddress`` list. - -Requirements -============ - -This role will be executed pre Overcloud Update. - -Dependencies -============ - -No Dependencies - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: servers - vars: - check_uc_hostname_debug: true - roles: - - check_uc_hostname - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Upgrades** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/check_uc_hostname diff --git a/doc/source/roles/role-check_undercloud_conf.rst b/doc/source/roles/role-check_undercloud_conf.rst deleted file mode 100644 index 3c341e96b..000000000 --- a/doc/source/roles/role-check_undercloud_conf.rst +++ /dev/null @@ -1,6 +0,0 @@ -===================== -check_undercloud_conf -===================== - -.. ansibleautoplugin:: - :role: roles/check_undercloud_conf diff --git a/doc/source/roles/role-collect_flavors_and_verify_profiles.rst b/doc/source/roles/role-collect_flavors_and_verify_profiles.rst deleted file mode 100644 index 7536335c4..000000000 --- a/doc/source/roles/role-collect_flavors_and_verify_profiles.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== -collect_flavors_and_verify_profiles -=================================== - -.. ansibleautoplugin:: - :role: roles/collect_flavors_and_verify_profiles diff --git a/doc/source/roles/role-compute_tsx.rst b/doc/source/roles/role-compute_tsx.rst deleted file mode 100644 index eed9a83cb..000000000 --- a/doc/source/roles/role-compute_tsx.rst +++ /dev/null @@ -1,73 +0,0 @@ -=========== -compute_tsx -=========== - --------------- -About The Role --------------- - -An Ansible role to verify that the compute nodes have the appropriate TSX flags -before proceeding with an upgrade. 
- -``RHEL-8.3`` kernel disabled the **Intel TSX** (Transactional Synchronization -Extensions) feature by default as a preemptive security measure, but it breaks -live migration from ``RHEL-7.9`` (or even ``RHEL-8.1`` or ``RHEL-8.2``) to -``RHEL-8.3``. - -Operators are expected to explicitly define the TSX flag in their KernelArgs for -the compute role to prevent live-migration issues during the upgrade process. - -This role is intended to be called by tripleo via the kernel deployment -templates. - -It's also possible to call the role as a standalone. - -This also impacts upstream CentOS systems - -Requirements -============ - -This role needs to be run on an ``Undercloud`` with a deployed ``Overcloud``. - -Dependencies -============ - -No dependencies. - -Example Playbook -================ - -Standard playbook: - -.. code-block:: yaml - - - hosts: nova_libvirt - roles: - - { role: compute_tsx} - -Reporting playbook with no failure: - -.. code-block:: yaml - - - hosts: nova_libvirt - vars: - - compute_tsx_warning: true - roles: - - { role: compute_tsx } - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Compute Squad:Deployment** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/compute_tsx diff --git a/doc/source/roles/role-container_status.rst b/doc/source/roles/role-container_status.rst deleted file mode 100644 index 01ba5d78a..000000000 --- a/doc/source/roles/role-container_status.rst +++ /dev/null @@ -1,6 +0,0 @@ -================ -container_status -================ - -.. ansibleautoplugin:: - :role: roles/container_status diff --git a/doc/source/roles/role-controller_token.rst b/doc/source/roles/role-controller_token.rst deleted file mode 100644 index 6c362b264..000000000 --- a/doc/source/roles/role-controller_token.rst +++ /dev/null @@ -1,6 +0,0 @@ -================ -controller_token -================ - -.. ansibleautoplugin:: - :role: roles/controller_token diff --git a/doc/source/roles/role-controller_ulimits.rst b/doc/source/roles/role-controller_ulimits.rst deleted file mode 100644 index 11631c0c3..000000000 --- a/doc/source/roles/role-controller_ulimits.rst +++ /dev/null @@ -1,6 +0,0 @@ -================== -controller_ulimits -================== - -.. ansibleautoplugin:: - :role: roles/controller_ulimits diff --git a/doc/source/roles/role-ctlplane_ip_range.rst b/doc/source/roles/role-ctlplane_ip_range.rst deleted file mode 100644 index 4f6b11b0b..000000000 --- a/doc/source/roles/role-ctlplane_ip_range.rst +++ /dev/null @@ -1,6 +0,0 @@ -================= -ctlplane_ip_range -================= - -.. ansibleautoplugin:: - :role: roles/ctlplane_ip_range diff --git a/doc/source/roles/role-default_node_count.rst b/doc/source/roles/role-default_node_count.rst deleted file mode 100644 index 0a1395334..000000000 --- a/doc/source/roles/role-default_node_count.rst +++ /dev/null @@ -1,6 +0,0 @@ -================== -default_node_count -================== - -.. ansibleautoplugin:: - :role: roles/default_node_count diff --git a/doc/source/roles/role-deprecated_services.rst b/doc/source/roles/role-deprecated_services.rst deleted file mode 100644 index 6e2c52b41..000000000 --- a/doc/source/roles/role-deprecated_services.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================== -deprecated_services -=================== - -.. 
ansibleautoplugin:: - :role: roles/deprecated_services diff --git a/doc/source/roles/role-dhcp_validations.rst b/doc/source/roles/role-dhcp_validations.rst deleted file mode 100644 index 532d5be1b..000000000 --- a/doc/source/roles/role-dhcp_validations.rst +++ /dev/null @@ -1,6 +0,0 @@ -================ -dhcp_validations -================ - -.. ansibleautoplugin:: - :role: roles/dhcp_validations diff --git a/doc/source/roles/role-fips_enabled.rst b/doc/source/roles/role-fips_enabled.rst deleted file mode 100644 index 05befb642..000000000 --- a/doc/source/roles/role-fips_enabled.rst +++ /dev/null @@ -1,49 +0,0 @@ -============ -fips_enabled -============ - --------------- -About The Role --------------- - -This role will check if system has turned on FIPS. -This validation can be enabled or disabled within the variable: -`enforce_fips_validation`, setting it to `true` will -enable the validation, setting to `false` will disable it. - -Requirements -============ - -Turned on FIPS. - -Dependencies -============ - -No dependencies. - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: localhost - gather_facts: false - roles: - - { role: fips_enabled } - -Licence -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Security Squad:OG** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/fips_enabled diff --git a/doc/source/roles/role-frr_status.rst b/doc/source/roles/role-frr_status.rst deleted file mode 100644 index 4b10636bc..000000000 --- a/doc/source/roles/role-frr_status.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========== -frr_status -=========== - -.. ansibleautoplugin:: - :role: roles/frr_status diff --git a/doc/source/roles/role-healthcheck_service_status.rst b/doc/source/roles/role-healthcheck_service_status.rst deleted file mode 100644 index 2ac903964..000000000 --- a/doc/source/roles/role-healthcheck_service_status.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -healthcheck_service_status -========================== - -.. ansibleautoplugin:: - :role: roles/healthcheck_service_status diff --git a/doc/source/roles/role-image_serve.rst b/doc/source/roles/role-image_serve.rst deleted file mode 100644 index 91372e2ff..000000000 --- a/doc/source/roles/role-image_serve.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========== -image_serve -=========== - -.. ansibleautoplugin:: - :role: roles/image_serve diff --git a/doc/source/roles/role-ironic_boot_configuration.rst b/doc/source/roles/role-ironic_boot_configuration.rst deleted file mode 100644 index 24cf91cbc..000000000 --- a/doc/source/roles/role-ironic_boot_configuration.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -ironic_boot_configuration -========================= - -.. ansibleautoplugin:: - :role: roles/ironic_boot_configuration diff --git a/doc/source/roles/role-mysql_open_files_limit.rst b/doc/source/roles/role-mysql_open_files_limit.rst deleted file mode 100644 index 4d597efe2..000000000 --- a/doc/source/roles/role-mysql_open_files_limit.rst +++ /dev/null @@ -1,6 +0,0 @@ -====================== -mysql_open_files_limit -====================== - -.. ansibleautoplugin:: - :role: roles/mysql_open_files_limit diff --git a/doc/source/roles/role-network_environment.rst b/doc/source/roles/role-network_environment.rst deleted file mode 100644 index 8f4c96298..000000000 --- a/doc/source/roles/role-network_environment.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================== -network_environment -=================== - -.. 
ansibleautoplugin:: - :role: roles/network_environment diff --git a/doc/source/roles/role-neutron_sanity_check.rst b/doc/source/roles/role-neutron_sanity_check.rst deleted file mode 100644 index 992ef07aa..000000000 --- a/doc/source/roles/role-neutron_sanity_check.rst +++ /dev/null @@ -1,6 +0,0 @@ -==================== -neutron_sanity_check -==================== - -.. ansibleautoplugin:: - :role: roles/neutron_sanity_check diff --git a/doc/source/roles/role-node_disks.rst b/doc/source/roles/role-node_disks.rst deleted file mode 100644 index 0a8bc6b58..000000000 --- a/doc/source/roles/role-node_disks.rst +++ /dev/null @@ -1,6 +0,0 @@ -========== -node_disks -========== - -.. ansibleautoplugin:: - :role: roles/node_disks diff --git a/doc/source/roles/role-node_health.rst b/doc/source/roles/role-node_health.rst deleted file mode 100644 index 940e5e74c..000000000 --- a/doc/source/roles/role-node_health.rst +++ /dev/null @@ -1,13 +0,0 @@ -=========== -node_health -=========== - -Role is used by the :ref:`pre-upgrade_node-health` validation to verify state of the overcloud -compute services and baremetal nodes they are running on. - -As the clients contacted require Keystone authentication, the role requires -relevant values, such as Keystone endpoint and username, for correct operation. -Otherwise it will produce authentication error. - -.. ansibleautoplugin:: - :role: roles/node_health diff --git a/doc/source/roles/role-nova_event_callback.rst b/doc/source/roles/role-nova_event_callback.rst deleted file mode 100644 index 52cee7bbd..000000000 --- a/doc/source/roles/role-nova_event_callback.rst +++ /dev/null @@ -1,48 +0,0 @@ -=================== -nova_event_callback -=================== - --------------- -About the role --------------- - -An Ansible role to check if the **Nova** ``auth_url`` in **Neutron** is -configured correctly on the **Overcloud Controller(s)**. - -Requirements -============ - -None. - -Dependencies -============ - -None. - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: Controller - vars: - neutron_config_file: /path/to/neutron.conf - roles: - - nova_event_callback - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Compute Deployment Squad** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/nova_event_callback diff --git a/doc/source/roles/role-nova_status.rst b/doc/source/roles/role-nova_status.rst deleted file mode 100644 index 94d29053a..000000000 --- a/doc/source/roles/role-nova_status.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========== -nova_status -=========== - -.. ansibleautoplugin:: - :role: roles/nova_status diff --git a/doc/source/roles/role-nova_svirt.rst b/doc/source/roles/role-nova_svirt.rst deleted file mode 100644 index 1141929ad..000000000 --- a/doc/source/roles/role-nova_svirt.rst +++ /dev/null @@ -1,6 +0,0 @@ -================= -nova_svirt -================= - -.. ansibleautoplugin:: - :role: roles/nova_svirt diff --git a/doc/source/roles/role-openshift_on_openstack.rst b/doc/source/roles/role-openshift_on_openstack.rst deleted file mode 100644 index e040ae0d4..000000000 --- a/doc/source/roles/role-openshift_on_openstack.rst +++ /dev/null @@ -1,6 +0,0 @@ -====================== -openshift_on_openstack -====================== - -.. 
ansibleautoplugin:: - :role: roles/openshift_on_openstack diff --git a/doc/source/roles/role-openstack_endpoints.rst b/doc/source/roles/role-openstack_endpoints.rst deleted file mode 100644 index c8935d27b..000000000 --- a/doc/source/roles/role-openstack_endpoints.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================== -openstack_endpoints -=================== - -.. ansibleautoplugin:: - :role: roles/openstack_endpoints diff --git a/doc/source/roles/role-oslo_config_validator.rst b/doc/source/roles/role-oslo_config_validator.rst deleted file mode 100644 index 5191eeedf..000000000 --- a/doc/source/roles/role-oslo_config_validator.rst +++ /dev/null @@ -1,93 +0,0 @@ -===================== -oslo_config_validator -===================== - --------------- -About the role --------------- - -An Ansible role that will loop through all the containers on the selected host, find the OpenStack service configuration file -and leverage the `oslo-config-validator <https://docs.openstack.org/oslo.config/latest/cli/validator.html>`_ utility to validate the current running configuration. - -It's also possible to generate a report that contains all differences between the sample or default values and the current running configuration. - -Finally, it will also verify that the current running configuration doesn't contain any known invalid settings that might have been deprecated and removed in previous versions. - -Exceptions -========== - -Some services like ``cinder`` can have dynamic configuration sections. In ``cinder``'s case, this is for the storage backends. To perform validation on these dynamic sections, we need to generate a YAML-formatted config sample with ``oslo-config-generator`` beforehand, append a new sample configuration for each storage backend, and validate against that newly generated configuration file by passing ``--opt-data`` to the ``oslo-config-validator`` command instead of using ``--namespaces``. Since generating a sample config adds some delay to the validation, this is not the default way of validating; we prefer to validate directly using ``--namespaces``. - -NOTE: At the time of writing this role, ``oslo-config-generator`` has a bug [1] when generating YAML config files, most notably with ``cinder``. Since the inclusion of the oslo.config patch can't be guaranteed, the role will inject this patch [2] into the oslo.config code, inside the validation container. This code change is ephemeral and lasts only for the duration of the configuration file generation. The reason we inject this patch is that the validation may run on containers that were created before the patch was merged. This ensures a smooth validation across the board. - -[1] https://bugs.launchpad.net/oslo.config/+bug/1928582 -[2] https://review.opendev.org/c/openstack/oslo.config/+/790883 - - -Requirements -============ - -This role needs to be run on an Undercloud with a deployed Overcloud. - -Role Variables -============== - -- oslo_config_validator_validation: Whether or not to run assertions on the produced outputs. That also means that the role will fail if anything is output post-filtering. If this is enabled together with reporting, it will most likely trigger a failure unless executed against a default configuration. -- oslo_config_validator_report: Whether or not we compare the configuration files found with the default config. -- oslo_config_validator_invalid_settings: When running validation, whether or not we should check for invalid settings. 
This adds to the time it takes to complete validation because of the way the validations_read_ini module works. This won't work without ``oslo_config_validator_validation`` enabled. -- oslo_config_validator_report_path: The folder used when generating the reports. -- oslo_config_validator_global_ignored_messages: List of regular expressions that will filter out messages globally, across all namespaces -- oslo_config_validator_namespaces_config: Specific namespace configurations. It contains namespace-specific ignored patterns as well as invalid settings configuration. -- oslo_config_validator_service_configs: Mapping of known Openstack services with their namespace configuration. -- oslo_config_validator_checked_services: List of services being validated. - -Dependencies -============ - -- podman_container -- podman_container_info -- validations_read_ini -- https://review.opendev.org/c/openstack/oslo.config/+/790883 - - - -Example Reporting Playbook -========================== - -.. code-block:: yaml - - - hosts: all - vars: - - oslo_config_validator_report: true - - oslo_config_validator_validation: false - roles: - - { role: oslo_config_validator} - -Example playbook to validate only one service -============================================= - -.. code-block:: yaml - - - hosts: all - vars: - - oslo_config_validator_checked_services: - - nova - roles: - - { role: oslo_config_validator} - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Compute Deployment Squad** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/oslo_config_validator diff --git a/doc/source/roles/role-overcloud_service_status.rst b/doc/source/roles/role-overcloud_service_status.rst deleted file mode 100644 index ebe804de3..000000000 --- a/doc/source/roles/role-overcloud_service_status.rst +++ /dev/null @@ -1,47 +0,0 @@ -======================== -overcloud_service_status -======================== - --------------- -About The Role --------------- - -An Ansible role to verify the ``Overcloud`` services states after a deployment -or an update. It checks the ``API /os-services`` and looks for deprecated -services (``nova-consoleauth``) or any down services. - -Requirements -============ - -This role needs to be run on an ``Undercloud`` with a deployed ``Overcloud``. - -Dependencies -============ - -No dependencies. - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: undercloud - roles: - - { role: overcloud_service_status } - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:Compute Squad:Deployment** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/overcloud_service_status diff --git a/doc/source/roles/role-ovs_dpdk_pmd.rst b/doc/source/roles/role-ovs_dpdk_pmd.rst deleted file mode 100644 index 537370005..000000000 --- a/doc/source/roles/role-ovs_dpdk_pmd.rst +++ /dev/null @@ -1,6 +0,0 @@ -============ -ovs_dpdk_pmd -============ - -.. ansibleautoplugin:: - :role: roles/ovs_dpdk_pmd diff --git a/doc/source/roles/role-pacemaker_status.rst b/doc/source/roles/role-pacemaker_status.rst deleted file mode 100644 index 8dcb10387..000000000 --- a/doc/source/roles/role-pacemaker_status.rst +++ /dev/null @@ -1,6 +0,0 @@ -================ -pacemaker_status -================ - -.. 
ansibleautoplugin:: - :role: roles/pacemaker_status diff --git a/doc/source/roles/role-package_version.rst b/doc/source/roles/role-package_version.rst deleted file mode 100644 index f6bc6ac7d..000000000 --- a/doc/source/roles/role-package_version.rst +++ /dev/null @@ -1,6 +0,0 @@ -=============== -package_version -=============== - -.. ansibleautoplugin:: - :role: roles/package_version diff --git a/doc/source/roles/role-rabbitmq_limits.rst b/doc/source/roles/role-rabbitmq_limits.rst deleted file mode 100644 index 8e4782a55..000000000 --- a/doc/source/roles/role-rabbitmq_limits.rst +++ /dev/null @@ -1,6 +0,0 @@ -=============== -rabbitmq_limits -=============== - -.. ansibleautoplugin:: - :role: roles/rabbitmq_limits diff --git a/doc/source/roles/role-repos.rst b/doc/source/roles/role-repos.rst deleted file mode 100644 index 300f13a3d..000000000 --- a/doc/source/roles/role-repos.rst +++ /dev/null @@ -1,47 +0,0 @@ -===== -repos -===== - -An Ansible role to check the correctness of current repositories. - -Requirements ------------- - -This role could be used before/after an Undercloud or an Overcloud has been -deployed. - -Role Variables --------------- - -- None - -Dependencies ------------- - -No dependencies. - -Example Playbook ----------------- - -.. code-block:: yaml - - - hosts: undercloud - roles: - - role: repos - - - hosts: overcloud - roles: - - role: repos - -License -------- - -Apache - -Author Information ------------------- - -Red Hat TripleO Validations Team - -.. ansibleautoplugin:: - :role: roles/repos diff --git a/doc/source/roles/role-stack_health.rst b/doc/source/roles/role-stack_health.rst deleted file mode 100644 index 8308b24ea..000000000 --- a/doc/source/roles/role-stack_health.rst +++ /dev/null @@ -1,6 +0,0 @@ -============ -stack_health -============ - -.. ansibleautoplugin:: - :role: roles/stack_health diff --git a/doc/source/roles/role-stonith_exists.rst b/doc/source/roles/role-stonith_exists.rst deleted file mode 100644 index 158d59915..000000000 --- a/doc/source/roles/role-stonith_exists.rst +++ /dev/null @@ -1,6 +0,0 @@ -============== -stonith_exists -============== - -.. ansibleautoplugin:: - :role: roles/stonith_exists diff --git a/doc/source/roles/role-switch_vlans.rst b/doc/source/roles/role-switch_vlans.rst deleted file mode 100644 index a48079fc3..000000000 --- a/doc/source/roles/role-switch_vlans.rst +++ /dev/null @@ -1,6 +0,0 @@ -============ -switch_vlans -============ - -.. ansibleautoplugin:: - :role: roles/switch_vlans diff --git a/doc/source/roles/role-system_encoding.rst b/doc/source/roles/role-system_encoding.rst deleted file mode 100644 index 704c737ba..000000000 --- a/doc/source/roles/role-system_encoding.rst +++ /dev/null @@ -1,6 +0,0 @@ -=============== -system_encoding -=============== - -.. ansibleautoplugin:: - :role: roles/system_encoding diff --git a/doc/source/roles/role-tls_everywhere.rst b/doc/source/roles/role-tls_everywhere.rst deleted file mode 100644 index 49543846a..000000000 --- a/doc/source/roles/role-tls_everywhere.rst +++ /dev/null @@ -1,6 +0,0 @@ -============== -tls_everywhere -============== - -.. 
ansibleautoplugin:: - :role: roles/tls_everywhere diff --git a/doc/source/roles/role-tripleo_haproxy.rst b/doc/source/roles/role-tripleo_haproxy.rst deleted file mode 100644 index a95248550..000000000 --- a/doc/source/roles/role-tripleo_haproxy.rst +++ /dev/null @@ -1,46 +0,0 @@ -=============== -tripleo_haproxy -=============== - --------------- -About The Role --------------- - -An Ansible role to check if the ``HAProxy`` configuration has the recommended -values. - -Requirements -============ - -This role requires an up and running Overcloud. - -Dependencies -============ - -None. - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: undercloud - roles: - - { role: tripleo_haproxy } - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:PIDONE** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/tripleo_haproxy diff --git a/doc/source/roles/role-undercloud_debug.rst b/doc/source/roles/role-undercloud_debug.rst deleted file mode 100644 index ca621bb40..000000000 --- a/doc/source/roles/role-undercloud_debug.rst +++ /dev/null @@ -1,54 +0,0 @@ -================ -undercloud_debug -================ - --------------- -About the role --------------- - -An Ansible role to check if debug is enabled on Undercloud services. - -Requirements -============ - -This role needs to be run against an installed Undercloud. -The tested services must use one of the specified configuration files -to set their debug status. - -Role Variables -============== - -- debug_check: -- services_conf_files: List of paths for configuration files of services - you want to check. - -Dependencies -============ - -- 'validations_read_ini' custom plugin - -Example Playbook -================ - -.. code-block:: yaml - - - hosts: undercloud - roles: - - { role: undercloud-debug } - -License -======= - -Apache - -Author Information -================== - -Red Hat TripleO Validations Team - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/undercloud_debug diff --git a/doc/source/roles/role-undercloud_disabled_services.rst b/doc/source/roles/role-undercloud_disabled_services.rst deleted file mode 100644 index 186469f81..000000000 --- a/doc/source/roles/role-undercloud_disabled_services.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -undercloud_disabled_services -============================ - -.. ansibleautoplugin:: - :role: roles/undercloud_disabled_services diff --git a/doc/source/roles/role-undercloud_disk_space.rst b/doc/source/roles/role-undercloud_disk_space.rst deleted file mode 100644 index 929c532c4..000000000 --- a/doc/source/roles/role-undercloud_disk_space.rst +++ /dev/null @@ -1,46 +0,0 @@ -===================== -Undercloud-disk-space -===================== - -An Ansible role to verify that the Undercloud meets the disk space requirements. - -Requirements ------------- - -This role could be used before and/or after the Undercloud installation. - -Role Variables --------------- - -- Volumes: a dictionary of mount points and their minimum sizes - -Dependencies ------------- - -No dependencies. - -Example Playbook ----------------- - -.. code-block:: yaml - - - hosts: servers - roles: - - { role: undercloud-disk-space} - -License -------- - -Apache - -Author Information ------------------- - -Red Hat TripleO Validations Team - ----------------- -Full Description ----------------- - -.. 
ansibleautoplugin:: - :role: roles/undercloud_disk_space diff --git a/doc/source/roles/role-undercloud_heat_purge_deleted.rst b/doc/source/roles/role-undercloud_heat_purge_deleted.rst deleted file mode 100644 index cc6a6f6a4..000000000 --- a/doc/source/roles/role-undercloud_heat_purge_deleted.rst +++ /dev/null @@ -1,51 +0,0 @@ -============================= -undercloud_heat_purge_deleted -============================= - -.. warning:: - This role is not suited for Wallaby and more recent releases. - As it expects presence 'heat_api_cron'. - -An Ansible role to check if `heat-manage purge_deleted` is enabled in the -crontab - -Requirements ------------- - -This role requires an installed and working Undercloud. - -Role Variables --------------- - -- cron_check: <'heat-manage purge_deleted'> -- String to check in the crontab - -Dependencies ------------- - -No dependencies. - -Example Playbook ----------------- - -.. code-block:: yaml - - - hosts: undercloud - roles: - - { role: undercloud-heat-purge-deleted } - -License -------- - -Apache - -Author Information ------------------- - -Red Hat TripleO Validations Team - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/undercloud_heat_purge_deleted diff --git a/doc/source/roles/role-undercloud_process_count.rst b/doc/source/roles/role-undercloud_process_count.rst deleted file mode 100644 index 3e9c509f0..000000000 --- a/doc/source/roles/role-undercloud_process_count.rst +++ /dev/null @@ -1,47 +0,0 @@ -======================== -undercloud_process_count -======================== - -An Ansible role to check the number of OpenStack processes on the Undercloud - -Requirements ------------- - -This role requires an installed and working Undercloud - - -Role Variables --------------- - -- max_process_count: <'8'> -- Maximum number of process - -Dependencies ------------- - -No dependencies. - -Example Playbook ----------------- - -.. code-block:: yaml - - - hosts: servers - roles: - - { role: undercloud-process-count } - -License -------- - -Apache - -Author Information ------------------- - -Red Hat TripleO Validations Team - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/undercloud_process_count diff --git a/doc/source/roles/role-undercloud_proxy_validation.rst b/doc/source/roles/role-undercloud_proxy_validation.rst deleted file mode 100644 index 961dbc771..000000000 --- a/doc/source/roles/role-undercloud_proxy_validation.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -undercloud_proxy_validation -=========================== - -.. ansibleautoplugin:: - :role: roles/undercloud_proxy_validation diff --git a/doc/source/roles/role-undercloud_service_status.rst b/doc/source/roles/role-undercloud_service_status.rst deleted file mode 100644 index 10b870ceb..000000000 --- a/doc/source/roles/role-undercloud_service_status.rst +++ /dev/null @@ -1,47 +0,0 @@ -========================= -undercloud_service_status -========================= - -An Ansible role to verify the Undercloud services states before running an -Update or Upgrade. - -Requirements ------------- - -This role needs to be run against an installed Undercloud. - -Role Variables --------------- - -- undercloud_service_list: A list of services actually coming from the tripleo-ansible-inventory - -Dependencies ------------- - -No dependencies. - -Example Playbook ----------------- - -.. 
code-block:: yaml - - - hosts: undercloud - roles: - - { role: undercloud-service-status } - -License -------- - -Apache - -Author Information ------------------- - -Red Hat TripleO Validations Team. - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/undercloud_service_status diff --git a/doc/source/roles/role-undercloud_sysctl.rst b/doc/source/roles/role-undercloud_sysctl.rst deleted file mode 100644 index 7ef195266..000000000 --- a/doc/source/roles/role-undercloud_sysctl.rst +++ /dev/null @@ -1,6 +0,0 @@ -================= -undercloud_sysctl -================= - -.. ansibleautoplugin:: - :role: roles/undercloud_sysctl diff --git a/doc/source/roles/role-validate_passwords_file.rst b/doc/source/roles/role-validate_passwords_file.rst deleted file mode 100644 index a4af409c8..000000000 --- a/doc/source/roles/role-validate_passwords_file.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================= -validate_passwords_file -======================= - -.. ansibleautoplugin:: - :role: roles/validate_passwords_file diff --git a/doc/source/roles/role-validation_init.rst b/doc/source/roles/role-validation_init.rst deleted file mode 100644 index afb51c7c8..000000000 --- a/doc/source/roles/role-validation_init.rst +++ /dev/null @@ -1,48 +0,0 @@ -=============== -validation_init -=============== - --------------- -About The Role --------------- - -The ``validation_init`` role aims to create new validation from a skeleton. - -Requirements -============ - -None. - -Dependencies -============ - -None. - -Example Playbook -================ - -.. code-block:: yaml - - - name: Create my new role - hosts: localhost - connection: local - gather_facts: false - roles: - - { role: validation_init, validation_init_role_name: "mynewrolename"} - -License -======= - -Apache - -Author Information -================== - -**Red Hat TripleO DFG:DF Squad:VF** - ----------------- -Full Description ----------------- - -.. ansibleautoplugin:: - :role: roles/validation_init diff --git a/groups.yaml b/groups.yaml deleted file mode 100644 index 48bed336d..000000000 --- a/groups.yaml +++ /dev/null @@ -1,106 +0,0 @@ ---- -no-op: - - description: >- - Validations which will run a no-op operation to verify that the workflow is - working as it supposed to, it will run in both the Undercloud and Overcloud - Nodes. -prep: - - description: >- - Validations check the hardware configuration of the Undercloud node and - should be run before **openstack undercloud install**. -backup-and-restore: - - description: >- - Validations which should be run to validate your OpenStack - deployment before you backup it and after restore it. -openshift-on-openstack: - - description: >- - Validations which will check that the environment meets the requirements - to be able to deploy OpenShift on OpenStack. -pre-introspection: - - description: >- - Validations which should be run before the nodes introspection using - Ironic Inspector. -pre-deployment: - - description: >- - Validations which should be executed before **openstack overcloud - deploy**. -post-deployment: - - description: >- - Validations which should be executed after the Overcloud deployment has - finished. -pre-upgrade: - - description: >- - Validations which try to validate your OpenStack deployment before you - upgrade it. -post-upgrade: - - description: >- - Validations which try to validate your OpenStack deployment after you - upgrade it. 
-pre-system-upgrade: - - description: >- - Validations which verify that the operating system can be upgraded in - the target node. -post-system-upgrade: - - description: >- - Validations which verify that the operating system upgrade went fine - in the target node. -pre-undercloud-upgrade: - - description: >- - Validations which verify that the undercloud is in a correct state - before upgrading. -post-undercloud-upgrade: - - description: >- - Validations which verify that the undercloud is in a correct state - after upgrading. -pre-overcloud-prepare: - - description: >- - Set of validations that check the right content for the templates passed - to the overcloud upgrade prepare step. -post-overcloud-prepare: - - description: >- - Validations which verify that the overcloud is in a correct state - after running overcloud upgrade prepare step. -pre-overcloud-upgrade: - - description: >- - Validations which verify that the overcloud is in a correct state - before running overcloud upgrade run command. -post-overcloud-upgrade: - - description: >- - Validations which verify that the overcloud is in a correct state - after running overcloud upgrade run command. -pre-overcloud-converge: - - description: >- - Set of validations that check the right content for the templates passed - to the overcloud upgrade converge step. -post-overcloud-converge: - - description: >- - Validations which verify that the overcloud is in a correct state - after running overcloud upgrade converge command. -pre-ceph: - - description: >- - Validations to run on undercloud before deploying OpenStack - with Ceph. -post-ceph: - - description: >- - Validations to run on overcloud after deploying OpenStack - with Ceph. -pre-update: - - description: >- - Validations which should be run to validate your OpenStack - deployment before you update it. -pre-update-prepare: - - description: >- - Validations which should be run to verify that the undercloud - updated correctly. -pre-update-run: - - description: >- - Validations which should be run to verify image and registration - of the overcloud nodes. -pre-update-converge: - - description: >- - Validations which should be run to verify that containers and - services are ready for pre-deployment. -post-update: - - description: >- - Validations which should be run to validate your OpenStack - deployment after you update it. diff --git a/hosts.sample b/hosts.sample deleted file mode 100644 index c9c907453..000000000 --- a/hosts.sample +++ /dev/null @@ -1,33 +0,0 @@ -# Your undercloud servers. This can be `localhost` if you cloned the -# repository on the undercloud: -[undercloud] -undercloud.example.com - -[undercloud:vars] -#controller_vip= - -[overcloud:children] -controller -compute - -[controller] -controller.example.com - -[controller:vars] -#put username here if different than compute nodes, ie stack user -##ansible_ssh_user=stack - -[compute] -compute-1.example.com -compute-2.example.com - -[compute:vars] -#put username here if different than compute nodes, ie stack user -##ansible_ssh_user=heat-admin - -[all:vars] -# Username to SSH as: -ansible_ssh_user=stack -# Set to `true` if the SSH user is not root. Ansible will run `sudo` -# for commands requiring root: -ansible_sudo=true diff --git a/library/ceph_pools_pg_protection.py b/library/ceph_pools_pg_protection.py deleted file mode 100644 index edf3ab507..000000000 --- a/library/ceph_pools_pg_protection.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""ceph_pools_pg_protection module -Used by the ceph-pg validation. -""" - -from yaml import safe_load as yaml_safe_load -from ansible.module_utils.basic import AnsibleModule - - -ANSIBLE_METADATA = { - 'metadata_version': '0.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -DOCUMENTATION = ''' ---- -module: ceph_pools_pg_protection -short_description: Warn if Ceph will not create CephPools based on PG and OSD numbers -description: - - The Ceph PG overdose protection check (https://ceph.com/community/new-luminous-pg-overdose-protection) - is executed by Ceph before a pool is created. - If the check does not pass, then the pool is not created. - - When TripleO deploys Ceph it triggers ceph-ansible which creates the pools that OpenStack needs. - - This validation runs the same check that the overdose protection uses - to determine if the user should update their CephPools, PG count, or number of OSDs. - Without this check a deployer may have to wait until after Ceph is running, - but before the pools are created to realize the deployment will fail. - - Used by the ceph-pg validation. - - Owned by the "DFG:Storage Squad:Ceph" -options: - num_osds: - description: - - The number of Ceph OSDs expected to be running during Pool creation. - - TripleO does not have this parameter - - In theory you can derive this parameter from TripleO parameters - required: True - type: int - ceph_pool_default_size: - description: - - The same as the TripleO CephPoolDefaultSize parameter - - Number of replicas of the data - required: False - default: 3 - type: int - ceph_pool_default_pg_num: - description: - - The same as the TripleO CephPoolDefaultPgNum parameter - - The default number of Placement Groups a pool should have - - Ceph defaults this number to 16 - - TripleO defaults this number to 128 - required: False - default: 128 - type: int - ceph_pools: - description: - - The same as the TripleO CephPools parameter - - A list of dictionaries - - Each embedded dict must have a name parameter - - Optional pg_num and size parameters may be set per pool - required: True - type: list -author: - - John Fulton (fultonj) -''' - -EXAMPLES = ''' -# Call this module from TripleO Ansible Validations - -- name: Is the CephPools parameter configured correctly? - ceph_pools_pg_protection: - num_osds: 36 - ceph_pool_default_size: 3 - ceph_pool_default_pg_num: 128 - ceph_pools: - - {"name": volumes, "pg_num": 1024,"pgp_num": 1024, "application": rbd, "size": 3} - - {"name": vms, "pg_num": 512, "pgp_num": 512, "application": rbd, "size": 3} - - {"name": images, "pg_num": 128, "pgp_num": 128, "application": rbd, "size": 3} - register: pool_creation_simulation -- name: Fail if CephPools parameter is not configured correctly - fail: - msg: pool_creation_simulation["message"] - when: not pool_creation_simulation["valid_input"] - -# Call this module from within TripleO Heat Templates (if only num_osds was derived) -- name: Is the CephPools parameter configured correctly? 
- ceph_pools_pg_protection: - num_osds: 36 - ceph_pool_default_size: {get_param: CephPoolDefaultSize} - ceph_pool_default_pg_num: {get_param: CephPoolDefaultPgNum} - ceph_pools: {get_param: CephPools} - register: pool_creation_simulation - -''' - -RETURN = ''' -message: - description: A description of why Ceph might refuse to create the requested CephPools - type: str - returned: always -valid_input: - description: True only if Ceph would create all requested pools - type: boolean - returned: always -''' - - -def check_pg_num(pool, pg_num, size, num_osds=0, max_pgs_per_osd=200, pools={}): - """ - Returns empty string only if the Pool PG numbers are correct for the OSDs. - Otherwise returns an error message like the one Ceph would return. - """ - # The original check in C++ from the Ceph source code is: - # - # int OSDMonitor::check_pg_num(int64_t pool, int pg_num, int size, ostream *ss) - # { - # auto max_pgs_per_osd = g_conf->get_val("mon_max_pg_per_osd"); - # auto num_osds = std::max(osdmap.get_num_in_osds(), 3u); // assume min cluster size 3 - # auto max_pgs = max_pgs_per_osd * num_osds; - # uint64_t projected = 0; - # if (pool < 0) { - # projected += pg_num * size; - # } - # for (const auto& i : osdmap.get_pools()) { - # if (i.first == pool) { - # projected += pg_num * size; - # } else { - # projected += i.second.get_pg_num() * i.second.get_size(); - # } - # } - # if (projected > max_pgs) { - # if (pool >= 0) { - # *ss << "pool id " << pool; - # } - # *ss << " pg_num " << pg_num << " size " << size - # << " would mean " << projected - # << " total pgs, which exceeds max " << max_pgs - # << " (mon_max_pg_per_osd " << max_pgs_per_osd - # << " * num_in_osds " << num_osds << ")"; - # return -ERANGE; - # } - # return 0; - # } - msg = "" - max_pgs = max_pgs_per_osd * num_osds - projected = 0 - if len(pool) < 0: - projected = projected + (pg_num * size) - for pool_name, pool_sizes in pools.items(): - if pool_name == pool: - projected = projected + (pg_num * size) - else: - projected = projected + (int(pool_sizes['pg_num']) * int(pool_sizes['size'])) - if projected > max_pgs: - msg = "Cannot add pool: " + str(pool) + \ - " pg_num " + str(pg_num) + " size " + str(size) + \ - " would mean " + str(projected) + \ - " total pgs, which exceeds max " + str(max_pgs) + \ - " (mon_max_pg_per_osd " + str(max_pgs_per_osd) + \ - " * num_in_osds " + str(num_osds) + ")" - return msg - - -def simulate_pool_creation(num_osds, ceph_pools, - ceph_pool_default_size=3, - ceph_pool_default_pg_num=128, - max_pgs_per_osd=200): - """ - Simulate ceph-ansible asking Ceph to create the pools in the ceph_pools list - """ - msg = "" - failed = False - created_pools = {} - for pool in ceph_pools: - if 'size' not in pool: - pool['size'] = ceph_pool_default_size - if 'pg_num' not in pool: - pool['pg_num'] = ceph_pool_default_pg_num - ceph_msg = check_pg_num(pool['name'], pool['pg_num'], pool['size'], - num_osds, max_pgs_per_osd, created_pools) - if len(ceph_msg) == 0: - created_pools[pool['name']] = {'pg_num': pool['pg_num'], 'size': pool['size']} - else: - failed = True - break - if failed: - msg = "The following Ceph pools would be created (but no others):" + \ - "\n" + str(created_pools) + "\n" + \ - "Pool creation would then fail with the following from Ceph:" + \ - "\n" + ceph_msg + "\n" + \ - "Please use https://ceph.io/pgcalc and then update the CephPools parameter" - simulation_results = {} - simulation_results['failed'] = failed - simulation_results['msg'] = msg - return simulation_results - - -def run_module(): - 
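# Illustrative worked example (not part of the original module) for the
# simulation run_module() drives through simulate_pool_creation(), which
# mirrors the Ceph overdose-protection arithmetic quoted in the C++ excerpt
# inside check_pg_num(). With the values from the EXAMPLES section --
# num_osds=36 and the default mon_max_pg_per_osd of 200 -- the limit is
# max_pgs = 200 * 36 = 7200, and Ceph projects the total PGs as the sum of
# pg_num * size over the requested pools:
#   volumes: 1024 * 3 = 3072
#   vms:      512 * 3 = 1536
#   images:   128 * 3 =  384
#   projected total   = 4992
# 4992 <= 7200, so every pool is accepted and the module reports
# valid_input=True; a CephPools request projecting more than 7200 PGs would
# instead surface the Ceph-style error message built in check_pg_num().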
"""This module never changes state of a target system, it only - evaluates if inputs will work when Ceph processes then. - There shouldn't be anything like the following - result['changed'] = True - - This module does not currently have fail options. It should - only evaluate input and make result of the evaluation available. - So it doesn't currently do anything like the following by design. - module.fail_json(msg='Failing for invalid input', **result) - - Exit and pass the key/value results of the simulation - """ - # Seed the result dict in the object - result = dict( - changed=False, - valid_input=True, - message='' - ) - - # Use AnsibleModule object abstraction to work with Ansible - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'], - supports_check_mode=False - ) - - # Check mode not supported - if module.check_mode: - module.exit_json(**result) - - # Simulate Ceph pool creation - simulation = simulate_pool_creation(module.params['num_osds'], - module.params['ceph_pools'], - module.params['ceph_pool_default_size'], - module.params['ceph_pool_default_pg_num']) - if simulation['failed']: - result['message'] = "Invalid Ceph configuration: " + simulation['msg'] - result['valid_input'] = False - else: - result['message'] = 'Provided CephPools satisfy PG overdose protection' - result['valid_input'] = True - - module.exit_json(**result) - - -def main(): - run_module() - -if __name__ == '__main__': - main() diff --git a/library/check_cpus_aligned_with_dpdk_nics.py b/library/check_cpus_aligned_with_dpdk_nics.py deleted file mode 100644 index 4b08c9a27..000000000 --- a/library/check_cpus_aligned_with_dpdk_nics.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python - -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""check_cpus_aligned_with_dpdk_nics module -Used by the `check_nfv_ovsdpdk_zero_packet_loss` role. 
-""" - -from ansible.module_utils.basic import AnsibleModule -from yaml import safe_load as yaml_safe_load - -import json -import yaml - -DOCUMENTATION = ''' ---- -module: OVS DPDK PMD CPU's check -short_description: Run PMD CPU's from all the NUMA nodes check -description: - - Run PMD CPU's from all the NUMA nodes check - - Owned by the DFG:NFV Integration -options: - cpus: - required: true - description: - - The CPU's list - type: str - numa_node: - required: true - description: - - The NUMA node - type: int - dpdk_nics_numa_info: - required: true - description: - - The DPDK NIC's NUMA details - type: list -author: "Jaganathan Palanisamy" -''' - -EXAMPLES = ''' -# Call this module from TripleO Ansible Validations - -- name: Check CPU's aligned with DPDK NIC's NUMA - become: true - check_cpus_aligned_with_dpdk_nics: - cpus: "2,3,4,5" - numa_node: "0" - dpdk_nics_numa_info: [{"numa_node": 0, "mac": "mac1", "pci": "pci1"}, - {"numa_node": 0, "mac": "mac2", "pci": "pci2"}] - register: valid_cpus -''' - - -def get_nodes_cpus_info(module): - """Gets the logical cpus info for all numa nodes.""" - - dict_cpus = {} - # Gets numa node and cpu details - cmd = "lscpu -p=NODE,CPU" - result = module.run_command(cmd) - if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))): - err = "Unable to determine NUMA cpus" - module.fail_json(msg=err) - else: - output = str(result[1]) - try: - for line in output.split('\n'): - if line and '#' not in line: - cpu_info = line.split(',') - node = int(cpu_info[0]) - thread = int(cpu_info[1]) - if node in dict_cpus: - if thread not in dict_cpus[node]: - dict_cpus[node].append(thread) - else: - dict_cpus[node] = [thread] - except (IndexError, ValueError): - err = "Unable to determine NUMA cpus" - module.fail_json(msg=err) - return dict_cpus - - -def check_cpus_aligned_with_dpdk_nics(module, cpus, numa_node, dpdk_nics_numa_info): - """Checks cpus aligned with NUMA with DPDK NIC's.""" - - result = dict( - changed=False, - valid_cpus=False, - message='' - ) - nodes = [] - valid_numa = False - invalid_cpus = [] - nodes_cpus = get_nodes_cpus_info(module) - for dpdk_nics_numa in dpdk_nics_numa_info: - if (dpdk_nics_numa['numa_node'] == numa_node): - valid_numa = True - break - if not valid_numa: - err = "NFV instance is not aligned with DPDK NIC's NUMA." - module.fail_json(msg=err) - for cpu in cpus.split(','): - if not int(cpu) in nodes_cpus[numa_node]: - invalid_cpus.append(cpu) - if invalid_cpus: - err = "CPU's are not aligned with DPDK NIC's NUMA, Invalid CPU's: "+','.join(invalid_cpus) - result['message'] = err - result['valid_cpus'] = False - module.fail_json(msg=err) - else: - result['message'] = "CPU's configured correctly: " + cpus - result['valid_cpus'] = True - module.exit_json(**result) - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - check_cpus_aligned_with_dpdk_nics(module, - module.params.get('cpus'), - module.params.get('numa_node'), - module.params.get('dpdk_nics_numa_info')) - - -if __name__ == '__main__': - main() diff --git a/library/check_flavors.py b/library/check_flavors.py deleted file mode 100644 index 88caff111..000000000 --- a/library/check_flavors.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python -# Copyright 2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""check_flavors module -Used by the collect_flavors_and_verify_profiles validation. -""" -from ansible.module_utils.basic import AnsibleModule # noqa -from yaml import safe_load as yaml_safe_load - -import re - -DOCUMENTATION = ''' ---- -module: check_flavors -short_description: Check that assigned flavors exist and are configured -description: - - Validate that the flavors assigned to roles exist and have the correct - settings. Right now, that means that boot_option is unset or set to - 'local', or if set to 'netboot', issue a warning. - - Used by the collect_flavors_and_verify_profiles - - Owned by the 'DFG Compute & DFG Hardware Provisioning' -options: - roles_info: - required: true - description: - - A list of role info - type: list - flavors: - required: true - description: - - A dictionary of flavors from Nova - type: dict - -author: "Brad P. Crochet" -''' - -EXAMPLES = ''' -- hosts: undercloud - tasks: - - name: Check the flavors - check_flavors: - roles_info: "{{ lookup('roles_info', wantlist=True) }}" - flavors: "{{ lookup('nova_flavors', wantlist=True) }}" -''' - - -def validate_roles_and_flavors(roles_info, flavors): - """Check if roles info is correct - - :param roles_info: list of role data - :param flavors: dictionary of flavors - :returns result: Flavors and scale - warnings: List of warning messages - errors: List of error messages - """ - - result = {} - errors = [] - warnings = [] - custom_resource_class = None - custom_resource_class_val = None - - message = "Flavor '{1}' provided for the role '{0}', does not exist" - missing_message = "Role '{0}' is in use, but has no flavor assigned" - warning_message = ( - 'Flavor {0} "capabilities:boot_option" is set to ' - '"netboot". Nodes will PXE boot from the ironic ' - 'conductor instead of using a local bootloader. 
' - 'Make sure that enough nodes are marked with the ' - '"boot_option" capability set to "netboot".') - resource_class_missing = ( - 'Flavor {0} does not have a custom resource class ' - 'associated with it') - resource_class_name_incorrect = ( - 'Flavor {0} has an incorrectly named custom ' - 'resource class associated with it') - resource_class_value_incorrect = ( - 'Flavor {0} has a resource class that is not ' - 'offering exactly 1 resource') - disable_standard_scheduling = ( - 'Flavor {0} has to have scheduling based on ' - 'standard properties disabled by setting ' - 'resources:VCPU=0 resources:MEMORY_MB=0 ' - 'resources:DISK_GB=0 in the flavor property') - - for role in roles_info: - target = role.get('name') - flavor_name = role.get('flavor') - scale = role.get('count', 0) - - if flavor_name is None or not scale: - if scale: - errors.append(missing_message.format(target)) - continue - - old_flavor_name, old_scale = result.get(flavor_name, (None, None)) - - if old_flavor_name: - result[flavor_name] = (old_flavor_name, scale) - else: - flavor = flavors.get(flavor_name) - - if flavor: - keys = flavor.get('keys', None) - if keys: - if keys.get('capabilities:boot_option', '') \ - == 'netboot': - warnings.append( - warning_message.format(flavor_name)) - # check if the baremetal flavor has custom resource class - # required for scheduling since queens - resource_specs = {key.split( - "resources:", 1)[-1]: val - for key, val in keys.items() - if key.startswith("resources:")} - if not resource_specs: - errors.append(resource_class_missing.format( - flavor_name)) - else: - for key, val in resource_specs.items(): - if key.startswith("CUSTOM_"): - custom_resource_class = True - match = re.match('CUSTOM_[A-Z_]+', key) - if match is None: - errors.append( - resource_class_name_incorrect, - flavor_name) - else: - if int(val) == 1: - custom_resource_class_val = True - if not custom_resource_class: - errors.append(resource_class_missing.format( - flavor_name)) - if key not in ["DISK_GB", "MEMORY_MB", "VCPU"] and \ - not custom_resource_class_val: - errors.append(resource_class_value_incorrect. - format(flavor_name)) - disk = resource_specs.get("DISK_GB", None) - memory = resource_specs.get("MEMORY_MB", None) - vcpu = resource_specs.get("VCPU", None) - if any(int(resource) != 0 for resource in [disk, - memory, vcpu]): - errors.append(disable_standard_scheduling. - format(flavor_name)) - - result[flavor_name] = (flavor, scale) - else: - errors.append(message.format(target, flavor_name)) - - return result, warnings, errors - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - roles_info = module.params.get('roles_info') - flavors = module.params.get('flavors') - - flavor_result, warnings, errors = validate_roles_and_flavors(roles_info, - flavors) - - if errors: - module.fail_json(msg="\n".join(errors)) - elif warnings: - module.exit_json(warnings="\n".join(warnings)) - else: - module.exit_json( - msg="All flavors configured on roles", - flavors=flavor_result) - - -if __name__ == '__main__': - main() diff --git a/library/check_ironic_boot_config.py b/library/check_ironic_boot_config.py deleted file mode 100644 index ffdb192af..000000000 --- a/library/check_ironic_boot_config.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections - -from ansible.module_utils.basic import AnsibleModule # noqa -from oslo_utils import uuidutils -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: check_ironic_boot_config -short_description: - - Check that overcloud nodes have the correct associated ramdisk and kernel - image -description: - - Each overcloud node needs to have the correct associated ramdisk and - kernel image according to its architecture and platform. This can be - validated by making sure that like nodes have associated deploy images - not exceeding a certain standard of diversity. -options: - nodes: - required: true - description: - - A list of nodes from Ironic - type: list - -author: Jeremy Freudberg -''' - -EXAMPLES = ''' -- hosts: undercloud - tasks: - - name: Check Ironic boot config - check_ironic_boot_config: - nodes: "{{ lookup('ironic_nodes', wantlist=True) }}" -''' - - -GLANCE = 'Glance' -FILE = 'file-based' - - -def _too_diverse(mapping_type, node_info, images): - image_type = "deploy_%s" % node_info[0] - return ( - "There is more than one {} {} associated to nodes with architecture " - "{} and platform {}. Probably only one of {} should be associated." - ).format(mapping_type, image_type, node_info[1], node_info[2], images) - - -def _invalid_image_entry(image_type_base, image_entry, node_id): - image_type = "deploy_%s" % image_type_base - return ( - "The {} associated to node {} is of an invalid form. Could not " - "determine whether {} refers to a file or Glance image." 
- ).format(image_type, node_id, image_entry) - - -def validate_boot_config(nodes): - errors = [] - - associated_images = { - GLANCE: collections.defaultdict(set), - FILE: collections.defaultdict(set) - } - - for node in nodes: - arch = node["properties"].get("cpu_arch", None) - platform = node["extra"].get("tripleo_platform", None) - - for image_type in ['kernel', 'ramdisk']: - image_entry = ( - node["driver_info"].get("deploy_%s" % image_type, None) - ) - if uuidutils.is_uuid_like(image_entry): - mapping = GLANCE - elif str(image_entry).startswith("file://"): - mapping = FILE - # TODO(jfreud): uncomment when Ironic supports empty driver_info -# elif image_entry is None: -# continue - else: - errors.append(_invalid_image_entry( - image_type, image_entry, node["uuid"])) - continue - node_info = (image_type, arch, platform) - associated_images[mapping][node_info].add(image_entry) - - for mapping_type, mapping in associated_images.items(): - for node_info, images in mapping.items(): - if len(images) > 1: - errors.append(_too_diverse(mapping_type, node_info, images)) - - return errors - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - nodes = module.params.get('nodes') - - errors = validate_boot_config(nodes) - - if errors: - module.fail_json("".join(errors)) - else: - module.exit_json() - - -if __name__ == '__main__': - main() diff --git a/library/check_other_processes_pmd_usage.py b/library/check_other_processes_pmd_usage.py deleted file mode 100644 index 06e7352ce..000000000 --- a/library/check_other_processes_pmd_usage.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python - -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""check_other_processes_pmd_usage module -Used by the `check_nfv_ovsdpdk_zero_packet_loss` role. -""" - -from ansible.module_utils.basic import AnsibleModule -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: Check OVS DPDK PMD threads used by other processes or not -short_description: Run PMD threads used by other processes or not check -description: - - Run PMD threads used by other processes or not check - - Owned by the DFG:NFV Integration -options: - pmd_cpus: - required: true - description: - - The pmd cpus list - type: list - exclude_processes_pid: - required: false - description: - - The processes pid list which need to be excluded. - - This option is optional. 
- default: [] - type: list - -author: "Jaganathan Palanisamy" -''' - -EXAMPLES = ''' -# Call this module from TripleO Ansible Validations - -- name: Run PMD threads used by other processes or not check - become: true - check_other_processes_pmd_usage: - pmd_cpus: [6, 7, 9, 11] - register: pmd_interrupts - -- name: Run PMD threads used by other processes or not with exclude processes - become: true - check_other_processes_pmd_usage: - pmd_cpus: [6, 7, 9, 11] - exclude_processes_pid: ['24', '26'] - register: pmd_interrupts -''' - - -def check_current_process_pmd_usage(module, pmd_cpus, process_id, range_list): - """Check pmd usage in current process cpus range list.""" - - messages = [] - num_list = [] - exclude_num_list = [] - threads_used = [] - try: - for val in range_list.split(','): - val = val.strip(' ') - if '^' in val: - exclude_num_list.append(int(val[1:])) - elif '-' in val: - split_list = val.split("-") - range_min = int(split_list[0]) - range_max = int(split_list[1]) - num_list.extend(range(range_min, (range_max + 1))) - else: - num_list.append(int(val)) - except ValueError as exc: - err = ("Invalid number in input param " - "'range_list': %s" % exc) - module.fail_json(msg=err) - # here, num_list is a list of integers - threads_list = [str(num) for num in num_list if num not in exclude_num_list] - for thread in threads_list: - if thread in pmd_cpus: - if threads_used: - threads_used.append(thread) - else: - threads_used = [thread] - if threads_used: - messages.append("pmd threads: " + ','.join(threads_used) + " used in process: " + process_id) - return list(messages) - - -def check_other_processes_pmd_usage(module, pmd_cpus, exclude_processes_pid): - """Checks PMD threads used in any other process or not""" - - output = dict( - pmd_interrupts=False, - messages=[] - ) - messages = [] - threads_used = {} - current_processes = [] - - # Gets all the processes and corresponding threads usage - # except processes mentioned in exclude_processes_pid list - # processes pid and threads information - cmd = ("find -L /proc/[0-9]*/exe ! -type l | cut -d / -f3 | " - "xargs -l -i sh -c 'ps -p {} -o comm=; taskset -acp {}' | " - "grep -vE '" + '|'.join(exclude_processes_pid) + "' | " - "awk '{printf \"%s %s\\n\", $2, $6}'") - result = module.run_command(cmd, use_unsafe_shell=True) - if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))): - err = "Unable to determine current processes" - module.fail_json(msg=err) - else: - current_processes = str(result[1]).split('\n') - - pmd_threads_processes = [] - # Gets processes associated to PMD and corresponding threads usage - # proceses pid and threads information - cmd = ("ps -T -o spid,comm -p $(pidof ovs-vswitchd) |grep '\ endIP: - errors.append("Lower IP bound ({}) must be smaller than upper " - "bound ({})".format(startIP, endIP)) - - if min_size < 0: - errors.append('Argument min_size({}) must be greater than 0' - .format(min_size)) - - return errors - - -def check_IP_range(start, end, min_size): - '''Compare IP range with minimum size''' - - errors = [] - iprange = netaddr.IPRange(start, end) - - if len(iprange) < min_size: - errors = [ - 'The IP range {} - {} contains {} addresses.'.format( - start, end, len(iprange)), - 'This might not be enough for the deployment or later scaling.' 
- ] - - return errors - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - start = module.params.get('start') - end = module.params.get('end') - min_size = module.params.get('min_size') - - # Check arguments - errors = check_arguments(start, end, min_size) - if errors: - module.fail_json(msg='\n'.join(errors)) - else: - # Check IP range - range_errors = check_IP_range(start, end, min_size) - - if range_errors: - module.fail_json(msg='\n'.join(range_errors)) - else: - module.exit_json(msg='success') - - -if __name__ == '__main__': - main() diff --git a/library/network_environment.py b/library/network_environment.py deleted file mode 100644 index 5f167b750..000000000 --- a/library/network_environment.py +++ /dev/null @@ -1,536 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""network_environment module -Used by the network_environment validation. -""" -import collections -import collections.abc -import itertools -import netaddr -import os.path - -from ansible.module_utils.basic import AnsibleModule -# from os_net_config import validator - -from tripleo_validations.utils import get_nested -from yaml import safe_load as yaml_safe_load - - -DOCUMENTATION = ''' ---- -module: network_environment -short_description: Validate networking templates -description: - - Performs networking-related checks on a set of TripleO templates - - Used by the network_environment role. 
- - Owned by the DFG Networking -options: - netenv_path: - required: true - description: - - The path of the base network environment file - type: str - plan_env_path: - required: true - description: - - The path of the plan environment file - type: str - ip_pools_path: - required: true - description: - - The path of the IP pools network environment file - type: str - template_files: - required: true - description: - - A list of template files and contents - type: list -author: - - "Tomas Sedovic" - - "Martin André" - - "Florian Fuchs" -''' - -EXAMPLES = ''' -- hosts: webservers - tasks: - - name: Check the Network environment - network_environment: - netenv_path: environments/network-environment.yaml - template_files: "{{ lookup('tht') }}" - plan_env_path: plan-environment.yaml - ip_pools_path: environments/ips-from-pool-all.yaml -''' - - -def open_network_environment_files(netenv_path, template_files): - errors = [] - - try: - network_data = yaml_safe_load(template_files[netenv_path]) - except IOError as e: - return ({}, {}, ["Can't open network environment file '{}': {}" - .format(netenv_path, e)]) - nic_configs = [] - resource_registry = network_data.get('resource_registry', {}) - for nic_name, relative_path in iter(resource_registry.items()): - if nic_name.endswith("Net::SoftwareConfig"): - nic_config_path = os.path.normpath( - os.path.join(os.path.dirname(netenv_path), relative_path)) - try: - nic_configs.append(( - nic_name, nic_config_path, - yaml_safe_load(template_files[nic_config_path]))) - except IOError as e: - errors.append( - "Can't open the resource '{}' reference file '{}': {}" - .format(nic_name, nic_config_path, e)) - - return (network_data, nic_configs, errors) - - -def validate(netenv_path, template_files): - network_data, nic_configs, errors = open_network_environment_files( - netenv_path, template_files) - errors.extend(validate_network_environment(network_data, nic_configs)) - return errors - - -def validate_network_environment(network_data, nic_configs): - errors = [] - - cidrinfo = {} - poolsinfo = {} - vlaninfo = {} - staticipinfo = {} - parameter_defaults = network_data.get('parameter_defaults', {}) - - for item, data in parameter_defaults.items(): - if item.endswith('NetCidr'): - cidrinfo[item] = data - elif item.endswith('AllocationPools'): - poolsinfo[item] = data - elif item.endswith('NetworkVlanID'): - vlaninfo[item] = data - elif item.endswith('IPs'): - staticipinfo[item] = data - - for nic_config_name, nic_config_path, nic_config in nic_configs: - errors.extend(check_nic_configs(nic_config_path, nic_config)) - - errors.extend(check_cidr_overlap(cidrinfo.values())) - errors.extend( - check_allocation_pools_pairing( - network_data.get('parameter_defaults', {}), poolsinfo)) - errors.extend(check_static_ip_pool_collision(staticipinfo, poolsinfo)) - errors.extend(check_vlan_ids(vlaninfo)) - errors.extend(check_static_ip_in_cidr(cidrinfo, staticipinfo)) - errors.extend(duplicate_static_ips(staticipinfo)) - - return errors - - -def check_nic_configs(path, nic_data): - errors = [] - - if not isinstance(nic_data, collections.abc.Mapping): - return ["The nic_data parameter must be a dictionary."] - - # Look though every resources bridges and make sure there is only a single - # bond per bridge and only 1 interface per bridge if there are no bonds. 
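# The checks below enforce the following rules for every bridge of type
# 'ovs_bridge' (summarized here for clarity):
#   * at most one bond (ovs_bond or ovs_dpdk_bond) per bridge;
#   * when there is no bond, at most one interface per bridge;
#   * at least one interface or one bond must be present per bridge;
#   * the bridge name 'br-int' is rejected, as it is reserved for the
#     integration bridge.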
- resources = nic_data.get('resources') - if not isinstance(resources, collections.abc.Mapping): - return ["The nic_data must contain the 'resources' key and it must be " - "a dictionary."] - for name, resource in iter(resources.items()): - try: - nested_path = [ - ('properties', collections.abc.Mapping, 'dictionary'), - ('config', collections.abc.Mapping, 'dictionary'), - ('network_config', collections.abc.Iterable, 'list'), - ] - bridges = get_nested(resource, name, nested_path) - except ValueError as e: - errors.append('{}'.format(e)) - continue - # Not all resources contain a network config: - if not bridges: - continue - - # TODO(flfuchs) 2018-11-22: Rocky introduced a couple of - # template changes using a schema that cant't be found in - # os-net-config's schema.yaml file yet, so the validator fails - # even though the templates are working. Until this is done, we - # skip the schema validation. - # Validate the os_net_config object against the schema. - # v_errors = validator.validate_config(bridges, path) - # errors.extend(v_errors) - # if len(v_errors) > 0: - # continue - - # If we get here, the nic config file conforms to the schema and - # there is no more need to check for existence and type of - # properties. - for bridge in bridges: - if bridge['type'] == 'ovs_bridge': - bond_count = 0 - interface_count = 0 - for bridge_member in bridge['members']: - if bridge_member['type'] in ('ovs_bond', 'ovs_dpdk_bond'): - bond_count += 1 - elif bridge_member['type'] == 'interface': - interface_count += 1 - else: - pass - - if bond_count >= 2: - errors.append( - 'Invalid bonding: There are >= 2 bonds for' - ' bridge {} of resource {} in {}'.format( - bridge['name'], name, path)) - if bond_count == 0 and interface_count > 1: - errors.append( - 'Invalid interface: When not using a bond, ' - 'there can only be 1 interface for bridge {} ' - 'of resource {} in {}'.format( - bridge['name'], name, path)) - if bond_count == 0 and interface_count == 0: - errors.append( - 'Invalid config: There must be at least ' - '1 interface or 1 bond for bridge {}' - 'of resource {} in {}'.format( - bridge['name'], name, path)) - # check if the bridge has name br-int - if bridge['name'] == 'br-int': - errors.append( - 'br-int bridge name is reserved for ' - 'integration bridge') - return errors - - -def check_cidr_overlap(networks): - errors = [] - objs = [] - if not isinstance(networks, collections.abc.Iterable): - return ["The argument must be iterable."] - for x in networks: - try: - objs.append(netaddr.IPNetwork(x)) - except (ValueError, TypeError): - errors.append('Invalid network: {}'.format(x)) - - for net1, net2 in itertools.combinations(objs, 2): - if (net1 in net2 or net2 in net1): - errors.append( - 'Networks {} and {} overlap.' - .format(net1, net2)) - return errors - - -def check_allocation_pools_pairing(filedata, pools): - if not isinstance(filedata, collections.abc.Mapping): - return ["The `filedata` argument must be a dictionary."] - if not isinstance(pools, collections.abc.Mapping): - return ["The `pools` argument must be a dictionary."] - errors = [] - for poolitem, pooldata in iter(pools.items()): - pool_objs = [] - if not isinstance(pooldata, collections.abc.Iterable): - errors.append('The IP ranges in {} must form a list.' 
- .format(poolitem)) - continue - - # Check IP range format - for dict_range in pooldata: - try: - pool_objs.append(netaddr.IPRange( - netaddr.IPAddress(dict_range['start']), - netaddr.IPAddress(dict_range['end']))) - except (ValueError, TypeError, KeyError, netaddr.AddrFormatError): - errors.append("Invalid format of the IP range in {}: {}" - .format(poolitem, dict_range)) - continue - - # Check if CIDR is specified and IP network is valid - subnet_item = poolitem.split('AllocationPools')[0] + 'NetCidr' - try: - network = filedata[subnet_item] - subnet_obj = netaddr.IPNetwork(network) - except KeyError: - errors.append('The {} CIDR is not specified for {}.' - .format(subnet_item, poolitem)) - continue - except (netaddr.AddrFormatError, ValueError): - errors.append('Invalid IP network: {}'.format(network)) - continue - - for address_range in pool_objs: - # Check if pool is included in subnet - if address_range not in subnet_obj: - errors.append('Allocation pool {} {} outside of subnet' - ' {}: {}'.format(poolitem, - pooldata, - subnet_item, - subnet_obj)) - break - - # Check for overlapping pools - for other in [r for r in pool_objs if r != address_range]: - if address_range.first in other or address_range.last in other: - errors.append('Some pools in {} are overlapping.'.format( - poolitem)) - break - - return errors - - -def check_static_ip_pool_collision(static_ips, pools): - """Statically defined IP address must not conflict with allocation pools. - - The allocation pools come as a dict of items in the following format: - - InternalApiAllocationPools: [ - {'start': '10.35.191.150', 'end': '10.35.191.240'} - ] - - The static IP addresses are dicts of: - - ComputeIPs: { - 'internal_api': ['10.35.191.100', etc.], - 'storage': ['192.168.100.45', etc.] - } - """ - if not isinstance(static_ips, collections.abc.Mapping): - return ["The static IPs input must be a dictionary."] - if not isinstance(pools, collections.abc.Mapping): - return ["The Pools input must be a dictionary."] - errors = [] - pool_ranges = [] - for pool_name, ranges in iter(pools.items()): - if not isinstance(ranges, collections.abc.Iterable): - errors.append("The IP ranges in {} must form a list." - .format(pool_name)) - continue - for allocation_range in ranges: - try: - ip_range = netaddr.IPRange(allocation_range['start'], - allocation_range['end']) - except (netaddr.AddrFormatError, TypeError, KeyError): - errors.append("Invalid format of the IP range in {}: {}" - .format(pool_name, allocation_range)) - continue - pool_ranges.append((pool_name, ip_range)) - - for role, services in iter(static_ips.items()): - if not isinstance(services, collections.abc.Mapping): - errors.append("The {} must be a dictionary.".format(role)) - continue - for service, ips in iter(services.items()): - if not isinstance(ips, collections.abc.Iterable): - errors.append("The {}->{} must be an array." - .format(role, service)) - continue - for ip in ips: - try: - ip = netaddr.IPAddress(ip) - except netaddr.AddrFormatError as e: - errors.append("{} is not a valid IP address: {}" - .format(ip, e)) - continue - ranges_with_conflict = ranges_conflicting_with_ip( - ip, pool_ranges) - if ranges_with_conflict: - for pool_name, ip_range in ranges_with_conflict: - msg = "IP address {} from {}[{}] is in the {} pool." - errors.append(msg.format( - ip, role, service, pool_name)) - return errors - - -def ranges_conflicting_with_ip(ip_address, ip_ranges): - """Check for all conflicts of the IP address conflicts. 
- - This takes a single IP address and a list of `(pool_name, - netenv.IPRange)`s. - - We return all ranges that the IP address conflicts with. This is to - improve the final error messages. - """ - return [(pool_name, ip_range) for (pool_name, ip_range) in ip_ranges - if ip_address in ip_range] - - -def check_vlan_ids(vlans): - if not isinstance(vlans, collections.abc.Mapping): - return ["The vlans parameter must be a dictionary."] - errors = [] - invertdict = {} - for k, v in vlans.items(): - if v not in invertdict: - invertdict[v] = k - else: - errors.append('Vlan ID {} ({}) already exists in {}'.format( - v, k, invertdict[v])) - return errors - - -def check_static_ip_in_cidr(networks, static_ips): - """Check all static IP addresses are from the corresponding network range. - - """ - if not isinstance(networks, collections.abc.Mapping): - return ["The networks argument must be a dictionary."] - if not isinstance(static_ips, collections.abc.Mapping): - return ["The static_ips argument must be a dictionary."] - errors = [] - network_ranges = {} - # TODO(shadower): Refactor this so networks are always valid and already - # converted to `netaddr.IPNetwork` here. Will be useful in the other - # checks. - for name, cidr in iter(networks.items()): - try: - network_ranges[name] = netaddr.IPNetwork(cidr) - except (netaddr.AddrFormatError, ValueError): - errors.append("Network '{}' has an invalid CIDR: '{}'" - .format(name, cidr)) - for role, services in iter(static_ips.items()): - if not isinstance(services, collections.abc.Mapping): - errors.append("The {} must be a dictionary.".format(role)) - continue - for service, ips in iter(services.items()): - range_name = service.title().replace('_', '') + 'NetCidr' - if range_name in network_ranges: - if not isinstance(ips, collections.abc.Iterable): - errors.append("The {}->{} must be a list." - .format(role, service)) - continue - for ip in ips: - if ip not in network_ranges[range_name]: - errors.append( - "The IP address {} is outside of the {} range: {}" - .format(ip, range_name, networks[range_name])) - else: - errors.append( - "Service '{}' does not have a " - "corresponding range: '{}'.".format(service, range_name)) - return errors - - -def duplicate_static_ips(static_ips): - errors = [] - if not isinstance(static_ips, collections.abc.Mapping): - return ["The static_ips argument must be a dictionary."] - ipset = collections.defaultdict(list) - # TODO(shadower): we're doing this netsted loop multiple times. Turn it - # into a generator or something. - for role, services in iter(static_ips.items()): - if not isinstance(services, collections.abc.Mapping): - errors.append("The {} must be a dictionary.".format(role)) - continue - for service, ips in iter(services.items()): - if not isinstance(ips, collections.abc.Iterable): - errors.append("The {}->{} must be a list." - .format(role, service)) - continue - for ip in ips: - ipset[ip].append((role, service)) - for ip, sources in ipset.items(): - if len(sources) > 1: - msg = "The {} IP address was entered multiple times: {}." 
- formatted_sources = ("{}[{}]" - .format(*source) for source in sources) - errors.append(msg.format(ip, ", ".join(formatted_sources))) - return errors - - -def validate_node_pool_size(plan_env_path, ip_pools_path, template_files): - warnings = [] - plan_env = yaml_safe_load(template_files[plan_env_path]) - ip_pools = yaml_safe_load(template_files[ip_pools_path]) - - param_defaults = plan_env.get('parameter_defaults') - node_counts = { - param.replace('Count', ''): count - for param, count in param_defaults.items() - if param.endswith('Count') and count > 0 - } - - # TODO(akrivoka): There are a lot of inconsistency issues with parameter - # naming in THT :( Once those issues are fixed, this block should be - # removed. - if 'ObjectStorage' in node_counts: - node_counts['SwiftStorage'] = node_counts['ObjectStorage'] - del node_counts['ObjectStorage'] - - param_defaults = ip_pools.get('parameter_defaults') - role_pools = { - param.replace('IPs', ''): pool - for param, pool in param_defaults.items() - if param.endswith('IPs') and param.replace('IPs', '') in node_counts - } - - for role, node_count in iter(node_counts.items()): - try: - pools = role_pools[role] - except KeyError: - warnings.append( - "Found {} node(s) assigned to '{}' role, but no static IP " - "pools defined.".format(node_count, role) - ) - continue - for pool_name, pool_ips in pools.items(): - if len(pool_ips) < node_count: - warnings.append( - "Insufficient number of IPs in '{}' pool for '{}' role: " - "{} IP(s) found in pool, but {} nodes assigned to role." - .format(pool_name, role, len(pool_ips), node_count) - ) - - return warnings - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - netenv_path = module.params.get('netenv_path') - plan_env_path = module.params.get('plan_env_path') - ip_pools_path = module.params.get('ip_pools_path') - template_files = {name: content[1] for (name, content) in - module.params.get('template_files')} - - errors = validate(netenv_path, template_files) - warnings = [] - - try: - warnings = validate_node_pool_size(plan_env_path, ip_pools_path, - template_files) - except IOError as e: - errors.append("{}".format(e)) - - if errors: - module.fail_json(msg="\n".join(errors)) - else: - module.exit_json( - msg="No errors found for the '{}' file.".format(netenv_path), - warnings=warnings, - ) - - -if __name__ == '__main__': - main() diff --git a/library/ovs_dpdk_pmd_cpus_check.py b/library/ovs_dpdk_pmd_cpus_check.py deleted file mode 100644 index ee4282f9a..000000000 --- a/library/ovs_dpdk_pmd_cpus_check.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python - -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""ovs_dpdk_pmd_cpus_check module -Used by the ovs_dpdk_pmd validation. 
-""" -from ansible.module_utils.basic import AnsibleModule -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: OVS DPDK PMD CPU's check -short_description: Run PMD CPU's from all the NUMA nodes check -description: - - Run PMD CPU's from all the NUMA nodes check - - Used by ovs_dpdk_pmd validation. - - Owned by the DFG:NFV Integration -options: - pmd_cpu_mask: - required: true - description: - - The pmd cpu mask value - type: str -author: "Jaganathan Palanisamy" -''' - -EXAMPLES = ''' -- hosts: ComputeOvsDpdk - vars: - pmd_cpu_mask: "1010010000000001" - tasks: - - name: Run PMD CPU's check - become: true - ovs_dpdk_pmd_cpus_check: pmd_cpu_mask={{ pmad_cpu_mask }} -''' - - -def get_cpus_list_from_mask_value(mask_val): - """Gets CPU's list from the mask value - - :return: comma separated CPU's list - """ - mask_val = mask_val.strip('\\"') - cpus_list = [] - int_mask_val = int(mask_val, 16) - bin_mask_val = bin(int_mask_val) - bin_mask_val = str(bin_mask_val).replace('0b', '') - rev_bin_mask_val = bin_mask_val[::-1] - thread = 0 - for bin_val in rev_bin_mask_val: - if bin_val == '1': - cpus_list.append(thread) - thread += 1 - return ','.join([str(cpu) for cpu in cpus_list]) - - -# Gets the distinct numa nodes, physical and logical cpus info -# for all numa nodes. -def get_nodes_cores_info(module): - dict_cpus = {} - numa_nodes = [] - cmd = "sudo lscpu -p=NODE,CORE,CPU" - result = module.run_command(cmd) - if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))): - err = "Unable to determine physical and logical cpus." - module.fail_json(msg=err) - else: - for line in str(result[1]).split('\n'): - if (line.strip(' ') and not line.strip(' ').startswith('#')): - cpu_info = line.strip(' ').split(',') - try: - node = int(cpu_info[0]) - cpu = int(cpu_info[1]) - thread = int(cpu_info[2]) - if node not in numa_nodes: - numa_nodes.append(node) - # CPU and NUMA node together forms a unique value, - # as cpu is specific to a NUMA node - # NUMA node id and cpu id tuple is used for unique key - key = node, cpu - if key in dict_cpus: - if thread not in dict_cpus[key]['thread_siblings']: - dict_cpus[key]['thread_siblings'].append(thread) - else: - cpu_item = {} - cpu_item['thread_siblings'] = [thread] - cpu_item['cpu'] = cpu - cpu_item['numa_node'] = node - dict_cpus[key] = cpu_item - except (IndexError, ValueError): - err = "Unable to determine physical and logical cpus." - module.fail_json(msg=err) - return (numa_nodes, list(dict_cpus.values())) - - -def validate_pmd_cpus(module, pmd_cpu_mask): - pmd_cpus = get_cpus_list_from_mask_value(pmd_cpu_mask) - pmd_cpu_list = pmd_cpus.split(',') - cpus = [] - numa_nodes = [] - numa_nodes, cpus = get_nodes_cores_info(module) - valid_numa_nodes = {} - for numa_node in numa_nodes: - valid_numa_nodes[str(numa_node)] = False - for cpu in cpus: - if cpu['numa_node'] == numa_node: - if True in [int(pmd_cpu) in cpu['thread_siblings'] - for pmd_cpu in pmd_cpu_list]: - valid_numa_nodes[str(numa_node)] = True - invalid_numa_nodes = [node for node, val in valid_numa_nodes.items() - if not val] - if invalid_numa_nodes: - failed_nodes = ','.join(invalid_numa_nodes) - err = ("Invalid PMD CPU's, cpu is not used from " - "NUMA node(s): %(node)s." 
% {'node': failed_nodes}) - module.fail_json(msg=err) - else: - module.exit_json(msg="PMD CPU's configured correctly.") - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - validate_pmd_cpus(module, - module.params.get('pmd_cpu_mask')) - - -if __name__ == '__main__': - main() diff --git a/library/pacemaker.py b/library/pacemaker.py deleted file mode 100644 index c78d79bcb..000000000 --- a/library/pacemaker.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""pacemaker module -Used by the pacemaker_status validation. -""" -from ansible.module_utils.basic import AnsibleModule -from xml.etree import ElementTree -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: pacemaker -short_description: Return status from a pacemaker status XML -description: - - Return status from a pacemaker status XML - - Used by the pacemaker_status validation. - - Owned by the DF PIDONE -options: - status: - required: true - description: - - pacemaker status XML - type: str -author: "Tomas Sedovic" -''' - -EXAMPLES = ''' -- hosts: webservers - tasks: - - name: Get pacemaker status - become: true - command: pcs status xml - register: pcs_status - - name: Check pacemaker status - pacemaker: status="{{ pcs_status.stdout }}" -''' - - -def parse_pcs_status(pcs_status_xml): - root = ElementTree.fromstring(pcs_status_xml) - result = { - 'failures': root.findall('failures/failure'), - } - return result - - -def format_failure(failure): - return ("Task {task} {op_key} failed on node {node}. Exit reason: " - "'{exitreason}'. Exit status: '{exitstatus}'." - .format(task=failure.get('task'), - op_key=failure.get('op_key'), - node=failure.get('node'), - exitreason=failure.get('exitreason'), - exitstatus=failure.get('exitstatus'))) - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - pcs_status = parse_pcs_status(module.params.get('status')) - failures = pcs_status['failures'] - failed = len(failures) > 0 - if failed: - msg = "The pacemaker status contains some failed actions:\n" +\ - '\n'.join((format_failure(failure) for failure in failures)) - else: - msg = "The pacemaker status reports no errors." - module.exit_json( - failed=failed, - msg=msg, - ) - - -if __name__ == '__main__': - main() diff --git a/library/pmd_threads_siblings_check.py b/library/pmd_threads_siblings_check.py deleted file mode 100644 index fe03025f8..000000000 --- a/library/pmd_threads_siblings_check.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python - -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""pmd_threads_siblings_check module -Used by the `check_nfv_ovsdpdk_zero_packet_loss` role. -""" - -from ansible.module_utils.basic import AnsibleModule -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: OVS DPDK PMD CPU's check -short_description: Run PMD CPU's from all the NUMA nodes check -description: - - Run PMD CPU's from all the NUMA nodes check - - Owned by the DFG:NFV Integration -options: - pmd_cpu_mask: - required: true - description: - - The pmd cpu mask value - type: str -author: "Jaganathan Palanisamy" -''' - -EXAMPLES = ''' -# Call this module from TripleO Ansible Validations - -- name: Run PMD CPU's check - become: true - ovs_dpdk_pmd_cpus_check: - pmd_cpu_mask: 1010010000000001 - register: pmd_cpus -''' - - -def get_cpus_list_from_mask_value(mask_val): - """Gets CPU's list from the mask value - - :return: comma separated CPU's list - """ - - mask_val = mask_val.strip('\\"') - cpus_list = [] - int_mask_val = int(mask_val, 16) - bin_mask_val = bin(int_mask_val) - bin_mask_val = str(bin_mask_val).replace('0b', '') - rev_bin_mask_val = bin_mask_val[::-1] - thread = 0 - for bin_val in rev_bin_mask_val: - if bin_val == '1': - cpus_list.append(thread) - thread += 1 - return [str(cpu) for cpu in sorted(cpus_list)] - - -def get_nodes_cores_info(module): - """Gets the distinct numa nodes, physical and logical cpus info - for all numa nodes. - """ - - dict_cpus = {} - numa_nodes = [] - # Gets NUMA nodes core informations - cmd = "lscpu -p=NODE,CORE,CPU" - result = module.run_command(cmd) - if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))): - err = "Unable to determine physical and logical cpus." - module.fail_json(msg=err) - else: - for line in str(result[1]).split('\n'): - if (line.strip(' ') and not line.strip(' ').startswith('#')): - cpu_info = line.strip(' ').split(',') - try: - node = int(cpu_info[0]) - cpu = int(cpu_info[1]) - thread = int(cpu_info[2]) - if node not in numa_nodes: - numa_nodes.append(node) - # CPU and NUMA node together forms a unique value, - # as cpu is specific to a NUMA node - # NUMA node id and cpu id tuple is used for unique key - key = node, cpu - if key in dict_cpus: - if thread not in dict_cpus[key]['thread_siblings']: - dict_cpus[key]['thread_siblings'].append(thread) - else: - cpu_item = {} - cpu_item['thread_siblings'] = [thread] - cpu_item['cpu'] = cpu - cpu_item['numa_node'] = node - dict_cpus[key] = cpu_item - except (IndexError, ValueError): - err = "Unable to determine physical and logical cpus." 
- module.fail_json(msg=err) - return (numa_nodes, list(dict_cpus.values())) - - -def get_thread_siblings(module, numa, cpu): - # Gets the threads siblings information - cmd = ("cat /sys/devices/system/node/node"+str(numa)+"/" - "cpu"+str(cpu)+"/topology/thread_siblings_list") - result = module.run_command(cmd) - if (not result or (result[0] != 0) or not (str(result[1]).strip(' '))): - err = "Unable to determine thread sibling" - module.fail_json(msg=err) - else: - core = str(result[1]).split(',') - return [cpu for cpu in core if not str(cpu) in core] - - -def validate_pmd_cpus(module, pmd_cpu_mask): - result = dict( - changed=False, - pmd_cpus_list=[], - message='' - ) - pmd_cpu_list = get_cpus_list_from_mask_value(pmd_cpu_mask) - cpus = [] - numa_nodes = [] - numa_nodes, cpus = get_nodes_cores_info(module) - - # Ensure all thread siblings included in PMD CPU's - pmd_cpu_numa = 0 - for pmd_cpu in pmd_cpu_list: - core_id = -1 - for cpu in cpus: - if (int(pmd_cpu) in cpu['thread_siblings']): - pmd_cpu_numa = cpu['numa_node'] - core_id = cpu['cpu'] - break - if core_id == -1: - err = "Invalid PMD CPU: " + pmd_cpu - module.fail_json(msg=err) - thread_siblings = get_thread_siblings(module, pmd_cpu_numa, core_id) - for thread in thread_siblings: - if thread not in pmd_cpu_list: - err = "Invalid PMD CPU's, thread siblings missed" - module.fail_json(msg=err) - - # Ensure PMD CPU's used for all NUMA nodes - valid_numa_nodes = {} - for numa_node in numa_nodes: - valid_numa_nodes[str(numa_node)] = False - for cpu in cpus: - if cpu['numa_node'] == numa_node: - if True in [int(pmd_cpu) in cpu['thread_siblings'] - for pmd_cpu in pmd_cpu_list]: - valid_numa_nodes[str(numa_node)] = True - invalid_numa_nodes = [node for node, val in valid_numa_nodes.items() - if not val] - if invalid_numa_nodes: - failed_nodes = ','.join(invalid_numa_nodes) - err = ("Invalid PMD CPU's, cpu is not used from " - "NUMA node(s): %(node)s." % {'node': failed_nodes}) - module.fail_json(msg=err) - else: - pmd_cpus = ','.join(pmd_cpu_list) - result['message'] = "PMD CPU's configured correctly: " + pmd_cpus - result['pmd_cpus_list'] = pmd_cpu_list - module.exit_json(**result) - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - validate_pmd_cpus(module, - module.params.get('pmd_cpu_mask')) - - -if __name__ == '__main__': - main() diff --git a/library/switch_vlans.py b/library/switch_vlans.py deleted file mode 100644 index 169db651e..000000000 --- a/library/switch_vlans.py +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""switch_vlans module -Used by the switch_vlans validation. 
-""" -import collections.abc as collectionsAbc - -import os.path - -from ansible.module_utils.basic import AnsibleModule # noqa -from tripleo_validations import utils -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: switch_vlans -short_description: Check configured VLANs against Ironic introspection data -description: - - Validate that the VLANs defined in TripleO nic config files are in the - LLDP info received from network switches. The LLDP data is stored in - Ironic introspection data per interface. - - Used by the switch_vlans validation - - Owned by the DF Networking -options: - path: - required: true - description: - - The path of the base network environment file - type: str - template_files: - required: true - description: - - A list of template files and contents - type: list - introspection_data: - required: true - description: - - Introspection data for all nodes - type: list - -author: "Bob Fournier" -''' - -EXAMPLES = ''' -- hosts: undercloud - tasks: - - name: Check that switch vlans are present if used in nic-config files - network_environment: - path: environments/network-environment.yaml - template_files: "{{ lookup('tht') }}" - introspection_data: "{{ lookup('introspection_data', - auth_url=auth_url.value, password=password.value) }}" -''' - - -def open_network_environment_files(netenv_path, template_files): - errors = [] - - try: - network_data = yaml_safe_load(template_files[netenv_path]) - except IOError as e: - return ({}, {}, ["Can't open network environment file '{}': {}" - .format(netenv_path, e)]) - nic_configs = [] - resource_registry = network_data.get('resource_registry', {}) - for nic_name, relative_path in iter(resource_registry.items()): - if nic_name.endswith("Net::SoftwareConfig"): - nic_config_path = os.path.normpath( - os.path.join(os.path.dirname(netenv_path), relative_path)) - try: - nic_configs.append(( - nic_name, nic_config_path, - yaml_safe_load(template_files[nic_config_path]))) - except IOError as e: - errors.append( - "Can't open the resource '{}' reference file '{}': {}" - .format(nic_name, nic_config_path, e)) - - return (network_data, nic_configs, errors) - - -def validate_switch_vlans(netenv_path, template_files, introspection_data): - """Check if VLAN exists in introspection data for node - - :param netenv_path: path to network_environment file - :param template_files: template files being checked - :param introspection_data: introspection data for all node - :returns warnings: List of warning messages - errors: List of error messages - """ - - network_data, nic_configs, errors =\ - open_network_environment_files(netenv_path, template_files) - warnings = [] - vlans_in_templates = False - - # Store VLAN IDs from network-environment.yaml. 
- vlaninfo = {} - parameter_defaults = network_data.get('parameter_defaults', {}) - for item in parameter_defaults.keys(): - if item.endswith('NetworkVlanID'): - vlaninfo[item] = parameter_defaults[item] - - # Get the VLANs which are actually used in nic configs - for nic_config_name, nic_config_path, nic_config in nic_configs: - resources = nic_config.get('resources') - if not isinstance(nic_config, collectionsAbc.Mapping): - return [], ["nic_config parameter must be a dictionary."] - - if not isinstance(resources, collectionsAbc.Mapping): - return [], ["The nic_data must contain the 'resources' key " - "and it must be a dictionary."] - for name, resource in iter(resources.items()): - try: - nested_path = [ - ('properties', collectionsAbc.Mapping, 'dictionary'), - ('config', collectionsAbc.Mapping, 'dictionary'), - ('network_config', collectionsAbc.Iterable, 'list'), - ] - nw_config = utils.get_nested(resource, name, nested_path) - except ValueError as e: - errors.append('{}'.format(e)) - continue - # Not all resources contain a network config: - if not nw_config: - continue - - for elem in nw_config: - # VLANs will be in bridge - if elem['type'] == 'ovs_bridge' \ - or elem['type'] == 'linux_bridge': - for member in elem['members']: - if member['type'] != 'vlan': - continue - - vlans_in_templates = True - vlan_id_str = member['vlan_id'] - vlan_id = vlaninfo[vlan_id_str['get_param']] - - msg, result = vlan_exists_on_switch( - vlan_id, introspection_data) - warnings.extend(msg) - - if not msg and result is False: - errors.append( - "VLAN ID {} not on attached switch".format( - vlan_id)) - - if not vlans_in_templates: - warnings.append("No VLANs are used on templates files") - - return set(warnings), set(errors) - - -def vlan_exists_on_switch(vlan_id, introspection_data): - """Check if VLAN exists in introspection data - - :param vlan_id: VLAN id - :param introspection_data: introspection data for all nodes - :returns msg: Error or warning message - result: boolean indicating if VLAN was found - """ - - for node, data in introspection_data.items(): - node_valid_lldp = False - - all_interfaces = data.get('all_interfaces', []) - - # Check lldp data on all interfaces for this vlan ID - for interface in all_interfaces: - lldp_proc = all_interfaces[interface].get('lldp_processed', {}) - - if lldp_proc: - node_valid_lldp = True - - switch_vlans = lldp_proc.get('switch_port_vlans', []) - if switch_vlans: - if any(vlan['id'] == vlan_id for vlan in switch_vlans): - return [], True - - # If no lldp data for node return warning, not possible to locate vlan - if not node_valid_lldp: - node_uuid = node.split("-", 1)[1] - return ["LLDP data not available for node {}".format(node_uuid)],\ - False - - return [], False # could not find VLAN ID - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - netenv_path = module.params.get('path') - template_files = {name: content[1] for (name, content) in - module.params.get('template_files')} - introspection_data = {name: content for (name, content) in - module.params.get('introspection_data')} - - warnings, errors = validate_switch_vlans(netenv_path, template_files, - introspection_data) - - if errors: - module.fail_json(msg="\n".join(errors)) - elif warnings: - module.exit_json(warnings="\n".join(warnings)) - else: - module.exit_json(msg="All VLANs configured on attached switches") - - -if __name__ == '__main__': - main() diff --git a/library/tripleo_haproxy_conf.py b/library/tripleo_haproxy_conf.py deleted file 
mode 100644 index 926e7ff23..000000000 --- a/library/tripleo_haproxy_conf.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from ansible.module_utils.basic import AnsibleModule -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: tripleo_haproxy_conf -short_description: Gather the HAProxy config -description: - - Gather the HAProxy config - - Owned by DFG:PIDONE -options: - path: - required: true - description: - - file path to the config file - type: str -author: "Tomas Sedovic" -''' - -EXAMPLES = ''' -- hosts: webservers - tasks: - - name: Gather the HAProxy config - tripleo_haproxy_conf: path=/etc/haproxy/haproxy.cfg -''' - - -def generic_ini_style_conf_parser(file_path, section_regex, option_regex): - """ - ConfigParser chokes on both mariadb and haproxy files. Luckily, they have - a syntax approaching ini config file so they are relatively easy to parse. - This generic ini style config parser is not perfect, as it can ignore some - valid options, but it is good enough for our use case. - - :return: parsed haproxy configuration - :rtype: dict - """ - config = {} - current_section = None - with open(file_path) as config_file: - for line in config_file: - match_section = re.match(section_regex, line) - if match_section: - current_section = match_section.group(1) - config[current_section] = {} - match_option = re.match(option_regex, line) - if match_option and current_section: - option = re.sub(r'\s+', ' ', match_option.group(1)) - config[current_section][option] = match_option.group(2) - return config - - -def parse_haproxy_conf(file_path): - """ - Provides section and option regex to the parser. - Essentially a wrapper for generic_ini_style_conf_parser. - Provides no extra functionality but simplifies the call, somewhat. - - :return: parsed haproxy configuration - :rtype: dict - - ..note:: - - Both regular expressions bellow are used for parsing haproxy.cfg, - which has a rather vague syntax. 
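For illustration, a minimal self-contained sketch of the same section/option regex approach applied to a small haproxy-style snippet. The sample config and the in-memory loop are illustrative; the module itself reads the file given by 'path' and uses the regexes defined just below.

import re

SECTION_RE = r'^(\w+)'
OPTION_RE = r'^(?:\s+)(\w+(?:\s+\w+)*?)\s+([\w/]*)$'

sample = (
    "global\n"
    "    maxconn 4000\n"
    "defaults\n"
    "    mode tcp\n"
)

config = {}
current_section = None
for line in sample.splitlines():
    match_section = re.match(SECTION_RE, line)
    if match_section:
        current_section = match_section.group(1)
        config[current_section] = {}
    match_option = re.match(OPTION_RE, line)
    if match_option and current_section:
        option = re.sub(r'\s+', ' ', match_option.group(1))
        config[current_section][option] = match_option.group(2)

# Expected: {'global': {'maxconn': '4000'}, 'defaults': {'mode': 'tcp'}}
print(config)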
The regexes are supposed to match all - possibilities described here, and some more: - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/load_balancer_administration/ch-haproxy-setup-vsa - """ - section_regex = r'^(\w+)' - option_regex = r'^(?:\s+)(\w+(?:\s+\w+)*?)\s+([\w/]*)$' - return generic_ini_style_conf_parser(file_path, section_regex, - option_regex) - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - haproxy_conf_path = module.params.get('path') - - try: - config = parse_haproxy_conf(haproxy_conf_path) - except IOError: - module.fail_json(msg="Could not open the haproxy conf file at: '%s'" % - haproxy_conf_path) - - module.exit_json(changed=False, ansible_facts={u'haproxy_conf': config}) - - -if __name__ == '__main__': - main() diff --git a/library/verify_profiles.py b/library/verify_profiles.py deleted file mode 100644 index 3b8552671..000000000 --- a/library/verify_profiles.py +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/env python -# Copyright 2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""verify_profiles module -Used by the collect_flavors_and_verify_profiles validation. -""" -from ansible.module_utils.basic import AnsibleModule # noqa -from yaml import safe_load as yaml_safe_load - -DOCUMENTATION = ''' ---- -module: verify_profiles -short_description: Check that profiles have enough nodes -description: - - Validate that the profiles assigned have enough nodes available. - - Used by the collect_flavors_and_verify_profiles - - Owned jointly by DFG Compute & DFG Hardware Provisioning -options: - nodes: - required: true - description: - - A list of nodes - type: list - flavors: - required: true - description: - - A dictionary of flavors - type: dict - -author: "Brad P. 
Crochet" -''' - -EXAMPLES = ''' -- hosts: undercloud - tasks: - - name: Collect the flavors - check_flavors: - roles_info: "{{ lookup('roles_info', wantlist=True) }}" - flavors: "{{ lookup('nova_flavors', wantlist=True) }}" - register: flavor_result - - name: Check the profiles - verify_profiles: - nodes: "{{ lookup('ironic_nodes', wantlist=True) }}" - flavors: flavor_result.flavors -''' - - -def _capabilities_to_dict(caps): - """Convert the Node's capabilities into a dictionary.""" - if not caps: - return {} - if isinstance(caps, dict): - return caps - return dict([key.split(':', 1) for key in caps.split(',')]) - - -def _node_get_capabilities(node): - """Get node capabilities.""" - return _capabilities_to_dict( - node['properties'].get('capabilities')) - - -def verify_profiles(nodes, flavors): - """Check if roles info is correct - - :param nodes: list of nodes - :param flavors: dictionary of flavors - :returns warnings: List of warning messages - errors: List of error messages - """ - errors = [] - warnings = [] - - bm_nodes = {node['uuid']: node for node in nodes - if node['provision_state'] in ('available', 'active')} - - free_node_caps = {uu: _node_get_capabilities(node) - for uu, node in bm_nodes.items()} - - profile_flavor_used = False - for flavor_name, (flavor, scale) in flavors.items(): - if not scale: - continue - - profile = None - keys = flavor.get('keys') - if keys: - profile = keys.get('capabilities:profile') - - if not profile and len(flavors) > 1: - message = ('Error: The {flavor} flavor has no profile ' - 'associated.\n' - 'Recommendation: assign a profile with openstack ' - 'flavor set --property ' - '"capabilities:profile"="PROFILE_NAME" {flavor}') - - errors.append(message.format(flavor=flavor_name)) - continue - - profile_flavor_used = True - - assigned_nodes = [uu for uu, caps in free_node_caps.items() - if caps.get('profile') == profile] - required_count = int(scale) - len(assigned_nodes) - - if required_count < 0: - warnings.append('%d nodes with profile %s won\'t be used ' - 'for deployment now' % (-required_count, - profile)) - required_count = 0 - - for uu in assigned_nodes: - free_node_caps.pop(uu) - - if required_count > 0: - message = ('Error: only {total} of {scale} requested ironic ' - 'nodes are tagged to profile {profile} (for flavor ' - '{flavor}).\n' - 'Recommendation: tag more nodes using openstack ' - 'baremetal node set --property "capabilities=' - 'profile:{profile}" ') - errors.append(message.format(total=scale - required_count, - scale=scale, - profile=profile, - flavor=flavor_name)) - - nodes_without_profile = [uu for uu, caps in free_node_caps.items() - if not caps.get('profile')] - if nodes_without_profile and profile_flavor_used: - warnings.append("There are %d ironic nodes with no profile that " - "will not be used: %s" % ( - len(nodes_without_profile), - ', '.join(nodes_without_profile))) - - return warnings, errors - - -def main(): - module = AnsibleModule( - argument_spec=yaml_safe_load(DOCUMENTATION)['options'] - ) - - nodes = module.params.get('nodes') - flavors = module.params.get('flavors') - - warnings, errors = verify_profiles(nodes, - flavors) - - if errors: - module.fail_json(msg="\n".join(errors)) - elif warnings: - module.exit_json(warnings="\n".join(warnings)) - else: - module.exit_json( - msg="No profile errors detected.") - - -if __name__ == '__main__': - main() diff --git a/lookup_plugins/glance_images.py b/lookup_plugins/glance_images.py deleted file mode 100644 index 0160c820f..000000000 --- a/lookup_plugins/glance_images.py 
+++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ansible.plugins.lookup import LookupBase - -from glanceclient.exc import HTTPNotFound - -from tripleo_validations import utils - - -DOCUMENTATION = """ - lookup: glance_images - description: Retrieve image information from Glance - long_description: - - Load image information using the Glance API and search by attribute. - options: - _terms: - description: Optional filter attribute and filter value - author: Brad P. Crochet -""" - -EXAMPLES = """ - - name: Get all image ids from glance - debug: - msg: | - {{ lookup('glance_images', wantlist=True) | - map(attribute='id') | join(', ') }} - - - name: Get image with name 'overcloud-full' - debug: - msg: | - {{ lookup('glance_images', 'name', ['overcloud-full'], - wantlist=True) | map(attribute='name') }}" -""" - -RETURN = """ -_raw: - description: A Python list with results from the API call. -""" - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns server information from nova.""" - glance = utils.get_glance_client(variables) - - images = [] - if len(terms) > 0: - # Look up images by name - if terms[0] == 'name': - for value in terms[1]: - try: - search_data = {terms[0]: value} - images.extend( - [image for image in - glance.images.list(filters=search_data)] - ) - except HTTPNotFound: - pass - else: - images = [image for image in glance.images.list()] - - return images diff --git a/lookup_plugins/introspection_data.py b/lookup_plugins/introspection_data.py deleted file mode 100644 index d16d72c22..000000000 --- a/lookup_plugins/introspection_data.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ansible.plugins.lookup import LookupBase -from ironic_inspector_client import ClientError -from ironic_inspector_client import ClientV1 -from ironicclient import client - -from tripleo_validations.utils import get_auth_session - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns Ironic Inspector introspection data. - - Access swift and return introspection data for all nodes. - - :returns a list of tuples, one for each node. 
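A hypothetical sketch of consuming the (node_name, data) tuples this lookup returns, for example to walk the per-interface LLDP data that the switch_vlans module above inspects. The node name and interface layout below are illustrative.

# Stand-in for the list returned by this lookup plugin; the nested layout
# mirrors what switch_vlans reads:
# all_interfaces -> lldp_processed -> switch_port_vlans.
introspection_items = [
    ('baremetal-0', {
        'all_interfaces': {
            'eth0': {'lldp_processed': {'switch_port_vlans': [{'id': 20}]}},
        },
    }),
]

for node_name, data in introspection_items:
    for iface, iface_data in data.get('all_interfaces', {}).items():
        vlans = iface_data.get('lldp_processed', {}).get('switch_port_vlans', [])
        print(node_name, iface, [vlan['id'] for vlan in vlans])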
- """ - - session = get_auth_session({ - 'auth_url': kwargs.get('auth_url'), - 'password': kwargs.get('password'), - 'username': 'ironic', - 'project_name': 'service', - }) - ironic = client.get_client(1, session=session) - ironic_inspector = ClientV1(session=session) - - ret = [] - for node in ironic.node.list(): - try: - ret.append((node.name, ironic_inspector.get_data(node.uuid))) - except ClientError: - pass - - return ret diff --git a/lookup_plugins/ironic_nodes.py b/lookup_plugins/ironic_nodes.py deleted file mode 100644 index b744e84c2..000000000 --- a/lookup_plugins/ironic_nodes.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ansible.plugins.lookup import LookupBase - -from tripleo_validations import utils - -DOCUMENTATION = """ - lookup: ironic_nodes - description: Retrieve node information from Ironic - long_description: - - Load node information using the Ironic API - options: - _terms: - description: Optional filter attribute and filter value - author: Florian Fuchs -""" - -EXAMPLES = """ - - name: Get all nodes from Ironic - debug: - msg: "{{ lookup('ironic_nodes', wantlist=True) }}" - - - name: Lookup all nodes that match a list of IDs - debug: - msg: | - {{ lookup('ironic_nodes', 'id', - ['c8a1c7b8-d6b1-408b-b4a6-5881efdfd65c', - '4bea536d-9d37-432f-a77e-7c65f1cf3acb'], - wantlist=True) }}" - - - name: Get all nodes for a set of instance UUIDs - debug: - msg: | - {{ lookup('ironic_nodes', 'instance_uuid', - ['1691a1c7-9974-4bcc-a07a-5dec7fc04da0', - '07f2435d-820c-46ce-9097-cf8a7282293e'], - wantlist=True) }}" - - - name: Get all nodes marked as 'associated' - debug: - msg: | - {{ lookup('ironic_nodes', 'associated', - wantlist=True) }}" - - - name: Get nodes in provision state, and not associated or in maintenance - debug: - msg: | - {{ lookup('ironic_nodes', 'provision_state', - ['available', 'inspect'], wantlist=True)}} -""" - -RETURN = """ -_raw: - description: A Python list with results from the API call. 
-""" - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns node information from ironic.""" - ironic = utils.get_ironic_client(variables) - - if len(terms) > 0: - if terms[0] == 'id': - nodes = [ironic.node.get(id) for id in terms[1]] - return [utils.filtered(node) for node in nodes] - elif terms[0] == 'instance_uuid': - nodes = [ironic.node.get_by_instance_uuid(uuid) - for uuid in terms[1]] - return [utils.filtered(node) for node in nodes] - elif terms[0] == 'associated': - nodes = ironic.node.list(associated=True, detail=True) - return [utils.filtered(node) for node in nodes] - elif terms[0] == 'provision_state': - nodes = [] - for term in terms[1]: - nodes.extend(ironic.node.list( - provision_state=term, - associated=False, - maintenance=False, - detail=True)) - return [utils.filtered(node) for node in nodes] - else: - return [utils.filtered(node) - for node in ironic.node.list(detail=True)] diff --git a/lookup_plugins/nova_flavors.py b/lookup_plugins/nova_flavors.py deleted file mode 100644 index e52cd3f67..000000000 --- a/lookup_plugins/nova_flavors.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ansible.plugins.lookup import LookupBase - -from tripleo_validations import utils - - -DOCUMENTATION = """ - lookup: nova_flavors - description: Retrieve flavor information from Nova - long_description: - - Load flavor information using the Nova API. - author: Brad P. Crochet -""" - -EXAMPLES = """ - - name: Get all flavors from nova - debug: - msg: | - {{ lookup('nova_flavors') }} -""" - -RETURN = """ -_raw: - description: A Python list with results from the API call. -""" - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns server information from nova.""" - nova = utils.get_nova_client(variables) - return {f.name: {'name': f.name, - 'id': f.id, - 'disk': f.disk, - 'ram': f.ram, - 'vcpus': f.vcpus, - 'ephemeral': f.ephemeral, - 'swap': f.swap, - 'is_public': f.is_public, - 'rxtx_factor': f.rxtx_factor, - 'keys': f.get_keys()} - for f in nova.flavors.list()} diff --git a/lookup_plugins/nova_hypervisor_statistics.py b/lookup_plugins/nova_hypervisor_statistics.py deleted file mode 100644 index 6f2e3e774..000000000 --- a/lookup_plugins/nova_hypervisor_statistics.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from ansible.plugins.lookup import LookupBase - -from tripleo_validations import utils - - -DOCUMENTATION = """ - lookup: nova_hypervisor_statistics - description: Retrieve hypervisor statistic information from Nova - long_description: - - Load hypervisor statistics using the Nova API. - author: Brad P. Crochet -""" - -EXAMPLES = """ - - name: Get all hypervisor statistics from nova - debug: - msg: | - {{ lookup('nova_hypervisor_statistics') }} -""" - -RETURN = """ -_raw: - description: A Python list with results from the API call. -""" - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns server information from nova.""" - nova = utils.get_nova_client(variables) - statistics = nova.hypervisor_stats.statistics() - return utils.filtered(statistics) diff --git a/lookup_plugins/nova_servers.py b/lookup_plugins/nova_servers.py deleted file mode 100644 index 647c0f2ad..000000000 --- a/lookup_plugins/nova_servers.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ansible.plugins.lookup import LookupBase -from novaclient.exceptions import NotFound - -from tripleo_validations import utils - - -DOCUMENTATION = """ - lookup: nova_servers - description: Retrieve server information from Nova - long_description: - - Load server information using the Nova API and search by attribute. - options: - _terms: - description: Optional filter attribute and filter value - author: Florian Fuchs -""" - -EXAMPLES = """ - - name: Get all server ids from nova - debug: - msg: | - {{ lookup('nova_servers', wantlist=True) | - map(attribute='id') | join(', ') }} - - - name: Lookup all server ids from nova with a certain ctlplane IP - debug: - msg: | - {{ lookup('nova_servers', 'ip', 'ctlplane', ['192.168.24.15'], - wantlist=True) | map(attribute='id') | join(', ') }}" - - - name: Get server with name 'overcloud-controller-0' - debug: - msg: | - {{ lookup('nova_servers', 'name', ['overcloud-controller-0'], - wantlist=True) | map(attribute='name') }}" -""" - -RETURN = """ -_raw: - description: A Python list with results from the API call. -""" - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns server information from nova.""" - nova = utils.get_nova_client(variables) - - servers = [] - if len(terms) > 0: - # Look up servers by network and IP - if terms[0] == 'ip': - for ip in terms[2]: - try: - servers.append(nova.servers.find( - networks={terms[1]: [ip]})) - except NotFound: - pass - # Look up servers by attribute - else: - for value in terms[1]: - try: - search_data = {terms[0]: value} - servers.append(nova.servers.find(**search_data)) - except NotFound: - pass - else: - servers = nova.servers.list() - - # For each server only return properties whose value - # can be properly serialized. 
(Things like - # novaclient.v2.servers.ServerManager will make - # Ansible return the whole result as a string.) - return [utils.filtered(server) for server in servers] diff --git a/lookup_plugins/roles_info.py b/lookup_plugins/roles_info.py deleted file mode 100644 index 1c6dbfe57..000000000 --- a/lookup_plugins/roles_info.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import yaml - -from ansible.plugins.lookup import LookupBase - -from tripleo_validations import utils - - -DOCUMENTATION = """ - lookup: roles_info - description: Retrieve role information from Heat and Swift. - long_description: - - Load role information using the Heat API. - options: - _terms: - description: Optional filter attribute and filter value - author: Brad P. Crochet -""" - -EXAMPLES = """ - - name: Get all role info from Heat and Swift - debug: - msg: | - {{ lookup('roles_info', wantlist=True) }} - -""" - -RETURN = """ -_raw: - description: A Python list with results from the API call. -""" - - -class LookupModule(LookupBase): - def _get_object_yaml(self, swiftclient, container, obj): - obj_ret = swiftclient.get_object(container=container, obj=obj) - return yaml.safe_load(obj_ret[1]) - - def run(self, terms, variables=None, **kwargs): - """Returns server information from nova.""" - swift = utils.get_swift_client(variables) - plan = variables.get('plan') - plan_env = self._get_object_yaml(swift, plan, 'plan-environment.yaml') - roles_data = self._get_object_yaml(swift, plan, 'roles_data.yaml') - - def default_role_data(role): - return { - 'name': role['name'], - 'count': role.get('CountDefault', 0), - 'flavor': role.get('FlavorDefault', 'baremetal') - } - - roles = list(map(default_role_data, roles_data)) - - parameter_defaults = plan_env.get('parameter_defaults', {}) - - for role in roles: - new_count = parameter_defaults.get("%sCount" % role['name']) - if new_count: - role['count'] = new_count - - new_flavor = parameter_defaults.get("Overcloud%sFlavor" % - role['name']) - if new_flavor: - role['flavor'] = new_flavor - - return roles diff --git a/lookup_plugins/stack_resources.py b/lookup_plugins/stack_resources.py deleted file mode 100644 index 9f26fa95f..000000000 --- a/lookup_plugins/stack_resources.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
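As a rough, self-contained sketch of the merge the roles_info lookup above performs between roles_data.yaml defaults and the plan's parameter_defaults. The sample role names, counts and flavors are made up for illustration.

roles_data = [
    {'name': 'Controller', 'CountDefault': 1},
    {'name': 'Compute', 'CountDefault': 1, 'FlavorDefault': 'compute'},
]
parameter_defaults = {'ComputeCount': 3, 'OvercloudComputeFlavor': 'compute-dpdk'}

roles = [{'name': role['name'],
          'count': role.get('CountDefault', 0),
          'flavor': role.get('FlavorDefault', 'baremetal')}
         for role in roles_data]

for role in roles:
    new_count = parameter_defaults.get('%sCount' % role['name'])
    if new_count:
        role['count'] = new_count
    new_flavor = parameter_defaults.get('Overcloud%sFlavor' % role['name'])
    if new_flavor:
        role['flavor'] = new_flavor

# Controller keeps count 1 / flavor 'baremetal';
# Compute becomes count 3 / flavor 'compute-dpdk'.
print(roles)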
- - -from ansible.plugins.lookup import LookupBase - - -from tripleo_validations import utils - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns the current plan's stack resources. - - :return: A list of dicts - """ - ret = [] - heat = utils.get_heat_client(variables) - resource_list = heat.resources.list(variables['plan']) - for resource in resource_list: - ret.append(dict( - resource_name=resource.resource_name, - resource_status=resource.resource_status, - logical_resource_id=resource.logical_resource_id, - links=resource.links, - creation_time=resource.creation_time, - resource_status_reason=resource.resource_status_reason, - updated_time=resource.updated_time, - required_by=resource.required_by, - physical_resource_id=resource.physical_resource_id, - resource_type=resource.resource_type - )) - return ret diff --git a/lookup_plugins/tht.py b/lookup_plugins/tht.py deleted file mode 100644 index f0921fe0d..000000000 --- a/lookup_plugins/tht.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from ansible.plugins.lookup import LookupBase - -from tripleo_validations import utils - - -EXCLUDED_EXT = ( - '.pyc', - '.pyo', -) - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """Returns the current plan files. - - Returns a list of tuples, one for each plan file, - containing the template path and the template content. 
- """ - ret = [] - swift = utils.get_swift_client(variables) - container = swift.get_container(variables['plan']) - for item in container[1]: - obj = swift.get_object(variables['plan'], item['name']) - try: - obj = (obj[0], obj[1].decode('utf-8')) - except AttributeError: - pass - - if os.path.splitext(item['name'])[-1] not in EXCLUDED_EXT: - ret.append((item['name'], obj)) - - return ret diff --git a/molecule-requirements.txt b/molecule-requirements.txt deleted file mode 100644 index fad7d7a14..000000000 --- a/molecule-requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -# this is required for the molecule jobs -ansi2html>=1.6.0 # LGPLv3+ -ansible-core<2.12 # GPLv3+ -jinja2>=2.8.0 # BSD-3-Clause -molecule>=3.3.1,<4 # MIT -molecule-podman>=0.3.0 # MIT -pytest>=6.2.4 # MIT -pytest-cov>=2.12.1 # MIT -pytest-html>=3.1.1 # MPL 2.0 -pytest-xdist>=2.3.0 # MIT -selinux>=0.2.1 # MIT -lxml>=4.6.3 # BSD diff --git a/playbooks/ceph-dependencies-installed.yaml b/playbooks/ceph-dependencies-installed.yaml deleted file mode 100644 index f9e4e720c..000000000 --- a/playbooks/ceph-dependencies-installed.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: allovercloud - vars: - metadata: - name: Check if Ceph dependencies are installed - description: | - Prints a message if a ceph dependency is missed - groups: - - pre-deployment - - pre-ceph - categories: - - storage - products: - - tripleo - - ceph - fail_without_deps: true - tripleo_delegate_to: "{{ groups['allovercloud'] | default([]) }}" - packages: - - lvm2 - tasks: - - include_role: - name: ceph - tasks_from: "ceph-dependencies-installed" diff --git a/playbooks/ceph-health.yaml b/playbooks/ceph-health.yaml deleted file mode 100644 index 5fc9ca6b5..000000000 --- a/playbooks/ceph-health.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: ceph_mon - vars: - metadata: - name: Check the status of the ceph cluster - description: | - Uses `ceph health` to check if cluster is in HEALTH_WARN state - and prints a debug message. - groups: - - backup-and-restore - - post-deployment - - post-ceph - categories: - - storage - products: - - tripleo - - ceph - tripleo_delegate_to: "{{ groups['ceph_mon'] | default([]) }}" - osd_percentage_min: 0 - tasks: - - include_role: - name: ceph - tasks_from: ceph-health diff --git a/playbooks/ceph-pg.yaml b/playbooks/ceph-pg.yaml deleted file mode 100644 index 6a131a346..000000000 --- a/playbooks/ceph-pg.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Validate requested Ceph Placement Groups - description: | - In Ceph Lumionus and newer the Placement Group overdose protection check - (https://ceph.com/community/new-luminous-pg-overdose-protection) is - executed by Ceph before a pool is created. If the check does not pass, - then the pool is not created. When TripleO deploys Ceph it triggers - ceph-ansible which creates the pools that OpenStack needs. This - validation runs the same check that the overdose protection uses to - determine if the user should update their CephPools, PG count, or number - of OSD. Without this check a deployer may have to wait until after Ceph - is running but before the pools are created to realize the deployment - will fail. 
- groups: [] - categories: - - storage - products: - - tripleo - - ceph - tasks: - - include_role: - name: ceph - tasks_from: ceph-pg diff --git a/playbooks/check-for-dangling-images.yaml b/playbooks/check-for-dangling-images.yaml deleted file mode 100644 index 14a405484..000000000 --- a/playbooks/check-for-dangling-images.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: undercloud - gather_facts: false - vars: - metadata: - name: Check for podman dangling images - description: | - Make sure we do not have any dangling images before an update. - groups: - - pre-update - categories: - - container - products: - - tripleo - check_for_dangling_images_debug: false - roles: - - check_for_dangling_images diff --git a/playbooks/check-kernel-version.yaml b/playbooks/check-kernel-version.yaml deleted file mode 100644 index 2bf6fc65f..000000000 --- a/playbooks/check-kernel-version.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: all - gather_facts: true - vars: - metadata: - name: Verify the kernel version contains el8 in its name - description: | - This validation checks that the kernel has been upgraded by checking - that el8 is in the kernel (uname -r) version string. - groups: - - post-deployment - categories: - - os - - kernel - - system - products: - - tripleo - roles: - - check_kernel_version diff --git a/playbooks/check-manila-policy-file.yaml b/playbooks/check-manila-policy-file.yaml deleted file mode 100644 index 56caf6a9c..000000000 --- a/playbooks/check-manila-policy-file.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Verify that the manila policy file is present - description: | - This validation checks that the policy file exists in manila's configuration folder inside the container. - groups: - - post-deployment - categories: - - controller - products: - - tripleo - - manila - manilas_policy_file: "/var/lib/config-data/puppet-generated/manila/etc/manila/policy.yaml" - roles: - - check_manila_policy_file diff --git a/playbooks/check-network-gateway.yaml b/playbooks/check-network-gateway.yaml deleted file mode 100644 index e5e270920..000000000 --- a/playbooks/check-network-gateway.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check network_gateway on the provisioning network - description: | - If `gateway` in `undercloud.conf` is different from `local_ip`, - verify that the gateway exists and is reachable. - groups: - - pre-introspection - categories: - - networking - - undercloud-config - products: - - tripleo - roles: - - check_network_gateway diff --git a/playbooks/check-ntp-reachability.yaml b/playbooks/check-ntp-reachability.yaml deleted file mode 100644 index 54bae4bf3..000000000 --- a/playbooks/check-ntp-reachability.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- hosts: undercloud - gather_facts: false - vars: - metadata: - name: Check if the NTP pool is reachable for the time synchronization - description: | - Time synchronization is essential for a successful deployment. The chrony - daemon connects to the NTP pool to receive time data; if the connection - is unsuccessful, the deployment fails. Running this validation is - recommended if the deployment fails on the time synchronization - task. It prints the NTP servers chrony is trying to access.
- groups: - - pre-deployment - categories: - - undercloud - products: - - tripleo - roles: - - check_ntp_reachability diff --git a/playbooks/check-reboot.yaml b/playbooks/check-reboot.yaml deleted file mode 100644 index 165b49f41..000000000 --- a/playbooks/check-reboot.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: all - gather_facts: true - vars: - metadata: - name: Verify if a reboot is necessary with yum-utils - description: | - This validation checks if a reboot is necessary with yum-utils, - using the option: needs-restarting -r - groups: - - pre-upgrade - - pre-update - - post-upgrade - - post-update - categories: - - os - - kernel - - system - products: - - tripleo - roles: - - check_reboot diff --git a/playbooks/check-rhsm-version.yaml b/playbooks/check-rhsm-version.yaml deleted file mode 100644 index a53bf9045..000000000 --- a/playbooks/check-rhsm-version.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- hosts: all - gather_facts: false - vars: - metadata: - name: Verify that the RHSM version configured is the right one - description: | - This validation checks the RHSM version of the target node via - subscription-manager release --show and makes sure that the - release matches the value set in check_rhsm_version_expected. - groups: - - pre-undercloud-upgrade - - pre-overcloud-upgrade - categories: - - rhel - - os - - system - - subscription_management - products: - - tripleo - - rhsm - check_rhsm_version_debug: false - roles: - - check_rhsm_version diff --git a/playbooks/check-uc-hostname.yaml b/playbooks/check-uc-hostname.yaml deleted file mode 100644 index 3369ccfe2..000000000 --- a/playbooks/check-uc-hostname.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - gather_facts: false - vars: - metadata: - name: Check DockerInsecureRegistryAddress parameter points to correct UC hostname - description: | - This validation checks that the DockerInsecureRegistryAddress parameter - points to the right UC hostname. - groups: - - pre-overcloud-upgrade - categories: - - container - - undercloud-config - products: - - tripleo - check_uc_hostname_debug: false - roles: - - check_uc_hostname diff --git a/playbooks/check-undercloud-conf.yaml b/playbooks/check-undercloud-conf.yaml deleted file mode 100644 index e2c4faa2d..000000000 --- a/playbooks/check-undercloud-conf.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify undercloud.conf correctness - description: | - This validation verifies that no deprecated, invalid or - incompatible parameters are being passed in the Undercloud's - undercloud.conf configuration file. - groups: - - prep - - pre-undercloud-upgrade - categories: - - undercloud-config - products: - - tripleo - check_undercloud_conf_debug: false - roles: - - check_undercloud_conf diff --git a/playbooks/collect-flavors-and-verify-profiles.yaml b/playbooks/collect-flavors-and-verify-profiles.yaml deleted file mode 100644 index ff674f3c3..000000000 --- a/playbooks/collect-flavors-and-verify-profiles.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Collect and verify role flavors - description: | - This validation checks that the flavors assigned to roles exist and have the - correct capabilities set.
- groups: - - pre-upgrade - categories: - - compute - - baremetal - - provisioning - - undercloud - products: - - tripleo - roles: - - collect_flavors_and_verify_profiles diff --git a/playbooks/compute-tsx.yaml b/playbooks/compute-tsx.yaml deleted file mode 100644 index 13ba0423e..000000000 --- a/playbooks/compute-tsx.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- hosts: nova_libvirt - gather_facts: false - vars: - metadata: - name: RHEL8.x kernel flag for Compute nodes validation - description: | - RHEL-8.3 kernel disabled the Intel TSX (Transactional - Synchronization Extensions) feature by default as a preemptive - security measure, but it breaks live migration from RHEL-7.9 - (or even RHEL-8.1 or RHEL-8.2) to RHEL-8.3. - - Operators are expected to explicitly define the TSX flag in - their KernelArgs for the compute role to prevent live-migration - issues during the upgrade process. - - This also impacts upstream CentOS systems. - groups: - - pre-upgrade - - pre-system-upgrade - - pre-overcloud-prepare - - pre-overcloud-upgrade - - pre-overcloud-converge - - pre-update - - pre-update-prepare - - pre-update-run - - pre-update-converge - categories: - - os - - kernel - - system - - tsx - products: - - tripleo - - rhel - - centos - - intel - compute_tsx_debug: false - compute_tsx_warning: false - roles: - - compute_tsx diff --git a/playbooks/container-status.yaml b/playbooks/container-status.yaml deleted file mode 100644 index d515587cb..000000000 --- a/playbooks/container-status.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: undercloud, allovercloud - vars: - metadata: - name: Ensure container status - description: | - Detect failed containers and raise an error. - groups: - - backup-and-restore - - pre-upgrade - - pre-update - - post-deployment - - post-upgrade - categories: - - container - - undercloud - - overcloud - - undercloud-config - products: - - tripleo - roles: - - container_status diff --git a/playbooks/controller-token.yaml b/playbooks/controller-token.yaml deleted file mode 100644 index d10e096bc..000000000 --- a/playbooks/controller-token.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: - - undercloud - - "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Verify that keystone admin token is disabled - description: | - This validation checks that keystone admin token is disabled on both - undercloud and overcloud controller after deployment. - groups: - - post-deployment - categories: - - keystone - - identity - - undercloud - - controller - products: - - tripleo - keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf" - roles: - - controller_token diff --git a/playbooks/controller-ulimits.yaml b/playbooks/controller-ulimits.yaml deleted file mode 100644 index bcdd28c90..000000000 --- a/playbooks/controller-ulimits.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Check controller ulimits - description: | - This will check the ulimits of each controller. 
- groups: - - post-deployment - categories: - - os - - system - - resources - - overcloud - - controller - products: - - tripleo - nofiles_min: 1024 - nproc_min: 2048 - roles: - - controller_ulimits diff --git a/playbooks/ctlplane-ip-range.yaml b/playbooks/ctlplane-ip-range.yaml deleted file mode 100644 index a235c09df..000000000 --- a/playbooks/ctlplane-ip-range.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check the number of IP addresses available for the overcloud nodes - description: | - Verify that the number of IP addresses defined in `dhcp_start` and - `dhcp_end` fields in `undercloud.conf` is not too low. - groups: - - pre-introspection - categories: - - networking - - dhcp - - undercloud - products: - - tripleo - ctlplane_iprange_min_size: 20 - roles: - - ctlplane_ip_range diff --git a/playbooks/default-node-count.yaml b/playbooks/default-node-count.yaml deleted file mode 100644 index 02d42aaa4..000000000 --- a/playbooks/default-node-count.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify hypervisor statistics - description: | - This validation checks that the nodes and hypervisor statistics - add up. Validation requires system installation of tripleo-ansible package. - groups: - - pre-deployment - categories: - - compute - - roles - - baremetal - - undercloud - products: - - tripleo - roles: - - default_node_count diff --git a/playbooks/deprecated-services.yaml b/playbooks/deprecated-services.yaml deleted file mode 100644 index 76b521df7..000000000 --- a/playbooks/deprecated-services.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - gather_facts: false - vars: - metadata: - name: Check if roles data doesn't include deprecated services - description: | - Validate that the custom roles_data.yaml provided in the upgrade prepare step doesn't have any deprecated services. - This has been an issue among customers for a long time, which has been addressed by documentation. - However, we should have some way to alert the user that the roles_data they provide has deprecated services - before starting the upgrade procedure and getting an ugly error. - groups: - - pre-upgrade - categories: - - undercloud - products: - - tripleo - roles: - - deprecated_services diff --git a/playbooks/dhcp-introspection.yaml b/playbooks/dhcp-introspection.yaml deleted file mode 100644 index 62ed4d379..000000000 --- a/playbooks/dhcp-introspection.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: undercloud - become: true - vars: - metadata: - name: DHCP on the Introspection Network - description: | - An unexpected DHCP server on the network used for node - introspection can cause some nodes to not be inspected. - - This validations checks for the DHCP responses on the - interface specified in ironic-inspector.conf. - groups: - - pre-introspection - categories: - - networking - - dhcp - - dnsmasq - - undercloud - - baremetal - products: - - tripleo - tasks: - - include_role: - name: dhcp_validations - tasks_from: dhcp-introspection diff --git a/playbooks/dhcp-provisioning.yaml b/playbooks/dhcp-provisioning.yaml deleted file mode 100644 index f918b15d4..000000000 --- a/playbooks/dhcp-provisioning.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: undercloud - become: true - vars: - metadata: - name: DHCP on the Provisioning Network - description: | - An unexpected DHCP server on the provisioning network can - cause problems with deploying the Ironic nodes. 
- - This validation checks for DHCP responses on the undercloud's - provisioning interface (eth1 by default) and fails if there - are any. - groups: - - pre-deployment - categories: - - networking - - dhcp - - undercloud - - undercloud-config - products: - - tripleo - tasks: - - include_role: - name: dhcp_validations - tasks_from: dhcp-provisioning diff --git a/playbooks/fips-enabled.yaml b/playbooks/fips-enabled.yaml deleted file mode 100644 index 9983092cc..000000000 --- a/playbooks/fips-enabled.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: all - gather_facts: false - vars: - metadata: - name: Confirm that undercloud has fips enabled - description: | - Check if the undercloud is ready to deploy an environment - using fips. - groups: - - prep - - post-deployment - categories: - - security - - fips - products: - - tripleo - roles: - - fips_enabled diff --git a/playbooks/frr-status.yaml b/playbooks/frr-status.yaml deleted file mode 100644 index 64d7804e4..000000000 --- a/playbooks/frr-status.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: all - vars: - metadata: - name: FRR Daemons Status Check - description: | - Runs 'show watchfrr' and checks for any non-operational daemon. - - A failed status post-deployment indicates at least one enabled FRR - daemon is not operational. - groups: - - post-deployment - categories: - - container - - frrouting - - iprouting - - undercloud - - overcloud - products: - - tripleo - roles: - - frr_status diff --git a/playbooks/healthcheck-service-status.yaml b/playbooks/healthcheck-service-status.yaml deleted file mode 100644 index 8daf1a86d..000000000 --- a/playbooks/healthcheck-service-status.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- hosts: undercloud, allovercloud - vars: - metadata: - name: Healthcheck systemd services Check - description: | - Check for failed healthcheck systemd services. - groups: - - backup-and-restore - - post-deployment - categories: - - os - - system - - systemd - - healthcheck - - services - - undercloud - - overcloud - products: - - tripleo - retries_number: 1 - delay_number: 1 - inflight_healthcheck_services: [] - roles: - - healthcheck_service_status diff --git a/playbooks/image-serve.yaml b/playbooks/image-serve.yaml deleted file mode 100644 index afe1aadf5..000000000 --- a/playbooks/image-serve.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify image-serve service is working and answering - description: | - Ensures image-serve vhost is configured and httpd is running. - groups: - - backup-and-restore - - pre-upgrade - - post-deployment - - post-upgrade - categories: - - httpd - - container - - registry - - undercloud - products: - - tripleo - roles: - - image_serve diff --git a/playbooks/ironic-boot-configuration.yaml b/playbooks/ironic-boot-configuration.yaml deleted file mode 100644 index ecada86d2..000000000 --- a/playbooks/ironic-boot-configuration.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- hosts: localhost - vars: - metadata: - name: Check Ironic boot configuration - description: | - Check if baremetal boot configuration is correct. 
- groups: - - pre-deployment - - pre-upgrade - categories: - - baremetal - - provisioning - - undercloud - products: - - tripleo - roles: - - ironic_boot_configuration diff --git a/playbooks/mysql-open-files-limit.yaml b/playbooks/mysql-open-files-limit.yaml deleted file mode 100644 index 2fe391093..000000000 --- a/playbooks/mysql-open-files-limit.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- hosts: - - "{{ controller_rolename | default('Controller') }}" - - mysql - vars: - metadata: - name: MySQL Open Files Limit - description: | - Verify the `open-files-limit` configuration is high enough - - https://access.redhat.com/solutions/1598733 - groups: - - post-deployment - categories: - - database - - overcloud - - controller - products: - - tripleo - - mysql - - galera - min_open_files_limit: 16384 - roles: - - mysql_open_files_limit diff --git a/playbooks/network-environment.yaml b/playbooks/network-environment.yaml deleted file mode 100644 index f174f0df5..000000000 --- a/playbooks/network-environment.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Validate the Heat environment file for network configuration - description: | - This validates the network environment and nic-config files - that specify the overcloud network configuration and are stored - in the current plan's Swift container. - - The deployers are expected to write these files themselves as - described in the Network Isolation guide: - - http://tripleo.org/advanced_deployment/network_isolation.html - groups: - - pre-deployment - categories: - - networking - - undercloud - - network_isolation - - undercloud - products: - - tripleo - network_environment_path: environments/network-environment.yaml - plan_env_path: plan-environment.yaml - ip_pools_path: environments/ips-from-pool-all.yaml - roles: - - network_environment diff --git a/playbooks/neutron-sanity-check.yaml b/playbooks/neutron-sanity-check.yaml deleted file mode 100644 index 7d0ed83c2..000000000 --- a/playbooks/neutron-sanity-check.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - gather_facts: false - vars: - metadata: - name: Neutron Sanity Check - description: | - Run `neutron-sanity-check` on the controller nodes to find out - potential issues with Neutron's configuration. - - The tool expects all the configuration files that are passed - to the Neutron services. - groups: - - backup-and-restore - - post-deployment - categories: - - networking - - neutron - - overcloud - - controller - - undercloud-config - - container - products: - - tripleo - roles: - - neutron_sanity_check diff --git a/playbooks/nfv-ovsdpdk-zero-packet-loss-check.yaml b/playbooks/nfv-ovsdpdk-zero-packet-loss-check.yaml deleted file mode 100644 index 7fb265d0c..000000000 --- a/playbooks/nfv-ovsdpdk-zero-packet-loss-check.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: "{{ compute_ovsdpdk_rolename | default('ComputeOvsDpdk') }}" - vars: - metadata: - name: NFV OvS DPDK Zero Packet Loss Validations - description: | - Run `check-nfv-ovsdpdk-zero-packet-loss` on the compute ovsdpdk nodes - to find out the issues with NFV OvS Dpdk configuration. - The tool expects all the configuration files that are passed. 
- groups: - - post-deployment - categories: - - networking - - compute - - nfv - products: - - tripleo - roles: - - check_nfv_ovsdpdk_zero_packet_loss diff --git a/playbooks/node-disks.yaml b/playbooks/node-disks.yaml deleted file mode 100644 index a5a376409..000000000 --- a/playbooks/node-disks.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- hosts: localhost - vars: - metadata: - name: Check node disk configuration - description: | - Check node primary/root disk sizes and compare with flavor expectations. - Fitness of other disks can be checked manually using introspection. - groups: - - pre-deployment - categories: - - baremetal - - provisioning - - compute - - introspection - - undercloud - products: - - tripleo - roles: - - node_disks diff --git a/playbooks/node-health.yaml b/playbooks/node-health.yaml deleted file mode 100644 index d0af18a5f..000000000 --- a/playbooks/node-health.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- hosts: localhost - vars: - metadata: - name: Node health check - description: | - Check if all overcloud nodes can be connected to before starting a - scale-up or an upgrade. To execute correctly, this validation requires - cloud authentication details in the form of an accessible clouds.yaml file. - groups: - - pre-upgrade - categories: - - icmp - - compute - - baremetal - - provisioning - - undercloud - products: - - tripleo - roles: - - node_health diff --git a/playbooks/nova-event-callback.yaml b/playbooks/nova-event-callback.yaml deleted file mode 100644 index 10d4a1490..000000000 --- a/playbooks/nova-event-callback.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Nova Event Callback Configuration Check - description: | - This validation verifies that the Nova auth_url in neutron, - which is generally enabled by default, is configured correctly. - It checks the following files on the Overcloud Controller(s): - - - **/etc/neutron/neutron.conf**: [nova]/auth_url = 'http://nova_admin_auth_ip:5000' - groups: - - post-deployment - categories: - - networking - - compute - - neutron - products: - - tripleo - neutron_config_file: /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf - roles: - - nova_event_callback diff --git a/playbooks/nova-status.yaml b/playbooks/nova-status.yaml deleted file mode 100644 index 6fbf96b5f..000000000 --- a/playbooks/nova-status.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- hosts: nova_api - vars: - metadata: - name: Nova Status Upgrade Check - description: | - Performs a release-specific readiness check before restarting services with - new code. This command expects to have complete configuration and access to - databases and services within a cell. For example, this check may query the - Nova API database and one or more cell databases. It may also make requests - to other services such as the Placement REST API via the Keystone service - catalog. - - The nova-status upgrade check command has three standard return codes: - - 0 -> All upgrade readiness checks passed successfully and there is nothing to do. - 1 -> At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. - 2 -> There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade.
- groups: - - backup-and-restore - - pre-upgrade - categories: - - compute - products: - - tripleo - roles: - - nova_status diff --git a/playbooks/nova-svirt.yaml b/playbooks/nova-svirt.yaml deleted file mode 100644 index f7127f4eb..000000000 --- a/playbooks/nova-svirt.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: nova_libvirt - gather_facts: false - vars: - metadata: - name: Check nova sVirt support - description: | - Ensures all running VM are correctly protected with sVirt - groups: - - post-deployment - - post-upgrade - categories: - - compute - - selinux - - security - products: - - tripleo - roles: - - nova_svirt diff --git a/playbooks/openshift-hw-requirements.yaml b/playbooks/openshift-hw-requirements.yaml deleted file mode 100644 index 4fa8cdf9b..000000000 --- a/playbooks/openshift-hw-requirements.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check resources for an OpenShift on OpenStack deployment - description: | - Check if there are enough resources for an OpenShift deployment on top - of Openstack - deployment: - - Is there a flavor that meets the minimum requirements for a test environment? (4GB RAM, 40GB disk) - - Is there a flavor that meets the minimum requirements for a production environment? (16GB RAM, 40GB disk, 4 VCPUs) - - Are images named centos or rhel available? - - Are there sufficient compute resources available for a default setup? (1 Master node, 1 Infra node, 2 App nodes) - groups: - - openshift-on-openstack - categories: - - hardware - - provisioning - - compute - - storage - products: - - tripleo - min_total_ram_testing: 16384 # 4 per node - min_total_vcpus_testing: 4 # 1 per node - min_total_disk_testing: 93 # Master: 40, others: 17 per node - min_total_ram_prod: 40960 # Master: 16, others: 8 per node - min_total_vcpus_prod: 7 # Master: 4, others 1 per node - min_total_disk_prod: 93 # Master: 42, others: 17 per node - min_node_ram_testing: 4096 # Minimum ram per node for testing - min_node_disk_testing: 40 # Minimum disk per node for testing - min_node_ram_prod: 16384 # Minimum ram per node for production - min_node_disk_prod: 42 # Minimum disk per node for production - resource_reqs_testing: false - resource_reqs_prod: false - tasks: - - include_role: - name: openshift_on_openstack - tasks_from: openshift-hw-requirements.yaml diff --git a/playbooks/openshift-nw-requirements.yaml b/playbooks/openshift-nw-requirements.yaml deleted file mode 100644 index d5ea5f2ef..000000000 --- a/playbooks/openshift-nw-requirements.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check network requirements for an OpenShift on OpenStack deployment - description: | - Checks if an external network has been configured on the overcloud as - required for an OpenShift deployment on top of OpenStack. - groups: - - openshift-on-openstack - categories: - - networking - products: - - tripleo - tasks: - - include_role: - name: openshift_on_openstack - tasks_from: openshift-nw-requirements.yaml diff --git a/playbooks/openstack-endpoints.yaml b/playbooks/openstack-endpoints.yaml deleted file mode 100644 index 1d65a9df8..000000000 --- a/playbooks/openstack-endpoints.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check connectivity to various OpenStack services - # TODO: this could also check for undercloud endpoints - description: | - This validation gets the PublicVip address from the deployment and - tries to access Horizon and get a Keystone token. 
- groups: - - post-deployment - - pre-upgrade - - post-upgrade - - pre-update - - post-update - categories: - - ui - - connectivity - - undercloud - products: - - tripleo - roles: - - openstack_endpoints diff --git a/playbooks/oslo-config-validator.yaml b/playbooks/oslo-config-validator.yaml deleted file mode 100644 index da689d3d9..000000000 --- a/playbooks/oslo-config-validator.yaml +++ /dev/null @@ -1,88 +0,0 @@ ---- -- hosts: all - gather_facts: false - vars: - metadata: - name: Openstack services configuration validation - description: | - This role is intended to leverage the `oslo-config-validator` on each one - of the configuration files found on a deployment. The goal is to quickly - catch erroneous configurations. - - When called manually, it will also be possible to generate a report - returning all the differences between the current configuration and the - default configuration. - groups: - - backup-and-restore - - post-deployment - - post-system-upgrade - - post-update - - post-upgrade - categories: - - undercloud - - overcloud - - oslo - - compute - - storage - - baremetal - - networking - - identity - products: - - tripleo - - # Overriding simple namespaces config can be done on a per-namespace basis. - # We simply need to add a dict to this list of dictionaries. It will replace - # its corresponding entry from the default config. - # This is useful if we want to target the validation on a set of hosts - # that would otherwise trigger an error. - # oslo_config_validator_namespaces_config_override: - # - namespace: nova.conf - # invalid_settings: - # - section: filter_scheduler - # option: enabled_filters - # separator: "," - # value_list: - # - SomeBadFilter - - # Debug log level - oslo_config_validator_debug: false - - # Comparison report with current settings and default settings - oslo_config_validator_report: false - - # Returns all settings with possibly invalid or simply nonexistent values. - # oslo.config uses a typed system for the possible values of each setting. - # If a setting is not typed correctly, or does not exist, oslo-config-validator - # will trigger an error here. - oslo_config_validator_validation: true - - # Returns all settings with erroneous values. - # Developers have identified some settings that can't have certain values anymore. - # This will scan all the config files and return those settings with invalid values. - oslo_config_validator_invalid_settings: true - - # This is a temporary folder used when generating reports. - # It will be created and deleted if necessary on all the nodes. - oslo_config_validator_report_path: "/var/tmp/config_validator_report" - - # Whether or not the report should be archived into a single file after creation - oslo_config_validator_report_archive: true - - # This is the working folder when running validations.
It will be bind mounted - # on the validation containers - # It will be created and deleted if necessary on all the nodes - oslo_config_validator_work_path: "/var/lib/tripleo-config/oslo_config_validator" - - # List of services we want to validate the settings - oslo_config_validator_checked_services: - - nova - - cinder - - glance - - heat - - ironic - - placement - - neutron - - keystone - - roles: - - oslo_config_validator diff --git a/playbooks/overcloud-service-status.yaml b/playbooks/overcloud-service-status.yaml deleted file mode 100644 index 0eb40323f..000000000 --- a/playbooks/overcloud-service-status.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- hosts: Undercloud - gather_facts: false - vars: - metadata: - name: Verify overcloud services state after running a deployment or an update - description: | - An Ansible role to verify the Overcloud services states after a deployment - or an update. It checks the API /os-services and looks for deprecated - services (nova-consoleauth) or any down services. - groups: - - post-deployment - - post-upgrade - - post-overcloud-upgrade - - post-overcloud-converge - categories: - - api - - compute - - storage - - undercloud - - overcloud - products: - - tripleo - overcloud_service_status_debug: false - overcloud_service_api: - - nova - - cinderv3 - overcloud_deprecated_services: - nova: - - nova-consoleauth - roles: - - overcloud_service_status diff --git a/playbooks/ovs-dpdk-pmd-cpus-check.yaml b/playbooks/ovs-dpdk-pmd-cpus-check.yaml deleted file mode 100644 index 0e0610469..000000000 --- a/playbooks/ovs-dpdk-pmd-cpus-check.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- hosts: "{{ compute_ovsdpdk_rolename | default('ComputeOvsDpdk') }}" - vars: - metadata: - name: Validates OVS DPDK PMD cores from all NUMA nodes. - description: | - OVS DPDK PMD cpus must be provided from all NUMA nodes. - - A failed status post-deployment indicates PMD CPU list is not - configured correctly. - groups: - - post-deployment - categories: - - networking - - compute - - nfv - products: - - tripleo - roles: - - ovs_dpdk_pmd diff --git a/playbooks/pacemaker-status.yaml b/playbooks/pacemaker-status.yaml deleted file mode 100644 index 0a19b8b73..000000000 --- a/playbooks/pacemaker-status.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Check the status of the pacemaker cluster - description: | - This runs `pcs status` and checks for any failed actions. - - A failed status post-deployment indicates something is not configured - correctly. This should also be run before upgrade as the process will - likely fail with a cluster that's not completely healthy. - - This validation fails if pacemaker service is found failed or inactive. - groups: - - backup-and-restore - - post-deployment - categories: - - ha - - clustering - - system - - os - products: - - tripleo - - pacemaker - roles: - - pacemaker_status diff --git a/playbooks/package-version.yaml b/playbooks/package-version.yaml deleted file mode 100644 index 1852c11a2..000000000 --- a/playbooks/package-version.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- hosts: all - vars: - metadata: - name: package-version - description: | - Ensures we can access the wanted package version. Especially useful - when you are switching repositories, for instance during an upgrade. 
- groups: - - prep - - pre-deployment - - pre-upgrade - - pre-update - - pre-system-upgrade - - pre-undercloud-upgrade - - pre-overcloud-prepare - - pre-overcloud-upgrade - - pre-overcloud-converge - - pre-ceph - categories: - - rpm - - os - - system - - packaging - products: - - tripleo - package_version_debug: false - roles: - - package_version diff --git a/playbooks/rabbitmq-limits.yaml b/playbooks/rabbitmq-limits.yaml deleted file mode 100644 index 7b68530ed..000000000 --- a/playbooks/rabbitmq-limits.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Rabbitmq limits - description: | - Make sure the rabbitmq file descriptor limits are set to reasonable values. - groups: - - post-deployment - categories: - - container - - messaging - - broker - products: - - tripleo - - rabbitmq - min_fd_limit: 16384 - roles: - - rabbitmq_limits diff --git a/playbooks/repos.yaml b/playbooks/repos.yaml deleted file mode 100644 index 9400d6652..000000000 --- a/playbooks/repos.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- hosts: undercloud, allovercloud - gather_facts: true - vars: - metadata: - name: Check correctness of current repositories - description: | - Detect whether the repositories listed in `yum repolist` - can be connected to and whether there is at least one repo - configured. - - Detect if there are any unwanted repositories (such as EPEL) enabled. - groups: - - pre-upgrade - - pre-update - categories: - - os - - system - - packaging - - rpm - - repository - products: - - tripleo - roles: - - repos diff --git a/playbooks/stack-health.yaml b/playbooks/stack-health.yaml deleted file mode 100644 index 78635b632..000000000 --- a/playbooks/stack-health.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Stack Health Check - description: | - Check if all stack resources are in a 'COMPLETE' state before starting - an upgrade. - groups: - - pre-upgrade - - post-upgrade - categories: - - orchestration - - healthcheck - - stack - products: - - tripleo - roles: - - stack_health diff --git a/playbooks/stonith-exists.yaml b/playbooks/stonith-exists.yaml deleted file mode 100644 index d1a2e4b28..000000000 --- a/playbooks/stonith-exists.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: "{{ controller_rolename | default('Controller') }}" - vars: - metadata: - name: Validate stonith devices - description: | - Verify that stonith devices are configured for your OpenStack Platform HA cluster. - The TripleO installer does not configure stonith devices, because the hardware - configuration differs in each environment and requires different fence agents. - To learn how to configure fencing, please read - https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes - groups: - - post-deployment - categories: - - ha - - clustering - - os - - system - products: - - tripleo - - pacemaker - roles: - - stonith_exists diff --git a/playbooks/switch-vlans.yaml b/playbooks/switch-vlans.yaml deleted file mode 100644 index c6afa6c7d..000000000 --- a/playbooks/switch-vlans.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Compare switch port VLANs to VLANs in nic config - description: | - LLDP data received during introspection contains the configured VLANs - for each switch port attached to the nodes' interfaces. Compare the - VLAN IDs set on the switch port to those configured in nic config - files.
Since the mapping of roles to nodes isn't known prior to - deployment, this check can only check VLANs across all switch ports, - not on a particular switch port. - groups: - - pre-deployment - categories: - - networking - - vlans - - lldp - - introspection - - baremetal - - provisioning - products: - - tripleo - network_environment_path: environments/network-environment.yaml - ironic_inspector_conf_file: "/var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf" - roles: - - switch_vlans diff --git a/playbooks/system-encoding.yaml b/playbooks/system-encoding.yaml deleted file mode 100644 index 5d29f6e22..000000000 --- a/playbooks/system-encoding.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- hosts: all - vars: - metadata: - name: System encoding - description: | - Ensure the locale is Unicode - groups: - - pre-deployment - - pre-upgrade - - pre-update - categories: - - os - - system - - encoding - products: - - tripleo - system_encoding_debug: false - roles: - - system_encoding diff --git a/playbooks/tls-everywhere-post-deployment.yaml b/playbooks/tls-everywhere-post-deployment.yaml deleted file mode 100644 index dde141849..000000000 --- a/playbooks/tls-everywhere-post-deployment.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- hosts: allovercloud - vars: - metadata: - name: Confirm that overcloud nodes are set up correctly - description: | - Checks that overcloud nodes are registered with IdM - and that all certs being tracked by certmonger are in the - MONITORING state. - groups: - - post-deployment - categories: - - security - - tls-everywhere - - identity - - dns - - kerberos - products: - - tripleo - - freeipa - tasks: - - include_role: - name: tls_everywhere - tasks_from: common.yaml - - include_role: - name: tls_everywhere - tasks_from: overcloud-post-deployment.yaml diff --git a/playbooks/tls-everywhere-pre-deployment.yaml b/playbooks/tls-everywhere-pre-deployment.yaml deleted file mode 100644 index 8ce44c608..000000000 --- a/playbooks/tls-everywhere-pre-deployment.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Confirm undercloud is set up correctly prior to overcloud deploy - description: | - Checks that the undercloud has novajoin set up correctly and - that we are ready to do the overcloud deploy with tls-everywhere. - groups: - - pre-deployment - categories: - - security - - tls-everywhere - - identity - - dns - - kerberos - products: - - tripleo - - freeipa - tasks: - - include_role: - name: tls_everywhere - tasks_from: common.yaml - - include_role: - name: tls_everywhere - tasks_from: pre-deployment.yaml diff --git a/playbooks/tls-everywhere-prep.yaml b/playbooks/tls-everywhere-prep.yaml deleted file mode 100644 index a9070205b..000000000 --- a/playbooks/tls-everywhere-prep.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Confirm that undercloud is set up to register to IdM - description: | - Checks that the undercloud is ready to set up novajoin and - to register to IdM as a client as part of undercloud-install.
- groups: - - prep - categories: - - security - - tls-everywhere - - identity - - dns - - kerberos - products: - - tripleo - - freeipa - tasks: - - include_role: - name: tls_everywhere - tasks_from: prep.yaml diff --git a/playbooks/tripleo-haproxy.yaml b/playbooks/tripleo-haproxy.yaml deleted file mode 100644 index c1c7c91f4..000000000 --- a/playbooks/tripleo-haproxy.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- hosts: haproxy - vars: - metadata: - name: TripleO HAProxy configuration - description: Verify the HAProxy configuration has recommended values. - groups: - - post-deployment - categories: - - os - - system - - ha - - loadbalancing - - proxy - products: - - tripleo - - haproxy - config_file: '/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg' - global_maxconn_min: 20480 - defaults_maxconn_min: 4096 - defaults_timeout_queue: '2m' - defaults_timeout_client: '2m' - defaults_timeout_server: '2m' - defaults_timeout_check: '10s' - roles: - - tripleo_haproxy diff --git a/playbooks/tripleo-latest-packages-version.yaml b/playbooks/tripleo-latest-packages-version.yaml deleted file mode 100644 index 6200b555f..000000000 --- a/playbooks/tripleo-latest-packages-version.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: undercloud - gather_facts: false - vars: - metadata: - name: Check if latest version of TripleO packages is installed - description: | - Make sure a list of TripleO packages are at its latest version - before starting an upgrade. - groups: - - pre-upgrade - categories: - - os - - system - - packaging - - rpm - products: - - tripleo - packages_list: - - python3-tripleoclient - roles: - - check_latest_packages_version diff --git a/playbooks/undercloud-debug.yaml b/playbooks/undercloud-debug.yaml deleted file mode 100644 index 2bc7a9ab9..000000000 --- a/playbooks/undercloud-debug.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Undercloud Services Debug Check - description: | - The undercloud's openstack services should _not_ have debug enabled. - This will check if debug is enabled on undercloud services. - If debug is enabled, the root filesystem can fill up quickly, and - is not a good thing. - This role needs to be run against an installed Undercloud. - The tested services must use one of the specified configuration files - to set their debug status. - groups: - - pre-deployment - categories: - - compute - - networking - - telemetry - - baremetal - products: - - tripleo - debug_check: "True" - roles: - - undercloud_debug diff --git a/playbooks/undercloud-disabled-services.yaml b/playbooks/undercloud-disabled-services.yaml deleted file mode 100644 index cb8d32306..000000000 --- a/playbooks/undercloud-disabled-services.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify undercloud services state before running update or upgrade - description: | - Check undercloud status before running a stack update - especially minor update and major upgrade. 
- groups: - - backup-and-restore - - post-upgrade - - pre-upgrade - - post-update - - pre-update - categories: - - os - - system - - systemd - - services - products: - - tripleo - roles: - - undercloud_disabled_services diff --git a/playbooks/undercloud-disk-space-pre-upgrade.yaml b/playbooks/undercloud-disk-space-pre-upgrade.yaml deleted file mode 100644 index d1c7adec1..000000000 --- a/playbooks/undercloud-disk-space-pre-upgrade.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify undercloud fits the disk space requirements to perform an upgrade - description: | - Make sure that the root partition on the undercloud node has enough - free space before starting an upgrade - - http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements - groups: - - pre-upgrade - categories: - - storage - - disk - - os - - system - products: - - tripleo - volumes: - - {mount: /var/lib/docker, min_size: 10} - - {mount: /var/lib/config-data, min_size: 3} - - {mount: /var, min_size: 16} - - {mount: /, min_size: 20} - - roles: - - undercloud_disk_space diff --git a/playbooks/undercloud-disk-space.yaml b/playbooks/undercloud-disk-space.yaml deleted file mode 100644 index ea3a7d431..000000000 --- a/playbooks/undercloud-disk-space.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify undercloud fits the disk space requirements - description: | - Make sure that the root partition on the undercloud node has enough - free space. - - http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements - groups: - - prep - - pre-introspection - categories: - - storage - - disk - - os - - system - products: - - tripleo - volumes: - - {mount: /var/lib/docker, min_size: 10} - - {mount: /var/lib/config-data, min_size: 3} - - {mount: /var/log, min_size: 3} - - {mount: /usr, min_size: 5} - - {mount: /var, min_size: 20} - - {mount: /, min_size: 25} - - roles: - - undercloud_disk_space diff --git a/playbooks/undercloud-heat-purge-deleted.yaml b/playbooks/undercloud-heat-purge-deleted.yaml deleted file mode 100644 index 03217e643..000000000 --- a/playbooks/undercloud-heat-purge-deleted.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify heat-manage purge_deleted is enabled in crontab - description: | - Without a purge_deleted crontab enabled, the - heat database can grow very large. This validation checks that - the purge_deleted crontab has been set up. - This validation should only be used on systems using 'heat_api_cron'. - Starting from Wallaby, the 'heat_api_cron' is no longer installed by - default, and the role may behave upredictably. - groups: [] - categories: - - orchestration - - os - - system - - cron - products: - - tripleo - cron_check: "heat-manage purge_deleted" - roles: - - undercloud_heat_purge_deleted diff --git a/playbooks/undercloud-ipa-server-check.yaml b/playbooks/undercloud-ipa-server-check.yaml deleted file mode 100644 index 00655d4e8..000000000 --- a/playbooks/undercloud-ipa-server-check.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: undercloud - gather_facts: true - vars: - metadata: - name: Verify that the IPA server has the right permissions and ACI - description: | - This validation is relevant for systems where TLS Everywhere is enabled. - - A new ACI is needed on the FreeIPA server to ensure that certificates with IP SANs can be - issued. This ACI will be delivered by default from FreeIPA 4.8.5. 
- - In addition, a new permission is needed to add DNS zones for tripleo-ipa. This - permission is an addition to the current permissions for the Nova Host Manager role. - - This validation confirms that the new permission and ACI are present. - - https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/tls-introduction.html - groups: - - pre-upgrade - - pre-update - categories: - - security - - tls-everywhere - - identity - - dns - - kerberos - products: - - tripleo - - freeipa - tasks: - - include_role: - name: tls_everywhere - tasks_from: ipa-server-check.yaml diff --git a/playbooks/undercloud-neutron-sanity-check.yaml b/playbooks/undercloud-neutron-sanity-check.yaml deleted file mode 100644 index 812c3ecda..000000000 --- a/playbooks/undercloud-neutron-sanity-check.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Undercloud Neutron Sanity Check - description: | - Run `neutron-sanity-check` on the undercloud node to find out - potential issues with Neutron's configuration. - - The tool expects all the configuration files that are passed - to the Neutron services. - - groups: - - pre-introspection - categories: - - networking - - neutron - - overcloud - - controller - - undercloud-config - - container - products: - - tripleo - - # The list of Neutron configuration files and directories that - # will be passed to the Neutron services. The order is important - # here: the values in later files take precedence. - configs: - - /etc/neutron/neutron.conf - - /usr/share/neutron/neutron-dist.conf - - /etc/neutron/metadata_agent.ini - - /etc/neutron/dhcp_agent.ini - - /etc/neutron/plugins/ml2/openvswitch_agent.ini - - /etc/neutron/l3_agent.ini - - roles: - - neutron_sanity_check diff --git a/playbooks/undercloud-process-count.yaml b/playbooks/undercloud-process-count.yaml deleted file mode 100644 index 1894f7436..000000000 --- a/playbooks/undercloud-process-count.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check the number of OpenStack processes on undercloud - description: | - The default settings for OpenStack is to run one process (heat-engine, - keystone, etc.) per CPU core. On a machine with a lot of cores this is - both unnecessary and can consume a significant amount of RAM, leading - to crashes due to OOMKiller. - groups: - - pre-deployment - categories: - - os - - system - products: - - tripleo - max_process_count: 8 - roles: - - undercloud_process_count diff --git a/playbooks/undercloud-proxy-validation.yaml b/playbooks/undercloud-proxy-validation.yaml deleted file mode 100644 index c20d85207..000000000 --- a/playbooks/undercloud-proxy-validation.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify proxy variables are properly set - description: | - Check proxy configuration before running a stack update - especially minor update and major upgrade. 
- groups: - - backup-and-restore - - post-upgrade - - pre-upgrade - - post-update - - pre-update - categories: - - os - - system - - systemd - - services - products: - - tripleo - roles: - - undercloud_proxy_validation diff --git a/playbooks/undercloud-service-status.yaml b/playbooks/undercloud-service-status.yaml deleted file mode 100644 index 18bf1d047..000000000 --- a/playbooks/undercloud-service-status.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify undercloud services state before running update or upgrade - description: | - Check undercloud status before running a stack update, especially - a minor update or a major upgrade. - groups: - - backup-and-restore - - post-upgrade - - pre-upgrade - - post-update - - pre-update - categories: - - os - - system - - systemd - - services - products: - - tripleo - roles: - - undercloud_service_status diff --git a/playbooks/undercloud-sysctl.yaml b/playbooks/undercloud-sysctl.yaml deleted file mode 100644 index 0b278ed32..000000000 --- a/playbooks/undercloud-sysctl.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Verify undercloud sysctl option availability - description: | - The undercloud will not install properly if some of the expected sysctl - values are not available to be set. - groups: - - prep - - pre-upgrade - - pre-update - categories: - - os - - system - products: - - tripleo - roles: - - undercloud_sysctl diff --git a/playbooks/validate-passwords-file.yaml b/playbooks/validate-passwords-file.yaml deleted file mode 100644 index 228db3e3c..000000000 --- a/playbooks/validate-passwords-file.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- hosts: undercloud - vars: - metadata: - name: Check Undercloud passwords file - description: | - Disallow updates if the passwords file is missing. - If the undercloud was already deployed, the passwords file needs to be - present so passwords that can't be changed are persisted. If the file - is missing, it will break the undercloud, so we should fail fast and let - the user know about the problem. Both the old and the new path to the file - are checked. If either is found, the validation will pass, as the old - path will be migrated to the new one during an update/upgrade. - groups: - - prep - - pre-upgrade - - pre-update - categories: - - os - products: - - tripleo - roles: - - validate_passwords_file diff --git a/releasenotes/notes/.gitkeep b/releasenotes/notes/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/notes/add-ceph-health-check-000bab9581c759d3.yaml b/releasenotes/notes/add-ceph-health-check-000bab9581c759d3.yaml deleted file mode 100644 index 6561a8faf..000000000 --- a/releasenotes/notes/add-ceph-health-check-000bab9581c759d3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a ceph-health check which prints a warning message if the Ceph cluster - is in HEALTH_WARN state at the end of the Overcloud deployment. diff --git a/releasenotes/notes/add-overcloud-service-status-128bf74993d035fa.yaml b/releasenotes/notes/add-overcloud-service-status-128bf74993d035fa.yaml deleted file mode 100644 index a5fb770da..000000000 --- a/releasenotes/notes/add-overcloud-service-status-128bf74993d035fa.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - Introducing the overcloud_service_status role. This role will hit the - overcloud API for nova and cinder, retrieve the services and will trigger a - failure if one of these services is either down or deprecated.
- The original intent was to validate that nova-consoleauth was deleted after - an update to RHOSP16. diff --git a/releasenotes/notes/add-selinux-validation-e23694aaf94d2a66.yaml b/releasenotes/notes/add-selinux-validation-e23694aaf94d2a66.yaml deleted file mode 100644 index 07d2636ce..000000000 --- a/releasenotes/notes/add-selinux-validation-e23694aaf94d2a66.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - New validation to check for the SELinux Enforcing mode on the Undercloud. diff --git a/releasenotes/notes/add-undercloud-service-check-fe0ac64b9608f2da.yaml b/releasenotes/notes/add-undercloud-service-check-fe0ac64b9608f2da.yaml deleted file mode 100644 index 3a97d5b26..000000000 --- a/releasenotes/notes/add-undercloud-service-check-fe0ac64b9608f2da.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds simple undercloud service list (static) to the inventory - and uses that for pre-upgrade/update check that services are OK. diff --git a/releasenotes/notes/add-warn-helper-a586ba13c7e8b43b.yaml b/releasenotes/notes/add-warn-helper-a586ba13c7e8b43b.yaml deleted file mode 100644 index d7c2d53f4..000000000 --- a/releasenotes/notes/add-warn-helper-a586ba13c7e8b43b.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add a new `warn` ansible module that simply adds a string to the 'warnings' - ansible output. diff --git a/releasenotes/notes/add_a_lookup_plugin_for_nova_servers-33a934236e6652fb.yaml b/releasenotes/notes/add_a_lookup_plugin_for_nova_servers-33a934236e6652fb.yaml deleted file mode 100644 index ff7246a42..000000000 --- a/releasenotes/notes/add_a_lookup_plugin_for_nova_servers-33a934236e6652fb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a lookup plugin to retrieve server information from - the Nova API. Results can be filtered by server attributes. diff --git a/releasenotes/notes/add_nova_event_callback_validation-bd966e11a459d638.yaml b/releasenotes/notes/add_nova_event_callback_validation-bd966e11a459d638.yaml deleted file mode 100644 index 52c84e573..000000000 --- a/releasenotes/notes/add_nova_event_callback_validation-bd966e11a459d638.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - New validation to check for the Nova Event Callback feature configuration - on the Overcloud Controller(s). diff --git a/releasenotes/notes/bug-1776721-2e0abe371abee71c.yaml b/releasenotes/notes/bug-1776721-2e0abe371abee71c.yaml deleted file mode 100644 index b534f3444..000000000 --- a/releasenotes/notes/bug-1776721-2e0abe371abee71c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adds an undercloud heat-manage purge_deleted cron job validation. diff --git a/releasenotes/notes/ceilometerdb-size-validation-removal-a5c42940eac102df.yaml b/releasenotes/notes/ceilometerdb-size-validation-removal-a5c42940eac102df.yaml deleted file mode 100644 index d5417120e..000000000 --- a/releasenotes/notes/ceilometerdb-size-validation-removal-a5c42940eac102df.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -others: - - | - The ceilometerdb-size is deprecated since the Train cycle and it's now time - to retire it. 
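An illustrative aside on the `warn` helper introduced a few notes above: a minimal sketch of how a validation task consumes it, mirroring the invocation style used by the ceph role tasks later in this diff. The module only appends the message to Ansible's 'warnings' output instead of failing the run; the task name and the when-condition below are invented for the sketch.

---
# Minimal sketch only. Requires the custom `warn` module shipped by this
# repository; the `fail_on_warnings` condition variable is hypothetical.
- name: Warn about a degraded but non-blocking condition
  warn:
    msg: "Ceph is in HEALTH_WARN state."
  when: not fail_on_warnings | default(false) | bool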
diff --git a/releasenotes/notes/ceilometerdb-size_deprecation-c4b7e28bdfa5862d.yaml b/releasenotes/notes/ceilometerdb-size_deprecation-c4b7e28bdfa5862d.yaml deleted file mode 100644 index f9873d28f..000000000 --- a/releasenotes/notes/ceilometerdb-size_deprecation-c4b7e28bdfa5862d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - Since Panko has been deprecated during the Train cycle, the - ceilometerdb-size validation is now considered as deprecated as well. diff --git a/releasenotes/notes/check-latest-minor-version-14befc616a59002b.yaml b/releasenotes/notes/check-latest-minor-version-14befc616a59002b.yaml deleted file mode 100644 index b9c1491b9..000000000 --- a/releasenotes/notes/check-latest-minor-version-14befc616a59002b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - New validation to check for latest minor version of python-tripleoclient - - | - New module to check for new minor and major versions of a package diff --git a/releasenotes/notes/check-repo-availability-ef1bdd45586929f7.yaml b/releasenotes/notes/check-repo-availability-ef1bdd45586929f7.yaml deleted file mode 100644 index b3e12a26e..000000000 --- a/releasenotes/notes/check-repo-availability-ef1bdd45586929f7.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds a validation to the pre-upgrade group that checks if current - repositories can be connected to and if there is at least one repo - configured in yum repolist. diff --git a/releasenotes/notes/check-working-dns-72237308d554468d.yaml b/releasenotes/notes/check-working-dns-72237308d554468d.yaml deleted file mode 100644 index 3f45eb020..000000000 --- a/releasenotes/notes/check-working-dns-72237308d554468d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add a new validation to ensure DNS resolution working on both undercloud - and overcloud nodes. diff --git a/releasenotes/notes/compute-tsx-validation-5d976a3fc5166536.yaml b/releasenotes/notes/compute-tsx-validation-5d976a3fc5166536.yaml deleted file mode 100644 index 572018c6e..000000000 --- a/releasenotes/notes/compute-tsx-validation-5d976a3fc5166536.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - RHEL-8.3 kernel disabled the Intel “TSX” (Transactional - Synchronization Extensions) feature by default as a preemptive - security measure, but it breaks live migration from RHEL-7.9 - (or even RHEL-8.1 or RHEL-8.2) to RHEL-8.3. - - Operators are expected to explicitly define the TSX flag in - their KernelArgs for the compute role to prevent live-migration - issues during the upgrade process. - - This also impacts upstream CentOS systems. diff --git a/releasenotes/notes/configurable-ssh-user-840a9ef5416675e9.yaml b/releasenotes/notes/configurable-ssh-user-840a9ef5416675e9.yaml deleted file mode 100644 index c411cffeb..000000000 --- a/releasenotes/notes/configurable-ssh-user-840a9ef5416675e9.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - The ansible_ssh_user in the generated inventory by - tripleo-ansible-inventory is now configurable instead - of hardcoded to heat-admin. The default is still heat-admin. - Use the `--ansible_ssh_user` param or the `ANSIBLE_SSH_USER` - env var to define a custom value. 
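As a hedged companion to the compute-tsx note above: one way an operator might pin the TSX behaviour through role-specific KernelArgs. The parameter path assumes the usual <Role>Parameters convention from tripleo-heat-templates, and the chosen tsx value is only an example, not a recommendation from this repository.

---
# Illustrative environment file excerpt; choose tsx=on/off/auto according
# to your own live-migration strategy before the upgrade.
parameter_defaults:
  ComputeParameters:
    KernelArgs: "tsx=off"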
diff --git a/releasenotes/notes/consider_existing_resources-addc5b2527d9db1b.yaml b/releasenotes/notes/consider_existing_resources-addc5b2527d9db1b.yaml deleted file mode 100644 index b710b9199..000000000 --- a/releasenotes/notes/consider_existing_resources-addc5b2527d9db1b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Existing resources "DISK_GB", "MEMORY_MB", "VCPU" were not considered to - be available in case of custom_resource_class_val == False, also use correct - types for resource comparison and required_count calculation. diff --git a/releasenotes/notes/containerized-undercloud-validations-9866cb575681d100.yaml b/releasenotes/notes/containerized-undercloud-validations-9866cb575681d100.yaml deleted file mode 100644 index 52714e6d1..000000000 --- a/releasenotes/notes/containerized-undercloud-validations-9866cb575681d100.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Validation of required running Docker containers on a containerized - undercloud - - Validation of open ports running on a containerized undercloud diff --git a/releasenotes/notes/deployment-images_multi-arch-031eea343453e67c.yaml b/releasenotes/notes/deployment-images_multi-arch-031eea343453e67c.yaml deleted file mode 100644 index 21cd25c24..000000000 --- a/releasenotes/notes/deployment-images_multi-arch-031eea343453e67c.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - | - The behavior of the ``ironic-boot-configuration`` validation has changed - in order to suppport multi-arch. It now checks that each node has the - correct associated ramdisk and kernel image according to the node's - architecture and platform, and, when it does appear that the correct image - is associated, checks that there is only one image in Glance with that - name. Also, the vars ``deploy_kernel_name`` and ``deploy_ramdisk_name`` - have changed to ``deploy_kernel_name_base`` and - ``deploy_ramdisk_name_base`` respectively. -other: - - | - The ``deployment-images`` validation has been removed, as its intended - functionality became inseparable from ``ironic-boot-configuration`` in the - multi-arch case. diff --git a/releasenotes/notes/deprecate-ini-inventory-d7446df7e967adfe.yaml b/releasenotes/notes/deprecate-ini-inventory-d7446df7e967adfe.yaml deleted file mode 100644 index 66196aeec..000000000 --- a/releasenotes/notes/deprecate-ini-inventory-d7446df7e967adfe.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The ``--static-inventory`` argument to ``tripleo-ansible-inventory`` has - been deprecated and aliased to ``--static-yaml-inventory``. See - `bug 1751855 `__. diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-9688231ce7089eb9.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-9688231ce7089eb9.yaml deleted file mode 100644 index db420d739..000000000 --- a/releasenotes/notes/drop-python-3-6-and-3-7-9688231ce7089eb9.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - | - Python 3.6 & 3.7 support has been dropped. The minimum version of Python now - supported is Python 3.8. 
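Relating to the deployment-images / ironic-boot-configuration note above, a small sketch of extra vars using the renamed variables; only the variable names come from the note, the image base names are placeholders.

---
# Hypothetical extra-vars file for the multi-arch boot configuration check.
deploy_kernel_name_base: bm-deploy-kernel
deploy_ramdisk_name_base: bm-deploy-ramdisk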
diff --git a/releasenotes/notes/enhanced-diskspace-validation-1c6f6a9d97e67b47.yaml b/releasenotes/notes/enhanced-diskspace-validation-1c6f6a9d97e67b47.yaml deleted file mode 100644 index 8e8cb2e3c..000000000 --- a/releasenotes/notes/enhanced-diskspace-validation-1c6f6a9d97e67b47.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Allows to list specific directories to check - - Matches listed directories to actual mounts - - Ensure sufficient free space is available on volumes of interest diff --git a/releasenotes/notes/ip-range-input-23493c5850ddbf49.yaml b/releasenotes/notes/ip-range-input-23493c5850ddbf49.yaml deleted file mode 100644 index 9c4d26e80..000000000 --- a/releasenotes/notes/ip-range-input-23493c5850ddbf49.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Added additional input validation to the ctlplane-ip-range validation. - The validation now ensures the lower IP bound is smaller than the upper - bound. diff --git a/releasenotes/notes/ip-range-validation-result-daddc8c015dd34c0.yaml b/releasenotes/notes/ip-range-validation-result-daddc8c015dd34c0.yaml deleted file mode 100644 index 455aba7b8..000000000 --- a/releasenotes/notes/ip-range-validation-result-daddc8c015dd34c0.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Instead of only outputting a warning, the IP range validation now fails if - the number of available addresses is lower than the recommended minimum. - (Fixes https://bugs.launchpad.net/tripleo/+bug/1713483) diff --git a/releasenotes/notes/ironic-boot-config-changes-de40ef071e13b51b.yaml b/releasenotes/notes/ironic-boot-config-changes-de40ef071e13b51b.yaml deleted file mode 100644 index a65db4ec9..000000000 --- a/releasenotes/notes/ironic-boot-config-changes-de40ef071e13b51b.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - The behavior of the ``ironic-boot-configuration`` validations has changed - substantially in acknowledgment that deploy images may or may not be - Glance images. The new validatoin logic is more generalized than before, - in that now it checks that the deploy images associated to the nodes of a - certain arch/platform do not exceed a certain standard of diversity. diff --git a/releasenotes/notes/ironic-lookup-plugin-cd01332f326f4125.yaml b/releasenotes/notes/ironic-lookup-plugin-cd01332f326f4125.yaml deleted file mode 100644 index e21e7b504..000000000 --- a/releasenotes/notes/ironic-lookup-plugin-cd01332f326f4125.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a lookup plugin for ironic nodes. Nodes can be queried by id or - instance uuid. diff --git a/releasenotes/notes/migrate-to-keystoneauth-e660753d5577e387.yaml b/releasenotes/notes/migrate-to-keystoneauth-e660753d5577e387.yaml deleted file mode 100644 index 166cdb2cd..000000000 --- a/releasenotes/notes/migrate-to-keystoneauth-e660753d5577e387.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Migrate tripleo-ansible-inventory to use keystoneauth instead of - keystoneclient. diff --git a/releasenotes/notes/module-documentation-a35b1ef77c3f6847.yaml b/releasenotes/notes/module-documentation-a35b1ef77c3f6847.yaml deleted file mode 100644 index eeb33a5c7..000000000 --- a/releasenotes/notes/module-documentation-a35b1ef77c3f6847.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Ansible-style module documentation has been added to our custom modules - so they can be described with the ansible-doc command. 
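For the enhanced disk-space validation described at the top of this hunk, a sketch of the kind of input it consumes: directories of interest mapped to the free space their backing volumes must provide. The variable name, structure and units are assumptions for illustration only.

---
# Assumed shape: one entry per directory of interest, minimum free space in GB.
volumes:
  - {mount: /var/lib, min_size: 10}
  - {mount: /var/log, min_size: 3}
  - {mount: /, min_size: 25}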
diff --git a/releasenotes/notes/network-environment-validation-68f51e604819bfdf.yaml b/releasenotes/notes/network-environment-validation-68f51e604819bfdf.yaml deleted file mode 100644 index 89048dd48..000000000 --- a/releasenotes/notes/network-environment-validation-68f51e604819bfdf.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Added a network environment validation that checks network settings - based on environments/network-environment.yaml and nic config files - stored in the plan's Swift container. diff --git a/releasenotes/notes/network-environment-validation-ce98d775d9e1b17f.yaml b/releasenotes/notes/network-environment-validation-ce98d775d9e1b17f.yaml deleted file mode 100644 index c445f111a..000000000 --- a/releasenotes/notes/network-environment-validation-ce98d775d9e1b17f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Updated the network environment validation to use the new schema-based - validation method that would detect more error cases. diff --git a/releasenotes/notes/node-health-validation-7663706ff8c711a2.yaml b/releasenotes/notes/node-health-validation-7663706ff8c711a2.yaml deleted file mode 100644 index a1c3dce05..000000000 --- a/releasenotes/notes/node-health-validation-7663706ff8c711a2.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds a node health validation. The validation tries to ping all overcloud - nodes. For those that can't be reached some information is collected from - nova and ironic to display in the validation output. diff --git a/releasenotes/notes/node-pool-size-0e109b2c41ad6680.yaml b/releasenotes/notes/node-pool-size-0e109b2c41ad6680.yaml deleted file mode 100644 index 64c091b89..000000000 --- a/releasenotes/notes/node-pool-size-0e109b2c41ad6680.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Enhanced the network environment validation to validate the node IP pool - size. Warnings are issued if there are not enough IPs in any pools for all - assigned nodes. diff --git a/releasenotes/notes/openshift-hardware-validation-bb70ba88165450d2.yaml b/releasenotes/notes/openshift-hardware-validation-bb70ba88165450d2.yaml deleted file mode 100644 index 94d0b129a..000000000 --- a/releasenotes/notes/openshift-hardware-validation-bb70ba88165450d2.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds a hardware requirements validation that checks flavors and compute - resources for minimum values needed for either a test or production - environment and looks for the existence of the required images. diff --git a/releasenotes/notes/openshift-network-check-d495367316c83147.yaml b/releasenotes/notes/openshift-network-check-d495367316c83147.yaml deleted file mode 100644 index 6437d4e22..000000000 --- a/releasenotes/notes/openshift-network-check-d495367316c83147.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a network requirements validation for OpenShift which checks if there - is an external network set on the overcloud. diff --git a/releasenotes/notes/plan-stack-cli-59b3f3a213ded859.yaml b/releasenotes/notes/plan-stack-cli-59b3f3a213ded859.yaml deleted file mode 100644 index 8148a46fb..000000000 --- a/releasenotes/notes/plan-stack-cli-59b3f3a213ded859.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - In order to use tripleo-ansible-inventory with a different stack name, that - name had to be specified with --plan. This commit adds help for the --plan - cli arg, and also adds a new --stack cli arg that has the same effect. If - both are specified, --stack will take precedence. 
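To make the node IP pool size check above concrete, an illustrative network-environment excerpt of the data it walks; the check warns when a pool such as this holds fewer addresses than the number of nodes assigned to that network. The parameter name follows the usual tripleo-heat-templates convention and the addresses are examples.

---
# Example allocation pool; roughly 247 usable addresses in this range.
parameter_defaults:
  InternalApiAllocationPools:
    - {'start': '172.16.2.4', 'end': '172.16.2.250'}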
diff --git a/releasenotes/notes/remove-deprecated-services-process-count-b5d1d3e9555f445b.yaml b/releasenotes/notes/remove-deprecated-services-process-count-b5d1d3e9555f445b.yaml deleted file mode 100644 index e2b279760..000000000 --- a/releasenotes/notes/remove-deprecated-services-process-count-b5d1d3e9555f445b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -others: - - | - Several services were retired and are no longer in use. Therefore it is - pointless to check for their associated processes. The retired services - are, in alphabetical order: mistral, nova-compute, swift and zaqar. diff --git a/releasenotes/notes/remove-deprecated-services-service-status-8e353ca6219ed921.yaml b/releasenotes/notes/remove-deprecated-services-service-status-8e353ca6219ed921.yaml deleted file mode 100644 index b0a273730..000000000 --- a/releasenotes/notes/remove-deprecated-services-service-status-8e353ca6219ed921.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -others: - - | - Several services were retired and are no longer in use. Therefore it is - pointless to check for their status. The retired services are, - in alphabetical order: mistral, nova-compute, swift and zaqar. diff --git a/releasenotes/notes/remove-neutron-lbaas-e8168b4595506b91.yaml b/releasenotes/notes/remove-neutron-lbaas-e8168b4595506b91.yaml deleted file mode 100644 index 5546a3c95..000000000 --- a/releasenotes/notes/remove-neutron-lbaas-e8168b4595506b91.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - | - The Neutron LBaaS project was retired. Upgrading to deployment to Train - release will not upgrade Neutron LBaaS. Learn more about its retirement and - Octavia as its successor at - https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation -deprecations: - - | - The Neutron LBaaS project was retired and support for it in TripleO - removed. diff --git a/releasenotes/notes/remove-overcloudrc-a6c28a4a34dad951.yaml b/releasenotes/notes/remove-overcloudrc-a6c28a4a34dad951.yaml deleted file mode 100644 index 905601276..000000000 --- a/releasenotes/notes/remove-overcloudrc-a6c28a4a34dad951.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -others: - - | - The 'overcloudrc' validation module was removed as it was not used by any valdiation - for a significant period of time. No other party or project was known - to have used the module in any capacity. diff --git a/releasenotes/notes/specify-vars-after-children-280b1d5767503df3.yaml b/releasenotes/notes/specify-vars-after-children-280b1d5767503df3.yaml deleted file mode 100644 index 693149086..000000000 --- a/releasenotes/notes/specify-vars-after-children-280b1d5767503df3.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - tripleo-ansible-inventory has been updated to output the vars sections - after the children sections. Due to a change in ansible 2.4, having them - in the other order caused no hosts to match the generated static inventory. - See https://bugs.launchpad.net/tripleo/+bug/1729058 diff --git a/releasenotes/notes/stack-health-validation-c2174bc5f0bd585e.yaml b/releasenotes/notes/stack-health-validation-c2174bc5f0bd585e.yaml deleted file mode 100644 index fe49ce924..000000000 --- a/releasenotes/notes/stack-health-validation-c2174bc5f0bd585e.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - New validation to check the health of the current stack; to be run before - an update or upgrade. - - | - New lookup plugin to access the current stack's resources in Ansible - playbooks. 
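Following the retired-services notes above, a sketch of what a trimmed static service list looks like once mistral, nova-compute, swift and zaqar are dropped; the variable name and the surviving entries are assumptions for illustration, not the exact list shipped by the roles.

---
# Hypothetical undercloud service list of the sort the process-count and
# service-status style checks iterate over.
undercloud_services:
  - tripleo_glance_api
  - tripleo_heat_engine
  - tripleo_ironic_conductor
  - tripleo_nova_api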
diff --git a/releasenotes/notes/tht-lookup-111fb8a9748e9fa7.yaml b/releasenotes/notes/tht-lookup-111fb8a9748e9fa7.yaml deleted file mode 100644 index 8ef41a6fa..000000000 --- a/releasenotes/notes/tht-lookup-111fb8a9748e9fa7.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a lookup plugin to access or loop over the current plan's template - files in a validation playbook. diff --git a/releasenotes/notes/validate-xfs-ftype-equals-0-8fdb1f8c99bee975.yaml b/releasenotes/notes/validate-xfs-ftype-equals-0-8fdb1f8c99bee975.yaml deleted file mode 100644 index 431b0ca9b..000000000 --- a/releasenotes/notes/validate-xfs-ftype-equals-0-8fdb1f8c99bee975.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -upgrade: - - | - Validate that there are no volumes formatted with XFS - and ftype=0. - Deployments from OpenStack Kilo or Liberty have XFS - partitions formatted with ftype=0, which is incompatible - with the docker overlayfs driver. - From OpenStack Newton, we have support for XFS ftype=1 - by default. - This check will make fail the pre-upgrade validations - if there are deployments coming back from Kilo or Liberty - and have XFS partitions with ftype=0. diff --git a/releasenotes/source/_static/.gitkeep b/releasenotes/source/_static/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 3c4059ab4..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,322 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# flake8: noqa - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -authors = 'TripleO Developers' -project = 'tripleo-validations Release Notes' -copyright = '2017, ' + authors - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. 
-# Usually you set "language" from the command line for these cases. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# -# today = '' -# -# Else, today_fmt is used as the format for a strftime call. -# -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -# todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -# -# html_title = u'tripleo-validations v1.0' - -# A shorter title for the navigation bar. Default is the same as html_title. -# -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# -# html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# -# html_extra_path = [] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# -# html_additional_pages = {} - -# If false, no module index is generated. -# -# html_domain_indices = True - -# If false, no index is generated. 
-#html_use_index = True - -# If true, the index is split into individual pages for each letter. -# -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -# -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -# -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'tripleo-validationsReleaseNotesdoc' - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'tripleo-validationsReleaseNotes.tex', - 'tripleo-validations Release Notes Documentation', - authors, 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# -# latex_use_parts = False - -# If true, show page references after internal links. -# -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# -# latex_appendices = [] - -# It false, will not define \strong, \code, itleref, \crossref ... but only -# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added -# packages. -# -# latex_keep_old_macro_names = True - -# If false, no module index is generated. -# -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'tripleo-validationsreleasenotes', - 'tripleo-validations Release Notes Documentation', - [authors], 1) -] - -# If true, show URL addresses after external links. -# -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'tripleo-validationsReleaseNotes', - 'tripleo-validations Release Notes Documentation', - authors, 'tripleo-validationsReleaseNotes', - 'A collection of Ansible playbooks to detect and report potential issues during TripleO deployments.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# -# texinfo_appendices = [] - -# If false, no module index is generated. -# -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/tripleo-validations' -openstackdocs_auto_name = False -openstackdocs_bug_project = 'tripleo' -openstackdocs_bug_tag = 'documentation' diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index b4528933d..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -=============================================== -Welcome to tripleo-validations' Release Notes! -=============================================== - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - zed - wallaby - victoria - ussuri - train - stein - rocky - queens - pike - ocata - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42e..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst deleted file mode 100644 index e43bfc0ce..000000000 --- a/releasenotes/source/pike.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Pike Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst deleted file mode 100644 index 36ac6160c..000000000 --- a/releasenotes/source/queens.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Queens Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517b7..000000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb667..000000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. 
release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 583900393..000000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index b7be79ea2..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== -Current Series Release Notes -============================== - -.. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e0c..000000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6f3..000000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b56599..000000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst deleted file mode 100644 index 9608c05e4..000000000 --- a/releasenotes/source/zed.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================== -Zed Series Release Notes -======================== - -.. release-notes:: - :branch: stable/zed diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 433454cca..000000000 --- a/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -pbr>=3.1.1 # Apache-2.0 -oslo.config>=5.2.0 # Apache-2.0 -keystoneauth1>=3.16.0 # Apache-2.0 -python-novaclient>=15.1.0 # Apache-2.0 -python-heatclient>=1.10.0 # Apache-2.0 -python-glanceclient>=2.9.1 # Apache-2.0 -python-ironicclient>=2.7.0 # Apache-2.0 -python-ironic-inspector-client>=3.1.1 # Apache-2.0 -os-net-config>=7.1.0 # Apache-2.0 -oslo.utils>=3.40.2 # Apache-2.0 -tripleo-common>=7.1.0 # Apache-2.0 -setuptools>=50.3.0 # MIT -tripleo-ansible>=6.0.0 # Apache-2.0 diff --git a/role-addition.yml b/role-addition.yml deleted file mode 100644 index 05c8f8aaf..000000000 --- a/role-addition.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Create a new role for TripleO-Validations - hosts: localhost - connection: local - gather_facts: false - roles: - - validation_init diff --git a/roles/ceph/defaults/main.yml b/roles/ceph/defaults/main.yml deleted file mode 100644 index 830c7dead..000000000 --- a/roles/ceph/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fail_without_deps: false -fail_on_ceph_health_err: false -fail_on_ceph_health_warn: false -osd_percentage_min: 0 -container_client: "podman" diff --git a/roles/ceph/molecule/default/converge.yml b/roles/ceph/molecule/default/converge.yml deleted file mode 100644 index 29d1c78f2..000000000 --- a/roles/ceph/molecule/default/converge.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Nothing to do - debug: - msg: 'No "main" to run, nothing to do' diff --git a/roles/ceph/molecule/default/molecule.yml b/roles/ceph/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/ceph/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/ceph/tasks/ceph-dependencies-installed.yaml b/roles/ceph/tasks/ceph-dependencies-installed.yaml deleted file mode 100644 index aa0f1daf9..000000000 --- a/roles/ceph/tasks/ceph-dependencies-installed.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Gather the package facts - package_facts: - manager: auto - -- name: Warn about missing dependencies - warn: - msg: "If planning to use Ceph, it is necessary to check {{ item[0] }} is installed!" 
- when: - - "'{{ item[0] }}' not in ansible_facts.packages" - - not fail_without_deps|default(false)|bool - delegate_to: "{{ item[1] }}" - with_nested: - - "{{ packages }}" - - "{{ tripleo_delegate_to }}" - -- name: Fail if a ceph dependency is missing - fail: - msg: "If planning to use Ceph, it is necessary to install {{ item[0] }} package" - when: - - "'{{ item[0] }}' not in ansible_facts.packages" - - fail_without_deps|default(false)|bool - delegate_to: "{{ item[1] }}" - with_nested: - - "{{ packages }}" - - "{{ tripleo_delegate_to }}" diff --git a/roles/ceph/tasks/ceph-health.yaml b/roles/ceph/tasks/ceph-health.yaml deleted file mode 100644 index fa59a7f60..000000000 --- a/roles/ceph/tasks/ceph-health.yaml +++ /dev/null @@ -1,90 +0,0 @@ ---- -- name: Check if ceph_mon is deployed - become: true - shell: hiera -c /etc/puppet/hiera.yaml enabled_services | egrep -sq ceph_mon - ignore_errors: true - register: ceph_mon_enabled - changed_when: false - delegate_to: "{{ tripleo_delegate_to | first | default(omit) }}" - -- when: "ceph_mon_enabled is succeeded" - delegate_to: "{{ tripleo_delegate_to | first | default(omit) }}" - block: - - name: Check for docker cli - stat: - path: "/var/run/docker.sock" - register: check_docker_cli - check_mode: false - - - name: Set container_client fact - set_fact: - container_client: |- - {% set container_client = 'podman' %} - {% if check_docker_cli.stat.exists|bool %} - {% set container_client = 'docker' %} - {% endif %} - {{ container_client }} - - - name: Set container filter format - set_fact: - container_filter_format: !unsafe "--format '{{ .Names }}'" - - - name: Set ceph_mon_container name - become: true - shell: "{{ container_client }} ps {{ container_filter_format }} | grep ceph-mon" - register: ceph_mon_container - changed_when: false - - - name: Set ceph cluster name - become: true - shell: find /etc/ceph -name '*.conf' -prune -print -quit | xargs basename -s '.conf' - register: ceph_cluster_name - changed_when: false - - - name: Get ceph health - become: true - shell: "{{ container_client }} exec {{ ceph_mon_container.stdout }} ceph --cluster {{ ceph_cluster_name.stdout }} health | awk '{print $1}'" - register: ceph_health - - - name: Check ceph health - warn: - msg: Ceph is in {{ ceph_health.stdout }} state. - when: - - ceph_health.stdout != 'HEALTH_OK' - - not fail_on_ceph_health_err|default(true)|bool - - - name: Fail if ceph health is HEALTH_WARN - fail: - msg: Ceph is in {{ ceph_health.stdout }} state. - when: - - ceph_health.stdout == 'HEALTH_WARN' - - fail_on_ceph_health_warn|default(false)|bool - - - name: Fail if ceph health is HEALTH_ERR - fail: - msg: Ceph is in {{ ceph_health.stdout }} state. 
- when: - - ceph_health.stdout == 'HEALTH_ERR' - - fail_on_ceph_health_err|default(true)|bool - - - when: - - osd_percentage_min|default(0) > 0 - block: - # 1.3+ <= jq <= 1.5 doesn't support try-catch: we rely on if - then -else approach to check if ceph has .osdmap layout - - name: set jq osd percentage filter - set_fact: - jq_osd_percentage_filter: '(if .osdmap != null then { osds: (.osdmap.num_in_osds / .osdmap.num_osds * 100) } - else { osds: (.num_in_osds / .num_osds * 100) } end) | .osds' - - - name: Get OSD stat percentage - become: true - shell: >- - "{{ container_client }}" exec "{{ ceph_mon_container.stdout }}" ceph - --cluster "{{ ceph_cluster_name.stdout }}" osd stat -f json | jq '{{ jq_osd_percentage_filter }}' - register: ceph_osd_in_percentage - - - name: Fail if there is an unacceptable percentage of in OSDs - fail: - msg: "Only {{ ceph_osd_in_percentage.stdout|float }}% of OSDs are in, but {{ osd_percentage_min|default(0) }}% are required" - when: - - ceph_osd_in_percentage.stdout|float < osd_percentage_min|default(0) diff --git a/roles/ceph/tasks/ceph-pg.yaml b/roles/ceph/tasks/ceph-pg.yaml deleted file mode 100644 index f317c5fb5..000000000 --- a/roles/ceph/tasks/ceph-pg.yaml +++ /dev/null @@ -1,145 +0,0 @@ ---- -- name: Lookup ANSIBLE_HASH_BEHAVIOUR - set_fact: - hash_behavior: "{{ lookup('config', 'DEFAULT_HASH_BEHAVIOUR', on_missing='skip')|default('replace') }}" - -- name: Fail unless ANSIBLE_HASH_BEHAVIOUR=merge - fail: - msg: | - In order to simulate Tripleo Heat Template behavior this role requires - that it be run with Ansible's hash_behaviour set to merge. Please - re-run with 'export ANSIBLE_HASH_BEHAVIOUR=merge'" - when: - - hash_behavior != 'merge' - -- name: Fail if number of OSDs is not specified - fail: - msg: "Please pass the expected number of OSDs, e.g. 
'-e num_osds=36'" - when: num_osds is not defined - -- name: Get ceph_pool_default_size - set_fact: - ceph_pool_default_size: "{{ parameter_defaults['CephPoolDefaultSize']|default(3) }}" - -- name: Get ceph_pool_default_pg_num - set_fact: - ceph_pool_default_pg_num: "{{ parameter_defaults['CephPoolDefaultPgNum']|default(128) }}" - -- name: Set ceph_pools default - set_fact: - ceph_pools: - - name: "{{ parameter_defaults['CinderBackupRbdPoolName']|default('backups') }}" - application: rbd - - name: "{{ parameter_defaults['CinderRbdPoolName']|default('volumes') }}" - application: rbd - - name: "{{ parameter_defaults['NovaRbdPoolName']|default('vms') }}" - application: rbd - - name: "{{ parameter_defaults['GlanceRbdPoolName']|default('images') }}" - application: rbd - - name: "{{ parameter_defaults['GnocchiRbdPoolName']|default('metrics') }}" - application: openstack_gnocchi - -- when: - - parameter_defaults['CephPools'] is defined - - (parameter_defaults['CephPools']|length) > 0 - block: - - name: Get names of custom pools - set_fact: - custom_names: "{{ custom_names | default([]) + [ item.name ] }}" - loop: "{{ parameter_defaults['CephPools'] }}" - - - name: Get names of default pools - set_fact: - default_names: "{{ default_names | default([]) + [ item.name ] }}" - loop: "{{ ceph_pools }}" - - - name: Base updated ceph_pools list on custom list - set_fact: - new_ceph_pools: "{{ parameter_defaults['CephPools'] }}" - - - name: Add default pools not in custom list to updated ceph_pools list - set_fact: - new_ceph_pools: "{{ new_ceph_pools | default([]) + [item] }}" - loop: "{{ ceph_pools }}" - when: - - item.name in default_names|difference(custom_names) - - - name: redefine ceph_pools based on updated ceph_pools list - set_fact: - ceph_pools: "{{ new_ceph_pools }}" - -- name: Add CinderRbdExtraPools if provided and not in custom pool list - set_fact: - ceph_pools: "{{ ceph_pools | default([]) + [dict(name=item, application='rbd') ] }}" - loop: "{{ parameter_defaults.CinderRbdExtraPools.split(',')|list }}" - when: - - parameter_defaults['CinderRbdExtraPools'] is defined - - (parameter_defaults['CinderRbdExtraPools']|length) > 0 - - item not in custom_names|default([]) - -- name: Warn if deprecated Manila parameters are being used - warn: - msg: | - One or more of following parameters is in use but is - deprecated in Stein and newer; ManilaCephFSDataPoolPGNum, - ManilaCephFSMetadataPoolPGNum, and ManilaCephFSShareBackendName - when: - - (parameter_defaults['ManilaCephFSDataPoolPGNum'] is defined or - parameter_defaults['ManilaCephFSMetadataPoolPGNum'] is defined or - parameter_defaults['ManilaCephFSShareBackendName'] is defined) - -- name: Add Manila MDS pools if provided and not in custom pool list - set_fact: - ceph_pools: "{{ ceph_pools | default([]) + [item] }}" - loop: - - name: "{{ parameter_defaults['ManilaCephFSDataPoolName']|default('manila_data') }}" - application: cephfs - pg_num: "{{ parameter_defaults['ManilaCephFSDataPoolPGNum']|default(128) }}" - pgp_num: "{{ parameter_defaults['ManilaCephFSDataPoolPGNum']|default(128) }}" - - name: "{{ parameter_defaults['ManilaCephFSMetadataPoolName']|default('manila_metadata') }}" - application: cephfs - pg_num: "{{ parameter_defaults['ManilaCephFSMetadataPoolPGNum']|default(128) }}" - pgp_num: "{{ parameter_defaults['ManilaCephFSMetadataPoolPGNum']|default(128) }}" - when: - - resource_registry['OS::TripleO::Services::ManilaBackendCephFs'] is defined - - resource_registry['OS::TripleO::Services::ManilaBackendCephFs'] != 'OS::Heat::None' - - 
item.name not in custom_names|default([]) - -- name: Add RGW pools if requested unless in custom pool list (only for default zone) - set_fact: - ceph_pools: "{{ ceph_pools | default([]) + [dict(name=item, application='rgw') ] }}" - loop: - - .rgw.root - - default.rgw.control - - default.rgw.meta - - default.rgw.log - - default.rgw.buckets.index - - default.rgw.buckets.data - when: - - resource_registry['OS::TripleO::Services::CephRgw'] is defined - - resource_registry['OS::TripleO::Services::CephRgw'] != 'OS::Heat::None' - - item not in custom_names|default([]) - -- name: Warn if an application is not set for each pool - warn: - msg: "The pool {{ item.name }} should have an application set, e.g. {'name': {{ item.name }}, 'application': rbd}" - when: item.application is not defined - loop: "{{ ceph_pools }}" - -- name: Simulate OpenStack pool creation in proposed Ceph Cluster - ceph_pools_pg_protection: - num_osds: "{{ num_osds }}" - ceph_pool_default_size: "{{ ceph_pool_default_size }}" - ceph_pool_default_pg_num: "{{ ceph_pool_default_pg_num }}" - ceph_pools: "{{ ceph_pools }}" - register: pool_creation_simulation - -- name: Fail if CephPools parameter is not configured correctly - fail: - msg: '{{ pool_creation_simulation["message"] }}' - when: not pool_creation_simulation["valid_input"] - -- name: Succeed if CephPools parameter will satisfy PG overdoce protection - debug: - msg: '{{ pool_creation_simulation["message"] }}' - when: pool_creation_simulation["valid_input"] diff --git a/roles/check_for_dangling_images/defaults/main.yml b/roles/check_for_dangling_images/defaults/main.yml deleted file mode 100644 index d89cc3534..000000000 --- a/roles/check_for_dangling_images/defaults/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "check_for_dangling_images" - -# Debugging mode -check_for_dangling_images_debug: false diff --git a/roles/check_for_dangling_images/molecule/default/converge.yml b/roles/check_for_dangling_images/molecule/default/converge.yml deleted file mode 100644 index 194cc1d2a..000000000 --- a/roles/check_for_dangling_images/molecule/default/converge.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Populate successful podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - - - name: Test good values - include_role: - name: check_for_dangling_images - - - name: Populate failing podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - echo 4199acc83c6a43243392aecbff22764dbb501aef81a26d7c4c8c69064f84ef47 - - - name: Test failing - block: - - name: Catch when images exist - include_role: - name: check_for_dangling_images - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - Found dangling podman images diff --git a/roles/check_for_dangling_images/molecule/default/molecule.yml b/roles/check_for_dangling_images/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_for_dangling_images/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_for_dangling_images/tasks/main.yml b/roles/check_for_dangling_images/tasks/main.yml deleted file mode 100644 index 951ae35f5..000000000 --- a/roles/check_for_dangling_images/tasks/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# "check_for_dangling_images" tasks - -- name: Get podman check for images - shell: podman images --filter "dangling=true" -q --no-trunc - register: dangling_images - -- name: Verify no images exist - fail: - msg: >- - Error - podman images were found using 'podman images --filter "dangling=true" -q --no-trunc' - failed_when: dangling_images.stdout != "" diff --git a/roles/check_kernel_version/defaults/main.yml b/roles/check_kernel_version/defaults/main.yml deleted file mode 100644 index e1f54eb11..000000000 --- a/roles/check_kernel_version/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -check_kernel_version_fact: "{{ ansible_facts.kernel }}" -check_kernel_version_expected: "el8" diff --git a/roles/check_kernel_version/molecule/default/converge.yml b/roles/check_kernel_version/molecule/default/converge.yml deleted file mode 100644 index 0c98b4691..000000000 --- a/roles/check_kernel_version/molecule/default/converge.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Converge - hosts: all - gather_facts: true - - tasks: - - name: validate correct kernel version - include_role: - name: "check_kernel_version" - - - name: detect wrong kernel version - block: - - name: run validation against wrong kernel version - include_role: - name: "check_kernel_version" - vars: - check_kernel_version_fact: "4.18.0-193.14.2.el7_2.x86_64" - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Status message - debug: - msg: "Detected kernel version mismatch!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The kernel version mismatch should have been detected but it wasn't. diff --git a/roles/check_kernel_version/molecule/default/molecule.yml b/roles/check_kernel_version/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_kernel_version/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_kernel_version/tasks/main.yml b/roles/check_kernel_version/tasks/main.yml deleted file mode 100644 index 8f7127e6e..000000000 --- a/roles/check_kernel_version/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Check kernel version - fail: - msg: '{{ check_kernel_version_expected }} does not match configured kernel_version {{ check_kernel_version_fact }}' - when: check_kernel_version_expected not in check_kernel_version_fact diff --git a/roles/check_manila_policy_file/molecule/default/converge.yml b/roles/check_manila_policy_file/molecule/default/converge.yml deleted file mode 100644 index cea8a09d4..000000000 --- a/roles/check_manila_policy_file/molecule/default/converge.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-- name: Converge - hosts: all - vars: - manila_policy_path: /var/lib/config-data/puppet-generated/manila/etc/manila/ - policy_file_name: policy.yaml - tasks: - - name: Create manila directory - file: - path: "{{ manila_policy_path }}" - state: directory - mode: 0755 - recurse: true - - - name: Add policy file - file: - path: "{{ manila_policy_path }}{{ policy_file_name }}" - state: touch - mode: 0755 - - - name: Test pass - policy file exists - include_role: - name: check_manila_policy_file - vars: - manila_policy_file: "{{ manila_policy_path }}{{ policy_file_name }}" - - - name: Remove policy file - file: - path: "{{ manila_policy_path }}{{ policy_file_name }}" - state: absent - - - name: Test fails - policy file doesnt exists - block: - - name: Load role with failure - include_role: - name: "check_manila_policy_file" - rescue: - - name: Clear host error - meta: clear_host_errors - - - name: Status message - debug: - msg: 'Successfully detected policy file doesnt exists!' - - - name: End play - meta: end_play - - - name: Fail if we get to this place - fail: - msg: 'Unit test failed: Did not detect that policy file doesnt exists!' diff --git a/roles/check_manila_policy_file/molecule/default/molecule.yml b/roles/check_manila_policy_file/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_manila_policy_file/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_manila_policy_file/molecule/default/prepare.yml b/roles/check_manila_policy_file/molecule/default/prepare.yml deleted file mode 100644 index bec69d3d9..000000000 --- a/roles/check_manila_policy_file/molecule/default/prepare.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -- name: Prepare - hosts: all - gather_facts: false diff --git a/roles/check_manila_policy_file/tasks/main.yml b/roles/check_manila_policy_file/tasks/main.yml deleted file mode 100644 index a260d9028..000000000 --- a/roles/check_manila_policy_file/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
---- -- name: Get stats of check policy file - ansible.builtin.stat: - path: "{{ manilas_policy_file }}" - register: result - -- name: Mention file does not exists - fail: - msg: "Test failed: Did not detect file. Policy file does not exists!" - when: - - not result.stat.exists diff --git a/roles/check_manila_policy_file/vars/main.yml b/roles/check_manila_policy_file/vars/main.yml deleted file mode 100644 index 304a46c56..000000000 --- a/roles/check_manila_policy_file/vars/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# While options found within the vars/ path can be overridden using extra -# vars, items within this path are considered part of the role and not -# intended to be modified. - -# All variables within this role should have a prefix of "check_policy_file" - -# !!!! IMPORTANT !!!! -# Add a comment above every variables describing them. -# This will be included in the sphinx role documentation -# !!!! IMPORTANT !!!! -manilas_policy_file: /var/lib/config-data/puppet-generated/manila/etc/manila/policy.yaml diff --git a/roles/check_network_gateway/molecule/default/converge.yml b/roles/check_network_gateway/molecule/default/converge.yml deleted file mode 100644 index 48ca2fe14..000000000 --- a/roles/check_network_gateway/molecule/default/converge.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: successful check with ctlplane-subnet - include_role: - name: check_network_gateway - - - name: override undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - local_subnet = 192.168.254.0/24 - [192.168.254.0/24] - gateway = 0.0.0.0 - - - name: successful check with local_ip - include_role: - name: check_network_gateway - - - name: fail the validation - block: - - name: override undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [ctlplane-subnet] - gateway = 192.168.254.254 - - - name: run the validation - include_role: - name: check_network_gateway - - rescue: - - name: Clear host error - meta: clear_host_errors - - - name: Validation output - debug: - msg: The validation works! 
End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The check_network_gateway validation didn't properly detect faulty - gateway! diff --git a/roles/check_network_gateway/molecule/default/molecule.yml b/roles/check_network_gateway/molecule/default/molecule.yml deleted file mode 100644 index c7e33ebb9..000000000 --- a/roles/check_network_gateway/molecule/default/molecule.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. - -platforms: - - name: centos - hostname: centos - image: centos/centos:stream8 - registry: - url: quay.io - dockerfile: ../../../../.config/molecule/Dockerfile - pkg_extras: python*setuptools python*-pyyaml iputils - volumes: - - /etc/ci/mirror_info.sh:/etc/ci/mirror_info.sh:ro - privileged: true - environment: &env - http_proxy: "{{ lookup('env', 'http_proxy') }}" - https_proxy: "{{ lookup('env', 'https_proxy') }}" - ulimits: &ulimit - - host diff --git a/roles/check_network_gateway/molecule/default/prepare.yml b/roles/check_network_gateway/molecule/default/prepare.yml deleted file mode 100644 index 41a258296..000000000 --- a/roles/check_network_gateway/molecule/default/prepare.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [ctlplane-subnet] - gateway = 0.0.0.0 diff --git a/roles/check_network_gateway/tasks/main.yml b/roles/check_network_gateway/tasks/main.yml deleted file mode 100644 index e73b7a735..000000000 --- a/roles/check_network_gateway/tasks/main.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Get the local_subnet name from the undercloud_conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: local_subnet - ignore_missing_file: true - register: local_subnet - -- name: Get gateway value from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: "{% if local_subnet.value %}{{ local_subnet.value }}{% else %}ctlplane-subnet{% endif %}" - key: gateway - ignore_missing_file: true - register: gateway - -- name: Get local_ip value from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: local_ip - ignore_missing_file: true - register: local_ip - -- name: Test network_gateway if different from local_ip - icmp_ping: - host: "{{ gateway.value | default('0.0.0.0', true) }}" - when: > - "local_ip.value | default('0.0.0.0', true) | ipaddr('address')" - != - "gateway.value | default('0.0.0.0', true) | ipaddr('address')" diff --git a/roles/check_network_gateway/vars/main.yml b/roles/check_network_gateway/vars/main.yml deleted file mode 100644 index 26a5d8ae7..000000000 --- a/roles/check_network_gateway/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Check network_gateway on the provisioning network - description: > - If `gateway` in `undercloud.conf` is different from `local_ip`, - verify that the gateway exists and is reachable. - groups: - - pre-introspection diff --git a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/check_nfv_instances.yml b/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/check_nfv_instances.yml deleted file mode 100644 index 6690aab1b..000000000 --- a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/check_nfv_instances.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# Gets only NFV instances and validates all the NFV instances. 
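# Illustrative sketch -- the sample XML and the example_-prefixed names below are
# assumptions added for illustration, not part of this change. It shows the
# xmlstring/xpath query style the tasks below use to decide whether an instance has a
# vhostuser (OVS-DPDK) interface.
- name: Example - query interface attributes from a sample libvirt domain XML
  vars:
    example_domain_xml: |
      <domain type='kvm'>
        <devices>
          <interface type='vhostuser'/>
          <interface type='bridge'/>
        </devices>
      </domain>
  community.general.xml:
    xmlstring: "{{ example_domain_xml }}"
    xpath: /domain/devices/interface
    content: attribute
  register: example_interfaces
# Each entry of example_interfaces.matches is a dict of the element's attributes,
# e.g. {'interface': {'type': 'vhostuser'}}, which is what the 'vhostuser' comparison
# below inspects.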
-- name: Get instance {{ vm_name }} xml - become: true - shell: >- - "{{ container_cli }}" exec -u root nova_libvirt virsh dumpxml "{{ vm_name }}" - register: instance_xml - -- name: Set instance {{ vm_name }} xml data - set_fact: - instance_xml_data: "{{ instance_xml.stdout }}" - -- name: Get interfaces from xml string - community.general.xml: - xmlstring: "{{ instance_xml_data }}" - xpath: /domain/devices/interface - content: attribute - register: xmlinterfacetype - ignore_errors: true - -- name: Set default no valid nfv instance {{ vm_name }} - set_fact: - valid_nfv_instance: false - -- name: Check whether valid nfv instance {{ vm_name }} - set_fact: - valid_nfv_instance: true - when: "{{ interface_type.interface.type == 'vhostuser' }}" - loop: "{{ xmlinterfacetype.matches }}" - loop_control: - loop_var: interface_type - ignore_errors: true - -- name: Validate NFV instance - vars: - instance_name: "{{ vm_name }}" - import_tasks: validate_instance.yml - when: valid_nfv_instance diff --git a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/check_nfv_pci_address.yml b/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/check_nfv_pci_address.yml deleted file mode 100644 index 468d2bfff..000000000 --- a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/check_nfv_pci_address.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Initialize PCI bandwidth default value - set_fact: - pci_bw: 1 - -- name: Check pci addresses bandwidth - become: true - shell: "lspci -s {{ pci }} -vvnn | grep -i width" - register: pci_bandwidth - -- name: Compute bandwidth on pci address - set_fact: - pci_bw: "{{ pci_bw | int * ((bw_param.split(' ')[1] | replace('x', '')) | replace('GT/s', '') | int) }}" - when: "{{ 'Speed' in bw_param or 'Width' in bw_param }}" - loop: "{{ pci_bandwidth.stdout.split('\n')[0].split(', ') }}" - loop_control: - loop_var: bw_param - -- name: Get interface bandwidth - vars: - dpdk_pci_bw: "{{ pci_bw }}" - dpdk_port_name: "{{ dpdk_port }}" - include_tasks: validate_dpdk_port_bandwidth.yml - loop: "{{ dpdk_ports_list }}" - loop_control: - loop_var: dpdk_port diff --git a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/main.yml b/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/main.yml deleted file mode 100644 index e9d8777e5..000000000 --- a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/main.yml +++ /dev/null @@ -1,219 +0,0 @@ ---- -- name: Initialize validation message list - set_fact: - validation_msg: [] - pmd_isolated: true - -# Gets applied PMD cpus list -- name: Get OVS DPDK PMD cores mask value - become: true - register: pmd_cpu_mask - command: ovs-vsctl --no-wait get Open_vSwitch . other_config:pmd-cpu-mask - changed_when: false - -- name: Check OVS DPDK PMD cores thread siblings - become: true - pmd_threads_siblings_check: - pmd_cpu_mask: "{{ pmd_cpu_mask.stdout }}" - register: pmd_cpus_list - -- name: Set PMD cpus - set_fact: - pmd_cpus: "{{ pmd_cpus_list.pmd_cpus_list }}" - when: pmd_cpus_list.pmd_cpus_list is defined - -# Validates datapath's are mixed or not. 
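# Illustrative note -- the sample lines below are assumptions, not output captured for
# this change. 'ovs-vsctl list bridge' reports one 'datapath_type : ...' entry per
# bridge, and the tasks below flag a compute node whose bridges mix 'system' and
# 'netdev'. A minimal self-contained sketch of the same check:
- name: Example - detect mixed bridge datapath types from sample output
  vars:
    example_datapath_lines:
      - "datapath_type        : netdev"
      - "datapath_type        : system"
  debug:
    msg: >-
      {{ (example_datapath_lines | select('search', 'netdev') | list | length > 0)
         and (example_datapath_lines | select('search', 'system') | list | length > 0) }}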
-- name: Verify system and netdev datapath's are not mixed on compute node - block: - - name: Check bridge datapath type - become: true - shell: "ovs-vsctl list bridge | grep datapath_type" - register: bridge_data_path_type - - - name: Check if any datapath type is netdev - set_fact: - datapath_type_list: "{{ bridge_data_path_type.stdout.split('\n') }}" - when: "{{ 'netdev' in bridge_data_path_type.stdout }}" - - - name: Check if all the datapath type netdev - set_fact: - validation_msg: "{{ validation_msg }} + {{ ['Mixed system and netdev datapath's on the same compute node.'] }}" - when: "{{ not 'netdev' in datapath }}" - loop: "{{ datapath_type_list }}" - loop_control: - loop_var: datapath - -- name: Get DPDK NIC's NUMA info - become: true - get_dpdk_nics_numa_info: - dpdk_mapping_file: /var/lib/os-net-config/dpdk_mapping.yaml - register: dpdk_nics_numa - -- name: Set DPDK NIC's NUMA info - set_fact: - dpdk_nics_numa_info: "{{ dpdk_nics_numa.dpdk_nics_numa_info }}" - when: - - dpdk_nics_numa is defined - -- name: Get libvirtd Process - become: true - shell: |- - ps -Leaf | awk '/\/usr\/sbin\/libvirt[d]/ {print $2; exit}' - register: libvirtd - -- name: Get nova libvirt namespace processes - become: true - shell: |- - pgrep --ns {{ libvirtd.stdout }} - register: nova_libvirt_proceses - -- name: Update nova libvirt namespace processes pid - set_fact: - nova_libvirt_proceses_pid: "{{ nova_libvirt_proceses.stdout.split('\n') | join('|') }}" - -- name: Get nova libvirt launch processes id - become: true - shell: |- - ps -Leaf | grep -E '{{ nova_libvirt_proceses_pid }}' | grep -v pts | awk '{printf "%s\n", $4}' - register: nova_libvirt_launch_pids - -- name: Update nova libvirt launch processes pid - set_fact: - nova_lib_launch_proceses_pid: "{{ nova_libvirt_launch_pids.stdout.split('\n') }}" - -- name: Check pmd cpus used in any other processes - become: true - check_other_processes_pmd_usage: - pmd_cpus: "{{ pmd_cpus | list }}" - exclude_processes_pid: "{{ nova_lib_launch_proceses_pid | list }}" - register: pmd_threads_usage - -- name: Update validation message if any PMD threads usage by other processes message - set_fact: - validation_msg: "{{ validation_msg }} + {{ pmd_threads_usage.messages }}" - when: - - pmd_threads_usage.pmd_interrupts - -# Validates PMD cpus are isolated or not. -- name: Check PMD cores should be isolated - become: true - shell: "cat /proc/cmdline" - register: kernel_args - -- name: Get isolcpus using kernel args - set_fact: - isol_cpus: "{{ kernel_arg.split('=')[1] }}" - when: "{{ 'isolcpus' == kernel_arg.split('=')[0] }}" - loop: "{{ kernel_args.stdout.split(' ') }}" - loop_control: - loop_var: kernel_arg - -- name: Convert isolcpus range list into number list - convert_range_to_numbers_list: - range_list: "{{ isol_cpus }}" - register: isol_cpus_list - -- name: check PMD threads isolated or not - set_fact: - pmd_isolated: false - when: "{{ not pmd_thread | int in isol_cpus_list.number_list }}" - loop: "{{ pmd_cpus }}" - loop_control: - loop_var: pmd_thread - -- name: Set message if pmd threads are not isolated - set_fact: - validation_msg: "{{ validation_msg }} + ['PMD threads are not isolated.']" - when: - - not pmd_isolated - -# Validates any interuppts happened on isolcpus list. 
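# Worked example (values assumed for illustration): with isol_cpus_list.number_list
# equal to [2, 3], the two set_fact tasks below build
#   cpu_columns_format: "%s,%s"
#   cpu_columns:        "$4,$5"
# because in /proc/interrupts awk field $1 is the IRQ label and field $2 is the CPU0
# counter, so isolated CPU N is read from field $(N + 2). The interrupt check then runs
# roughly:
#   cat /proc/interrupts | awk '{printf "%s-%s,%s\n", $1,$4,$5 }' | grep -v [A-Za-z]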
-- name: Set isol cpus required columns - set_fact: - cpu_columns_format: "{{ (cpu_columns_format | default('')) + '%s,' }}" - cpu_columns: "{{ (cpu_columns | default('')) + '$'+ ((cpu | int + 2) | string) + ',' }}" - loop: "{{ isol_cpus_list.number_list | list }}" - loop_control: - loop_var: cpu - -- name: Update cpu columns in required format - set_fact: - cpu_columns_format: "{{ cpu_columns_format | regex_replace(',$', '') }}" - cpu_columns: "{{ cpu_columns | regex_replace(',$', '') }}" - -- name: Check interrupts on isolcpus list - become: true - shell: |- - cat /proc/interrupts | awk '{printf "%s-{{ cpu_columns_format }}\n", $1,{{ cpu_columns }} }' | grep -v [A-Za-z] - register: isolcpus_interrupts - -- name: Isol CPU's interrupts - set_fact: - validation_msg: "{{ validation_msg }} + {{ ['Interrupts exist in Isol cpus ' + ( isol_cpus_list.number_list | join(',')) +': ' + interrupts_line ] }}" - when: "{{ 'CPU' not in interrupts_line and interrupts_line.split('-')[1].replace('0', '').split(',') is any }}" - loop: "{{ isolcpus_interrupts.stdout.split('\n') }}" - loop_control: - loop_var: interrupts_line - -- name: Get list of dpdk ports - become: true - shell: "ovs-appctl dpctl/show | grep 'dpdk: configured'" - register: ovs_dpdk_ports - -- name: Get list of dpdk ports name - set_fact: - dpdk_ports: "{{ dpdk_ports | default([]) }} + {{ [dpdk_port_line.split(': ')[1].split(' ')[0]] }}" - loop: "{{ ovs_dpdk_ports.stdout.split('\n') }}" - loop_control: - loop_var: dpdk_port_line - -- name: Get DPDK NIC's PCI addresses - set_fact: - dpdk_pci_list: |- - {{ (dpdk_pci_list | default([])) }} + {{ [dpdk_nic_info.pci] }} - loop: "{{ dpdk_nics_numa_info }}" - loop_control: - loop_var: dpdk_nic_info - -- name: Check pci addresses bandwidth - vars: - pci: "{{ dpdk_pci }}" - dpdk_ports_list: "{{ dpdk_ports }}" - include_tasks: check_nfv_pci_address.yml - loop: "{{ dpdk_pci_list }}" - loop_control: - loop_var: dpdk_pci - -# validates the NFV instances on OvS DPDK node. -- name: Set container_cli fact from the inventory - set_fact: - container_cli: "{{ hostvars[inventory_hostname].container_cli | default('podman', true) }}" - when: container_cli is not defined - -- name: Get instances list on node - become: true - shell: >- - "{{ container_cli }}" exec -u root nova_libvirt virsh list --all | awk 'NR > 2 { printf $2 "\n"}' - register: instances_list - -- name: Get instances id list - set_fact: - vm_list: "{{ instances_list.stdout.split('\n') }}" - -# Validate all the instances using instances xml -- name: Loop on all instances and validate xml - include_tasks: check_nfv_instances.yml - when: "{{ vm_name }}" - loop: "{{ vm_list }}" - loop_control: - loop_var: vm_name - -# Prints all the validation errors if found. 
-- name: Validation errors - fail: - msg: - - "Failed NFV zero packet loss rules:" - - "{{ validation_msg }}" - when: - - validation_msg is defined - - validation_msg | length > 0 diff --git a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/validate_dpdk_port_bandwidth.yml b/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/validate_dpdk_port_bandwidth.yml deleted file mode 100644 index 85dbb6961..000000000 --- a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/validate_dpdk_port_bandwidth.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Get DPDK port bandwidth - become: true - shell: "ovs-vsctl list interface {{ dpdk_port_name }} | grep -e link_speed= -e dpdk-devargs=" - register: dpdk_port_bw - -- name: Check dpdk port matching pci or not - set_fact: - inf_bw: "{{ dpdk_port_param.split('=')[1] | replace('Gbps', '') }}" - when: "{{ (dpdk_port_bw is defined) and (pci in dpdk_port_bw.stdout) and ('link_speed=' in dpdk_port_param) }}" - loop: "{{ dpdk_port_bw.stdout.split(', ') }}" - loop_control: - loop_var: dpdk_port_param - -- name: Update invalid bandwidth validation message - set_fact: - validation_msg: "{{ validation_msg }} + {{ ['PCI bandwidth configured less than interface link speed.'] }}" - when: - - dpdk_port_bw is defined - - pci in dpdk_port_bw.stdout - - inf_bw | int > dpdk_pci_bw | int diff --git a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/validate_instance.yml b/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/validate_instance.yml deleted file mode 100644 index 30a3a04a2..000000000 --- a/roles/check_nfv_ovsdpdk_zero_packet_loss/tasks/validate_instance.yml +++ /dev/null @@ -1,99 +0,0 @@ ---- -- name: Get instance numa node from xml string - community.general.xml: - xmlstring: "{{ instance_xml_data }}" - xpath: /domain/numatune/memory - content: attribute - register: xml_instance_node - when: '"numatune" in instance_xml_data' - -- name: Get instance associated numa nodes - set_fact: - instance_numa: "{{ xml_instance_node.matches[0].memory.nodeset }}" - when: '"numatune" in instance_xml_data' - -# Validates the instance vcpus list. 
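# Illustrative note (sample XML assumed, not taken from a real instance): the tasks
# below read the <vcpupin> attributes of the instance XML. For a domain containing
#   <cputune>
#     <vcpupin vcpu='0' cpuset='4'/>
#     <vcpupin vcpu='1' cpuset='6'/>
#   </cputune>
# each xpath match looks like {'vcpupin': {'vcpu': '0', 'cpuset': '4'}}, so vcpus_list
# ends up as ['4', '6'] before being checked against the DPDK NICs' NUMA node.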
-- name: Get vcpu list from xml string - community.general.xml: - xmlstring: "{{ instance_xml_data }}" - xpath: /domain/cputune/vcpupin - content: attribute - register: xml_vcpus - when: '"vcpupin" in instance_xml_data' - -- name: Get instance vcpus list - set_fact: - vcpus_list: |- - {{ (vcpus_list | default([])) }} + {{ [vcpu.vcpupin.cpuset] }} - loop: "{{ xml_vcpus.matches }}" - loop_control: - loop_var: vcpu - when: '"vcpupin" in instance_xml_data' - -- name: Check vcpu's aligned with DPDK NIC's NUMA - become: true - check_cpus_aligned_with_dpdk_nics: - cpus: "{{ vcpus_list | join(',') }}" - numa_node: "{{ instance_numa | int }}" - dpdk_nics_numa_info: "{{ dpdk_nics_numa_info }}" - register: valid_cpus - when: '"vcpupin" in instance_xml_data' - -- name: Check vcpu's valid or not - set_fact: - validation_msg: "{{ validation_msg }} + {{ [valid_cpus.message] }}" - when: - - '"vcpupin" in instance_xml_data' - - not valid_cpus.valid_cpus - -# Validates instance emulatorpin threads -- name: Get emulatorpin list from xml string - community.general.xml: - xmlstring: "{{ instance_xml_data }}" - xpath: /domain/cputune/emulatorpin - content: attribute - register: xml_emulatorpin - when: '"emulatorpin" in instance_xml_data' - -- name: Check emulatorpin valid or not - set_fact: - validation_msg: "{{ validation_msg }} + {{ ['Invalid emulatorpin configured for instance ' \ - + instance_name + ': ' + emulatorpin.emulatorpin.cpuset] }}" - when: - - '"emulatorpin" in instance_xml_data' - - emulatorpin.emulatorpin.cpuset in vcpus_list | list - loop: "{{ xml_emulatorpin.matches }}" - loop_control: - loop_var: emulatorpin - -# Validates instance huge page size length is greater than or equal to 6. -- name: Get hugepages from xml string - community.general.xml: - xmlstring: "{{ instance_xml_data }}" - xpath: /domain/memoryBacking/hugepages/page - content: attribute - register: xmlhugepages - when: '"memoryBacking" in instance_xml_data and "hugepages" in instance_xml_data' - -- name: Set instance {{ instance_name }} hugepages details - set_fact: - msg: |- - Huge page size '{{ xmlhugepages.matches[0].page.size }}' - when: - - '"memoryBacking" in instance_xml_data and "hugepages" in instance_xml_data' - - 'xmlhugepages.matches[0].page.size | length >= 6' - -# Validates instance tx rx queue sizes and should be greater than or equal to 1024. -- name: Get {{ instance_name }} libvirt tx | rx queue sizes from xml string - community.general.xml: - xmlstring: "{{ instance_xml_data }}" - xpath: /domain/devices/interface/driver - content: attribute - register: xmlqueues - -- name: Set instance {{ instance_name }} devices tx and rx queue size details - set_fact: - validation_msg: "{{ validation_msg }} + {{ ['Invalid tx/rx queues configured for instance ' - + instance_id + ', tx queue size: ' + xmlqueues.matches[0].driver.tx_queue_size + ' \ - & rx queue size: ' + xmlqueues.matches[0].driver.rx_queue_size] }}" - when: "{{ xmlqueues.matches[0].driver.tx_queue_size | int < 1024 and xmlqueues.matches[0].driver.rx_queue_size | int < 1024 }}" diff --git a/roles/check_ntp_reachability/tasks/main.yml b/roles/check_ntp_reachability/tasks/main.yml deleted file mode 100644 index b067efffe..000000000 --- a/roles/check_ntp_reachability/tasks/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Check NTP server reachability - shell: - chronyc -n sources | awk 'NR > 2 { print $2 }' - register: sources_output - -- name: Check if the time is synchronised - command: chronyc tracking - register: tracking_output - -- name: Fail if chrony is not synchronised - fail: - msg: > - The time is not synchronized with the NTP servers. Currently daemon is trying - to connect with the following NTP servers: - {{ sources_output.stdout_lines }} - when: tracking_output.stdout is search("Not synchronised") diff --git a/roles/check_reboot/defaults/main.yaml b/roles/check_reboot/defaults/main.yaml deleted file mode 100644 index c2a2c97db..000000000 --- a/roles/check_reboot/defaults/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fail_if_reboot: true -no_reboot_message: "Reboot should not be necessary." diff --git a/roles/check_reboot/tasks/main.yaml b/roles/check_reboot/tasks/main.yaml deleted file mode 100644 index 37f6b6e77..000000000 --- a/roles/check_reboot/tasks/main.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Make sure yum-utils is installed - become: true - package: - name: yum-utils - -- name: Check if reboot is required - register: command_output - shell: | - needs-restarting -r - -- name: Fail if core packages need reboot - fail: - msg: | - {{ command_output.stdout }} - when: - - no_reboot_message not in command_output.stdout - - fail_if_reboot diff --git a/roles/check_rhsm_version/defaults/main.yml b/roles/check_rhsm_version/defaults/main.yml deleted file mode 100644 index 845ef8b1a..000000000 --- a/roles/check_rhsm_version/defaults/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "check_rhsm_version" -check_rhsm_version_debug: false - -# subscription manager command used to retrieve the -# rhsm release. Parametrized mostly for testing -check_rhsm_version_command: "subscription-manager release --show" - -# Version expected by the validation, if the configured RHSM version -# does not match the validation will fail. -check_rhsm_version_expected: "8.2" diff --git a/roles/check_rhsm_version/molecule/default/converge.yml b/roles/check_rhsm_version/molecule/default/converge.yml deleted file mode 100644 index 9e6aba82c..000000000 --- a/roles/check_rhsm_version/molecule/default/converge.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - roles: - - role: "check_rhsm_version" - vars: - check_rhsm_version_command: "echo 'Release: 8.2'" - check_rhsm_version_expected: "8.2" diff --git a/roles/check_rhsm_version/molecule/default/molecule.yml b/roles/check_rhsm_version/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_rhsm_version/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_rhsm_version/molecule/rhsm_mismatch/converge.yml b/roles/check_rhsm_version/molecule/rhsm_mismatch/converge.yml deleted file mode 100644 index 6af3dba30..000000000 --- a/roles/check_rhsm_version/molecule/rhsm_mismatch/converge.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - tasks: - - name: detect wrong rhsm version - block: - - name: run validation with wrong rhsm version - include_role: - name: "check_rhsm_version" - vars: - check_rhsm_version_command: "echo 'Release: 8.4'" - check_rhsm_version_expected: "8.2" - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Status message - debug: - msg: "Detected RHSM version mismatch!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The rhsm version mismatch should have detected but it wasn't.. diff --git a/roles/check_rhsm_version/molecule/rhsm_mismatch/molecule.yml b/roles/check_rhsm_version/molecule/rhsm_mismatch/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_rhsm_version/molecule/rhsm_mismatch/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_rhsm_version/tasks/main.yml b/roles/check_rhsm_version/tasks/main.yml deleted file mode 100644 index 95b8cf913..000000000 --- a/roles/check_rhsm_version/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# "check_rhsm_version" tasks -- name: Retrieve RHSM version - become: true - command: "{{ check_rhsm_version_command }}" - register: rhsm_version_ret - -- name: Check RHSM version - fail: - msg: '{{ check_rhsm_version_expected }} does not match configured rhsm_version {{ rhsm_version_ret.stdout }}' - when: "check_rhsm_version_expected not in rhsm_version_ret.stdout" diff --git a/roles/check_uc_hostname/defaults/main.yml b/roles/check_uc_hostname/defaults/main.yml deleted file mode 100644 index afde89b5b..000000000 --- a/roles/check_uc_hostname/defaults/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "check_uc_hostname" - -# Debugging mode -check_uc_hostname_debug: false - -# Contains the ansible inventory hosts variable, -# which is used to get the hostvars stack (plan) name -check_uc_hostname_inventory_host: undercloud diff --git a/roles/check_uc_hostname/molecule/default/converge.yml b/roles/check_uc_hostname/molecule/default/converge.yml deleted file mode 100644 index b853b141c..000000000 --- a/roles/check_uc_hostname/molecule/default/converge.yml +++ /dev/null @@ -1,238 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - tasks: - - name: Working 1 - openstack one entry in list - copy: - dest: /usr/bin/openstack - mode: 0755 - content: | - #!/bin/bash - echo "{" - echo " \"parameters\": {}," - echo " \"parameter_defaults\": {" - echo " \"DockerInsecureRegistryAddress\": [" - echo " \"undercloud-0.ctlplane.redhat.local:8787\"" - echo " ]" - echo " }" - echo "}" - - - name: Working 1 - hiera one entry in list - copy: - dest: /usr/bin/hiera - mode: 0755 - content: | - #!/bin/bash - echo [\"undercloud-0.ctlplane.redhat.local\"] - exit 0 - - - name: Test Working 1 - include_role: - name: check_uc_hostname - vars: - check_uc_hostname_inventory_host: centos - - - name: Working 2 - openstack multiple entry in list - copy: - dest: /usr/bin/openstack - mode: 0755 - content: | - #!/bin/bash - echo "{" - echo " \"parameters\": {}," - echo " \"parameter_defaults\": {" - echo " \"DockerInsecureRegistryAddress\": [" - echo " \"undercloud-0.ctlplane.redhat.local:8787\"," - echo " \"undercloud-1.ctlplane.redhat.local:8787\"," - echo " \"undercloud-2.ctlplane.redhat.local:8787\"" - echo " ]" - echo " }" - echo "}" - - - - name: Working 2 - hiera multiple entry in list - copy: - dest: /usr/bin/hiera - mode: 0755 - content: | - #!/bin/bash - echo "[" - echo " \"undercloud-0.ctlplane.redhat.local\"," - echo " \"undercloud-1.ctlplane.redhat.local\"," - echo " \"undercloud-2.ctlplane.redhat.local\"" - echo "]" - exit 0 - - - name: Test Working 2 - include_role: - name: check_uc_hostname - vars: - check_uc_hostname_inventory_host: centos - - - name: Working 3 - openstack multiple entry in list - copy: - dest: /usr/bin/openstack - mode: 0755 - content: | - #!/bin/bash - echo "{" - echo " \"parameters\": {}," - echo " \"parameter_defaults\": {" - echo " \"DockerInsecureRegistryAddress\": [" - echo " \"undercloud-0.ctlplane.redhat.local:8787\"," - echo " \"redhat.registry.io:8787\"," - echo " \"mysuper-registry.io\"," - echo " ]" - echo " }" - echo "}" - - - name: Working 3 - hiera single entry in list - copy: - dest: /usr/bin/hiera - mode: 0755 - content: | - #!/bin/bash - echo "[" - echo " \"undercloud-0.ctlplane.redhat.local\"," - echo "]" - exit 0 - - - name: Test Working 3 - include_role: - name: check_uc_hostname - vars: - check_uc_hostname_inventory_host: centos - - - name: Working 4 - openstack multiple entry in list - copy: - dest: /usr/bin/openstack - mode: 0755 - content: | - #!/bin/bash - echo "{" - echo " \"parameters\": {}," - echo " \"parameter_defaults\": {" - echo " \"DockerInsecureRegistryAddress\": [" - echo " \"BROKENCLOUD-0.ctlplane.redhat.local:8787\"," - echo " \"redhat.registry.io:8787\"," - echo " \"mysuper-registry.io\"," - echo " ]" - echo " }" - echo "}" - - - name: Working 4 - we do not fail if hiera returns empty list - copy: - dest: /usr/bin/hiera - mode: 0755 - content: | - #!/bin/bash - echo "[]" - exit 0 - - - name: Test Working 4 - include_role: - name: check_uc_hostname - vars: - check_uc_hostname_inventory_host: centos - - - name: Failing 1 - we fail if DockerInsecureRegistryAddress is not found - copy: - dest: /usr/bin/openstack - mode: 0755 - content: | - #!/bin/bash - echo "{" - echo " \"parameters\": {}," - echo " \"parameter_defaults\": {" - echo " }" - echo "}" - - - name: Failing 1 - hiera no entry found - copy: - dest: /usr/bin/hiera - mode: 0755 - content: | - #!/bin/bash - exit 0 - - - name: Failing 1 - block: - - include_role: - name: check_uc_hostname - vars: - check_uc_hostname_inventory_host: centos - - rescue: - - name: Clear host errors - meta: 
clear_host_errors - - - debug: - msg: The validation Case Failing 1 works! Continuing playbook run... - - - name: Failing 2 - openstack multiple entry in list - copy: - dest: /usr/bin/openstack - mode: 0755 - content: | - #!/bin/bash - echo "{" - echo " \"parameters\": {}," - echo " \"parameter_defaults\": {" - echo " \"DockerInsecureRegistryAddress\": [" - echo " \"BROKENCLOUD-0.ctlplane.redhat.local:8787\"," - echo " \"redhat.registry.io:8787\"," - echo " \"mysuper-registry.io\"," - echo " ]" - echo " }" - echo "}" - - - - name: Failing 2 - hiera single entry in list - copy: - dest: /usr/bin/hiera - mode: 0755 - content: | - #!/bin/bash - echo "[" - echo " \"undercloud-0.ctlplane.redhat.local\"," - echo "]" - exit 0 - - - name: Failing 2 - block: - - include_role: - name: check_uc_hostname - vars: - check_uc_hostname_inventory_host: centos - - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation Case Failing 2 works! Ending playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The check-uc-hostname validation failed to check that the hostnames - did not match. diff --git a/roles/check_uc_hostname/molecule/default/molecule.yml b/roles/check_uc_hostname/molecule/default/molecule.yml deleted file mode 100644 index 6651c40ce..000000000 --- a/roles/check_uc_hostname/molecule/default/molecule.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. - -provisioner: - name: ansible - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - vars: - plan: qe-Cloud-0 - plans: [qe-Cloud-0] - log: true - options: - vvv: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" diff --git a/roles/check_uc_hostname/tasks/main.yml b/roles/check_uc_hostname/tasks/main.yml deleted file mode 100644 index 4fd55e247..000000000 --- a/roles/check_uc_hostname/tasks/main.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
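# Illustrative sketch -- the example_-prefixed variables and the registry values are
# assumptions added for illustration only. It mirrors the idea of the tasks below:
# strip the port from each DockerInsecureRegistryAddress entry (here per item rather
# than on the whole list) and intersect the result with the node names hiera returns.
- name: Example - strip ports and intersect with hiera node names
  vars:
    example_registry_addresses:
      - "undercloud-0.ctlplane.redhat.local:8787"
      - "registry.example.com:8787"
    example_hiera_node_names:
      - "undercloud-0.ctlplane.redhat.local"
  debug:
    msg: "{{ example_hiera_node_names | intersect(example_registry_addresses | map('regex_replace', ':\\w+', '') | list) }}"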
- -- name: Make sure stack plan is in the inventory - fail: - msg: | - Stack plan not found - Check your ansible inventory to make sure your plan attribute is defined - when: hostvars[ check_uc_hostname_inventory_host ].plan is not defined - -- name: Get stack plan from inventory - set_fact: - plan: "{{ hostvars[ check_uc_hostname_inventory_host ].plan }}" - -- name: Retrieve stack environment - command: "openstack stack environment show -f yaml {{ plan }}" - register: stack_env - -- name: Fail if we can't find DockerInsecureRegistryAddress - fail: - msg: | - DockerInsecureRegistryAddress not defined - when: stack_env.stdout.find("DockerInsecureRegistryAddress") == -1 - -- when: - - stack_env.stdout.find("DockerInsecureRegistryAddress") != -1 - block: - - name: Query stack for DockerInsecureRegistryAddress - become: true - hiera: - name: container_image_prepare_node_names - - - name: Get DockerInsecureRegistryAddress value from stack's environment - set_fact: - uc_hostname_parameter_defaults: "{{ dict(stack_env.get('stdout', default_stdout) - | from_yaml).parameter_defaults.DockerInsecureRegistryAddress - | regex_replace(':\\w+', '') }}" - - - name: Get UC hostname (DockerInsecureRegistryAddress) from hiera - set_fact: - uc_hostname_stack: "{{ ansible_facts.container_image_prepare_node_names }}" - - - name: Get Intersect between UC hostname from stack and parameter_defaults - set_fact: - intersect: "{{ uc_hostname_stack | intersect(uc_hostname_parameter_defaults) }}" - - - name: An Intersect match means parameter_defaults was updated by customer correctly - set_fact: - mismatch: "{{ uc_hostname_stack | difference(intersect) }}" - - - name: Verify UC hostnames match - fail: - msg: | - The UC hostnames from stack and containers-prepare-parameter.yaml must match - UC hostname from parameter_defaults is {{ uc_hostname_parameter_defaults }} - UC hostname from stack (hiera) is {{ uc_hostname_stack }} - failed_when: mismatch != [] diff --git a/roles/check_undercloud_conf/defaults/main.yml b/roles/check_undercloud_conf/defaults/main.yml deleted file mode 100644 index c50aa69ca..000000000 --- a/roles/check_undercloud_conf/defaults/main.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. 
- -# All variables within this role should have a prefix of "check_undercloud_conf" -check_undercloud_conf_debug: false -required_params: - - container_images_file -deprecated_params: - - masquerade_network - - docker_images_file - - enabled_drivers - - docker_bip - - network_gateway - - network_cidr - - discovery_interface - - discovery_runbench - - docker_insecure_registries - - docker_registry_mirror - - ipxe_enabled -unsupported_drivers: - - pxe_ipmitool - - pxe_drac - - pxe_ilo - - pxe_irmc - - fake_pxe diff --git a/roles/check_undercloud_conf/molecule/config_OK/converge.yml b/roles/check_undercloud_conf/molecule/config_OK/converge.yml deleted file mode 100644 index f2e75960f..000000000 --- a/roles/check_undercloud_conf/molecule/config_OK/converge.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Validate a correct undercloud.conf with content: -# -# [DEFAULT] -# local_interface = em0 -# local_ip = 192.168.24.1/24 -# undercloud_public_host = 192.168.24.2 -# undercloud_admin_host = 192.168.24.3 -# undercloud_nameservers = 172.16.0.1,10.0.0.1 -# undercloud_ntp_servers=clock1.rdu2.redhat.com -# overcloud_domain_name=redhat.local -# undercloud_service_certificate = /undercloud.pem -# hieradata_override = /hiera_override.yaml -# container_images_file= /containers-prepare-parameter.yaml -# enabled_hw_types = ipmi,redfish -# custom_env_files = /skip_rhel_release.yaml -# -# [ctlplane-subnet] -# local_subnet = ctlplane-subnet -# cidr = 192.168.24.0/24 -# dhcp_start = 192.168.24.5 -# dhcp_end = 192.168.24.55 -# gateway = 192.168.24.1 -# inspection_iprange = 192.168.24.100,192.168.24.120 -# -- name: Converge - hosts: all - gather_facts: false - roles: - - role: "check_undercloud_conf" diff --git a/roles/check_undercloud_conf/molecule/config_OK/molecule.yml b/roles/check_undercloud_conf/molecule/config_OK/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_undercloud_conf/molecule/config_OK/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_undercloud_conf/molecule/config_OK/prepare.yml b/roles/check_undercloud_conf/molecule/config_OK/prepare.yml deleted file mode 100644 index 97c7df874..000000000 --- a/roles/check_undercloud_conf/molecule/config_OK/prepare.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - local_interface = em0 - local_ip = 192.168.24.1/24 - undercloud_public_host = 192.168.24.2 - undercloud_admin_host = 192.168.24.3 - undercloud_nameservers = 172.16.0.1,10.0.0.1 - undercloud_ntp_servers=clock1.rdu2.redhat.com - overcloud_domain_name=redhat.local - undercloud_service_certificate = /undercloud.pem - hieradata_override = /hiera_override.yaml - container_images_file= /containers-prepare-parameter.yaml - enabled_hardware_types = ipmi,redfish - custom_env_files = /skip_rhel_release.yaml - - [ctlplane-subnet] - local_subnet = ctlplane-subnet - cidr = 192.168.24.0/24 - dhcp_start = 192.168.24.5 - dhcp_end = 192.168.24.55 - gateway = 192.168.24.1 - inspection_iprange = 192.168.24.100,192.168.24.120 - - - name: create files with random content - copy: - dest: "/{{ item }}" - content: | - I'm the file {{ item }} - loop: - - 'undercloud.pem' - - 'hiera_override.yaml' - - 'containers-prepare-parameter.yaml' - - 'skip_rhel_release.yaml' diff --git a/roles/check_undercloud_conf/molecule/default/converge.yml b/roles/check_undercloud_conf/molecule/default/converge.yml deleted file mode 100644 index e00ae6fe5..000000000 --- a/roles/check_undercloud_conf/molecule/default/converge.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Validate an undercloud.conf with the following content: -# -# [DEFAULT] -# container_images_file= /home/stack/containers-prepare-parameter.yaml -# -# The validation should fail as the file /home/stack/containers-prepare-parameter.yaml -# does not exist. - -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: detect non existing file assigned to a parameter - block: - - name: run validation check_undercloud_conf - include_role: - name: "check_undercloud_conf" - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Detect right error message - fail: - msg: "Test failed due to wrong error: {{ ansible_failed_result.results.0.msg }}" - when: - - not ansible_failed_result.results.0.msg | - regex_search("^File configured in .* does not exist$") - - - name: Status message - debug: - msg: "Detected undercloud.conf configuration issue!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - A configuration issue should have been detected but it wasn't.. 
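# Illustrative note -- the message text below is an assumption, not quoted from the
# role: the rescue above accepts any failure whose message matches
#   ^File configured in .* does not exist$
# so a failure such as "File configured in DEFAULT/container_images_file does not exist"
# counts as the expected outcome, while any other failure message trips the
# "Test failed due to wrong error" check instead.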
diff --git a/roles/check_undercloud_conf/molecule/default/molecule.yml b/roles/check_undercloud_conf/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_undercloud_conf/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_undercloud_conf/molecule/default/prepare.yml b/roles/check_undercloud_conf/molecule/default/prepare.yml deleted file mode 100644 index 72ce14cc0..000000000 --- a/roles/check_undercloud_conf/molecule/default/prepare.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - container_images_file= /home/stack/containers-prepare-parameter.yaml diff --git a/roles/check_undercloud_conf/molecule/deprecated_drivers/converge.yml b/roles/check_undercloud_conf/molecule/deprecated_drivers/converge.yml deleted file mode 100644 index e88f6cbe4..000000000 --- a/roles/check_undercloud_conf/molecule/deprecated_drivers/converge.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# Validate that the validation fails when the undercloud.conf contains -# deprecated drivers in the enabled_hardware_types: -# -# [DEFAULT] -# local_interface = em0 -# local_ip = 192.168.24.1/24 -# undercloud_public_host = 192.168.24.2 -# undercloud_admin_host = 192.168.24.3 -# undercloud_nameservers = 172.16.0.1,10.0.0.1 -# undercloud_ntp_servers=clock1.rdu2.redhat.com -# overcloud_domain_name=redhat.local -# undercloud_service_certificate = /undercloud.pem -# hieradata_override = /hiera_override.yaml -# container_images_file= /containers-prepare-parameter.yaml -# enabled_hardware_types = pxe_ipmitool,pxe_drac,fake_pxe,ipmi,redfish -# custom_env_files = /skip_rhel_release.yaml -# -# [ctlplane-subnet] -# local_subnet = ctlplane-subnet -# cidr = 192.168.24.0/24 -# dhcp_start = 192.168.24.5 -# dhcp_end = 192.168.24.55 -# gateway = 192.168.24.1 -# inspection_iprange = 192.168.24.100,192.168.24.120 -# -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: detect deprecated parameters in undercloud conf - block: - - name: run validation check_undercloud_conf - include_role: - name: "check_undercloud_conf" - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Detect right error message - fail: - msg: "Test failed due to wrong error: {{ ansible_failed_result.msg }}" - when: - - not ansible_failed_result.msg | regex_search("^Hardware type\\(s\\) .* are not supported any more$") - - - name: Status message - debug: - msg: "Detected undercloud.conf configuration issue!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - A configuration issue should have been detected but it wasn't.. diff --git a/roles/check_undercloud_conf/molecule/deprecated_drivers/molecule.yml b/roles/check_undercloud_conf/molecule/deprecated_drivers/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_undercloud_conf/molecule/deprecated_drivers/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_undercloud_conf/molecule/deprecated_drivers/prepare.yml b/roles/check_undercloud_conf/molecule/deprecated_drivers/prepare.yml deleted file mode 100644 index 9df3264c7..000000000 --- a/roles/check_undercloud_conf/molecule/deprecated_drivers/prepare.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
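# Minimal sketch of the idea this scenario exercises -- not the role's actual
# implementation, and the example_-prefixed variables are assumptions. Intersecting the
# configured enabled_hardware_types with the unsupported_drivers list from the role
# defaults yields the legacy drivers the converge above expects to be reported:
- name: Example - flag unsupported hardware types
  vars:
    example_enabled_hardware_types: [pxe_ipmitool, pxe_drac, fake_pxe, ipmi, redfish]
    example_unsupported_drivers: [pxe_ipmitool, pxe_drac, pxe_ilo, pxe_irmc, fake_pxe]
  debug:
    msg: "{{ example_enabled_hardware_types | intersect(example_unsupported_drivers) }}"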
- - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - local_interface = em0 - local_ip = 192.168.24.1/24 - undercloud_public_host = 192.168.24.2 - undercloud_admin_host = 192.168.24.3 - undercloud_nameservers = 172.16.0.1,10.0.0.1 - undercloud_ntp_servers=clock1.rdu2.redhat.com - overcloud_domain_name=redhat.local - undercloud_service_certificate = /undercloud.pem - hieradata_override = /hiera_override.yaml - container_images_file= /containers-prepare-parameter.yaml - enabled_hardware_types = pxe_ipmitool,pxe_drac,fake_pxe,ipmi,redfish - custom_env_files = /skip_rhel_release.yaml - - [ctlplane-subnet] - local_subnet = ctlplane-subnet - cidr = 192.168.24.0/24 - dhcp_start = 192.168.24.5 - dhcp_end = 192.168.24.55 - gateway = 192.168.24.1 - inspection_iprange = 192.168.24.100,192.168.24.120 - - - name: create files with random content - copy: - dest: "/{{ item }}" - content: | - I'm the file {{ item }} - loop: - - 'undercloud.pem' - - 'hiera_override.yaml' - - 'containers-prepare-parameter.yaml' - - 'skip_rhel_release.yaml' diff --git a/roles/check_undercloud_conf/molecule/deprecated_params/converge.yml b/roles/check_undercloud_conf/molecule/deprecated_params/converge.yml deleted file mode 100644 index 8e7583b35..000000000 --- a/roles/check_undercloud_conf/molecule/deprecated_params/converge.yml +++ /dev/null @@ -1,61 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Validate that the validation fails when undercloud.conf contains -# deprecated parameters: -# -# -# [DEFAULT] -# container_images_file= /containers-prepare-parameter.yaml -# enabled_drivers = ipxe -# masquerade_network=192.168.24.0/24 -# ipxe_enabled = true -# docker_insecure_registries = 'docker.io' -# custom_env_files = /skip_rhel_release.yaml -# network_cidr = 192.168.24.0/24 -# network_gateway =192.168.24.1 -# -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: detect deprecated parameters in undercloud conf - block: - - name: run validation check_undercloud_conf - include_role: - name: "check_undercloud_conf" - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Detect right error message - fail: - msg: "Test failed due to wrong error: {{ ansible_failed_result.msg }}" - when: - - not ansible_failed_result.msg | - regex_search("^Parameters .* are deprecated for .*$") - - - name: Status message - debug: - msg: "Detected undercloud.conf configuration issue!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - A configuration issue should have been detected but it wasn't.. 
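The deprecated_params scenario above expects the run to fail with a message matching "^Parameters .* are deprecated for .*$". Behind that message (see check_undercloud_conf/tasks/check_syntax.yml further down in this patch) the check is a set intersection between the option names present in undercloud.conf and a deprecated list. Below is a minimal standalone sketch of that logic, assuming an illustrative deprecated list and using Python's configparser in place of the role's regex-based extraction:

```python
#!/usr/bin/env python3
# Minimal sketch, not part of the role: the deprecated-parameter check is a
# set intersection between the options present in undercloud.conf and a
# deprecated list. The list below is illustrative, not the authoritative one.
import configparser

DEPRECATED_PARAMS = {
    "enabled_drivers", "masquerade_network", "ipxe_enabled",
    "docker_insecure_registries", "network_cidr", "network_gateway",
}


def deprecated_in_conf(path):
    conf = configparser.ConfigParser()
    conf.read(path)
    present = set(conf.defaults())      # options from [DEFAULT]
    for section in conf.sections():     # plus every other section
        present.update(conf.options(section))
    return sorted(DEPRECATED_PARAMS & present)


if __name__ == "__main__":
    found = deprecated_in_conf("undercloud.conf")
    if found:
        raise SystemExit("Parameters {} are deprecated for undercloud.conf"
                         .format(", ".join(found)))
    print("No deprecated parameters found.")
```

The role itself reaches the same verdict with the intersect filter applied to the property names it scrapes from the slurped file, rather than configparser.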
diff --git a/roles/check_undercloud_conf/molecule/deprecated_params/molecule.yml b/roles/check_undercloud_conf/molecule/deprecated_params/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_undercloud_conf/molecule/deprecated_params/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_undercloud_conf/molecule/deprecated_params/prepare.yml b/roles/check_undercloud_conf/molecule/deprecated_params/prepare.yml deleted file mode 100644 index fdaca9a1c..000000000 --- a/roles/check_undercloud_conf/molecule/deprecated_params/prepare.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - container_images_file= /containers-prepare-parameter.yaml - enabled_drivers = ipxe - masquerade_network=192.168.24.0/24 - ipxe_enabled = true - docker_insecure_registries = 'docker.io' - custom_env_files = /skip_rhel_release.yaml - network_cidr = 192.168.24.0/24 - network_gateway =192.168.24.1 - - - name: create files with random content - copy: - dest: "/{{ item }}" - content: | - I'm the file {{ item }} - loop: - - 'containers-prepare-parameter.yaml' - - 'skip_rhel_release.yaml' diff --git a/roles/check_undercloud_conf/molecule/required_missing/converge.yml b/roles/check_undercloud_conf/molecule/required_missing/converge.yml deleted file mode 100644 index a350d7ccd..000000000 --- a/roles/check_undercloud_conf/molecule/required_missing/converge.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Validate that the validation fails when the undercloud.conf does not contain -# a required parameter. 
In this case, container_images_file: -# -# [DEFAULT] -# local_interface = em0 -# local_ip = 192.168.24.1/24 -# undercloud_public_host = 192.168.24.2 -# undercloud_admin_host = 192.168.24.3 -# undercloud_nameservers = 172.16.0.1,10.0.0.1 -# undercloud_ntp_servers=clock1.rdu2.redhat.com -# overcloud_domain_name=redhat.local -# undercloud_service_certificate = /undercloud.pem -# hieradata_override = /hiera_override.yaml -# enabled_hardware_types = ipmi,redfish -# custom_env_files = /skip_rhel_release.yaml -# -# [ctlplane-subnet] -# local_subnet = ctlplane-subnet -# cidr = 192.168.24.0/24 -# dhcp_start = 192.168.24.5 -# dhcp_end = 192.168.24.55 -# gateway = 192.168.24.1 -# inspection_iprange = 192.168.24.100,192.168.24.120 -# -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: detect missing required parameter from conf - block: - - name: run validation check_undercloud_conf - include_role: - name: "check_undercloud_conf" - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Detect right error message - fail: - msg: "Test failed due to wrong error: {{ ansible_failed_result.results.0.msg }}" - when: - - not ansible_failed_result.results.0.msg | - regex_search("^Property .* is required in .*$") - - - name: Status message - debug: - msg: "Detected undercloud.conf configuration issue!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - A configuration issue should have been detected but it wasn't.. diff --git a/roles/check_undercloud_conf/molecule/required_missing/molecule.yml b/roles/check_undercloud_conf/molecule/required_missing/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/check_undercloud_conf/molecule/required_missing/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/check_undercloud_conf/molecule/required_missing/prepare.yml b/roles/check_undercloud_conf/molecule/required_missing/prepare.yml deleted file mode 100644 index 54951b878..000000000 --- a/roles/check_undercloud_conf/molecule/required_missing/prepare.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - local_interface = em0 - local_ip = 192.168.24.1/24 - undercloud_public_host = 192.168.24.2 - undercloud_admin_host = 192.168.24.3 - undercloud_nameservers = 172.16.0.1,10.0.0.1 - undercloud_ntp_servers=clock1.rdu2.redhat.com - overcloud_domain_name=redhat.local - undercloud_service_certificate = /undercloud.pem - hieradata_override = /hiera_override.yaml - enabled_hardware_types = ipmi,redfish - custom_env_files = /skip_rhel_release.yaml - - [ctlplane-subnet] - local_subnet = ctlplane-subnet - cidr = 192.168.24.0/24 - dhcp_start = 192.168.24.5 - dhcp_end = 192.168.24.55 - gateway = 192.168.24.1 - inspection_iprange = 192.168.24.100,192.168.24.120 - - - name: create files with random content - copy: - dest: "/{{ item }}" - content: | - I'm the file {{ item }} - loop: - - 'undercloud.pem' - - 'hiera_override.yaml' - - 'skip_rhel_release.yaml' diff --git a/roles/check_undercloud_conf/tasks/check_syntax.yml b/roles/check_undercloud_conf/tasks/check_syntax.yml deleted file mode 100644 index cd422ad57..000000000 --- a/roles/check_undercloud_conf/tasks/check_syntax.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: Verify required properties - lineinfile: - name: "{{ ansible_env.HOME }}/undercloud.conf" - regexp: "^{{ item }}\\s*=.*$" - state: absent - check_mode: true - register: uc_conf - loop: "{{ required_params }}" - -- name: "Fail if parameter not present in {{ ansible_env.HOME }}/undercloud.conf" - fail: - msg: >- - Property {{ item.item }} is required in - {{ ansible_env.HOME }}/undercloud.conf - failed_when: not item.changed - loop: "{{ uc_conf.results }}" - -- name: Check deprecated parameters - vars: - common_deprecated: "{{ uc_conf_props | intersect(deprecated_params) }}" - fail: - msg: >- - Parameters {{ common_deprecated | join(', ') }} are deprecated for - {{ ansible_env.HOME }}/undercloud.conf - when: - - common_deprecated is defined - - common_deprecated|length > 0 - -- name: Get hw_types from conf file - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: enabled_hardware_types - register: hw_types - -- name: Check deprecated drivers - vars: - hw_types_common: "{{ hw_types.value.split(',')|intersect(unsupported_drivers) }}" - fail: - msg: >- - Hardware type(s) {{ hw_types_common | join(', ') }} - are not supported any more - when: - - "'enabled_hardware_types' in uc_conf_props" - - hw_types_common|length > 0 diff --git a/roles/check_undercloud_conf/tasks/main.yml b/roles/check_undercloud_conf/tasks/main.yml deleted file mode 100644 index e627f8bcb..000000000 --- a/roles/check_undercloud_conf/tasks/main.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -# "check_undercloud_conf" tasks -- name: "Retrieve {{ ansible_env.HOME }}/undercloud.conf content" - slurp: - src: "{{ ansible_env.HOME }}/undercloud.conf" - register: uc_conf_content - -- name: Store all properties and file type properties - # b64decode will return all lines delimited by \n character in a - # single string. Therefore, we can't really use ^/$. - # Get the string ([a-zA-Z0-9_]) after each \n skipping whitespaces - # until the equals. - # For the file properties, store all properties which after the = - # matches a directory. The directory may optionally start - # with . or ~, followed by / and anything else until getting to \n. - # Ex: container_images_file= /home/stack/containers-prepare-parameter.yaml - set_fact: - uc_conf_props: "{{ uc_conf_content['content'] | b64decode | - regex_findall('\\\n\\s*(\\w+)\\s*=[^\\\n]') }}" - uc_file_props: "{{ uc_conf_content['content'] | b64decode | - regex_findall('\\\n\\s*(\\w+)\\s*=\\s*[.~]*\\/[^\\\n]') }}" - -- name: Get value from conf file - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: "{{ item }}" - register: conf_values - loop: "{{ uc_file_props }}" - -- name: Check file property points to existing file - stat: - path: "{{ item.value }}" - register: file_prop_stat - loop: "{{ conf_values.results }}" - -- name: fail if location doesn't exist - fail: - msg: >- - File configured in {{ ansible_env.HOME }}/undercloud.conf {{ item.item.item }} - does not exist - when: not item.stat.exists - loop: "{{ file_prop_stat.results }}" - -- include_tasks: check_syntax.yml diff --git a/roles/collect_flavors_and_verify_profiles/molecule/default/converge.yml b/roles/collect_flavors_and_verify_profiles/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/collect_flavors_and_verify_profiles/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/collect_flavors_and_verify_profiles/molecule/default/molecule.yml b/roles/collect_flavors_and_verify_profiles/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/collect_flavors_and_verify_profiles/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml.
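The two regex_findall expressions in check_undercloud_conf/tasks/main.yml above key off the \n separators in the slurped, base64-decoded file rather than ^/$ anchors. A small standalone sketch (not part of the role) of what each pattern extracts, using sample lines taken from the comments and molecule scenarios above:

```python
#!/usr/bin/env python3
# Minimal sketch, not part of the role: the two patterns that build
# uc_conf_props and uc_file_props, written as plain Python regexes.
import re

SAMPLE = """
[DEFAULT]
undercloud_ntp_servers=clock1.rdu2.redhat.com
hieradata_override = /hiera_override.yaml
container_images_file= /home/stack/containers-prepare-parameter.yaml
"""

# Every "name = value" pair: a newline, optional whitespace, the name, "=".
uc_conf_props = re.findall(r'\n\s*(\w+)\s*=[^\n]', SAMPLE)

# Only pairs whose value looks like a path: optional "." or "~", then "/".
uc_file_props = re.findall(r'\n\s*(\w+)\s*=\s*[.~]*/[^\n]', SAMPLE)

print(uc_conf_props)
# ['undercloud_ntp_servers', 'hieradata_override', 'container_images_file']
print(uc_file_props)
# ['hieradata_override', 'container_images_file']
```

Only the names in the second list are then fed to validations_read_ini and stat-checked so that every path configured in undercloud.conf actually exists.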
diff --git a/roles/collect_flavors_and_verify_profiles/tasks/main.yml b/roles/collect_flavors_and_verify_profiles/tasks/main.yml deleted file mode 100644 index 310ff87b3..000000000 --- a/roles/collect_flavors_and_verify_profiles/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Collect and check the flavors - check_flavors: - roles_info: "{{ lookup('roles_info', wantlist=True) }}" - flavors: "{{ lookup('nova_flavors', wantlist=True) }}" - register: flavor_result - -- name: Verify the profiles - verify_profiles: - nodes: "{{ lookup('ironic_nodes', wantlist=True) }}" - flavors: "{{ flavor_result.flavors }}" diff --git a/roles/collect_flavors_and_verify_profiles/vars/main.yml b/roles/collect_flavors_and_verify_profiles/vars/main.yml deleted file mode 100644 index b687bb7f2..000000000 --- a/roles/collect_flavors_and_verify_profiles/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Collect and verify role flavors - description: > - This validation checks the flavors assigned to roles exist and have the - correct capabilities set. - groups: - - pre-upgrade diff --git a/roles/compute_tsx/defaults/main.yml b/roles/compute_tsx/defaults/main.yml deleted file mode 100644 index 56ed32765..000000000 --- a/roles/compute_tsx/defaults/main.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "compute_tsx" - -# Debugging mode - Whether or not to print the computed variables during execution -compute_tsx_debug: false - -# If set to `true` it will not return a failure, but will simply print the failure -compute_tsx_warning: false - -# Variable used when this validation is called by tripleo-heat-templates -compute_tsx_kernel_args: "" - -# Information message to be printed in warning/failure mode -compute_tsx_information_msg: | - For more information on why we must explicitly define the TSX flag, please visit: - https://access.redhat.com/solutions/6036141 diff --git a/roles/compute_tsx/molecule/default/converge.yml b/roles/compute_tsx/molecule/default/converge.yml deleted file mode 100644 index 974fc09a0..000000000 --- a/roles/compute_tsx/molecule/default/converge.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - vars: - tsx_assertion: {} - tasks: - - name: Assert a failure - block: - - name: Loading role with failure - include_role: - name: compute_tsx - vars: - tsx_rhel_8_2: true - tsx_cmdline: false - tsx_cpu_support: true - tsx_grub: false - rescue: - - name: Fail if no failure - fail: - msg: | - {{ tsx_assertion }} - when: - # The logic is reversed here - - tsx_assertion.failed - - - name: Assert a failure, with warning only - block: - - name: Loading role with failure - include_role: - name: compute_tsx - vars: - tsx_rhel_8_2: true - tsx_cmdline: false - tsx_cpu_support: true - tsx_grub: false - compute_tsx_warning: true - rescue: - - name: Fail if failure - fail: - msg: | - {{ tsx_assertion }} - when: - # The logic is reversed here - - not tsx_assertion.failed - - - name: Assert a success - block: - - name: Loading role with passed - include_role: - name: compute_tsx - vars: - tsx_rhel_8_2: true - tsx_cmdline: true - tsx_cpu_support: true - tsx_grub: false - rescue: - - name: Fail if failure - fail: - msg: | - {{ tsx_assertion }} - when: - # The logic is reversed here - - not tsx_assertion.failed diff --git a/roles/compute_tsx/molecule/default/molecule.yml b/roles/compute_tsx/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/compute_tsx/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/compute_tsx/tasks/main.yml b/roles/compute_tsx/tasks/main.yml deleted file mode 100644 index a79307f7b..000000000 --- a/roles/compute_tsx/tasks/main.yml +++ /dev/null @@ -1,82 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-- name: Gathering TSX information - shell: | - uname -r | grep -oP "^[\d]+\.[\d]+\.[\d-]+" - grep -qP "[^a-zA-Z]tsx=(on|off|auto)" /proc/cmdline && echo true || echo false - grep -qP "hle|rtm" /proc/cpuinfo && echo true || echo false - grep -qP "[^a-zA-Z]tsx=(on|off|auto)" /etc/default/grub && echo true || echo false - register: node_infos - check_mode: false - changed_when: false - -- name: Parse custom node facts - set_fact: - tsx_rhel_8_2: "{{ node_infos.stdout_lines[0] is version(compute_tsx_8_3_version, '<') }}" - tsx_cmdline: "{{ node_infos.stdout_lines[1] | bool }}" - tsx_cpu_support: "{{ node_infos.stdout_lines[2] | bool }}" - tsx_grub: "{{ node_infos.stdout_lines[3] | bool }}" - tsx_kernel_args: "{{ 'tsx' in compute_tsx_kernel_args }}" - -- name: Print facts - when: - - compute_tsx_debug | bool - debug: - msg: | - tsx_rhel_8_2: {{ tsx_rhel_8_2 }} - tsx_cmdline: {{ tsx_cmdline }} - tsx_cpu_support: {{ tsx_cpu_support }} - tsx_grub: {{ tsx_grub }} - -# It's cleaner to assert only ANDs so we do a reverse assertion -- name: Validating facts - assert: - that: - - tsx_rhel_8_2 - - tsx_cpu_support - - not tsx_cmdline - - not tsx_grub - - not tsx_kernel_args - success_msg: | - {{ inventory_hostname }} doesn't have TSX flag configured - fail_msg: | - This is not a failure, assertion is successful. - {{ inventory_hostname }} has the right TSX setting according to its running or startup configuration - ignore_errors: true - register: tsx_assertion - -- name: Asserting errors - fail: - msg: | - {{ tsx_assertion.msg }} - - {{ compute_tsx_information_msg }} - - To prevent this validation from failing, you can run it with the compute_tsx_warning flag set to true like this: - openstack tripleo validator run --extra-vars compute_tsx_warning=true --validation compute-tsx - when: - - not tsx_assertion.failed - - not compute_tsx_warning | bool - -- name: Displaying errors - warn: - msg: | - {{ tsx_assertion.msg }} - - {{ compute_tsx_information_msg }} - when: - - not tsx_assertion.failed - - compute_tsx_warning | bool diff --git a/roles/compute_tsx/vars/main.yml b/roles/compute_tsx/vars/main.yml deleted file mode 100644 index 14911f27c..000000000 --- a/roles/compute_tsx/vars/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# While options found within the vars/ path can be overridden using extra -# vars, items within this path are considered part of the role and not -# intended to be modified. - -# All variables within this role should have a prefix of "compute_tsx" - -# This is the kernel version that changed the tsx default from true to false -compute_tsx_8_3_version: "4.18.0-240" diff --git a/roles/container_status/molecule/default/converge.yml b/roles/container_status/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/container_status/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/container_status/molecule/default/molecule.yml b/roles/container_status/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/container_status/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/container_status/tasks/main.yaml b/roles/container_status/tasks/main.yaml deleted file mode 100644 index e274d0ca7..000000000 --- a/roles/container_status/tasks/main.yaml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: Set oc_container_cli fact for the Overcloud nodes - set_fact: - oc_container_cli: "{{ hostvars[inventory_hostname].container_cli | default('podman', true) }}" - when: - - "'overcloud' in group_names or 'allovercloud' in group_names" - - oc_container_cli is not defined - -- when: "'Undercloud' in group_names" - block: - - name: Set container_cli fact from undercloud.conf - block: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: Get container client from undercloud.conf - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: container_cli - ignore_missing_file: true - register: container_cli - - - name: Set uc_container_cli for the Undercloud - set_fact: - uc_container_cli: "{{ container_cli.value|default('podman', true) }}" - when: uc_container_cli is not defined - -- name: Get failed containers for podman - changed_when: false - become: true - command: > - {% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %} - {% raw %} - ps -a --filter 'status=exited' --format '{{ .Names }} {{ .Status }}' - {% endraw %} - register: failed_containers - -- name: Fail if we detect failed containers - fail: - msg: "Failed container detected: {{ item }}." - when: item is not match(".* Exited \((0|137|142|143)\) .* ago") - loop: "{{ failed_containers.stdout_lines }}" diff --git a/roles/controller_token/defaults/main.yml b/roles/controller_token/defaults/main.yml deleted file mode 100644 index 8ab2acf8f..000000000 --- a/roles/controller_token/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -keystone_conf_file: "/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf" diff --git a/roles/controller_token/molecule/default/converge.yml b/roles/controller_token/molecule/default/converge.yml deleted file mode 100644 index 5952826b6..000000000 --- a/roles/controller_token/molecule/default/converge.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: pass validation - include_role: - name: controller_token - - - name: fail validation - block: - - name: provide configuration file - copy: - dest: /keystone.conf - content: | - [DEFAULT] - admin_token = CHANGEME - - - include_role: - name: controller_token - vars: - keystone_conf_file: /keystone.conf - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - Controller-token validation failed finding bad configuration! diff --git a/roles/controller_token/molecule/default/molecule.yml b/roles/controller_token/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/controller_token/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/controller_token/tasks/main.yml b/roles/controller_token/tasks/main.yml deleted file mode 100644 index e2e7bba80..000000000 --- a/roles/controller_token/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Fetch token value - become: true - validations_read_ini: - path: "{{ keystone_conf_file }}" - section: DEFAULT - key: admin_token - ignore_missing_file: true - register: token_result - -- name: Check if token value is disabled. - fail: msg="Keystone admin token is not disabled." - when: token_result.value != None diff --git a/roles/controller_token/vars/main.yml b/roles/controller_token/vars/main.yml deleted file mode 100644 index ff770158a..000000000 --- a/roles/controller_token/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Verify that keystone admin token is disabled - description: > - This validation checks that keystone admin token is disabled on both - undercloud and overcloud controller after deployment. - groups: - - post-deployment diff --git a/roles/controller_ulimits/defaults/main.yml b/roles/controller_ulimits/defaults/main.yml deleted file mode 100644 index ca0d52b12..000000000 --- a/roles/controller_ulimits/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -nofiles_min: 1024 -nproc_min: 2048 diff --git a/roles/controller_ulimits/molecule/default/converge.yml b/roles/controller_ulimits/molecule/default/converge.yml deleted file mode 100644 index 49c463c04..000000000 --- a/roles/controller_ulimits/molecule/default/converge.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - block: - - include_role: - name: controller_ulimits - vars: - nofiles_min: 102400 - nproc_min: 512 - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works for detecting nofiles_min! - - - block: - - include_role: - name: controller_ulimits - vars: - nofiles_min: 512 - nproc_min: 204800 - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: >- - The validation works for detecting noproc_min! - End of playbook run. - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The controller-ulimits failed detecting bad limits diff --git a/roles/controller_ulimits/molecule/default/molecule.yml b/roles/controller_ulimits/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/controller_ulimits/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/controller_ulimits/tasks/main.yml b/roles/controller_ulimits/tasks/main.yml deleted file mode 100644 index 8eddb5baf..000000000 --- a/roles/controller_ulimits/tasks/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Get nofiles limit - become: true - # NOTE: `ulimit` is a shell builtin so we have to invoke it like this: - command: sh -c "ulimit -n" - register: nofilesval - changed_when: false - -- name: Check nofiles limit - fail: - msg: > - nofiles is set to {{ nofilesval.stdout }}. It should be at least - {{ nofiles_min }} or higher, depending on available resources. - failed_when: "nofilesval.stdout|int < nofiles_min" - -- name: Get nproc limit - become: true - # NOTE: `ulimit` is a shell builtin so we have to invoke it like this: - command: sh -c "ulimit -u" - register: nprocval - changed_when: false - -- name: Check nproc limit - fail: - msg: > - nproc is set to {{ nprocval.stdout }}. It should be at least - {{ nproc_min }} or higher, depending on available resources. - failed_when: "nprocval.stdout|int < nproc_min" diff --git a/roles/controller_ulimits/vars/main.yml b/roles/controller_ulimits/vars/main.yml deleted file mode 100644 index 3f00b888a..000000000 --- a/roles/controller_ulimits/vars/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -metadata: - name: Check controller ulimits - description: > - This will check the ulimits of each controller. - groups: - - post-deployment diff --git a/roles/ctlplane_ip_range/defaults/main.yml b/roles/ctlplane_ip_range/defaults/main.yml deleted file mode 100644 index 70aebf232..000000000 --- a/roles/ctlplane_ip_range/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -ctlplane_iprange_min_size: 20 diff --git a/roles/ctlplane_ip_range/molecule/default/converge.yml b/roles/ctlplane_ip_range/molecule/default/converge.yml deleted file mode 100644 index ad96ed27b..000000000 --- a/roles/ctlplane_ip_range/molecule/default/converge.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: Test good values - block: - - name: populate undercloud.conf with right range length - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [ctlplane-subnet] - dhcp_start = 192.168.12.10 - dhcp_end = 192.168.12.100 - - include_role: - name: ctlplane_ip_range - - - name: Test failing - block: - - name: populate undercloud.conf with wrong range length - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [ctlplane-subnet] - dhcp_start = 192.168.12.10 - dhcp_end = 192.168.12.20 - - include_role: - name: ctlplane_ip_range - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The ctlplane-ip-range validation didn't properly detect too short - range! diff --git a/roles/ctlplane_ip_range/molecule/default/molecule.yml b/roles/ctlplane_ip_range/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/ctlplane_ip_range/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/ctlplane_ip_range/molecule/default/prepare.yml b/roles/ctlplane_ip_range/molecule/default/prepare.yml deleted file mode 100644 index bfb010ef1..000000000 --- a/roles/ctlplane_ip_range/molecule/default/prepare.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: install python netaddr library - package: - name: python*-netaddr - state: installed diff --git a/roles/ctlplane_ip_range/tasks/main.yml b/roles/ctlplane_ip_range/tasks/main.yml deleted file mode 100644 index 8af492eb7..000000000 --- a/roles/ctlplane_ip_range/tasks/main.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Get dhcp_start value from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: ctlplane-subnet - key: dhcp_start - ignore_missing_file: true - default: "192.0.2.5" - register: dhcp_start - -- name: Get dhcp_end value from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: ctlplane-subnet - key: dhcp_end - ignore_missing_file: true - default: "192.0.2.24" - register: dhcp_end - -- name: Check the size of the DHCP range for overcloud nodes - ip_range: - start: "{{ dhcp_start.value }}" - end: "{{ dhcp_end.value }}" - min_size: "{{ ctlplane_iprange_min_size }}" diff --git a/roles/ctlplane_ip_range/vars/main.yml b/roles/ctlplane_ip_range/vars/main.yml deleted file mode 100644 index 779ec0b81..000000000 --- a/roles/ctlplane_ip_range/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Check the number of IP addresses available for the overcloud nodes - description: > - Verify that the number of IP addresses defined in `dhcp_start` and - `dhcp_end` fields in `undercloud.conf` is not too low. - groups: - - pre-introspection diff --git a/roles/default_node_count/tasks/main.yml b/roles/default_node_count/tasks/main.yml deleted file mode 100644 index 60d5126ab..000000000 --- a/roles/default_node_count/tasks/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Get list of baremetal nodes - openstack.cloud.baremetal_node_info: - cloud: undercloud - register: baremetal_nodes - -- name: Get baremetal node details - openstack.cloud.baremetal_node_info: - cloud: undercloud - node: "{{ item }}" - with_items: "{{ baremetal_nodes | community.general.json_query('baremetal_nodes[*].name') }}" - register: node_details - -- name: Get clean node list - set_fact: - baremetal_nodes_details: "{{ [item] + baremetal_nodes_details }}" - with_items: "{{ node_details | community.general.json_query('results[*].baremetal_nodes') }}" - -- name: Get active node count - set_fact: - active_nodes: "{{ baremetal_nodes_details | community.general.json_query('[?provision_state==`available`]') | count() }}" - -- name: Get associated node count - set_fact: - associated_nodes: "{{ baremetal_nodes_details | community.general.json_query('[*].associated') | count() }}" - -- name: Set total available node count - set_fact: - available_count: "{{ active_nodes | int + associated_nodes | int }}" - -- name: Get overcloud role list - tripleo_overcloud_role_list: - register: role_list - -- name: Get details for each role - tripleo_overcloud_role_show: - role_name: "{{ item }}" - default_values: - CountDefault: 0 - FlavorDefault: 'baremetal' - with_items: "{{ role_list.role_list }}" - register: role_details - -- name: Get requested node count - set_fact: - requested_node_count: "{{ role_details | community.general.json_query('results[*].role_detail.CountDefault') | sum() }}" - -- name: Fail when requested is more than available - fail: - msg: > - Not enough baremetal nodes - available: {{ 
available_count }}, - requested: {{ requested_node_count }} - failed_when: requested_node_count|int > available_count|int diff --git a/roles/default_node_count/vars/main.yml b/roles/default_node_count/vars/main.yml deleted file mode 100644 index 520238d02..000000000 --- a/roles/default_node_count/vars/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -metadata: - name: Verify hypervisor statistics - description: > - This validation checks that the nodes and hypervisor statistics - add up. - groups: - - pre-deployment - -baremetal_nodes_details: [] diff --git a/roles/deprecated_services/defaults/main.yml b/roles/deprecated_services/defaults/main.yml deleted file mode 100644 index 623da2873..000000000 --- a/roles/deprecated_services/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -deprecated_service_list: [] diff --git a/roles/deprecated_services/tasks/main.yml b/roles/deprecated_services/tasks/main.yml deleted file mode 100644 index dcf5b9cc4..000000000 --- a/roles/deprecated_services/tasks/main.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Retrieve roles list - tripleo_overcloud_role_list: - register: role_list - -- name: Retrieve roles data - tripleo_overcloud_role_show: - role_name: "{{ item }}" - loop: "{{ role_list.role_list }}" - register: roles_data - -- name: Get defined services - set_fact: - defined_services: "{{ roles_data.results | community.general.json_query('[*].role_detail.ServicesDefault') | - flatten | unique | map('split', '::') | map('last') }}" - -- name: Check for deprecated services - set_fact: - deprecated_services_in_roles: "{{ deprecated_service_list | intersect(defined_services) }}" - -- name: Fail if deprecated service list isn't empty - fail: - msg: > - The following services are marked as deprecated and should be removed - from roles data before upgrade: - {{ deprecated_services_in_roles }} - when: deprecated_services_in_roles diff --git a/roles/dhcp_validations/defaults/main.yml b/roles/dhcp_validations/defaults/main.yml deleted file mode 100644 index 0eb0b5bad..000000000 --- a/roles/dhcp_validations/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -ironic_inspector_conf: "/var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf" diff --git a/roles/dhcp_validations/files/rogue_dhcp.py b/roles/dhcp_validations/files/rogue_dhcp.py deleted file mode 100755 index 03171ed4b..000000000 --- a/roles/dhcp_validations/files/rogue_dhcp.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import fcntl -import socket -import struct -import sys -import threading -import time - -ETH_P_IP = 0x0800 -SIOCGIFHWADDR = 0x8927 - -dhcp_servers = [] -interfaces_addresses = {} - - -class DHCPDiscover(object): - def __init__(self, interface): - self.interface = interface - self.mac = interfaces_addresses[interface] - self.socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) - - def bind(self): - self.socket.bind((self.interface, 0)) - - def send(self): - packet = self.packet() - self.bind() - self.socket.send(packet) - - def close_socket(self): - self.socket.close() - - def packet(self): - return self.ethernet_header() \ - + self.ip_header() \ - + self.udp_header() \ - + self.dhcp_discover_payload() - - def ethernet_header(self): - return struct.pack('!6s6sH', - b'\xff\xff\xff\xff\xff\xff', # Dest HW address - self.mac, # Source HW address - ETH_P_IP) # EtherType - IPv4 - - def ip_header(self, checksum=None): - # 0 1 2 3 - # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - # |Version| IHL |Type of Service| Total Length | - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - # | Identification |Flags| Fragment Offset | - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - # | Time to Live | Protocol | Header Checksum | - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - # | Source Address | - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - # | Destination Address | - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - # | Options | Padding | - # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - if checksum is None: - checksum = self.ip_checksum() - return struct.pack('!BBHHHBBHI4s', - (4 << 4) + 5, # IPv4 + 20 bytes header length - 0, # TOS - 272, # Total Length - 1, # Id - 0, # Flags & Fragment Offset - 64, # TTL - socket.IPPROTO_UDP, - checksum, - 0, # Source - socket.inet_aton('255.255.255.255')) # Destination - - def ip_checksum(self): - generated_checksum = self._checksum(self.ip_header(checksum=0)) - return socket.htons(generated_checksum) - - def udp_header(self, checksum=None): - # 0 7 8 15 16 23 24 31 - # +--------+--------+--------+--------+ - # | Source | Destination | - # | Port | Port | - # +--------+--------+--------+--------+ - # | | | - # | Length | Checksum | - # +--------+--------+--------+--------+ - if checksum is None: - checksum = self.udp_checksum() - return struct.pack('!HHHH', - 68, - 67, - 252, - checksum) - - def udp_checksum(self): - pseudo_header = self.ip_pseudo_header() - generated_checksum = self._checksum(pseudo_header + self.udp_header( - checksum=0) + self.dhcp_discover_payload()) - return socket.htons(generated_checksum) - - def ip_pseudo_header(self): - # 0 7 8 15 16 23 24 31 - # +--------+--------+--------+--------+ - # | source address | - # +--------+--------+--------+--------+ - # | destination address | - # +--------+--------+--------+--------+ - # | zero |protocol| UDP length | - # +--------+--------+--------+--------+ - return 
struct.pack('!I4sBBH', - 0, - socket.inet_aton('255.255.255.255'), - 0, - socket.IPPROTO_UDP, - 252) # Length - - def dhcp_discover_payload(self): - return struct.pack('!BBBBIHHIIII6s10s67s125s4s3s1s', - 1, # Message Type - Boot Request - 1, # Hardware Type - Ethernet - 6, # HW Address Length - 0, # Hops - 0, # Transaction ID - 0, # Seconds elapsed - 0, # Bootp flags - 0, # Client IP Address - 0, # Your IP Address - 0, # Next server IP Address - 0, # Relay Agent IP Address - self.mac, # Client MAC address - b'\x00' * 10, # Client HW address padding - b'\x00' * 67, # Server host name not given - b'\x00' * 125, # Boot file name not given - b'\x63\x82\x53\x63', # Magic Cookie - b'\x35\x01\x01', # DHCP Message Type = Discover - b'\xff' # Option End - ) - - def _checksum(self, msg): - s = 0 - for i in range(0, len(msg), 2): - w = msg[i] + (msg[i + 1] << 8) - s = s + w - s = (s >> 16) + (s & 0xffff) - s = s + (s >> 16) - s = ~s & 0xffff - return s - - -def get_hw_addresses(interfaces): - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - for interface in interfaces: - info = fcntl.ioctl(s.fileno(), - SIOCGIFHWADDR, - struct.pack('256s', interface[:15].encode('utf-8'))) - interfaces_addresses[interface] = info[18:24] - s.close() - - -def inspect_frame(data): - eth_type = struct.unpack('!H', data[12:14])[0] - protocol = data[23] - src_port = struct.unpack('!H', data[34:36])[0] - dst_port = struct.unpack('!H', data[36:38])[0] - msg_type = data[42] - # Make sure we got a DHCP Offer - if eth_type == ETH_P_IP \ - and protocol == socket.IPPROTO_UDP \ - and src_port == 67 \ - and dst_port == 68 \ - and msg_type == 2: # DHCP Boot Reply - server_ip_address = '.'.join(["%s" % m for m in data[26:30]]) - server_hw_address = ":".join(["%02x" % m for m in data[6:12]]) - dhcp_servers.append([server_ip_address, server_hw_address]) - - -def wait_for_dhcp_offers(interfaces, timeout): - listening_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, - socket.htons(ETH_P_IP)) - listening_socket.settimeout(timeout) - allowed_macs = interfaces_addresses.values() - end_of_time = time.time() + timeout - try: - while time.time() < end_of_time: - data = listening_socket.recv(1024) - dst_mac = struct.unpack('!6s', data[0:6])[0] - if dst_mac in allowed_macs: - inspect_frame(data) - except socket.timeout: - pass - listening_socket.close() - - -def main(): - interfaces = sys.argv[1:] - timeout = 5 - - get_hw_addresses(interfaces) - - listening_thread = threading.Thread(target=wait_for_dhcp_offers, - args=[interfaces, timeout]) - listening_thread.start() - - for interface in interfaces: - dhcp_discover = DHCPDiscover(interface) - dhcp_discover.send() - dhcp_discover.close_socket() - - listening_thread.join() - - if dhcp_servers: - sys.stderr.write('Found {} DHCP servers:'.format(len(dhcp_servers))) - for ip, mac in dhcp_servers: - sys.stderr.write("\n* {} ({})".format(ip, mac)) - sys.exit(1) - else: - print("No DHCP servers found.") - - -if __name__ == '__main__': - main() diff --git a/roles/dhcp_validations/molecule/default/converge.yml b/roles/dhcp_validations/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/dhcp_validations/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/dhcp_validations/molecule/default/molecule.yml b/roles/dhcp_validations/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/dhcp_validations/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/dhcp_validations/tasks/dhcp-introspection.yaml b/roles/dhcp_validations/tasks/dhcp-introspection.yaml deleted file mode 100644 index 63ef1db0b..000000000 --- a/roles/dhcp_validations/tasks/dhcp-introspection.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Look up the introspection interface - become: true - validations_read_ini: - path: "{{ ironic_inspector_conf }}" - section: iptables - key: dnsmasq_interface - register: interface - -- name: Look up the introspection interface from the deprecated option - become: true - validations_read_ini: - path: "{{ ironic_inspector_conf }}" - section: firewall - key: dnsmasq_interface - register: interface_deprecated - -- name: Look for rogue DHCP servers - script: files/rogue_dhcp.py {{ interface.value or interface_deprecated.value or 'br-ctlplane' }} - changed_when: false diff --git a/roles/dhcp_validations/tasks/dhcp-provisioning.yaml b/roles/dhcp_validations/tasks/dhcp-provisioning.yaml deleted file mode 100644 index 7650c6efc..000000000 --- a/roles/dhcp_validations/tasks/dhcp-provisioning.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Gather undercloud.conf values - validations_read_ini: - path: "{{ lookup('env', 'HOME') }}/undercloud.conf" - section: DEFAULT - key: local_interface - ignore_missing_file: true - register: local_interface - -- name: Look for DHCP responses - script: files/rogue_dhcp.py {{ local_interface.value|default('eth1', true) }} diff --git a/roles/fips_enabled/molecule/default/converge.yml b/roles/fips_enabled/molecule/default/converge.yml deleted file mode 100644 index e6f018a6a..000000000 --- a/roles/fips_enabled/molecule/default/converge.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -- name: Converge - hosts: all - vars: - fips_enabled_path: /proc/sys/crypto/fips_enabled - tasks: - - name: FIPS enabled - copy: - dest: fips_enabled_path - mode: 0755 - content: "1" - - - name: Test FIPS Enabled - vars: - enforce_fips_validation: false - include_role: - name: "fips_enabled" - - - name: FIPS disabled - copy: - dest: fips_enabled_path - mode: 0755 - content: "0" - - - name: Test FIPS disabled fails - vars: - enforce_fips_validation: true - block: - - name: Load role with failure - include_role: - name: "fips_enabled" - rescue: - - name: Clear host error - meta: clear_host_errors - - - name: Status message - debug: - msg: 'Successfully detected FIPS is disabled!' - - - name: End play - meta: end_play - - - name: Fail if we get to this place - fail: - msg: 'Unit test failed: Did not detect that FIPS is disabled!' diff --git a/roles/fips_enabled/molecule/default/molecule.yml b/roles/fips_enabled/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/fips_enabled/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/fips_enabled/molecule/default/prepare.yml b/roles/fips_enabled/molecule/default/prepare.yml deleted file mode 100644 index 3cfc7568a..000000000 --- a/roles/fips_enabled/molecule/default/prepare.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Prepare - hosts: all - gather_facts: false diff --git a/roles/fips_enabled/tasks/main.yml b/roles/fips_enabled/tasks/main.yml deleted file mode 100644 index a9fa78b11..000000000 --- a/roles/fips_enabled/tasks/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# "fips_enabled" tasks - -- name: Read FIPS status in /proc/sys/crypto/fips_enabled - slurp: - src: "{{ fips_status_path }}" - register: fips_status - -- name: Fail when FIPS is not enabled - fail: - msg: "Fips is disabled" - when: - - fips_status != 1 - - enforce_fips_validation == true diff --git a/roles/fips_enabled/vars/main.yml b/roles/fips_enabled/vars/main.yml deleted file mode 100644 index 2c470cff7..000000000 --- a/roles/fips_enabled/vars/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# While options found within the vars/ path can be overridden using extra -# vars, items within this path are considered part of the role and not -# intended to be modified. - -# All variables within this role should have a prefix of "tripleo_check_fips_status" - -# !!!! IMPORTANT !!!! -# Add a comment above every variables describing them. -# This will be included in the sphinx role documentation -# !!!! IMPORTANT !!!! -fips_status_path: /proc/sys/crypto/fips_enabled -enforce_fips_validation: false diff --git a/roles/frr_status/molecule/default/converge.yml b/roles/frr_status/molecule/default/converge.yml deleted file mode 100644 index 2f6ec9795..000000000 --- a/roles/frr_status/molecule/default/converge.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: run validation - include_role: - name: frr_status - vars: - enabled_services: - - frr diff --git a/roles/frr_status/molecule/default/molecule.yml b/roles/frr_status/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/frr_status/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/frr_status/molecule/default/prepare.yml b/roles/frr_status/molecule/default/prepare.yml deleted file mode 100644 index 913322569..000000000 --- a/roles/frr_status/molecule/default/prepare.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: Populate successful podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$3" - shift - command="$@" - - case $container in - 'frr') - echo 'watchfrr global phase: Idle' - echo ' zebra Up' - echo ' bgpd Up' - ;; - *) - echo "Unknown container ${container}" - ;; - esac diff --git a/roles/frr_status/molecule/faulty/converge.yml b/roles/frr_status/molecule/faulty/converge.yml deleted file mode 100644 index 35e9f8a77..000000000 --- a/roles/frr_status/molecule/faulty/converge.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: make validation fail - block: - - name: run validation - include_role: - name: frr_status - vars: - enabled_services: - - frr - - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: check at least one FRR daemon is down - assert: - that: - - has_errors is defined - - - name: End play - meta: end_play - - - name: Fail playbook if reached - fail: - msg: | - The frr_status validation didn't properly detect non-operational FRR - daemons! diff --git a/roles/frr_status/molecule/faulty/molecule.yml b/roles/frr_status/molecule/faulty/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/frr_status/molecule/faulty/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/frr_status/molecule/faulty/prepare.yml b/roles/frr_status/molecule/faulty/prepare.yml deleted file mode 100644 index 059b5d089..000000000 --- a/roles/frr_status/molecule/faulty/prepare.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: Populate successful podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$3" - shift - command="$@" - - case $container in - 'frr') - echo 'watchfrr global phase: Idle' - echo ' zebra Up' - echo ' bgpd Down' - ;; - *) - echo "Unknown container ${container}" - ;; - esac diff --git a/roles/frr_status/tasks/main.yml b/roles/frr_status/tasks/main.yml deleted file mode 100644 index 2e2cb051a..000000000 --- a/roles/frr_status/tasks/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Block for FRR daemon status checks - when: "'frr' in enabled_services" - block: - - name: Set container_cli fact from the inventory - set_fact: - container_cli: "{{ hostvars[inventory_hostname].container_cli |default('podman', true) }}" - when: container_cli is not defined - - - name: Check FRR daemon statuses - become: true - command: "{{ container_cli }} exec -u root frr vtysh -c 'show watchfrr'" - changed_when: false - register: watchfrr_return - - - name: Detect down FRR daemons - set_fact: - has_errors: "{{ watchfrr_return.stdout_lines - | select('search', '(Down)') - | list | length | int > 0 }}" - - - name: Fail when at least one enabled FRR daemon is down - fail: - msg: "{{ watchfrr_return.stdout_lines }}" - when: has_errors diff --git a/roles/frr_status/vars/main.yml b/roles/frr_status/vars/main.yml deleted file mode 100644 index c3883c64d..000000000 --- a/roles/frr_status/vars/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -metadata: - name: FRR status check - description: > - Runs 'show watchfrr' and checks for any non-operational daemon. - groups: - - post-deployment diff --git a/roles/healthcheck_service_status/defaults/main.yml b/roles/healthcheck_service_status/defaults/main.yml deleted file mode 100644 index 8c829ad4f..000000000 --- a/roles/healthcheck_service_status/defaults/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# The default values for the two following variables are set up to wait up to -# 300s for the first healthcheck to run. -retries_number: 10 -delay_number: 30 -# Please use inflight_healthcheck_services when using this role through the -# inflight validations. -inflight_healthcheck_services: [] diff --git a/roles/healthcheck_service_status/molecule/default/converge.yml b/roles/healthcheck_service_status/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/healthcheck_service_status/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests!
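For reference, the Down-daemon detection in the frr_status tasks above is a plain Jinja filter chain over the 'show watchfrr' output. The following standalone sketch (hypothetical sample lines, not part of the patch) shows how that expression evaluates; it can be run with ansible-playbook against localhost:

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # Hypothetical watchfrr output, mirroring the mocked podman CLI above.
    watchfrr_stdout_lines:
      - 'watchfrr global phase: Idle'
      - ' zebra Up'
      - ' bgpd Down'
  tasks:
    - name: Mirror the has_errors computation from the frr_status role
      set_fact:
        has_errors: "{{ watchfrr_stdout_lines | select('search', '(Down)') | list | length | int > 0 }}"

    - name: Show the result (true here, since bgpd is reported Down)
      debug:
        var: has_errors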
diff --git a/roles/healthcheck_service_status/molecule/default/molecule.yml b/roles/healthcheck_service_status/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/healthcheck_service_status/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/healthcheck_service_status/tasks/main.yml b/roles/healthcheck_service_status/tasks/main.yml deleted file mode 100644 index 94ee45aeb..000000000 --- a/roles/healthcheck_service_status/tasks/main.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Get the healthcheck services list enabled on node - shell: > - systemctl list-unit-files | grep "^tripleo.*healthcheck.*enabled" | awk -F'.' '{print $1}' - changed_when: false - register: healthcheck_services_list - when: inflight_healthcheck_services | length < 1 - -- name: Set hc_services - set_fact: - hc_services: > - {%- if inflight_healthcheck_services | length > 0 -%} - {{ inflight_healthcheck_services }} - {%- else -%} - {{ healthcheck_services_list.stdout_lines }} - {%- endif -%} - -- name: Get healthcheck status - systemd: - name: "{{ item }}" - retries: "{{ retries_number|int }}" - delay: "{{ delay_number|int }}" - until: - - systemd_healthcheck_state.status.ExecMainPID != '0' - - systemd_healthcheck_state.status.ActiveState in ['inactive', 'failed'] - ignore_errors: true - register: systemd_healthcheck_state - loop: "{{ hc_services }}" - loop_control: - label: "{{ item }}" - -- name: Fail if systemd healthcheck services are in failed status - fail: - msg: "Failed systemd healthcheck service detected: {{ item.item }}" - when: item.status.ExecMainStatus != '0' - loop: "{{ systemd_healthcheck_state.results }}" - loop_control: - label: "{{ item.item }}" diff --git a/roles/healthcheck_service_status/vars/main.yml b/roles/healthcheck_service_status/vars/main.yml deleted file mode 100644 index a3c7f80ec..000000000 --- a/roles/healthcheck_service_status/vars/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -metadata: - name: Healthcheck systemd services Check - description: > - Check for failed healthcheck systemd services. - groups: - - post-deployment diff --git a/roles/image_serve/defaults/main.yaml b/roles/image_serve/defaults/main.yaml deleted file mode 100644 index 6badfc970..000000000 --- a/roles/image_serve/defaults/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -container_registry_port: 8787 -container_registry_httpd_config: "/etc/httpd/conf.d/image-serve.conf" diff --git a/roles/image_serve/molecule/default/converge.yml b/roles/image_serve/molecule/default/converge.yml deleted file mode 100644 index 8b6fa211e..000000000 --- a/roles/image_serve/molecule/default/converge.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: detect wrong port - block: - - name: run validation for wrong port - include_role: - name: image_serve - vars: - container_registry_port: 9999 - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Status message - debug: - msg: "Detected faulty port!" - - - name: Run validation for wrong config file - block: - - name: Run validation for wrong config file - include_role: - name: image_serve - vars: - container_registry_httpd_config: /fake-image-serve.conf - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Status message - debug: - msg: "Detected wrong config file!" - - - name: Ensure we detect faulty tree - block: - - name: Stopping httpd - systemd: - name: httpd - state: stopped - - - name: run validation for 404 - include_role: - name: image_serve - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Status message - debug: - msg: "Detected faulty image serve tree!" - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The image_serve role should have detected httpd wasn't running or - index.json is absent. diff --git a/roles/image_serve/molecule/default/molecule.yml b/roles/image_serve/molecule/default/molecule.yml deleted file mode 100644 index 0fe5f15d7..000000000 --- a/roles/image_serve/molecule/default/molecule.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# inherits {REPO}/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. - -platforms: - - name: centos - hostname: centos - image: centos/centos:stream8 - registry: - url: quay.io - etc_hosts: - undercloud.ctlplane.mydomain.tld: "127.0.0.1" - dockerfile: ../../../../.config/molecule/Dockerfile - override_command: true - command: /sbin/init - privileged: true - pkg_extras: python*-setuptools - volumes: - - /etc/ci/mirror_info.sh:/etc/ci/mirror_info.sh:ro - environment: &env - http_proxy: "{{ lookup('env', 'http_proxy') }}" - https_proxy: "{{ lookup('env', 'https_proxy') }}" - ulimits: &ulimit - - host diff --git a/roles/image_serve/molecule/default/prepare.yml b/roles/image_serve/molecule/default/prepare.yml deleted file mode 100644 index 4140ceddc..000000000 --- a/roles/image_serve/molecule/default/prepare.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Prepare - hosts: all - tasks: - - name: Container registry installation - include_role: - name: tripleo_image_serve - vars: - tripleo_container_registry_host: undercloud.ctlplane.mydomain.tld diff --git a/roles/image_serve/tasks/main.yaml b/roles/image_serve/tasks/main.yaml deleted file mode 100644 index d5d7d285c..000000000 --- a/roles/image_serve/tasks/main.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Ensure we have the httpd config - stat: - path: "{{ container_registry_httpd_config }}" - register: registry_config - -- name: Fail early if no httpd config is present - when: not registry_config.stat.exists - fail: - msg: "Unable to find vhost config {{ container_registry_httpd_config }}. Exiting now." - -- name: Extract vhost name from httpd config - command: | - awk -F '[ >:]*' '/VirtualHost/ {print $2; exit}' {{ container_registry_httpd_config }} - register: virthost_name - -- name: Ensure port is open - wait_for: - port: "{{ container_registry_port }}" - host: "{{ virthost_name.stdout | regex_replace('^\\*$', 'localhost')}}" - timeout: 10 - -- name: Ensure registry does answer - uri: - method: HEAD - url: "http://{{ virthost_name.stdout | regex_replace('^\\*$', 'localhost')}}:{{ container_registry_port }}/v2/index.json" - status_code: - - 200 - - 204 - - 301 - - 302 diff --git a/roles/image_serve/vars/main.yml b/roles/image_serve/vars/main.yml deleted file mode 100644 index e8a11f1a4..000000000 --- a/roles/image_serve/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Image-serve availability - description: Verify that image-serve service is ready - groups: - - pre-upgrade - - post-deployment - - post-upgrade diff --git a/roles/ironic_boot_configuration/defaults/main.yml b/roles/ironic_boot_configuration/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/ironic_boot_configuration/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/ironic_boot_configuration/molecule/default/converge.yml b/roles/ironic_boot_configuration/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/ironic_boot_configuration/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/ironic_boot_configuration/molecule/default/molecule.yml b/roles/ironic_boot_configuration/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/ironic_boot_configuration/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
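The image_serve check shown above ultimately reduces to a HEAD request against the /v2/index.json path of the registry vhost. A minimal standalone sketch of that probe, assuming a hypothetical registry host and the default port from the role defaults:

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    registry_host: localhost        # hypothetical; the role derives this from the Apache vhost config
    container_registry_port: 8787   # default from roles/image_serve/defaults/main.yaml
  tasks:
    - name: Probe the image-serve index the way the role does
      uri:
        method: HEAD
        url: "http://{{ registry_host }}:{{ container_registry_port }}/v2/index.json"
        status_code: [200, 204, 301, 302]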
diff --git a/roles/ironic_boot_configuration/tasks/main.yml b/roles/ironic_boot_configuration/tasks/main.yml deleted file mode 100644 index 54747a834..000000000 --- a/roles/ironic_boot_configuration/tasks/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Get list of baremetal nodes - openstack.cloud.baremetal_node_info: - cloud: undercloud - register: baremetal_nodes - -- name: Get baremetal node details - openstack.cloud.baremetal_node_info: - cloud: undercloud - node: "{{ item }}" - with_items: "{{ baremetal_nodes | community.general.json_query('baremetal_nodes[*].name') }}" - register: result - -- name: Get clean node list - set_fact: - baremetal_nodes_details: "{{ [item] + baremetal_nodes_details }}" - with_items: "{{ result | community.general.json_query('results[*].baremetal_nodes') }}" - -- name: Check ironic boot config - check_ironic_boot_config: - nodes: "{{ baremetal_nodes_details }}" diff --git a/roles/ironic_boot_configuration/vars/main.yml b/roles/ironic_boot_configuration/vars/main.yml deleted file mode 100644 index d962f4430..000000000 --- a/roles/ironic_boot_configuration/vars/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -metadata: - name: Check Ironic boot configuration - description: > - Check if baremetal boot configuration is correct. - groups: - - pre-deployment - - pre-upgrade - -baremetal_nodes_details: [] diff --git a/roles/mysql_open_files_limit/defaults/main.yml b/roles/mysql_open_files_limit/defaults/main.yml deleted file mode 100644 index e7aa4f35a..000000000 --- a/roles/mysql_open_files_limit/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -min_open_files_limit: 16384 diff --git a/roles/mysql_open_files_limit/molecule/default/converge.yml b/roles/mysql_open_files_limit/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/mysql_open_files_limit/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/mysql_open_files_limit/molecule/default/molecule.yml b/roles/mysql_open_files_limit/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/mysql_open_files_limit/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
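The ironic_boot_configuration tasks above chain two baremetal_node_info calls and flatten the results with json_query. The JMESPath part can be exercised on its own with mocked data (hypothetical node names; requires the community.general collection):

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # Hypothetical shape of the baremetal_node_info output, for illustration only.
    baremetal_nodes:
      baremetal_nodes:
        - {name: node-0, power_state: power on}
        - {name: node-1, power_state: power off}
  tasks:
    - name: List node names the way the role does
      debug:
        msg: "{{ baremetal_nodes | community.general.json_query('baremetal_nodes[*].name') }}"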
diff --git a/roles/mysql_open_files_limit/tasks/main.yml b/roles/mysql_open_files_limit/tasks/main.yml deleted file mode 100644 index 3eb6a641a..000000000 --- a/roles/mysql_open_files_limit/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Set container_cli fact from the inventory - set_fact: - container_cli: "{{ hostvars[inventory_hostname].container_cli |default('podman', true) }}" - -- name: Get the open_files_limit value - become: true - shell: >- - "{{ container_cli }}" exec -u root - $("{{ container_cli }}" ps -q --filter "name=mysql|galera-bundle" | head -1) - /bin/bash -c 'ulimit -n' - changed_when: false - register: mysqld_open_files_limit - -- name: Test the open-files-limit value - fail: - msg: > - The open_files_limit option for mysql must be higher than - {{ min_open_files_limit }}. Right now it's {{ mysqld_open_files_limit.stdout }}. - failed_when: "mysqld_open_files_limit.stdout|int < min_open_files_limit" diff --git a/roles/mysql_open_files_limit/vars/main.yml b/roles/mysql_open_files_limit/vars/main.yml deleted file mode 100644 index 008d07856..000000000 --- a/roles/mysql_open_files_limit/vars/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -metadata: - name: MySQL Open Files Limit - description: > - Verify the `open-files-limit` configuration is high enough - - https://access.redhat.com/solutions/1598733 - groups: - - post-deployment diff --git a/roles/network_environment/defaults/main.yml b/roles/network_environment/defaults/main.yml deleted file mode 100644 index 413e5ddaf..000000000 --- a/roles/network_environment/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -network_environment_path: environments/network-environment.yaml -plan_env_path: plan-environment.yaml -ip_pools_path: environments/ips-from-pool-all.yaml diff --git a/roles/network_environment/molecule/default/converge.yml b/roles/network_environment/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/network_environment/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/network_environment/molecule/default/molecule.yml b/roles/network_environment/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/network_environment/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
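The mysql_open_files_limit comparison above is an integer check on the output of ulimit -n inside the database container. The logic can be sketched without a container (hypothetical ulimit value shown):

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    min_open_files_limit: 16384          # default from roles/mysql_open_files_limit/defaults/main.yml
    mysqld_open_files_limit: "1048576"   # hypothetical `ulimit -n` output from the mysql container
  tasks:
    - name: Fail when the reported limit is below the minimum
      fail:
        msg: "open_files_limit is {{ mysqld_open_files_limit }}, below the required {{ min_open_files_limit }}"
      when: mysqld_open_files_limit | int < min_open_files_limit | int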
diff --git a/roles/network_environment/tasks/main.yml b/roles/network_environment/tasks/main.yml deleted file mode 100644 index 1d974a6bf..000000000 --- a/roles/network_environment/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Validate the network environment files - network_environment: - netenv_path: "{{ network_environment_path }}" - plan_env_path: "{{ plan_env_path }}" - ip_pools_path: "{{ ip_pools_path }}" - template_files: "{{ lookup('tht') }}" diff --git a/roles/network_environment/vars/main.yml b/roles/network_environment/vars/main.yml deleted file mode 100644 index d2c436ce0..000000000 --- a/roles/network_environment/vars/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -metadata: - name: Validate the Heat environment file for network configuration - description: > - This validates the network environment and nic-config files - that specify the overcloud network configuration and are stored - in the current plan's Swift container. - - The deployers are expected to write these files themselves as - described in the Network Isolation guide: - - http://tripleo.org/advanced_deployment/network_isolation.html - groups: - - pre-deployment diff --git a/roles/neutron_sanity_check/defaults/main.yml b/roles/neutron_sanity_check/defaults/main.yml deleted file mode 100644 index a24558f3d..000000000 --- a/roles/neutron_sanity_check/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# The list of Neutron configuration files and directories that -# will be passed to the Neutron services. The order is important -# here: the values in later files take precedence. -configs: - - /etc/neutron/neutron.conf - - /usr/share/neutron/neutron-dist.conf - - /etc/neutron/metadata_agent.ini - - /etc/neutron/dhcp_agent.ini - - /etc/neutron/plugins/ml2/openvswitch_agent.ini - - /etc/neutron/l3_agent.ini diff --git a/roles/neutron_sanity_check/molecule/default/converge.yml b/roles/neutron_sanity_check/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/neutron_sanity_check/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/neutron_sanity_check/molecule/default/molecule.yml b/roles/neutron_sanity_check/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/neutron_sanity_check/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
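The neutron_sanity_check role, whose config-file list appears in the defaults above, builds its command line by joining that list with ' --config-file '. A standalone sketch of that rendering, using a shortened hypothetical list:

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # Shortened, hypothetical subset of the role's configs list.
    configs:
      - /etc/neutron/neutron.conf
      - /etc/neutron/dhcp_agent.ini
  tasks:
    - name: Show the rendered neutron-sanity-check invocation
      debug:
        msg: "neutron-sanity-check --config-file {{ ' --config-file '.join(configs) }} -d"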
diff --git a/roles/neutron_sanity_check/tasks/main.yml b/roles/neutron_sanity_check/tasks/main.yml deleted file mode 100644 index 608bfc1d6..000000000 --- a/roles/neutron_sanity_check/tasks/main.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -- name: Set oc_container_cli and container_name for the Controller - set_fact: - oc_container_cli: "{{ hostvars[inventory_hostname].container_cli | default('podman', true) }}" - container_name: "neutron_ovs_agent" - when: "'Controller' in group_names" - -- when: "'Undercloud' in group_names" - block: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: Get the Container CLI from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: container_cli - ignore_missing_file: true - register: container_cli - - - name: Set uc_container_cli and container_name for the Undercloud - set_fact: - uc_container_cli: "{{ container_cli.value|default('podman', true) }}" - container_name: "neutron_ovs_agent" - -- name: Check if wanted container exists - command: > - {% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %} - ps --filter name={{ container_name }} -q - become: true - register: container_exists - ignore_errors: true - -- name: Run sanity check only if container exists - when: container_exists.stdout != '' - block: - - name: Run neutron-sanity-check - command: > - {% if oc_container_cli is defined %}{{ oc_container_cli }}{% else %}{{ uc_container_cli }}{% endif %} - exec -u root {{ container_name }} - /bin/bash -c 'neutron-sanity-check --config-file {{ " --config-file ".join(configs) }} -d' - become: true - register: nsc_return - ignore_errors: true - changed_when: false - - - name: Detect errors - set_fact: - has_errors: "{{ nsc_return.stderr_lines - | select('search', '(ERROR)') - | list | length | int > 0 }}" - - - name: Detect warnings - set_fact: - has_warnings: "{{ nsc_return.stderr_lines - | select('search', '(WARNING)') - | list | length | int > 0 }}" - - - name: Create output - set_fact: - output_msg: "{{ nsc_return.stderr_lines - | select('search', '(ERROR|WARNING)') - | list }}" - - - name: Output warning - warn: msg="{{ output_msg | join('\n') }}" - when: has_warnings and not has_errors - - - name: Fail - fail: msg="{{ output_msg | join('\n') }}" - when: has_errors diff --git a/roles/node_disks/molecule/default/converge.yml b/roles/node_disks/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/node_disks/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! 
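The error/warning triage in the neutron_sanity_check tasks above relies on select('search', ...) over the command's stderr lines. A minimal sketch with hypothetical log lines shows what the role keeps for its warning and failure messages:

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # Hypothetical neutron-sanity-check output, for illustration only.
    nsc_stderr_lines:
      - 'INFO neutron.common.config Logging enabled'
      - 'WARNING neutron.agent.linux.utils Deprecated option in use'
      - 'ERROR neutron.cmd.sanity_check OVS patch port support check failed'
  tasks:
    - name: Keep only the lines the role reports
      debug:
        msg: "{{ nsc_stderr_lines | select('search', '(ERROR|WARNING)') | list }}"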
diff --git a/roles/node_disks/molecule/default/molecule.yml b/roles/node_disks/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/node_disks/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/node_disks/tasks/main.yml b/roles/node_disks/tasks/main.yml deleted file mode 100644 index 4e39640f8..000000000 --- a/roles/node_disks/tasks/main.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- openstack.cloud.compute_flavor_info: - cloud: overcloud - register: result - -- name: Set required disk size - set_fact: - disk_sizes: "{{ result | community.general.json_query('openstack_flavors[*].{disk : disk, name : name}') }}" - -- name: Set minimum required disk size - set_fact: - min_disk_size: "{{ disk_sizes | community.general.json_query('[*].disk') | max }}" - -- name: Get list of baremetal nodes - openstack.cloud.baremetal_node_info: - cloud: undercloud - register: baremetal_nodes - -- name: Get baremetal node details - openstack.cloud.baremetal_node_info: - cloud: undercloud - node: "{{ item }}" - with_items: "{{ baremetal_nodes | community.general.json_query('baremetal_nodes[*].name') }}" - register: result - -- name: Set baremetal node details - set_fact: - baremetal_node_details: "{{ result.results | community.general.json_query('[*].baremetal_nodes[0]') }}" - -- name: Compare expected with available disk sizes - set_fact: - nodes_with_undersized_disks: "{{ baremetal_node_details | community.general.json_query('[?to_number(properties.local_gb) < `' ~ min_disk_size ~ '`]') }}" - -- fail: - msg: | - The following nodes have less storage available on their primary disks than required by the largest flavor ({{ min_disk_size }} GB): - {{ nodes_with_undersized_disks }} - Use node introspection to verify presence of possible additional disks. - when: nodes_with_undersized_disks | length > 0 - -- name: Print recommendation for further steps - debug: - msg: | - Validation was successful. Root disk is sufficient for all flavors. - Use node introspection to verify presence of possible additional disks. diff --git a/roles/node_disks/vars/main.yml b/roles/node_disks/vars/main.yml deleted file mode 100644 index f9057cc81..000000000 --- a/roles/node_disks/vars/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -metadata: - name: Check node disk configuration - description: > - Check node root disk sizes against available flavors. - groups: - - pre-deployment diff --git a/roles/node_health/molecule/default/converge.yml b/roles/node_health/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/node_health/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/node_health/molecule/default/molecule.yml b/roles/node_health/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/node_health/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/node_health/tasks/main.yml b/roles/node_health/tasks/main.yml deleted file mode 100644 index 74af07f95..000000000 --- a/roles/node_health/tasks/main.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Retrieving compute services - ignore_errors: true - openstack.cloud.compute_service_info: - cloud: overcloud - register: result - -- name: Fail if the compute services can't be queried - fail: - msg: Compute services query failed with {{ result.msg }} - when: result.failed - -- name: Get nova nodes - set_fact: - nova_nodes: "{{ result.openstack_compute_services | community.general.json_query(query) }}" - vars: - query: "[?contains(name, 'nova')]" - -- name: Get failed nova nodes - set_fact: - failed_nodes: "{{ nova_nodes | community.general.json_query(failed_nodes_query) }}" - vars: - failed_nodes_query: "[?state!='up']" - -- when: failed_nodes | length > 0 - block: - - name: Get baremetal nodes info - become: true - openstack.cloud.baremetal_node_info: - cloud: undercloud - register: result - - - name: Get baremetal nodes - set_fact: - baremetal_nodes: "{{ result.baremetal_nodes }}" - - - name: Get failed node names - set_fact: - node_names: "{{ (node_names | default([])) + [item.host.split('.')[0]] }}" - with_items: "{{ failed_nodes }}" - - - name: Get failed baremetal nodes - set_fact: - failed_baremetal_nodes: "{{ (failed_baremetal_nodes | default([])) + (baremetal_nodes | to_json | from_json | community.general.json_query(query)) }}" - with_items: "{{ node_names }}" - vars: - query: "[?contains(name, '{{ item }}')]" - - - name: Fail if there are unreachable nodes - fail: - msg: | - {{ lookup('template', './templates/unreachable_nodes.j2', - template_vars=dict(nodes=failed_baremetal_nodes)) }} - when: failed_baremetal_nodes|length > 0 diff --git a/roles/node_health/templates/unreachable_nodes.j2 b/roles/node_health/templates/unreachable_nodes.j2 deleted file mode 100644 index 9bdf1c3d9..000000000 --- a/roles/node_health/templates/unreachable_nodes.j2 +++ /dev/null @@ -1,9 +0,0 @@ -The following nodes could not be reached ({{ nodes|length}} nodes): - -{% for node in nodes %} -* {{ node.name }} - UUID: {{ node.uuid }} - Instance: {{ node.instance_uuid }} - Last Error: {{ node.last_error }} - Power State: {{ node.power_state }} -{% endfor %} diff --git a/roles/node_health/vars/main.yml b/roles/node_health/vars/main.yml deleted file mode 100644 index 080d9a565..000000000 --- a/roles/node_health/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Node health check - description: > - Check if all overcloud nodes can be connected to before starting a - scale-up or an upgrade.
- groups: - - pre-upgrade diff --git a/roles/nova_event_callback/defaults/main.yml b/roles/nova_event_callback/defaults/main.yml deleted file mode 100644 index 1389ac2b8..000000000 --- a/roles/nova_event_callback/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# Absolute path of the neutron configuration file -neutron_config_file: /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf diff --git a/roles/nova_event_callback/molecule/default/converge.yml b/roles/nova_event_callback/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/nova_event_callback/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/nova_event_callback/molecule/default/molecule.yml b/roles/nova_event_callback/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/nova_event_callback/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/nova_event_callback/tasks/main.yml b/roles/nova_event_callback/tasks/main.yml deleted file mode 100644 index 4ea4a82e3..000000000 --- a/roles/nova_event_callback/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Get auth_url value from hiera - become: true - command: hiera -c /etc/puppet/hiera.yaml neutron::server::notifications::nova::auth_url - ignore_errors: true - changed_when: false - register: auth_url - -- name: Get auth_url value from neutron.conf - become: true - validations_read_ini: - path: "{{ neutron_config_file }}" - section: nova - key: auth_url - ignore_missing_file: true - register: neutron_auth_url_result - -- name: Check [nova]/auth_url setting value from neutron.conf - fail: - msg: >- - [nova]/auth_url from {{ neutron_config_file }} is set to - {{ neutron_auth_url_result.value or 'None' }} - but it should be set to {{ auth_url.stdout }}. 
- failed_when: "neutron_auth_url_result.value != auth_url.stdout" diff --git a/roles/nova_event_callback/vars/main.yml b/roles/nova_event_callback/vars/main.yml deleted file mode 100644 index 65a6e584d..000000000 --- a/roles/nova_event_callback/vars/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -metadata: - name: Nova Event Callback Configuration Check - description: > - This validation verifies that the Nova auth_url in neutron, - which is generally enabled by default, is configured correctly. - It checks the following files on the Overcloud Controller(s): - - /etc/neutron/neutron.conf: - [nova]/auth_url = 'http://nova_admin_auth_ip:5000' - groups: - - post-deployment diff --git a/roles/nova_status/molecule/default/converge.yml b/roles/nova_status/molecule/default/converge.yml deleted file mode 100644 index d7a39ab4b..000000000 --- a/roles/nova_status/molecule/default/converge.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: working detection - include_role: - name: nova_status - - - name: make validation fail - block: - - name: run validation - include_role: - name: nova_status - vars: - container_cli: docker - - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Test output - debug: - msg: The validation works! End play - - - name: End play - meta: end_play - - - name: Fail playbook if reached - fail: - msg: | - The nova_status validation didn't properly detect bad upgrade - status check! diff --git a/roles/nova_status/molecule/default/molecule.yml b/roles/nova_status/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/nova_status/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/nova_status/molecule/default/prepare.yml b/roles/nova_status/molecule/default/prepare.yml deleted file mode 100644 index e6cddc634..000000000 --- a/roles/nova_status/molecule/default/prepare.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: Populate successful podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$3" - shift - command="$@" - - case $container in - 'heat_api_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * heat-manage purge_deleted' - ;; - 'keystone_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * keystone-manage token_flush' - ;; - 'nova_api') - exit 0 - ;; - *) - echo "Unknown container ${container}" - ;; - esac - - - name: Populate buggy docker CLI - copy: - dest: /usr/bin/docker - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$3" - shift - command="$@" - - case $container in - 'heat_api_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * some-other command' - ;; - 'keystone_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * some-other command' - ;; - 'nova_api') - exit 2 - ;; - *) - echo "Unknown container ${container}" - ;; - esac diff --git a/roles/nova_status/tasks/main.yml b/roles/nova_status/tasks/main.yml deleted file mode 100644 index 5f26b8173..000000000 --- a/roles/nova_status/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Set container_cli fact from the inventory - set_fact: - container_cli: "{{ hostvars[inventory_hostname].container_cli | default('podman', true) }}" - when: container_cli is not defined - -- name: Check nova upgrade status - become: true - command: "{{ container_cli }} exec -u root nova_api nova-status upgrade check" - changed_when: false - register: nova_upgrade_check - -- name: Warn if at least one check encountered an issue - warn: - msg: | - At least one check encountered an issue and requires further investigation. - This is considered a warning but the upgrade may be OK. - See the detail at https://docs.openstack.org/nova/latest/cli/nova-status.html#nova-status-checks - when: "nova_upgrade_check.rc == 1" - -- name: Fail if there was an upgrade status check failure - fail: - msg: | - There was an upgrade status check failure that needs to be investigated. - This should be considered something that stops an upgrade. - See the detail at https://docs.openstack.org/nova/latest/cli/nova-status.html#nova-status-checks - when: "nova_upgrade_check.rc not in [0, 1]" diff --git a/roles/nova_status/vars/main.yml b/roles/nova_status/vars/main.yml deleted file mode 100644 index e5965fa3e..000000000 --- a/roles/nova_status/vars/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -metadata: - name: Nova Status Upgrade Check - description: > - Performs a release-specific readiness check before restarting services with - new code. This command expects to have complete configuration and access to - databases and services within a cell. For example, this check may query the - Nova API database and one or more cell databases. It may also make requests - to other services such as the Placement REST API via the Keystone service - catalog - - The nova-status upgrade check command has three standard return codes: - - 0 -> All upgrade readiness checks passed successfully and there is nothing to do. - 1 -> At least one check encountered an issue and requires further investigation. - This is considered a warning but the upgrade may be OK. - 2 -> There was an upgrade status check failure that needs to be investigated. - This should be considered something that stops an upgrade. 
- groups: - - pre-upgrade diff --git a/roles/nova_svirt/defaults/main.yml b/roles/nova_svirt/defaults/main.yml deleted file mode 100644 index 94a8e4269..000000000 --- a/roles/nova_svirt/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "nova_svirt" -nova_svirt_directory: /run/libvirt/qemu diff --git a/roles/nova_svirt/molecule/default/converge.yml b/roles/nova_svirt/molecule/default/converge.yml deleted file mode 100644 index 4973c374e..000000000 --- a/roles/nova_svirt/molecule/default/converge.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - tasks: - - name: Run against a successful file - vars: - nova_svirt_directory: /libvirt/success - include_role: - name: nova_svirt - - - name: Run against failed file - vars: - nova_svirt_directory: /libvirt/failure - block: - - name: Run the validation - include_role: - name: nova_svirt - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Success output - debug: - msg: Validation successfully detected the failure - - - name: End play - meta: end_play - - - name: Fail if we reach this point - fail: - msg: The validation did not detect the error diff --git a/roles/nova_svirt/molecule/default/molecule.yml b/roles/nova_svirt/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/nova_svirt/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/nova_svirt/molecule/default/prepare.yml b/roles/nova_svirt/molecule/default/prepare.yml deleted file mode 100644 index ee5699658..000000000 --- a/roles/nova_svirt/molecule/default/prepare.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Prepare - hosts: all - tasks: - - name: Install python lxml library - package: - name: python*-lxml - state: present - - - name: Create directory tree - file: - path: "{{ item }}" - state: directory - loop: - - /libvirt - - /libvirt/success - - /libvirt/failure - - - name: Push correct xml - copy: - mode: 0644 - dest: /libvirt/success/instance-0001.xml - content: | - - - - - +107:+107 - - - - system_u:object_r:svirt_image_t:s0:c687,c775 - - - - - - name: Push incorrect xml - copy: - mode: 0644 - dest: /libvirt/failure/instance-0002.xml - content: | - - - - - +107:+107 - - - diff --git a/roles/nova_svirt/tasks/main.yml b/roles/nova_svirt/tasks/main.yml deleted file mode 100644 index 29148d7da..000000000 --- a/roles/nova_svirt/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Get all instance XMLs - register: xmls - find: - paths: "{{ nova_svirt_directory }}" - patterns: "*.xml" - recurse: true - depth: 1 - -- name: Loop on XMLs and validate sVirt availability - loop: "{{ xmls.files }}" - loop_control: - label: "{{ item.path }}" - include_tasks: validate.yml diff --git a/roles/nova_svirt/tasks/validate.yml b/roles/nova_svirt/tasks/validate.yml deleted file mode 100644 index 0035af0a3..000000000 --- a/roles/nova_svirt/tasks/validate.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: "Parse {{ item.path }}" - become: true - register: seclabels - community.general.xml: - path: "{{ item.path }}" - content: attribute - xpath: '/domstatus/domain/seclabel' - -- name: Set or reset svirt_enabled - set_fact: - svirt_enabled: false - -- name: Check enabled seclabels - loop: "{{ seclabels.matches }}" - loop_control: - loop_var: seclabel - when: - - seclabel.seclabel.model == 'selinux' - set_fact: - svirt_enabled: true - -- name: Fail if sVirt is not enabled - fail: - msg: | - sVirt not detected for {{ item.path }} - when: - - not svirt_enabled diff --git a/roles/openshift_on_openstack/defaults/main.yml b/roles/openshift_on_openstack/defaults/main.yml deleted file mode 100644 index 55318dd52..000000000 --- a/roles/openshift_on_openstack/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -min_total_ram_testing: 16384 # 4 per node -min_total_vcpus_testing: 4 # 1 per node -min_total_disk_testing: 93 # Master: 40, others: 17 per node -min_total_ram_prod: 40960 # Master: 16, others: 8 per node -min_total_vcpus_prod: 7 # Master: 4, others 1 per node -min_total_disk_prod: 93 # Master: 42, others: 17 per node -min_node_ram_testing: 4096 # Minimum ram per node for testing -min_node_disk_testing: 40 # Minimum disk per node for testing -min_node_ram_prod: 16384 # Minimum ram per node for production -min_node_disk_prod: 42 # Minimum disk per node for production -resource_reqs_testing: false -resource_reqs_prod: false diff --git a/roles/openshift_on_openstack/molecule/default/converge.yml b/roles/openshift_on_openstack/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/openshift_on_openstack/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/openshift_on_openstack/molecule/default/molecule.yml b/roles/openshift_on_openstack/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/openshift_on_openstack/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/openshift_on_openstack/tasks/openshift-hw-requirements.yaml b/roles/openshift_on_openstack/tasks/openshift-hw-requirements.yaml deleted file mode 100644 index 9f47b9475..000000000 --- a/roles/openshift_on_openstack/tasks/openshift-hw-requirements.yaml +++ /dev/null @@ -1,148 +0,0 @@ ---- -# Get auth token and service catalog from Keystone and extract service urls. -- name: Get token and catalog from Keystone - uri: - url: "{{ overcloud_keystone_url - | urlsplit('scheme') }}://{{ overcloud_keystone_url - | urlsplit('netloc') }}/v3/auth/tokens" - method: POST - body_format: json - body: - auth: - scope: - project: - name: admin - domain: - id: default - identity: - methods: - - password - password: - user: - name: admin - domain: - id: default - password: "{{ overcloud_admin_password }}" - return_content: true - status_code: 201 - register: keystone_result - no_log: true - when: overcloud_keystone_url|default('') - -- name: Set auth token - set_fact: auth_token="{{ keystone_result.x_subject_token }}" - -- name: Get Nova URL from catalog - set_fact: nova_url="{{ keystone_result.json.token - | json_query("catalog[?name=='nova'].endpoints") - | first - | selectattr('interface', 'equalto', 'public') - | map(attribute='url') | first }}" - -- name: Get Glance URL from catalog - set_fact: glance_url="{{ keystone_result.json.token - | json_query("catalog[?name=='glance'].endpoints") - | first - | selectattr('interface', 'equalto', 'public') - | map(attribute='url') | first }}" - -- name: Get flavors with required values for testing - uri: - url: "{{ nova_url }}/flavors/detail?minRam={{ min_node_ram_testing }}&minDisk={{ min_node_disk_testing }}" - method: GET - headers: - X-Auth-Token: "{{ auth_token }}" - Accept: application/vnd.openstack.compute.v2.1+json - return_content: true - follow_redirects: all - register: flavors_result_testing - -- name: Get flavors with required values for production - uri: - url: "{{ nova_url }}/flavors/detail?minRam={{ min_node_ram_prod }}&minDisk={{ min_node_disk_prod }}" - method: GET - headers: - X-Auth-Token: "{{ auth_token }}" - Accept: application/vnd.openstack.compute.v2.1+json - return_content: true - follow_redirects: all - register: flavors_result_prod - -- name: Set matching_flavors_testing variable - set_fact: - matching_flavors_testing: "{{ flavors_result_testing.json.flavors - | list | length > 0 }}" - -- 
name: Set matching_flavors_prod variable - set_fact: - matching_flavors_prod: "{{ flavors_result_prod.json.flavors - | selectattr('vcpus', 'ge', 4) - | list - | length > 0 }}" - -# Get hypervisor stats from nova and check if there are sufficient -# available resources. -- name: Get hypervisor details from nova - uri: - url: "{{ nova_url }}/os-hypervisors/statistics" - method: GET - headers: - X-Auth-Token: "{{ auth_token }}" - Accept: application/vnd.openstack.compute.v2.1+json - return_content: true - follow_redirects: all - register: hypervisors_result - -- name: Set hypervisor stats - set_fact: hv_stats="{{ hypervisors_result.json.hypervisor_statistics }}" - -- name: Set flag whether min resources for testing are available - set_fact: resource_reqs_testing=True - when: hv_stats.disk_available_least >= min_total_disk_testing - and hv_stats.free_ram_mb >= min_total_ram_testing - and hv_stats.vcpus - hv_stats.vcpus_used >= min_total_vcpus_testing - -- name: Set flag whether min resources for production are available - set_fact: resource_reqs_prod=True - when: hv_stats.disk_available_least >= min_total_disk_prod - and hv_stats.free_ram_mb >= min_total_ram_prod - and hv_stats.vcpus - hv_stats.vcpus_used >= min_total_vcpus_prod - -# Get overcloud images from Glance and check if there is one named either -# rhel or centos. -- name: Get images from glance - uri: - url: "{{ glance_url }}/v2/images" - method: GET - headers: - X-Auth-Token: "{{ auth_token }}" - return_content: true - follow_redirects: all - register: images - -- name: Find matching images - set_fact: - matching_image: "{{ images.json.images - | map(attribute='name') - | map('lower') - | select('search', '(centos|rhel)') - | list | length | int > 0 }}" - -- name: Create warning message - set_fact: - warning_msg: | - {{ lookup('template', './templates/openshift-hw-requirements-warnings.j2') }} - -- name: Fail if minimum requirements are not met - fail: - msg: "{{ warning_msg }}" - when: not matching_flavors_testing - or not matching_image - or not resource_reqs_testing - -- name: Warn if production requirements are not met - warn: - msg: "{{ warning_msg }}" - when: not matching_flavors_prod - or not matching_image - or not resource_reqs_prod diff --git a/roles/openshift_on_openstack/tasks/openshift-nw-requirements.yaml b/roles/openshift_on_openstack/tasks/openshift-nw-requirements.yaml deleted file mode 100644 index 8633856e5..000000000 --- a/roles/openshift_on_openstack/tasks/openshift-nw-requirements.yaml +++ /dev/null @@ -1,73 +0,0 @@ ---- -- name: Set fact to identify if the overcloud was deployed - set_fact: - overcloud_deployed: "{{ groups['allovercloud'] is defined }}" - -- name: Warn if no overcloud deployed yet - warn: - msg: >- - This validation should be executed on the Undercloud with a working - Overcloud. - when: not overcloud_deployed|bool - -- when: overcloud_deployed|bool - block: - # Get auth token and service catalog from Keystone and extract service urls. 
- - name: Get token and catalog from Keystone - uri: - url: "{{ overcloud_keystone_url - | urlsplit('scheme') }}://{{ overcloud_keystone_url - | urlsplit('netloc') }}/v3/auth/tokens" - method: POST - body_format: json - body: - auth: - scope: - project: - name: admin - domain: - id: default - identity: - methods: - - password - password: - user: - name: admin - domain: - id: default - password: "{{ overcloud_admin_password }}" - return_content: true - status_code: 201 - register: keystone_result - no_log: true - when: overcloud_keystone_url|default('') - - - name: Set auth token - set_fact: token="{{ keystone_result.x_subject_token }}" - - - name: Get Neutron URL from catalog - set_fact: neutron_url="{{ keystone_result.json.token - | json_query("catalog[?name=='neutron'].endpoints") - | first - | selectattr('interface', 'equalto', 'public') - | map(attribute='url') | first }}" - - # Get overcloud networks from Neutron and check if there is - # a network with a common name for external networks. - - name: Get networks from Neutron - uri: - url: "{{ neutron_url }}/v2.0/networks?router:external=true" - method: GET - headers: - X-Auth-Token: "{{ token }}" - return_content: true - follow_redirects: all - register: networks_result - - - name: Warn if there are no matching networks - warn: - msg: | - No external network found. It is strongly recommended that you - configure an external Neutron network with a floating IP address - pool. - when: networks_result.json.networks | length == 0 diff --git a/roles/openshift_on_openstack/templates/openshift-hw-requirements-warnings.j2 b/roles/openshift_on_openstack/templates/openshift-hw-requirements-warnings.j2 deleted file mode 100644 index c75e29474..000000000 --- a/roles/openshift_on_openstack/templates/openshift-hw-requirements-warnings.j2 +++ /dev/null @@ -1,17 +0,0 @@ -While checking the hardware requirements for an OpenShift deployment, the following problems were detected: - -{% if not matching_image %} - - No image with name "centos" or "rhel" could be found. -{% endif %} -{% if not matching_flavors_testing %} - - There is no flavor available that meets the hardware requirements for a test setup. -{% endif %} -{% if not matching_flavors_prod %} - - There is no flavor available that meets the hardware requirements for a production setup. -{% endif %} -{% if not resource_reqs_testing %} - - The resources necessary for a default test setup are not available on the hypervisors. -{% endif %} -{% if not resource_reqs_prod %} - - The resources necessary for a default production setup are not available on the hypervisors. -{% endif %} diff --git a/roles/openstack_endpoints/molecule/default/converge.yml b/roles/openstack_endpoints/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/openstack_endpoints/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/openstack_endpoints/molecule/default/molecule.yml b/roles/openstack_endpoints/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/openstack_endpoints/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/openstack_endpoints/tasks/main.yml b/roles/openstack_endpoints/tasks/main.yml deleted file mode 100644 index 98b1bd80e..000000000 --- a/roles/openstack_endpoints/tasks/main.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Set fact to identify if the overcloud was deployed - set_fact: - overcloud_deployed: "{{ groups['allovercloud'] is defined }}" - -# Check that the Horizon endpoint exists -- name: Fail if the HorizonPublic endpoint is not defined - fail: - msg: >- - "The `HorizonPublic` endpoint is not defined in the `EndpointMap` of the - deployed stack. This means Horizon may not have been deployed correctly." - when: - - overcloud_horizon_url|default('') | length == 0 - - overcloud_deployed|bool - -# Check connectivity to Horizon -- name: Check Horizon - uri: - url: "{{ overcloud_horizon_url }}" - when: overcloud_horizon_url|default('') - -# Check that the Keystone endpoint exists -- name: Fail if KeystoneURL output is not available - fail: - msg: >- - "The `KeystoneURL` output is not available in the deployed stack." - when: - - overcloud_keystone_url|default('') | length == 0 - - overcloud_deployed|bool - -# Check that we can obtain an auth token from Keystone -- name: Check Keystone - no_log: true - uri: - url: "{{ overcloud_keystone_url | urlsplit('scheme') }}://{{ overcloud_keystone_url | urlsplit('netloc') }}/v3/auth/tokens" - method: POST - body_format: json - body: - auth: - identity: - methods: - - password - password: - user: - name: admin - domain: - name: Default - password: "{{ overcloud_admin_password }}" - return_content: true - status_code: 201 - register: auth_token - when: overcloud_keystone_url|default('') - -# TODO(shadower): other endpoints diff --git a/roles/openstack_endpoints/vars/main.yml b/roles/openstack_endpoints/vars/main.yml deleted file mode 100644 index b4001c66d..000000000 --- a/roles/openstack_endpoints/vars/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -metadata: - name: Check connectivity to various OpenStack services - # TODO: this could also check for undercloud endpoints - description: > - This validation gets the PublicVip address from the deployment and - tries to access Horizon and get a Keystone token. - groups: - - post-deployment - - pre-upgrade - - post-upgrade diff --git a/roles/oslo_config_validator/defaults/main.yml b/roles/oslo_config_validator/defaults/main.yml deleted file mode 100644 index 238b7d1f4..000000000 --- a/roles/oslo_config_validator/defaults/main.yml +++ /dev/null @@ -1,377 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should be placed in this file. - -# All variables within this role should have a prefix of "oslo_config_validator" -oslo_config_validator_debug: false - -# Comparison report with current settings and default settings -oslo_config_validator_report: false - -# Returns all settings with possibly invalid values or that simply do not exist. -# oslo.config uses a typed system for possible values of each setting. -# If a setting is not typed correctly, or is non-existent, oslo-config-validator -# will trigger an error here. -oslo_config_validator_validation: true - -# This is a temporary folder used when generating reports. -# It will be created and deleted if necessary on all the nodes -oslo_config_validator_report_path: "/var/tmp/config_validator_report" - -# Whether or not we should archive the report into a single file after creation -oslo_config_validator_report_archive: true - -# This is the working folder when running validations. It will be bind mounted -# on the validation containers -# It will be created and deleted if necessary on all the nodes -oslo_config_validator_work_path: "/var/lib/tripleo-config/oslo_config_validator" - -# When running validation, whether or not we should check for invalid settings. -# This adds to the time it takes to complete validation because of the way -# the validations_read_ini module works. -oslo_config_validator_invalid_settings: true - - -# These messages are globally ignored and will not trigger a validation failure -oslo_config_validator_global_ignored_messages: - - 'INFO:keyring.backend:Loading.*' - - 'WARNING:oslo_config.generator:normalizing group name.*' - - 'WARNING:stevedore.named:Could not load .*' - - 'WARNING:root:Deprecated opt .*' - - 'INFO:oslo_utils.netutils:Could not determine default network interface, using .*' - - 'ERROR:root:quotas/quota_[^\s]+ {{ invalid_setting_regex }}' - - '.*Ignoring option because it is part of the excluded patterns. This can be changed with the.*' - -# namespace configuration: -# We can define configuration for each namespace. -# ignored_messages: list of regexes that, when returned from calling a validation using that -# specific namespace, will be ignored -# invalid_settings: list of settings that will trigger a failure if they are set to specific -# values. Separator is meant to split the setting value and check each -# one of the values individually, like nova filters. This is useful to -# validate if deprecated or removed option values are still in use. -# keys: -# - section: configuration section (ie: DEFAULT) -# - option: setting name (ie: debug) -# - separator: string delimiter that will be used to convert value to a list -# - value_list: List of strings that will be checked against.
-# - operator: Can be either these values, default being "eq": -# - not: current value should not match exactly any element in value_list -# - lt: current value should be lesser than first element of value_list -# - gt: current value should be greater than first element of value_list -# - eq: current value, if defined, should be equal to one element in value_list -# -oslo_config_validator_namespaces_config: - - namespace: glance.store - ignored_messages: - - "ERROR:stevedore.extension:Could not load '(glance.store.)*s3(.Store)*': No module named 'boto3'" - - - namespace: heat.common.config - ignored_messages: - # NOTE(dvd): openstack/heat fix: https://review.opendev.org/789680 - # heat wasn't including its yaql and cache options by default. This is being worked - # on during the Xena cycle and should hopefully be backported down to train. - - 'ERROR:root:(yaql|(resource_finder_)*cache)/[^\s]+ {{ invalid_setting_regex }}' - - - namespace: keystonemiddleware.auth_token - ignored_messages: - - >- - .*Ignoring missing option "(auth_url|username|password|(user|project)_(domain_)*name)" from group "keystone_authtoken" because the group is known to - have incomplete sample config data and thus cannot be validated properly.* - - - namespace: neutron - invalid_settings: - - section: nova - option: tenant_name - operator: eq - value_list: - - service - - section: nova - option: project_name - operator: eq - value_list: - - service - - section: DEFAULT - option: notify_nova_on_port_data_changes - operator: not - value_list: - - "True" - - section: DEFAULT - option: notify_nova_on_port_status_changes - operator: not - value_list: - - "True" - - - namespace: cinder - invalid_settings: - - section: DEFAULT - option: enable_v3_api - value_list: - - "True" - ignored_messages: - - 'ERROR:root:DEFAULT/api_paste_config {{ invalid_setting_regex }}' - - - namespace: nova.conf - invalid_settings: - - section: filter_scheduler - option: enabled_filters - separator: "," - value_list: - - ExactCoreFilter - - ExactRamFilter - - ExactDiskFilter - - CoreFilter - - RamFilter - - DiskFilter - - RetryFilter - - section: DEFAULT - option: vif_plugging_timeout - operator: lt - value_list: - - 300 - - section: DEFAULT - option: vif_plugging_timeout - value_list: - - "True" - - ignored_messages: - # These settings are used by openstacksdk and not part of oslo_config_opts. They are not taken - # into account by oslo-config-(generator|validator) - - 'ERROR:root:(cinder|service_user)/region_name {{ invalid_setting_regex }}' - # Censoring password - - 'WARNING:root:neutron/metadata_proxy_shared_secret sample value is empty but input-file has' - # TODO(dvd): Needs to be investigated with TLSe - - 'ERROR:root:cache/tls_enabled not found' - -# service configuration: -# Configuration for each openstack services is stored here -# config_files: List of config files with their specific namespaces -# default_namespaces: List of namespaces that should be checked against each files -# ignored_groups: List of groups that shouldn't be checked. This is passed as --exclude-group -# to the oslo-config-validator command. -# opt_data: Some sections are dynamically generated like cinder's backend sections. This will -# generate a custom opt_data using the content of template_sections as options for -# the list of items in index_key's values. This adds a lot of overhead to the parsing -# because we need to spawn an oslo-config-generator to pull out the default yaml -# config and build a new data structure from it. 
-oslo_config_validator_service_configs: - # https://opendev.org/openstack/nova/src/branch/master/etc/nova/nova-config-generator.conf - nova: - config_files: - - path: /etc/nova/nova.conf - namespaces: [] - opt_data: - - group_create: - template: os_vif_ovs - group_name: vif_plug_ovs - - group_create: - template: os_vif_linux_bridge - group_name: vif_plug_linux_brige - default_namespaces: - - nova.conf - - keystonemiddleware.auth_token - - os_vif - - oslo.log - - oslo.messaging - - oslo.policy - - oslo.privsep - - oslo.service.periodic_task - - oslo.service.service - - oslo.db - - oslo.db.concurrency - - oslo.cache - - oslo.middleware - - oslo.concurrency - - osprofiler - # https://opendev.org/openstack/cinder/src/branch/master/tools/config/cinder-config-generator.conf - cinder: - config_files: - - path: /etc/cinder/cinder.conf - namespaces: [] - ignored_groups: - - nova - - service_user - opt_data: - - index_key: - section: DEFAULT - option: enabled_backends - separator: "," - template_section: - - backend_defaults - - backend - default_namespaces: - - castellan.config - - cinder - - keystonemiddleware.auth_token - - oslo.log - - oslo.messaging - - oslo.policy - - oslo.privsep - - oslo.service.periodic_task - - oslo.service.service - - oslo.db - - oslo.db.concurrency - - oslo.middleware - - oslo.concurrency - - osprofiler - # Glance has multiple files - # https://opendev.org/openstack/glance/src/branch/master/etc/oslo-config-generator - glance: - ignored_groups: - - ref1 - - default_backend - config_files: - - path: /etc/glance/glance-api.conf - namespaces: [] - - path: /etc/glance/glance-cache.conf - namespaces: [] - - path: /etc/glance/glance-image-import.conf - namespaces: [] - - path: /etc/glance/glance-registry.conf - namespaces: [] - - path: /etc/glance/glance-scrubber.conf - namespaces: [] - - path: /etc/glance/glance-swift.conf - namespaces: [] - default_namespaces: - - glance - - glance.api - - glance.store - - glance.multi_store - - keystonemiddleware.auth_token - - oslo.log - - oslo.messaging - - oslo.policy - - oslo.privsep - - oslo.service.periodic_task - - oslo.service.service - - oslo.db - - oslo.db.concurrency - - oslo.middleware.cors - - oslo.middleware.http_proxy_to_wsgi - # https://opendev.org/openstack/heat/src/branch/master/config-generator.conf - heat: - config_files: - - path: /etc/heat/heat.conf - namespaces: [] - default_namespaces: - - heat.common.config - - heat.common.context - - heat.common.crypt - - heat.engine.clients.os.keystone.heat_keystoneclient - - heat.common.wsgi - - heat.engine.clients - - heat.engine.notification - - heat.engine.resources - - heat.api.aws.ec2token - - keystonemiddleware.auth_token - - oslo.messaging - - oslo.middleware - - oslo.db - - oslo.log - - oslo.policy - - oslo.service.service - - oslo.service.periodic_task - - oslo.service.sslutils - # https://opendev.org/openstack/ironic/src/branch/master/tools/config/ironic-config-generator.conf - ironic: - config_files: - - path: /etc/ironic/ironic.conf - namespaces: [] - - path: /etc/ironic-inspector/inspector.conf - namespaces: [] - default_namespaces: - - ironic - - ironic_lib.disk_utils - - ironic_lib.disk_partitioner - - ironic_lib.exception - - ironic_lib.json_rpc - - ironic_lib.mdns - - ironic_lib.metrics - - ironic_lib.metrics_statsd - - ironic_lib.utils - - oslo.db - - oslo.messaging - - oslo.middleware.cors - - oslo.middleware.healthcheck - - oslo.middleware.http_proxy_to_wsgi - - oslo.concurrency - - oslo.policy - - oslo.log - - oslo.reports - - oslo.service.service - - 
oslo.service.periodic_task - - oslo.service.sslutils - - osprofiler - - keystonemiddleware.auth_token - # https://opendev.org/openstack/placement/src/branch/master/etc/placement/config-generator.conf - placement: - config_files: - - path: /etc/placement/placement.conf - namespaces: [] - default_namespaces: - - placement.conf - - keystonemiddleware.auth_token - - oslo.log - - oslo.middleware.cors - - oslo.policy - # https://opendev.org/openstack/neutron/src/branch/master/etc/oslo-config-generator/neutron.conf - neutron: - config_files: - - path: /etc/neutron/neutron.conf - namespaces: [] - - path: /etc/neutron/plugins/ml2/ml2_conf.ini - namespaces: [] - default_namespaces: - - neutron - - neutron.agent - - neutron.base.agent - - neutron.db - - neutron.extensions - - nova.auth - - ironic.auth - - placement.auth - - oslo.log - - oslo.db - - oslo.policy - - oslo.privsep - - oslo.concurrency - - oslo.messaging - - oslo.middleware.cors - - oslo.middleware.http_proxy_to_wsgi - - oslo.service.sslutils - - oslo.service.wsgi - - keystonemiddleware.auth_token - # https://opendev.org/openstack/keystone/src/branch/master/config-generator/keystone.conf - keystone: - config_files: - - path: /etc/keystone/keystone.conf - namespaces: [] - default_namespaces: - - keystone - - oslo.cache - - oslo.log - - oslo.messaging - - oslo.policy - - oslo.db - - oslo.middleware - - oslo.service.sslutils - - osprofiler - -# Default value for the list of services to check. Default is we check all the services -oslo_config_validator_checked_services: "{{ oslo_config_validator_service_configs.keys() | list }}" diff --git a/roles/oslo_config_validator/meta/main.yml b/roles/oslo_config_validator/meta/main.yml deleted file mode 100644 index ac0f831b1..000000000 --- a/roles/oslo_config_validator/meta/main.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -galaxy_info: - namespace: openstack - author: OpenStack - description: TripleO OpenStack Role -- oslo_config_validator - company: Red Hat - license: Apache-2.0 - min_ansible_version: 2.7 - # - # Provide a list of supported platforms, and for each platform a list of versions. - # If you don't wish to enumerate all versions for a particular platform, use 'all'. - # To view available platforms and versions (or releases), visit: - # https://galaxy.ansible.com/api/v1/platforms/ - # - platforms: - - name: CentOS - versions: - - 7 - - 8 - - 9 - - galaxy_tags: - - tripleo - -collections: - - containers.podman diff --git a/roles/oslo_config_validator/molecule/default/molecule.yml b/roles/oslo_config_validator/molecule/default/molecule.yml deleted file mode 100644 index ddbb14a66..000000000 --- a/roles/oslo_config_validator/molecule/default/molecule.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
- -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - converge: ../../resources/playbooks/converge.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - log: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - MOLECULE_OCV: - service_name: nova_compute - config_file: etc/nova/nova.conf - config_folder: /var/lib/config-data/puppet-generated/nova_libvirt - validator_out: >- - ERROR:root:cinder/region_name not found - - INFO:root:Ignoring missing option "auth_url" from group "keystone_authtoken" because the group - is known to have incomplete sample config data and thus cannot be validated properly. - - INFO:root:Ignoring missing option "username" from group "keystone_authtoken" because the group - is known to have incomplete sample config data and thus cannot be validated properly. - - INFO:root:Ignoring missing option "password" from group "keystone_authtoken" because the group - is known to have incomplete sample config data and thus cannot be validated properly. - - INFO:root:Ignoring missing option "user_domain_name" from group "keystone_authtoken" because - the group is known to have incomplete sample config data and thus cannot be validated properly. - - INFO:root:Ignoring missing option "project_name" from group "keystone_authtoken" because the - group is known to have incomplete sample config data and thus cannot be validated properly. - - INFO:root:Ignoring missing option "project_domain_name" from group "keystone_authtoken" - because the group is known to have incomplete sample config data and thus cannot be validated - properly. - - ERROR:root:service_user/region_name not found diff --git a/roles/oslo_config_validator/molecule/mocked_failure/molecule.yml b/roles/oslo_config_validator/molecule/mocked_failure/molecule.yml deleted file mode 100644 index e3cd50f92..000000000 --- a/roles/oslo_config_validator/molecule/mocked_failure/molecule.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
- -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - converge: ../../resources/playbooks/converge.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - log: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - MOLECULE_OCV: - service_name: nova_compute - config_file: etc/nova/nova.conf - config_folder: /var/lib/config-data/puppet-generated/nova_libvirt - validator_out: | - ERROR:root:Houston we've got a problem diff --git a/roles/oslo_config_validator/resources/library/generator.yml b/roles/oslo_config_validator/resources/library/generator.yml deleted file mode 100644 index 018f890e6..000000000 --- a/roles/oslo_config_validator/resources/library/generator.yml +++ /dev/null @@ -1,74 +0,0 @@ ---- -options: - DEFAULT: - driver_option: '' - driver_opts: {} - dynamic_group_owner: '' - help: '' - opts: - - advanced: false - choices: [] - default: internal - deprecated_for_removal: false - deprecated_opts: [] - deprecated_reason: null - deprecated_since: null - dest: internal_service_availability_zone - max: null - metavar: null - min: null - mutable: false - name: internal_service_availability_zone - namespace: nova.conf - positional: false - required: false - sample_default: null - secret: false - short: null - type: string value - os_vif_linux_bridge: - driver_option: '' - driver_opts: {} - dynamic_group_owner: '' - help: '' - opts: - - advanced: false - choices: [] - default: false - dest: use_ipv6 - help: Use IPv6 - max: null - metavar: null - min: null - mutable: false - name: use_ipv6 - namespace: os_vif - positional: false - required: false - sample_default: null - secret: false - short: null - type: boolean value - os_vif_ovs: - driver_option: '' - driver_opts: {} - dynamic_group_owner: '' - help: '' - opts: - - advanced: false - choices: [] - default: 1500 - dest: network_device_mtu - help: MTU setting for network interface. - max: null - metavar: null - min: null - mutable: false - name: network_device_mtu - namespace: os_vif - positional: false - required: false - sample_default: null - secret: false - short: null - type: integer value diff --git a/roles/oslo_config_validator/resources/library/podman b/roles/oslo_config_validator/resources/library/podman deleted file mode 100755 index d000c2ea2..000000000 --- a/roles/oslo_config_validator/resources/library/podman +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# This script is to mock podman command when testing containers -# created by the ansible collection containers.podman. 
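To make the mock's behaviour concrete: prepare.yml (later in this diff) copies this script over /bin/podman and writes /test.config.yml from the MOLECULE_OCV variable, so every podman call the role makes during a molecule run answers with canned data. A minimal sketch of how the installed mock responds, assuming that setup is already in place (illustrative only, not part of the repository):

```python
# Illustrative only: drives the mocked /bin/podman the way the role would,
# assuming prepare.yml has installed it and written /test.config.yml.
import json
import subprocess

# "podman container inspect ..." hits the container_list() branch below and
# returns a single container whose Name and Mounts come from /test.config.yml.
inspect = subprocess.run(["podman", "container", "inspect", "--all"],
                         capture_output=True, text=True, check=True)
containers = json.loads(inspect.stdout)
print(containers[0]["Name"])  # e.g. nova_compute

# Any "podman ... run ..." invocation returns the canned validator_out string,
# which is what the validation tasks later parse for errors.
run = subprocess.run(["podman", "container", "run", "fake-image"],
                     capture_output=True, text=True, check=True)
print(run.stdout)
```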
- -from json import dumps -from yaml import safe_load as yaml_safe_load -import sys - -sample = { - "Id": "21d8b432eaec1b4eac2a21a78de524bdbb2f074d4ea43d3605b2b072ffe21878", - "State": { - "Status": "running", - "Running": True, - }, - "HostConfig": { - "Binds": [], - "CgroupManager": "systemd", - "CgroupMode": "private", - "ContainerIDFile": "", - "LogConfig": { - "Type": "k8s-file", - "Config": None, - "Path": "/ctr.log", - "Tag": "", - "Size": "0B" - }, - "NetworkMode": "slirp4netns", - "RestartPolicy": { - "Name": "", - "MaximumRetryCount": 0 - }, - "AutoRemove": False, - "VolumeDriver": "", - "VolumesFrom": None, - "CapAdd": [], - "CapDrop": [ - "CAP_AUDIT_WRITE", - "CAP_MKNOD", - "CAP_NET_RAW" - ], - "Dns": [], - "DnsOptions": [], - "DnsSearch": [], - "ExtraHosts": [], - "GroupAdd": [], - "IpcMode": "private", - "Cgroup": "", - "Cgroups": "default", - "Links": None, - "OomScoreAdj": 0, - "PidMode": "private", - "Privileged": False, - "PublishAllPorts": False, - "ReadonlyRootfs": False, - "SecurityOpt": [], - "Tmpfs": {}, - "UTSMode": "private", - "UsernsMode": "", - "ShmSize": 65536000, - "Runtime": "oci", - "ConsoleSize": [ - 0, - 0 - ], - "Isolation": "", - "CpuShares": 0, - "Memory": 0, - "NanoCpus": 0, - "CgroupParent": "user.slice", - "BlkioWeight": 0, - "BlkioWeightDevice": None, - "BlkioDeviceReadBps": None, - "BlkioDeviceWriteBps": None, - "BlkioDeviceReadIOps": None, - "BlkioDeviceWriteIOps": None, - "CpuPeriod": 0, - "CpuQuota": 0, - "CpuRealtimePeriod": 0, - "CpuRealtimeRuntime": 0, - "CpusetCpus": "", - "CpusetMems": "", - "Devices": [], - "DiskQuota": 0, - "KernelMemory": 0, - "MemoryReservation": 0, - "MemorySwap": 0, - "MemorySwappiness": 0, - "OomKillDisable": False, - "PidsLimit": 2048, - "Ulimits": [], - "CpuCount": 0, - "CpuPercent": 0, - "IOMaximumIOps": 0, - "IOMaximumBandwidth": 0, - "CgroupConf": None - }, - "Config": { - "Hostname": "9d8048113074", - "Domainname": "", - "User": "1001", - "AttachStdin": False, - "AttachStdout": False, - "AttachStderr": False, - "Tty": False, - "OpenStdin": False, - "StdinOnce": False, - "Env": [ - "PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "TERM=xterm", - "container=oci", - "STI_SCRIPTS_URL=image:///usr/libexec/s2i", - "STI_SCRIPTS_PATH=/usr/libexec/s2i", - "HOME=/var/lib/redis", - "REDIS_VERSION=5", - "REDIS_PREFIX=/usr", - "CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/redis", - "APP_ROOT=/opt/app-root", - "PLATFORM=el8", - "HOSTNAME=9d8048113074" - ], - "Cmd": [ - "run-redis" - ], - "Image": "registry.redhat.io/rhel8/redis-5:latest", - "Volumes": [], - "WorkingDir": "/opt/app-root/src", - "Entrypoint": "container-entrypoint", - "OnBuild": None, - "Labels": { - "architecture": "x86_64", - "build-date": "2021-05-05T06:23:07.897115", - "vcs-ref": "ea375e008017960b0b749c1aae4dcd386ee68205", - "vcs-type": "git", - }, - "Annotations": { - "io.container.manager": "libpod", - "io.kubernetes.cri-o.Created": "2021-05-22T10:08:18.243648647-04:00", - "io.kubernetes.cri-o.TTY": "false", - "io.podman.annotations.autoremove": "FALSE", - "io.podman.annotations.init": "FALSE", - "io.podman.annotations.privileged": "FALSE", - "io.podman.annotations.publish-all": "FALSE", - "org.opencontainers.image.stopSignal": "15" - }, - "StopSignal": 15, - "Umask": "0022", - "Timeout": 0, - "StopTimeout": 10 - }, - "Image": "0ece6dfb3015c221c8ad6d364dea7884ae3e24becd60e94b80d5361f4ed78f47", - "ImageName": 
"undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-nova-compute:16.1_20210430.1", - "Name": "nova_compute", - "Mounts": [], - "OCIRuntime": "crun", - "ConmonPidFile": "/run/user/1000/containers/overlay-containers/9d8048113074bdd2c25ba3b0e0606608fbb6e82173afe61696f7bd48f61d7aa4/userdata/conmon.pid", - "PidFile": "", - "RestartCount": 0, - "MountLabel": "system_u:object_r:container_file_t:s0:c738,c1002", - "ProcessLabel": "system_u:system_r:container_t:s0:c738,c1002", - "AppArmorProfile": "", - "EffectiveCaps": None, - "BoundingCaps": [ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FOWNER", - "CAP_FSETID", - "CAP_KILL", - "CAP_NET_BIND_SERVICE", - "CAP_SETFCAP", - "CAP_SETGID", - "CAP_SETPCAP", - "CAP_SETUID", - "CAP_SYS_CHROOT" - ], - "ExecIDs": [], -} -image_get = { - "Id": "1f202f9b76988ef7cd962db56a801b89539e0dcf1bc03953883faaaf83f4c654", - "Digest": "sha256:ab901ece87a1bad3bbf7581356f18c1f79527124ed4792158c2b0b43a4896994", - "RepoTags": [ - "something/something:latest" - ], - "RepoDigests": [ - "something/something@sha256:ab901ece87a1bad3bbf7581356f18c1f79527124ed4792158c2b0b43a4896994" - ], - "Parent": "5d0da3dc976460b72c77d94c8a1ad043720b0416bfc16c52c45d4847e53fadb6", - "Comment": "", - "Created": "2021-09-27T21:49:36.486090789Z", - "Config": { - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "Cmd": [ - "sleep", - "infinity" - ], - "Labels": { - "build_date": "2021-09-27", - "description": "something", - "io.buildah.version": "1.23.0", - "maintainer": "David Vallee Delisle \u003cdvd@redhat.com\u003e", - "name": "something", - "org.label-schema.build-date": "20210915", - "org.label-schema.license": "GPLv2", - "vendor": "Red Hat" - } - }, - "Version": "", - "Author": "David Vallee Delisle", - "Architecture": "amd64", - "Os": "linux", - "Size": 1004968286, - "VirtualSize": 1004968286, - "GraphDriver": { - "Name": "overlay", - "Data": { - "LowerDir": "/home/dvd/.local/share/containers/storage/overlay/74ddd0ec08fa43d09f32636ba91a0a3053b02cb4627c35051aff89f853606b59/diff", - "UpperDir": "/home/dvd/.local/share/containers/storage/overlay/1e79d79d8ad3c6eb4446e0c429275c70f0e2918f2e0038441cd11e6f3e0b2aaf/diff", - "WorkDir": "/home/dvd/.local/share/containers/storage/overlay/1e79d79d8ad3c6eb4446e0c429275c70f0e2918f2e0038441cd11e6f3e0b2aaf/work" - } - }, - "RootFS": { - "Type": "layers", - "Layers": [ - "sha256:74ddd0ec08fa43d09f32636ba91a0a3053b02cb4627c35051aff89f853606b59", - "sha256:47bd6062c936b8eead8f0c88a7023a4e2f8c6e53086d201c9da048dde7e3b9a3" - ] - }, - "Labels": { - "build_date": "2021-09-27", - "description": "something", - "io.buildah.version": "1.23.0", - "maintainer": "David Vallee Delisle \u003cdvd@redhat.com\u003e", - "name": "something", - "org.label-schema.build-date": "20210915", - "org.label-schema.license": "GPLv2", - "vendor": "Red Hat" - }, - "Annotations": { - "org.opencontainers.image.base.digest": "sha256:a1801b843b1bfaf77c501e7a6d3f709401a1e0c83863037fa3aab063a7fdb9dc", - "org.opencontainers.image.base.name": "quay.io/centos/centos:8" - }, - "ManifestType": "application/vnd.oci.image.manifest.v1+json", - "User": "", -} - - -def read_config(config='/test.config.yml'): - with open(config, 'r') as yaml_config: - test_config = yaml_safe_load(yaml_config) - return test_config - - -def mock_generator(): - with open('/generator.yml', 'r') as generator: - return "".join(generator.readlines()) - - -def container_list(): - test_config = read_config('/test.config.yml') - config_folder = test_config.get('config_folder') - 
sample['Name'] = test_config.get('service_name') - sample['State']['Running'] = bool(test_config.get('service_running', True)) - sample['Mounts'].append({'Type': 'bind', 'Source': config_folder}) - return [sample] - - -def container_exec(): - test_config = read_config('/test.config.yml') - return test_config.get('validator_out') - - -if __name__ == '__main__': - if "image" in sys.argv[1]: - print(dumps([image_get])) - elif "version" in sys.argv[1]: - print("podman version 3.3.1") - elif "oslo-config-generator" in sys.argv: - print(mock_generator()) - elif len(sys.argv) > 2 and sys.argv[2] == "run": - print(container_exec()) - elif sys.argv[1] == "container": - container = container_list() - container[0]['cmd'] = sys.argv - print(dumps(container)) - else: - print(container_exec()) - print(sys.argv) diff --git a/roles/oslo_config_validator/resources/playbooks/converge.yml b/roles/oslo_config_validator/resources/playbooks/converge.yml deleted file mode 100644 index d2208e767..000000000 --- a/roles/oslo_config_validator/resources/playbooks/converge.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - environment: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_LIBRARY: ../../resources/library - tasks: - - block: - - name: Include the oslo_config_validator role - include_role: - name: oslo_config_validator - vars: - oslo_config_validator_debug: true - rescue: - - fail: - msg: "Default test failed" - when: molecule_yml.scenario.name == "default" - - fail: - msg: "Scenario {{ molecule_yml.scenario.name }} was supposed to fail" - when: - - not validation_errors | count diff --git a/roles/oslo_config_validator/resources/playbooks/prepare.yml b/roles/oslo_config_validator/resources/playbooks/prepare.yml deleted file mode 100644 index 288ab93ab..000000000 --- a/roles/oslo_config_validator/resources/playbooks/prepare.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- hosts: all - tasks: - - name: Copy fake podman to path - become: true - copy: - src: "{{ playbook_dir }}/../library/podman" - dest: /bin/podman - backup: true - mode: '0777' - - - name: Save test config from environment variable - set_fact: - test_config: "{{ lookup('env', 'MOLECULE_OCV') }}" - - - name: Print the test configuration - debug: - var: test_config - - - name: Set config file fact - set_fact: - config_file: "{{ test_config.config_folder }}/{{ test_config.config_file }}" - - - name: Saving test_config to file - copy: - content: "{{ test_config }}" - dest: "/test.config.yml" - - - name: Saving generator.yml to file - copy: - src: "{{ playbook_dir }}/../library/generator.yml" - dest: "/generator.yml" - - - name: Creating mocked config folder - file: - path: "{{ config_file | dirname }}" - state: directory - recurse: true - - - name: Creating mocked config file - file: - path: "{{ config_file }}" - state: touch diff --git a/roles/oslo_config_validator/tasks/build_validation_config.yml
b/roles/oslo_config_validator/tasks/build_validation_config.yml deleted file mode 100644 index 9d8e991d8..000000000 --- a/roles/oslo_config_validator/tasks/build_validation_config.yml +++ /dev/null @@ -1,184 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# These tasks are meant to reformat the data structure and merge -# them with the config files found on the nodes as well as running -# containers. - -- name: Generating computed service config - set_fact: - service_config: >- - {% set config_list = [] %}{% - for svc in oslo_config_validator_checked_services %}{% - set ns = oslo_config_validator_service_configs[svc] %}{% - for cf in ns.config_files %}{% - set _ = config_list.append({ - "path": cf.path, - "service": svc, - "ignored_groups": ns.ignored_groups | default([]), - "opt_data": ns.opt_data | default([]), - "namespaces": cf.namespaces | union(ns.default_namespaces) | unique - }) - %}{% - endfor - %}{% - endfor - %}{{ config_list }} - -- name: Printing computed service config - when: - - oslo_config_validator_debug | bool - debug: - var: service_config - -- name: Overriding namespace configuration - set_fact: - namespaces_config: >- - {% set override_config = [] %}{% - for config in oslo_config_validator_namespaces_config | default([]) %}{% - set _ = override_config.append( - oslo_config_validator_namespaces_config_override | default([]) | - selectattr('namespace', 'eq', config.namespace) | first | default(config, true) - ) %}{% - endfor %}{{ - override_config }} - -- name: Printing overriden namespace config - when: - - oslo_config_validator_debug | bool - debug: - var: namespaces_config - -- name: podman - Gather facts for all containers - become: true - containers.podman.podman_container_info: - register: containers_facts - -- name: Generating config_location dict - set_fact: - config_locations: >- - {% set tmp_config_locations={} %}{% - for mount in container.Mounts | selectattr('Source', 'match', '/var/lib/config-data/puppet-generated/[^/]+$') | list - %}{% - if mount.Source not in config_locations - %}{%- set _ = tmp_config_locations.update({mount.Source: {"image": container.Image | default("") }}) -%}{% - endif %}{% - endfor - %}{{ config_locations | combine(tmp_config_locations) }} - loop: "{{ containers_facts.containers }}" - loop_control: - loop_var: container - label: "{{ container.Name }}" - when: - - container.State.Running - - container.Mounts | selectattr('Source', 'match', '/var/lib/config-data/puppet-generated/[^/]+$') | list | count - -- name: Looking for possible config files - find: - recurse: true - depth: 4 - excludes: - - ".*httpd.*" - - "[0-9]+.*" - - ".*wsgi" - paths: "{{ config_path.key }}" - patterns: - - ".*({{ oslo_config_validator_service_configs.keys() | list | join('|') }}).*\\.conf$" - use_regex: true - loop: "{{ dict(config_locations) | dict2items }}" - loop_control: - loop_var: config_path - label: "{{ config_path.key }}" - register: found_configs - -- name: Printing found configs 
- when: - - oslo_config_validator_debug | bool - debug: - var: found_configs - -- name: Building validations and invalidations data structures with real file names - when: - - result.matched - block: - - name: Building config validations dict - set_fact: - config_validations: >- - {% set tmp_config_locations = [] %}{% - for file in result.files - %}{% - set svc_config = service_config | selectattr('path', 'contains', file.path | basename) | list - %}{% - if svc_config | count %}{% - set _ = tmp_config_locations.append({ - "path": file.path, - "image": result.config_path.value.image, - "ignored_groups": svc_config.0.ignored_groups, - "namespaces": svc_config.0.namespaces | list, - "opt_data": svc_config.0.opt_data, - "service": svc_config.0.service - }) - %}{% - endif - %}{% - endfor - %}{{ config_validations | default([]) | union(tmp_config_locations) }} - loop: "{{ found_configs.results }}" - loop_control: - loop_var: result - label: "{{ result.config_path.key }}" - - - name: Building config invalidations dict - set_fact: - config_invalidations: >- - {% set tmp_config_locations = [] %}{% - for file in result.files - %}{% - set svc_config = service_config | selectattr('path', 'contains', file.path | basename) | list - %}{% - if svc_config | count %}{% - for ns in svc_config[0].namespaces | list %}{% - set ns_config = namespaces_config | - selectattr('namespace', 'equalto', ns) | - selectattr('invalid_settings', 'defined') | list %}{% - if ns_config | count %}{% - set _ = tmp_config_locations.append({ - "path": file.path, - "ignored_groups": svc_config[0].ignored_groups, - "service": svc_config[0].service, - "invalid_settings": ns_config[0].invalid_settings - }) %}{% - endif %}{% - endfor %}{% - endif %}{% - endfor - %}{{ config_invalidations | default([]) | union(tmp_config_locations) }} - loop: "{{ found_configs.results }}" - loop_control: - loop_var: result - label: "{{ result.config_path.key }}" - -- name: Printing config datastructure - when: - - oslo_config_validator_debug | bool - block: - - name: Config Validations - debug: - var: config_validations - - - name: Config Invalidations - debug: - var: config_invalidations diff --git a/roles/oslo_config_validator/tasks/container_run.yml b/roles/oslo_config_validator/tasks/container_run.yml deleted file mode 100644 index 0ec982105..000000000 --- a/roles/oslo_config_validator/tasks/container_run.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# These tasks are wrapping around the oslo-config container creation. 
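The tasks that follow copy the candidate config file into the work path and start a short-lived container that runs oslo-config-validator against it. The exact oslo_command is assembled in validation_command.yml (whose body is cut off at the end of this diff), so the sketch below only shows an assumed general shape of that command; --exclude-group is the one flag explicitly documented in defaults/main.yml, the other flags are standard oslo-config-validator options and are assumptions here.

```python
# Assumed shape of the command handed to the validation container; the real
# oslo_command is built by validation_command.yml and may differ.
def build_oslo_command(config_path, namespaces, ignored_groups):
    cmd = ["oslo-config-validator", "--config-file", config_path]
    for namespace in namespaces:
        cmd += ["--namespace", namespace]
    for group in ignored_groups:
        # defaults/main.yml: ignored_groups "is passed as --exclude-group"
        cmd += ["--exclude-group", group]
    return " ".join(cmd)

# e.g. build_oslo_command("/oslo_config_validation/nova.conf",
#                         ["nova.conf", "oslo.log"], ["service_user"])
```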
- -# this shouldn't be necessary once this proposed change has landed -# and propagated in images -# https://review.opendev.org/c/openstack/oslo.config/+/790883 -- name: Printing oslo-config command - when: - - oslo_config_validator_debug | bool - debug: - var: oslo_command - -- name: Copying the config file to a temp path - changed_when: false - copy: - mode: 0666 - remote_src: true - src: "{{ config_file.path }}" - dest: "{{ oslo_config_validator_work_path }}" - -- name: Run oslo-config container - containers.podman.podman_container: - name: "{{ container_name }}" - image: "{{ config_file.image }}" - state: started - detach: false - rm: true - user: "0" - volume: "{{ oslo_config_validator_work_path }}:/oslo_config_validation" - network: none - command: "{{ oslo_command }}" - register: container_run - failed_when: - - false - -- name: Printing container run output - when: - - oslo_config_validator_debug | bool - debug: - var: container_run diff --git a/roles/oslo_config_validator/tasks/invalidate_config.yml b/roles/oslo_config_validator/tasks/invalidate_config.yml deleted file mode 100644 index e5a92ea2d..000000000 --- a/roles/oslo_config_validator/tasks/invalidate_config.yml +++ /dev/null @@ -1,76 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
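invalidate_config.yml reads each flagged option with validations_read_ini and then applies the operator rules (not, eq, lt, gt) from oslo_config_validator_namespaces_config inside a fairly dense Jinja expression. For readability, here is the same comparison logic as a plain Python sketch; it is illustrative only and mirrors the set_fact task below rather than replacing it:

```python
# Mirrors the operator handling in the invalid_settings set_fact below;
# illustrative only, not code shipped with the role.
def check_setting(current_value, value_list, operator="eq", separator=None):
    """Return error strings when a config value violates an invalid_settings rule."""
    errors = []
    values = current_value.split(separator) if separator else [current_value]
    for val in values:
        if operator == "not" and val in value_list:
            errors.append("forbidden value in use: %s" % val)
        elif operator == "eq" and val not in value_list:
            errors.append("value %s should be one of %s" % (val, value_list))
        elif operator == "lt" and int(val) < int(value_list[0]):
            errors.append("value %s is lower than the expected %s" % (val, value_list[0]))
        elif operator == "gt" and int(val) > int(value_list[0]):
            errors.append("value %s is greater than the expected %s" % (val, value_list[0]))
    return errors
```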
- -- name: Getting possible invalid settings - become: true - validations_read_ini: - path: "{{ service.0.path }}" - section: "{{ service.1.section }}" - key: "{{ service.1.option }}" - loop: "{{ config_invalidations | default([]) | subelements('invalid_settings') }}" - loop_control: - loop_var: service - label: "{{ service.1.section }}/{{ service.1.option }} in {{ service.0.path }}" - register: invalid_setting_validation - check_mode: false - -- name: Printing invalid settings - when: - - oslo_config_validator_debug | bool - debug: - var: invalid_setting_validation - -- name: Checking if settings are infact valid - when: - - setting.value - set_fact: - invalid_settings: >- - {% set errors = [] %}{% - set svc = setting.service.1 %}{% - set operator = svc.operator | default("not") %}{% - if 'separator' in svc %}{% - set setting_values = setting.value.split(svc.separator) | list %}{% - else %}{% - set setting_values = [setting.value] %}{% - endif %}{% - set setting_path = setting.service.0.path + ':' + svc.section + '/' + svc.option %}{% - for val in setting_values %}{% - if operator == "not" and val in svc.value_list %}{% - set _ = errors.append(setting_path + - ' has an invalid value: ' + val + - ' Forbidden values: ' + svc.value_list | join(',') ) %}{% - elif operator == "eq" and val not in svc.value_list %}{% - set _ = errors.append(setting_path + - ' has an invalid value: ' + val + - ' Should be one of these values: ' + svc.value_list | join(',') ) %}{% - elif operator == "lt" and val|int < svc.value_list.0 %}{% - set _ = errors.append(setting_path + - ': Current value ' + val + ' is lesser than expected ' + svc.value_list.0 | string) %}{% - elif operator == "gt" and val|int > svc.value_list.0 %}{% - set _ = errors.append(setting_path + - ': Current value ' + val + ' is greater than expected ' + svc.value_list.0 | string) %}{% - endif %}{% - endfor %}{{ invalid_settings | default([]) + errors }} - loop: "{{ invalid_setting_validation.results }}" - loop_control: - loop_var: setting - label: "{{ setting.service.1.section }}/{{ setting.service.1.option }} in {{ setting.service.0.path }}" - -- name: Printing invalid settings - when: - - oslo_config_validator_debug | bool - debug: - var: invalid_settings diff --git a/roles/oslo_config_validator/tasks/main.yml b/roles/oslo_config_validator/tasks/main.yml deleted file mode 100644 index cb66d4064..000000000 --- a/roles/oslo_config_validator/tasks/main.yml +++ /dev/null @@ -1,73 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -# tasks -- name: Include the config builder tasks - include_tasks: build_validation_config.yml - -- name: Validate configuration - when: - - 'oslo_config_validator_validation | bool or - oslo_config_validator_report | bool' - include_tasks: validate_config.yml - -- name: Invalidate configuration - when: - - oslo_config_validator_invalid_settings | bool - - config_invalidations is defined - include_tasks: invalidate_config.yml - -- name: Config validation reporting - when: - - oslo_config_validator_report | bool - include_tasks: report_generation.yml - -- name: Config validation assertions - block: - - name: Verifying setting validation - assert: - that: - - not service.output | count - fail_msg: | - Config file {{ service.config_file }} for {{ service.service }} has returned validation errors: - {% for msg in service.output %} - {{ msg }} - {% endfor %} - loop: "{{ validation_output }}" - loop_control: - loop_var: service - label: "{{ service.config_file }}/{{ service.service }}" - register: validation_errors - ignore_errors: true - - - name: Asserted failure - when: "'failed' in validation_errors or - (invalid_settings is defined and - invalid_settings | count)" - fail: - msg: | - Configuration validation failed for at least one service: - {% - if validation_errors.results | count %}{% - for service in validation_errors.results %}{% - if service.failed %}{{ service.msg }}{% endif %}{% - endfor %}{% - endif %}{% - if invalid_settings is defined and invalid_settings | count %}{% - for message in invalid_settings %}{{ message }} - {% endfor %}{% - endif %} diff --git a/roles/oslo_config_validator/tasks/opt_data_format.yml b/roles/oslo_config_validator/tasks/opt_data_format.yml deleted file mode 100644 index 40404f2c4..000000000 --- a/roles/oslo_config_validator/tasks/opt_data_format.yml +++ /dev/null @@ -1,106 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# These tasks are generating a new opt_data data structure based -# on the output of oslo-config-generator merged with the opt_data -# mapping in the config file settings. This has to be looped for -# each mapping. 
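Put more plainly, the comment above describes this transformation: read the index_key option from the config file (cinder's DEFAULT/enabled_backends, for example), split its value on the separator, and attach the template opts taken from the oslo-config-generator output to every resulting section name, so that dynamically named groups are known to the validator. A rough Python equivalent, for illustration only; the role itself does this with set_fact and Jinja in the tasks that follow:

```python
# Rough illustration of the opt_data expansion done by the tasks below;
# generator_options is the parsed "options" mapping from oslo-config-generator.
def expand_opt_data(generator_options, option_data, index_value):
    templates = []
    for section in option_data.get("template_section", []):
        templates.append(generator_options[section]["opts"])
    separator = option_data["index_key"].get("separator")
    names = index_value.split(separator) if separator else [index_value]
    # e.g. enabled_backends = "tripleo_ceph,tripleo_nfs" yields one new group
    # per backend, each carrying the backend_defaults/backend template opts.
    # (The group_create variant instead attaches a single named template.)
    return {name: {"opts": templates} for name in names}
```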
- -- name: Get list of templatable sections from config file - when: "'index_key' in option_data" - block: - - name: Reading config file to get template - become: true - validations_read_ini: - path: "{{ config_file.path }}" - section: "{{ option_data.index_key.section }}" - key: "{{ option_data.index_key.option }}" - register: opt_val - - - name: Printing templatable values - when: - - oslo_config_validator_debug | bool - debug: - var: opt_val - -- name: Building list of replaceable data - vars: - templates: [] - sections: [] - when: - - opt_data | length - - "('index_key' in option_data and opt_val.value) or 'group_create' in option_data" - block: - - name: Printing opt_data.options - debug: - var: opt_data.options - when: oslo_config_validator_debug | bool - - - name: Printing option_data - debug: - var: option_data - when: oslo_config_validator_debug | bool - - - block: - - name: Populating templates list - set_fact: - templates: "{{ templates + [ opt_data.options[item].opts ] }}" - with_items: "{{ option_data.template_section }}" - - name: Populating sections list with separator - set_fact: - sections: "{{ opt_val.value | split(option_data.index_key.separator) | list}}" - when: "{{ 'separator' in option_data.index_key }}" - - name: Populating sections list without separator - set_fact: - sections: "{{ [opt_val.value] }}" - when: "{{ 'separator' not in option_data.index_key }}" - when: "{{ 'template_section' in option_data }}" - - block: - - fail: - msg: > - Requested template key {{ option_data.group_create.template }} isn't - present in the options data dictionary. - {{ opt_data.options }} - when: option_data.group_create.template not in opt_data.options - - name: Populating sections and templates list without template_section - set_fact: - templates: "{{ opt_data.options[option_data.group_create.template].opts }}" - sections: "{{ [option_data.group_create.group_name] }}" - when: "{{ 'group_create' in option_data and 'template_section' not in option_data }}" - - - name: Printing sections - debug: - var: sections - when: oslo_config_validator_debug | bool - - - set_fact: - new_sections: >- - {% set section_list = {} %}{% - for val in sections %}{% - set _ = section_list.__setitem__(val, {"opts": templates }) %}{% - endfor %}{{ section_list }} - -- name: Printing generated sections - when: - - oslo_config_validator_debug | bool - debug: - var: new_sections - -- name: Adding new sections to opt_data - when: - - new_sections | default([]) | count - set_fact: - opt_data: "{{ opt_data | combine({'options': new_sections}, recursive=True) }}" diff --git a/roles/oslo_config_validator/tasks/report_generation.yml b/roles/oslo_config_validator/tasks/report_generation.yml deleted file mode 100644 index f21cc2fa2..000000000 --- a/roles/oslo_config_validator/tasks/report_generation.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -- name: Set folder name fact - set_fact: - local_folder_name: "{{ oslo_config_validator_report_path }}/{{ inventory_hostname }}" - -- name: Making sure local folder exists - changed_when: false - file: - path: "{{ local_folder_name }}" - state: directory - recurse: true - delegate_to: localhost - -- name: Saving filtered outputs from validation to files - changed_when: false - copy: - content: "{{ service.output | join('\n') }}\n" - dest: "{{ local_folder_name }}/{{ service.service }}-{{ service.config_file | basename }}" - loop: "{{ validation_output }}" - loop_control: - loop_var: service - delegate_to: localhost - -- name: Saving invalidation to files - changed_when: false - when: - - invalid_settings | default([]) | count - copy: - content: "{{ invalid_settings | join('\n') }}\n" - dest: "{{ local_folder_name }}/invalid_settings.log" - delegate_to: localhost - -- name: Setting report generation default message - set_fact: - report_msg: "Reports are available in {{ oslo_config_validator_report_path }}" - -- name: Archive report - when: - - oslo_config_validator_report_archive | bool - block: - - name: Zipping fetched files - changed_when: false - archive: - path: - - "{{ oslo_config_validator_report_path }}/*" - dest: "{{ oslo_config_validator_report_path }}.tar.xz" - remove: true - format: xz - run_once: true - delegate_to: localhost - delegate_facts: true - register: archive_out - - - name: Delete local files - changed_when: false - file: - path: "{{ oslo_config_validator_report_path }}" - state: absent - run_once: true - delegate_to: localhost - - - name: Setting report generation archived message - set_fact: - report_msg: | - Reports are archived in {{ oslo_config_validator_report_path }}.tar.xz and contains - {{ archive_out.archived | list | default([]) | count }} file(s) - - -- name: Report path - run_once: true - warn: - msg: "{{ report_msg }}" - -- name: Role terminated after reports are ready. - meta: end_play diff --git a/roles/oslo_config_validator/tasks/validate_config.yml b/roles/oslo_config_validator/tasks/validate_config.yml deleted file mode 100644 index b3105ffa1..000000000 --- a/roles/oslo_config_validator/tasks/validate_config.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -- name: Making sure work folder is present - become: true - changed_when: false - file: - mode: 0777 - path: "{{ oslo_config_validator_work_path }}" - state: directory - -- name: Validate configuration - include_tasks: validation_command.yml - loop: "{{ config_validations }}" - loop_control: - loop_var: config_file - label: "{{ config_file.service }}: {{ config_file.path }}" - -- name: Deleting work folder - become: true - changed_when: false - file: - path: "{{ oslo_config_validator_work_path }}" - state: absent - -- name: Preparing and filtering output datastructure - when: - - validated_configs is defined - set_fact: - validation_output: >- - {% set ignored_patterns = oslo_config_validator_global_ignored_messages | default("") %}{% - for ns in namespaces_config | default([]) %}{% - if ns.namespace in out.config_file.namespaces %}{% - for ignored in ns.ignored_messages | default([]) %}{% - set _ = ignored_patterns.append(ignored) %}{% - endfor %}{% - endif %}{% - endfor %}{% - set output = out.output.stderr_lines | union(out.output.stdout_lines) | unique | list | reject('match', ignored_patterns | join("|")) | list %}{% - set _ = validation_output.append({ - "service": out.config_file.service, - "config_file": out.config_file.path, - "namespaces": out.config_file.namespaces, - "output": output, - }) %}{{ validation_output }} - loop: "{{ validated_configs }}" - loop_control: - loop_var: out - label: "{{ out.config_file.path }}" - -- name: Printing validation output - when: - - oslo_config_validator_debug | bool - debug: - var: validation_output diff --git a/roles/oslo_config_validator/tasks/validation_command.yml b/roles/oslo_config_validator/tasks/validation_command.yml deleted file mode 100644 index c13891e6e..000000000 --- a/roles/oslo_config_validator/tasks/validation_command.yml +++ /dev/null @@ -1,102 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
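The "Preparing and filtering output datastructure" step above (validate_config.yml) merges the validator's stderr and stdout lines, de-duplicates them, and drops anything matching the ignored-message patterns. A rough plain-Python equivalent of that Jinja filter chain, shown only to make the expression easier to follow (names are illustrative):

    import re

    def filter_validator_output(stdout_lines, stderr_lines, ignored_patterns):
        # union | unique: stderr lines first, then stdout lines not already seen
        combined = list(dict.fromkeys(list(stderr_lines) + list(stdout_lines)))
        if not ignored_patterns:
            return combined
        ignore_re = re.compile("|".join(ignored_patterns))
        # reject('match', ...): drop lines whose beginning matches any pattern
        return [line for line in combined if not ignore_re.match(line)]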
- -- name: Generating namespace arguments - set_fact: - namespace_args: "--namespace {{ config_file.namespaces | join(' --namespace ') }}" - container_name: "oslo-config-{{ config_file.service }}-{{ config_file.path | basename }}" - -- name: Opt data required - when: - - config_file.opt_data | count - block: - - name: Generating oslo-config-generator command for opt_data - set_fact: - oslo_command: "oslo-config-generator --format yaml {{ namespace_args }}" - - - name: Running container - args: - apply: - become: true - include_tasks: container_run.yml - - - name: Saving opt_data datastructure - set_fact: - opt_data: "{{ container_run.stdout | from_yaml }}" - - - name: Printing opt_data - when: - - oslo_config_validator_debug | bool - debug: - var: opt_data - - - name: Generating the new config sections - include_tasks: opt_data_format.yml - loop: "{{ config_file.opt_data }}" - loop_control: - loop_var: option_data - label: >- - {% if 'index_key' in option_data %} - {{ option_data.index_key.section }} / - {{ option_data.index_key.option }} - {% else %} - {{ option_data.group_create.template }} / - {{ option_data.group_create.group_name }} - {% endif %} - - - name: Saving new opt_data content - become: true - changed_when: false - copy: - content: "{{ opt_data | to_yaml }}" - dest: "{{ oslo_config_validator_work_path }}/opt_data.yaml" - mode: 0666 - -- name: Generate validation command - set_fact: - oslo_command: >- - oslo-config-validator --input-file /oslo_config_validation/{{ config_file.path | basename }} - {% if config_file.opt_data | count and opt_data | length %} - --opt-data /oslo_config_validation/opt_data.yaml - {% else %} - {{ namespace_args }} - {% endif %} - {% if oslo_config_validator_report | bool %} --check-defaults{% endif %} - {% if config_file.ignored_groups | count %} - --exclude-group {{ config_file.ignored_groups | join(' --exclude-group ') }} - {% endif %} - -- name: Printing oslo-config-validator command - when: - - oslo_config_validator_debug | bool - debug: - var: oslo_command - -- name: Running container - args: - apply: - become: true - include_tasks: container_run.yml - -- name: Saving output to fact - set_fact: - validated_configs: "{{ validated_configs | default([]) + [{'output': container_run, 'config_file': config_file }] }}" - -- name: Printing validated configs - when: - - oslo_config_validator_debug | bool - debug: - var: validated_configs diff --git a/roles/oslo_config_validator/vars/main.yml b/roles/oslo_config_validator/vars/main.yml deleted file mode 100644 index 5da404da6..000000000 --- a/roles/oslo_config_validator/vars/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# While options found within the vars/ path can be overridden using extra -# vars, items within this path are considered part of the role and not -# intended to be modified. 
- -# All variables within this role should have a prefix of "oslo_config_validator" -metadata: - name: Openstack services configuration validation - description: > - This role is intended to leverage the `oslo-config-validator` on each one - of the configuration files found on a deployment. The goal is to quickly - catch erroneous configurations. - - When called manually, it will also be possible to generate a report - returning all the differences between the current configuration and the - default configuration - groups: - - backup-and-restore - - pre-upgrade - - post-deployment - - post-system-upgrade - - post-update - -# Placeholder variables -config_locations: {} -config_validations: [] -validation_output: [] -invalid_setting_regex: "(is not part of the sample config|not found)" diff --git a/roles/overcloud_service_status/defaults/main.yml b/roles/overcloud_service_status/defaults/main.yml deleted file mode 100644 index 0896d0ec0..000000000 --- a/roles/overcloud_service_status/defaults/main.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "overcloud_service_status" - -# Debugging mode, whether or no to log the token request -overcloud_service_status_debug: false - -# Overcloud API to validate against -overcloud_service_api: - - nova - - cinderv3 - -# A list of services that shouldn't be registered any more -overcloud_deprecated_services: - nova: - - nova-consoleauth - -# These variables are normally set as host variables for the undercloud when generating -# the inventory with tripleo-ansible-inventory: -# - overcloud_keystone_url -# - overcloud_admin_password diff --git a/roles/overcloud_service_status/molecule/default/molecule.yml b/roles/overcloud_service_status/molecule/default/molecule.yml deleted file mode 100644 index ddf28788e..000000000 --- a/roles/overcloud_service_status/molecule/default/molecule.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
- -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - converge: ../../resources/playbooks/converge.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - overcloud_keystone_url: http://127.0.0.1:8080 - overcloud_admin_password: hello - log: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" diff --git a/roles/overcloud_service_status/molecule/deprecated_services/molecule.yml b/roles/overcloud_service_status/molecule/deprecated_services/molecule.yml deleted file mode 100644 index ddf28788e..000000000 --- a/roles/overcloud_service_status/molecule/deprecated_services/molecule.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. - -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - converge: ../../resources/playbooks/converge.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - overcloud_keystone_url: http://127.0.0.1:8080 - overcloud_admin_password: hello - log: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" diff --git a/roles/overcloud_service_status/molecule/down_services/molecule.yml b/roles/overcloud_service_status/molecule/down_services/molecule.yml deleted file mode 100644 index ddf28788e..000000000 --- a/roles/overcloud_service_status/molecule/down_services/molecule.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. - -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - converge: ../../resources/playbooks/converge.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - overcloud_keystone_url: http://127.0.0.1:8080 - overcloud_admin_password: hello - log: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" diff --git a/roles/overcloud_service_status/resources/playbooks/converge.yml b/roles/overcloud_service_status/resources/playbooks/converge.yml deleted file mode 100644 index 8257351f9..000000000 --- a/roles/overcloud_service_status/resources/playbooks/converge.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - tasks: - - block: - - name: "Include overcloud_service_status role" - include_role: - name: "overcloud_service_status" - rescue: - - fail: - msg: "Default test failed" - when: molecule_yml.scenario.name == "default" - - set_fact: - output_var: "{{ lookup('vars', molecule_yml.scenario.name + '_output')}}" - - fail: - msg: "No {{ molecule_yml.scenario.name }} found" - when: "'failed' not in output_var" diff --git a/roles/overcloud_service_status/resources/playbooks/prepare.yml b/roles/overcloud_service_status/resources/playbooks/prepare.yml deleted file mode 100644 index 7df04594e..000000000 --- a/roles/overcloud_service_status/resources/playbooks/prepare.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Prepare - hosts: all - - tasks: - - name: Copy http_server.py - copy: - src: ../scripts/http_server.py - dest: / - - - name: "Start http_server.py scenario {{ molecule_yml.scenario.name }}" - shell: cd /; nohup python3 /http_server.py --scenario {{ molecule_yml.scenario.name }} > http_server.log 2>&1 & diff --git a/roles/overcloud_service_status/resources/scripts/http_server.py b/roles/overcloud_service_status/resources/scripts/http_server.py deleted file mode 100644 index 2a2d9f805..000000000 --- a/roles/overcloud_service_status/resources/scripts/http_server.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env python3 -""" -Simple http server to mock a keystone token and service list. -If arguments are passed, they will either set the services as down -or create additionnal services. - -Example: -./http_server.py --scenario default -- will return services based on the 'services' dict below. - -./http_server.py --scenario deprecated_services -- will return services based on the 'services' dict below, as well - as a nova-consoleauth service. - -./http_server.py --scenario down_services -- will return services based on the 'services' dict below, as well - as marking one of the services as down. 
-""" -import argparse -from datetime import datetime -from http.server import BaseHTTPRequestHandler, HTTPServer -import json - -server_port = 8080 -server_url = "http://127.0.0.1" - -# List of services to mock -# Controllers are going to be created 3 times, computes and hostgroups once -services = { - "nova": { - "controller": ["nova-scheduler", "nova-conductor"], - "compute": ["nova-compute"], - }, - "cinder": { - "controller": ["cinder-scheduler"], - "hostgroup": ["cinder-volume"], - }, -} - -parser = argparse.ArgumentParser(description="mocking keystone and os-service calls") -parser.add_argument( - "--scenario", - action="store", - default="default", - help="Scenario to reproduce", -) -args = parser.parse_args() - - -class S(BaseHTTPRequestHandler): - def _set_response(self, code=200, **kwargs): - self.send_response(code) - self.send_header("Content-type", "application/json; charset=utf-8") - for key, val in kwargs.items(): - self.send_header(key, val) - self.end_headers() - - def _write_body(self, text): - self.wfile.write(text.encode("utf-8")) - - def do_GET(self): - self._set_response() - path_split = self.path.split("/") - self._write_body(self._generate_services(path_split[1])) - - def do_POST(self): - content_length = int(self.headers["Content-Length"]) - self._set_response(201, x_subject_token=123) - self._write_body(self._generate_token()) - - def _generate_services(self, service): - data = {"services": []} - svc = services[service] - for key, binaries in svc.items(): - number_of_nodes = 3 if key == "controller" else 1 - for i in range(number_of_nodes): - for binary in binaries: - data["services"].append( - self._generate_service(binary, f"{key}-{i}.redhat.local") - ) - # NOTE(dvd): yeah this is ugly and won't work if we remove nova-consoleauth - # from overcloud_deprecated_services. 
We should probably just - # pass the overcloud_deprecated_services list as an argument to - # to make this future proof - if service == "nova" and args.scenario == "deprecated_services": - data["services"].extend( - [ - self._generate_service( - "nova-consoleauth", "controller-0.redhat.local" - ), - self._generate_service( - "nova-consoleauth", "controller-1.redhat.local", "disabled" - ), - self._generate_service( - "nova-consoleauth", - "controller-2.redhat.local", - "enabled", - "down", - ), - ] - ) - if args.scenario == "down_services": - data["services"][0]["state"] = "down" - - return json.dumps(data) - - def _generate_service(self, binary, host, status="enabled", state="up"): - now = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - return { - "binary": binary, - "host": host, - "status": status, - "state": state, - "updated_at": now, - } - - def _generate_token(self): - now = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - data = { - "token": { - "catalog": [ - { - "endpoints": [ - { - "url": f"{server_url}:{server_port}/cinder", - "interface": "public", - }, - { - "url": f"{server_url}:{server_port}/cinder", - "interface": "public", - }, - { - "url": f"{server_url}:{server_port}/cinder", - "interface": "public", - }, - ], - "name": "cinderv3", - }, - { - "endpoints": [ - { - "url": f"{server_url}:{server_port}/nova", - "interface": "public", - }, - { - "url": f"{server_url}:{server_port}/nova", - "interface": "public", - }, - { - "url": f"{server_url}:{server_port}/nova", - "interface": "public", - }, - ], - "name": "nova", - }, - ], - } - } - return json.dumps(data) - - -def run(server_class=HTTPServer, handler_class=S, port=server_port): - server_address = ("", port) - httpd = server_class(server_address, handler_class) - try: - httpd.serve_forever() - except KeyboardInterrupt: - pass - httpd.server_close() - - -if __name__ == "__main__": - run() diff --git a/roles/overcloud_service_status/tasks/main.yml b/roles/overcloud_service_status/tasks/main.yml deleted file mode 100644 index 10dfdf7f6..000000000 --- a/roles/overcloud_service_status/tasks/main.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -- name: Geting a system scoped Keystone token - no_log: "{{ not overcloud_service_status_debug | bool }}" - uri: - url: "{{ overcloud_keystone_url | urlsplit('scheme') }}://{{ overcloud_keystone_url | urlsplit('netloc') }}/v3/auth/tokens" - method: POST - body_format: json - body: - auth: - identity: - methods: - - password - password: - user: - password: "{{ overcloud_admin_password }}" - name: admin - domain: - id: default - scope: - project: - name: admin - domain: - name: Default - return_content: true - status_code: 201 - register: auth_token - when: overcloud_keystone_url|default('') - -- name: Checking openstack services - include_tasks: tasks/os_service.yml - loop: "{{ overcloud_service_api }}" - loop_control: - loop_var: os_service diff --git a/roles/overcloud_service_status/tasks/os_service.yml b/roles/overcloud_service_status/tasks/os_service.yml deleted file mode 100644 index f7a4108b6..000000000 --- a/roles/overcloud_service_status/tasks/os_service.yml +++ /dev/null @@ -1,94 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Extracting the endpoint url - set_fact: - endpoint: "{{ catalog.endpoints|selectattr('interface', 'eq', 'public')|first }}" - loop: "{{ auth_token.json.token.catalog }}" - loop_control: - loop_var: catalog - when: catalog.name == os_service - -- fail: - msg: "No endpoint found for {{ os_service }} interface public in catalog" - when: endpoint is not defined - -- name: Get services - uri: - url: "{{ endpoint.url }}/os-services" - method: GET - headers: - Accept: application/json - X-Auth-Token: "{{ auth_token.x_subject_token }}" - return_content: true - status_code: 200 - register: os_services - -- name: Verifying deprecated services are absent - assert: - that: - - service[0].binary != service[1] - fail_msg: "{{ service[0].binary }} should be removed on {{ service[0].host }}" - loop: "{{ os_services.json.services | product(overcloud_deprecated_services[os_service]) | list }}" - loop_control: - loop_var: service - when: os_service in overcloud_deprecated_services - register: deprecated_services_output - ignore_errors: true - -- name: Verifying all services are up - assert: - that: > - (service.state == "up" and service.status == "enabled") - or service.status == "disabled" - or (service.status == "enabled" and service.state == "down" and service.binary is match("cinder-.*")) - fail_msg: "{{ service.binary }} on {{ service.host }} is problematic (service state is {{ service.state }} while it's {{ service.status }})" - loop: "{{ os_services.json.services }}" - loop_control: - loop_var: service - register: down_services_output - ignore_errors: true - -- debug: - msg: "{{ service.binary }} on {{ service.host }} is down while it's enabled. But it isn't cause for concern." 
- loop: "{{ os_services.json.services }}" - loop_control: - loop_var: service - when: - - service.binary is match("cinder-.*") - - service.status == "enabled" - - service.state == "down" - -- name: Asserted failure - fail: - msg: | - At least one of the assertion failed. - {% if 'failed' in deprecated_services_output %} - {% for service in deprecated_services_output.results %} - {% if service.failed %} - {{ service.msg }} - {% endif %} - {% endfor %} - {% endif %} - {% if 'failed' in down_services_output %} - {% for service in down_services_output.results %} - {% if service.failed %} - {{ service.msg }} - {% endif %} - {% endfor %} - {% endif %} - - when: "'failed' in deprecated_services_output or 'failed' in down_services_output" diff --git a/roles/overcloud_service_status/vars/main.yml b/roles/overcloud_service_status/vars/main.yml deleted file mode 100644 index bfe3af2e1..000000000 --- a/roles/overcloud_service_status/vars/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# While options found within the vars/ path can be overridden using extra -# vars, items within this path are considered part of the role and not -# intended to be modified. -metadata: - name: Verify overcloud services state after running a deployment or an update - description: > - An Ansible role to verify the Overcloud services states after a deployment - or an update. It checks the API /os-services and looks for deprecated - services (nova-consoleauth) or any down services. - groups: - - post-deployment - - post-upgrade - - post-overcloud-upgrade - - post-overcloud-converge diff --git a/roles/ovs_dpdk_pmd/defaults/main.yml b/roles/ovs_dpdk_pmd/defaults/main.yml deleted file mode 100644 index 7d8500664..000000000 --- a/roles/ovs_dpdk_pmd/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# defaults file for ovs-dpdk-pmd diff --git a/roles/ovs_dpdk_pmd/molecule/default/converge.yml b/roles/ovs_dpdk_pmd/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/ovs_dpdk_pmd/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! 
diff --git a/roles/ovs_dpdk_pmd/molecule/default/molecule.yml b/roles/ovs_dpdk_pmd/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/ovs_dpdk_pmd/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/ovs_dpdk_pmd/tasks/main.yml b/roles/ovs_dpdk_pmd/tasks/main.yml deleted file mode 100644 index 87b522148..000000000 --- a/roles/ovs_dpdk_pmd/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Get OVS DPDK PMD cores mask value - become_method: sudo - become: true - register: pmd_cpu_mask - command: ovs-vsctl --no-wait get Open_vSwitch . other_config:pmd-cpu-mask - changed_when: false - -- name: Run OVS DPDK PMD cores check - become: true - ovs_dpdk_pmd_cpus_check: - pmd_cpu_mask: "{{ pmd_cpu_mask.stdout }}" diff --git a/roles/ovs_dpdk_pmd/vars/main.yml b/roles/ovs_dpdk_pmd/vars/main.yml deleted file mode 100644 index a71a13289..000000000 --- a/roles/ovs_dpdk_pmd/vars/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -metadata: - name: Validates OVS DPDK PMD cores from all NUMA nodes. - description: > - OVS DPDK PMD cpus must be provided from all NUMA nodes. - - A failed status post-deployment indicates PMD CPU list is not - configured correctly. - groups: - - post-deployment diff --git a/roles/pacemaker_status/defaults/main.yml b/roles/pacemaker_status/defaults/main.yml deleted file mode 100644 index edd7a54aa..000000000 --- a/roles/pacemaker_status/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# defaults file for pacemaker diff --git a/roles/pacemaker_status/molecule/default/converge.yml b/roles/pacemaker_status/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/pacemaker_status/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/pacemaker_status/molecule/default/molecule.yml b/roles/pacemaker_status/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/pacemaker_status/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
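The ovs_dpdk_pmd tasks earlier in this hunk read other_config:pmd-cpu-mask and pass it to the ovs_dpdk_pmd_cpus_check module, which is not shown in this diff. As a rough illustration only (not the module's implementation): the value is a hex bitmask whose set bits select the PMD CPU ids, which, per the role description, must span every NUMA node.

    def cpus_from_pmd_mask(pmd_cpu_mask):
        # ovs-vsctl may return the value quoted, e.g. "3c" or "0x3c";
        # int(..., 16) accepts both forms once the quotes are stripped.
        mask = int(pmd_cpu_mask.strip().strip('"'), 16)
        return [bit for bit in range(mask.bit_length()) if (mask >> bit) & 1]

    # cpus_from_pmd_mask('"0x6"') -> [1, 2]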
diff --git a/roles/pacemaker_status/tasks/main.yml b/roles/pacemaker_status/tasks/main.yml deleted file mode 100644 index 3d32b1968..000000000 --- a/roles/pacemaker_status/tasks/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Check pacemaker service is running - become: true - command: "/usr/bin/systemctl show pacemaker --property ActiveState" - register: check_service - changed_when: false - ignore_errors: true - -- name: Check pacemaker service is inactive or failed - fail: - msg: "Pacemaker service found {{ check_service.stdout.split('=')[-1] }}" - when: check_service.stdout != 'ActiveState=active' - -- when: "check_service.stdout == 'ActiveState=active'" - block: - - name: Get pacemaker status - become: true - command: pcs status xml - register: pcs_status - changed_when: false - - name: Check pacemaker status - pacemaker: - status: "{{ pcs_status.stdout }}" diff --git a/roles/pacemaker_status/vars/main.yml b/roles/pacemaker_status/vars/main.yml deleted file mode 100644 index 71040b537..000000000 --- a/roles/pacemaker_status/vars/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -metadata: - name: Check the status of the pacemaker cluster - description: > - This runs `pcs status` and checks for any failed actions. - - A failed status post-deployment indicates something is not configured - correctly. This should also be run before upgrade as the process will - likely fail with a cluster that's not completely healthy. - - This validation fails if pacemaker service is found failed or inactive. - groups: - - post-deployment diff --git a/roles/package_version/defaults/main.yaml b/roles/package_version/defaults/main.yaml deleted file mode 100644 index c00caeda2..000000000 --- a/roles/package_version/defaults/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -package_version_packages: [] diff --git a/roles/package_version/molecule/default/converge.yml b/roles/package_version/molecule/default/converge.yml deleted file mode 100644 index e377ecac8..000000000 --- a/roles/package_version/molecule/default/converge.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Check a simple existing package - vars: - package_version_packages: - - name: bash - version: 1.0 - comparison: '>=' - state: installed - include_role: - name: package_version - - - name: Ensure we fail if something is wrong - vars: - package_version_packages: - - name: bash - version: 1.0 - comparison: '<' - state: available - block: - - name: Run check - include_role: - name: package_version - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Test output - debug: - msg: The validation works! End play - - - name: End play - meta: end_play - - - name: Fail playbook if reached - fail: - msg: | - The package_version validation didn't properly detect the failure! 
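The pacemaker_status tasks earlier in this hunk gate the `pcs status xml` check on systemd reporting the pacemaker unit as active. A plain-Python sketch of that first gate, assuming systemctl is available (illustrative only):

    import subprocess

    def pacemaker_active():
        # `systemctl show pacemaker --property ActiveState` prints a single
        # line such as "ActiveState=active"; the task compares against it.
        out = subprocess.run(
            ["systemctl", "show", "pacemaker", "--property", "ActiveState"],
            capture_output=True, text=True, check=False,
        ).stdout.strip()
        return out.split("=")[-1] == "active"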
diff --git a/roles/package_version/molecule/default/molecule.yml b/roles/package_version/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/package_version/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/package_version/tasks/compare.yaml b/roles/package_version/tasks/compare.yaml deleted file mode 100644 index ddbed0ede..000000000 --- a/roles/package_version/tasks/compare.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Get wanted package - set_fact: - wanted: "{{ (package_version_packages|selectattr('name', 'equalto', item)|list)[0] }}" - ok_versions: "{{ ok_versions |combine({item: []}) }}" - -- name: Do the comparison - when: - - (pkg.version ~ '-' ~ pkg.release) is version(wanted.version, wanted.comparison) - - (wanted.state == 'any' or wanted.state == pkg.yumstate) - set_fact: - ok_versions: "{{ ok_versions |combine({pkg.name: [ pkg.version ]}) }}" - loop: "{{ (repo_packages.results|selectattr('item', 'equalto', item)|map(attribute='results')|list)[0] }}" - loop_control: - label: "{{ pkg.name }}" - loop_var: 'pkg' diff --git a/roles/package_version/tasks/main.yaml b/roles/package_version/tasks/main.yaml deleted file mode 100644 index b277d2437..000000000 --- a/roles/package_version/tasks/main.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - distribution - - os_family - -# find within the "vars/" path. If no OS files are found the task will skip. -- name: Gather variables for each operating system - include_vars: "{{ item }}" - with_first_found: - - skip: true - files: - - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" - - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" - - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" - - "{{ ansible_distribution | lower }}.yml" - - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" - - "{{ ansible_os_family | lower }}.yml" - tags: - - always - -- name: Get repositories packages versions - yum: - list: "{{ item }}" - register: repo_packages - loop: "{{ package_version_packages|map(attribute='name')|list }}" - -- name: Initialiaze ok_versions - set_fact: - ok_versions: {} - -- name: Compare versions - include_tasks: compare.yaml - loop: "{{ package_version_packages|map(attribute='name')|list }}" - loop_control: - label: "{{ item }}" - -- name: Fail if we lack a version for any package - fail: - msg: >- - Unable to find a matching version for {{ item.key }}. 
- Should get {{ (package_version_packages|selectattr('name', 'equalto', item.key)|list)[0].version }} - as {{ (package_version_packages|selectattr('name', 'equalto', item.key)|list)[0].state }}. - when: - - item.value|length == 0 - loop: "{{ ok_versions | dict2items }}" - loop_control: - label: "{{ item.key }}" diff --git a/roles/package_version/vars/centos.yml b/roles/package_version/vars/centos.yml deleted file mode 100644 index c00caeda2..000000000 --- a/roles/package_version/vars/centos.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -package_version_packages: [] diff --git a/roles/package_version/vars/redhat-8.yml b/roles/package_version/vars/redhat-8.yml deleted file mode 100644 index 69d19b1b6..000000000 --- a/roles/package_version/vars/redhat-8.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -package_version_packages: - - name: podman - version: 1.6.4-15.module+el8.2.0+7290+954fb593 - comparison: '==' - state: installed diff --git a/roles/rabbitmq_limits/defaults/main.yml b/roles/rabbitmq_limits/defaults/main.yml deleted file mode 100644 index 2807a49a7..000000000 --- a/roles/rabbitmq_limits/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -min_fd_limit: 16384 diff --git a/roles/rabbitmq_limits/molecule/default/converge.yml b/roles/rabbitmq_limits/molecule/default/converge.yml deleted file mode 100644 index 5f3e1a17a..000000000 --- a/roles/rabbitmq_limits/molecule/default/converge.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: working detection - include_role: - name: rabbitmq_limits - - - name: make validation fail - block: - - name: run validation - include_role: - name: rabbitmq_limits - vars: - container_cli: docker - - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Test output - debug: - msg: The validation works! End play - - - name: End play - meta: end_play - - - name: Fail playbook if reached - fail: - msg: | - The rabbitmq_limits validation didn't properly detect bad rabbitmq - setting! diff --git a/roles/rabbitmq_limits/molecule/default/molecule.yml b/roles/rabbitmq_limits/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/rabbitmq_limits/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/rabbitmq_limits/molecule/default/prepare.yml b/roles/rabbitmq_limits/molecule/default/prepare.yml deleted file mode 100644 index 6e653795d..000000000 --- a/roles/rabbitmq_limits/molecule/default/prepare.yml +++ /dev/null @@ -1,96 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: Populate successful podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$1" - shift - command="$@" - - case $action in - 'exec') - case $container in - 'heat_api_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * heat-manage purge_deleted' - ;; - 'keystone_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * keystone-manage token_flush' - ;; - 'rabbitmq') - echo 16484 - ;; - *) - echo "Unknown container ${container}" - ;; - esac - ;; - 'ps') - (echo "$@" | grep -q 'name=rabbitmq') && echo 'rabbitmq' - ;; - *) - echo "Unknown action ${action}" - ;; - esac - - - name: Populate buggy docker CLI - copy: - dest: /usr/bin/docker - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$1" - shift - command="$@" - - case $action in - 'exec') - case $container in - 'heat_api_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * some-other command' - ;; - 'keystone_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * some-other command' - ;; - 'rabbitmq') - echo 1 - ;; - *) - echo "Unknown container ${container}" - ;; - esac - ;; - 'ps') - (echo "$@" | grep -q 'name=rabbitmq') && echo rabbitmq - ;; - esac diff --git a/roles/rabbitmq_limits/tasks/main.yml b/roles/rabbitmq_limits/tasks/main.yml deleted file mode 100644 index d0bf3bda1..000000000 --- a/roles/rabbitmq_limits/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Set container_cli fact from the inventory - set_fact: - container_cli: "{{ hostvars[inventory_hostname].container_cli |default('podman', true) }}" - when: container_cli is not defined - -- name: Get file_descriptors total_limit - become: true - register: actual_fd_limit - shell: >- - "{{ container_cli }}" - exec $("{{ container_cli }}" ps -q --filter "name=rabbitmq" | head -1) - sysctl -n fs.file-max - changed_when: false - -- name: Verify the actual limit exceeds the minimal value - fail: - msg: >- - {{ actual_fd_limit.stdout }} must be greater than or equal to {{ min_fd_limit }} - failed_when: "actual_fd_limit.stdout|int < min_fd_limit" diff --git a/roles/rabbitmq_limits/vars/main.yml b/roles/rabbitmq_limits/vars/main.yml deleted file mode 100644 index 625706f5e..000000000 --- a/roles/rabbitmq_limits/vars/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -metadata: - name: Rabbitmq limits - description: > - Make sure the rabbitmq file descriptor limits are set to reasonable values. - groups: - - post-deployment diff --git a/roles/repos/molecule/default/converge.yml b/roles/repos/molecule/default/converge.yml deleted file mode 100644 index 37c832ce1..000000000 --- a/roles/repos/molecule/default/converge.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - - tasks: - - name: test role without failure - include_role: - name: repos - - - name: run with failure - block: - - name: inject faulty repository - yum_repository: - name: faulty - description: really faulty repository - baseurl: http://this.repository.do-not.exists/like-not-at-all - enabled: true - - - name: execute role - include_role: - name: repos - - rescue: - - name: clean host error - meta: clear_host_errors - - - name: Molecule output - debug: - msg: Successfully detected first broken repository - - - name: run with another failure - block: - - name: remove faulty repository - yum_repository: - name: faulty - state: absent - - - name: push another faulty repository with working DNS - yum_repository: - name: faulty-bis - description: faulty repository with working DNS - baseurl: http://download.fedoraproject.org/pub/fedora/blah - enabled: true - - - name: execute role - include_role: - name: repos - - rescue: - - name: clean host error - meta: clear_host_errors - - - name: Molecule output - debug: - msg: Successfully detected second faulty repository. Exiting! - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The repos validation failed detecting broken/non-working repository diff --git a/roles/repos/molecule/default/molecule.yml b/roles/repos/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/repos/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/repos/tasks/main.yml b/roles/repos/tasks/main.yml deleted file mode 100644 index 3959ebbde..000000000 --- a/roles/repos/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: List repositories - become: true - shell: | - {{ ansible_pkg_mgr }} repolist enabled -v 2>&1 || exit 0 - changed_when: false - register: repositories - -- name: Fail if we detect error in repolist output - fail: - msg: | - One or more repositories are either broken or unreachable. Please correct. - when: - repositories.stdout is regex('(cannot|could not|failure)', ignorecase=True) - -- name: Find repository IDs - changed_when: false - shell: 'echo "{{ repositories.stdout }}" | grep Repo-id | sed "s/Repo-id.*://" | tr -d " "' - register: repository_ids - -- name: Check if there are any unwanted repositories enabled - fail: - msg: Found unwanted repository {{ item.0 }} enabled - when: item.0 == item.1 - with_nested: - - ['epel/x86_64'] - - "{{ repository_ids.stdout_lines }}" diff --git a/roles/repos/vars/main.yml b/roles/repos/vars/main.yml deleted file mode 100644 index 15149668e..000000000 --- a/roles/repos/vars/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -metadata: - name: Check correctness of current repositories - description: > - Detect whether the repositories listed in `yum repolist` - can be connected to and that there is at least one repo - configured. - - Detect if there are any unwanted repositories (such as EPEL) enabled. 
- groups: - - pre-upgrade diff --git a/roles/roles.galaxy/.gitkeep b/roles/roles.galaxy/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/stack_health/molecule/default/converge.yml b/roles/stack_health/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/stack_health/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/stack_health/molecule/default/molecule.yml b/roles/stack_health/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/stack_health/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/stack_health/tasks/main.yml b/roles/stack_health/tasks/main.yml deleted file mode 100644 index ccde0bce8..000000000 --- a/roles/stack_health/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Check stack resource statuses - assert: - that: - - "'_COMPLETE' in item.resource_status" - msg: >- - "Health check failed for resource {{ item.resource_name }} - with status: {{ item.resource_status }}" - with_items: "{{ lookup('stack_resources', wantlist=True) }}" diff --git a/roles/stack_health/vars/main.yml b/roles/stack_health/vars/main.yml deleted file mode 100644 index 9f649fd64..000000000 --- a/roles/stack_health/vars/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -metadata: - name: Stack Health Check - description: > - Check if all stack resources are in a *_COMPLETE state before starting - an upgrade. - groups: - - pre-upgrade - - post-upgrade diff --git a/roles/stonith_exists/molecule/default/converge.yml b/roles/stonith_exists/molecule/default/converge.yml deleted file mode 100644 index aefd9ff97..000000000 --- a/roles/stonith_exists/molecule/default/converge.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -- name: Converge - hosts: all - - tasks: - - name: Safe run - include_role: - name: stonith_exists - - - name: Fail the validation - block: - - name: Faulty pcs script - copy: - dest: /usr/bin/pcs - mode: 0755 - content: | - #!/bin/sh - echo "NO stonith devices configured" - exit 0 - - - name: Run validation - include_role: - name: stonith_exists - - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Test output - debug: - msg: The validation works! End play - - - name: End play - meta: end_play - - - name: Fail playbook if reached - fail: - msg: | - The stonith_exists validation didn't properly detect failed - stonith config diff --git a/roles/stonith_exists/molecule/default/molecule.yml b/roles/stonith_exists/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/stonith_exists/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/stonith_exists/molecule/default/prepare.yml b/roles/stonith_exists/molecule/default/prepare.yml deleted file mode 100644 index eba0c86ac..000000000 --- a/roles/stonith_exists/molecule/default/prepare.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Prepare - hosts: all - gather_facts: false - - tasks: - - name: Populate successful stonith - copy: - dest: /usr/bin/pcs - mode: 0755 - content: | - #!/bin/sh - echo "Stonith service configured" - exit 0 diff --git a/roles/stonith_exists/tasks/main.yml b/roles/stonith_exists/tasks/main.yml deleted file mode 100644 index 4277134ec..000000000 --- a/roles/stonith_exists/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Check if we are in HA cluster environment - become: true - register: pcs_cluster_status - command: pcs cluster status - failed_when: false - changed_when: false - -- name: Get all currently configured stonith devices - become: true - command: "pcs stonith" - register: stonith_devices - changed_when: false - when: "pcs_cluster_status.rc == 0" - -- name: Verify the stonith device are configured - fail: - msg: "Stonith devices are not configured." - when: > - pcs_cluster_status.rc == 0 - and - 'NO stonith devices configured' in stonith_devices.stdout diff --git a/roles/stonith_exists/vars/main.yml b/roles/stonith_exists/vars/main.yml deleted file mode 100644 index 34a26d153..000000000 --- a/roles/stonith_exists/vars/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -metadata: - name: Validate stonith devices - description: > - Verify that stonith devices are configured for your OpenStack Platform HA cluster. - We don't configure stonith device with TripleO Installer. Because the hardware - configuration may be differ in each environment and requires different fence agents. 
- How to configure fencing please read - https://access.redhat.com/documentation/en/red-hat-openstack-platform/8/paged/director-installation-and-usage/86-fencing-the-controller-nodes - groups: - - post-deployment diff --git a/roles/switch_vlans/defaults/main.yml b/roles/switch_vlans/defaults/main.yml deleted file mode 100644 index a0fce911d..000000000 --- a/roles/switch_vlans/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -ironic_inspector_conf_file: /var/lib/config-data/puppet-generated/ironic_inspector/etc/ironic-inspector/inspector.conf -network_environment_path: environments/network-environment.yaml diff --git a/roles/switch_vlans/molecule/default/converge.yml b/roles/switch_vlans/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/switch_vlans/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/switch_vlans/molecule/default/molecule.yml b/roles/switch_vlans/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/switch_vlans/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/switch_vlans/tasks/main.yml b/roles/switch_vlans/tasks/main.yml deleted file mode 100644 index bd8d8afad..000000000 --- a/roles/switch_vlans/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Get Ironic Inspector swift auth_url - become: true - validations_read_ini: - path: "{{ ironic_inspector_conf_file }}" - section: swift - key: auth_url - register: auth_url - -- name: Get Ironic Inspector swift password - become: true - validations_read_ini: - path: "{{ ironic_inspector_conf_file }}" - section: swift - key: password - register: password - no_log: true - -- name: Check that switch vlans are present if used in nic-config files - switch_vlans: - path: "{{ network_environment_path }}" - template_files: "{{ lookup('tht') }}" - introspection_data: "{{ lookup('introspection_data', - auth_url=auth_url.value, password=password.value) }}" diff --git a/roles/switch_vlans/vars/main.yml b/roles/switch_vlans/vars/main.yml deleted file mode 100644 index 580a5ca78..000000000 --- a/roles/switch_vlans/vars/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -metadata: - name: Compare switch port VLANs to VLANs in nic config - description: > - LLDP data received during introspection contains the configured VLANs - for each switch port attached to the nodes interfaces. Compare the - VLAN IDs set on the switch port to those configured in nic config - files. 
Since the mapping of roles to nodes isn't known prior to - deployment, this check can only check VLANs across all switch ports, - not on a particular switch port. - groups: - - pre-deployment diff --git a/roles/system_encoding/defaults/main.yml b/roles/system_encoding/defaults/main.yml deleted file mode 100644 index 1d8756523..000000000 --- a/roles/system_encoding/defaults/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# All variables within this role should have a prefix of "system_encoding" -system_encoding_debug: false -system_encoding_wanted: - - 'utf8' - - 'utf-8' diff --git a/roles/system_encoding/molecule/default/converge.yml b/roles/system_encoding/molecule/default/converge.yml deleted file mode 100644 index ff916e3d7..000000000 --- a/roles/system_encoding/molecule/default/converge.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - tasks: - - name: Test good values - vars: - system_encoding_locale: 'en_us.UTF-8' - include_role: - name: system_encoding - - - name: Test failing - block: - - name: Validate against wrong locale - vars: - system_encoding_locale: 'C' - include_role: - name: system_encoding - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The system_encoding validation didn't properly detect wrong locale diff --git a/roles/system_encoding/molecule/default/molecule.yml b/roles/system_encoding/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/system_encoding/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/system_encoding/tasks/main.yml b/roles/system_encoding/tasks/main.yml deleted file mode 100644 index fc326ccc4..000000000 --- a/roles/system_encoding/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -- name: Get local lang - set_fact: - locale_lang: "{{ (lookup('env', 'LANG')) }}" - when: - system_encoding_locale is not defined - -- name: Set value to check - set_fact: - locale_to_check: >- - {%- if system_encoding_locale is defined -%} - {{ (system_encoding_locale | lower).split('.')[-1] }} - {%- else -%} - {{ (locale_lang | lower).split('.')[-1] }} - {%- endif -%} - -- name: Verify the local - fail: - msg: >- - The local must be unicode ({{ system_encoding_wanted|join(', ') }}). - Got {{ locale_to_check }} - failed_when: locale_to_check not in system_encoding_wanted diff --git a/roles/tls_everywhere/defaults/main.yml b/roles/tls_everywhere/defaults/main.yml deleted file mode 100644 index 9552c0f88..000000000 --- a/roles/tls_everywhere/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -ipa_bin: ipa -kinit_bin: kinit -kdestroy_bin: kdestroy -helper_status_ok: OK -helper_status_error: ERROR -helper_status_skipped: SKIPPED -helper_report_path: /tmp/report.txt -helper_undercloud_path: "/home/{{ ansible_ssh_user }}" diff --git a/roles/tls_everywhere/handlers/main.yml b/roles/tls_everywhere/handlers/main.yml deleted file mode 100644 index 5a2652fdb..000000000 --- a/roles/tls_everywhere/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: clean_up_temp_krb_caches - command: kdestroy -c {{ item }} - ignore_errors: false - become: true - with_items: "{{ temp_krb_caches }}" diff --git a/roles/tls_everywhere/molecule/default/converge.yml b/roles/tls_everywhere/molecule/default/converge.yml deleted file mode 100644 index ce19905b3..000000000 --- a/roles/tls_everywhere/molecule/default/converge.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
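Stepping back to the system_encoding tasks shown just above: the check reduces a locale string to its encoding suffix and compares it against system_encoding_wanted. The standalone sketch below replays that parsing on a few sample values so the pass/fail behaviour is easy to see; it is illustration only and not part of the role.

---
# Standalone illustration of the system_encoding parsing: lowercase the
# locale, split on '.', keep the last element, compare against the accepted
# encodings. 'en_US.UTF-8' and 'de_DE.utf8' pass, 'C' does not.
- hosts: localhost
  gather_facts: false
  vars:
    system_encoding_wanted: ['utf8', 'utf-8']
    sample_locales: ['en_US.UTF-8', 'de_DE.utf8', 'C']
  tasks:
    - name: Show how each sample locale would be evaluated
      debug:
        msg: >-
          {{ item }} -> {{ (item | lower).split('.')[-1] }}
          ({{ 'ok' if (item | lower).split('.')[-1] in system_encoding_wanted else 'would fail' }})
      loop: "{{ sample_locales }}"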
- - -- name: Converge - hosts: all - vars: - ipa_bin: echo - kinit_bin: echo - kdestroy_bin: echo - tls_everywhere_undercloud_fqdn: "undercloud.example.com" - tasks: - - name: Run ipa_server_check validation - block: - - name: run validation - include_role: - name: tls_everywhere - tasks_from: ipa-server-check - rescue: - - name: clear errors - meta: clear_host_errors - - - name: check command outputs - assert: - that: - - tls_everywhere_aci_check_kinit_output == "host/undercloud.example.com -k -t /etc/krb5.keytab" - - tls_everywhere_aci_check_dns_record_show_output == "dnsrecord-show example.com freeipa-0" - - tls_everywhere_aci_check_service_show_output == "service-show nova/undercloud.example.com --all --raw" - - ipa_server_aci_check_kdestroy_output == "-A" - - ipa_server_aci_check_failures|length == 1 - - '"Modify Realm Domains" in ipa_server_aci_check_failures[0]' - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The ipa-server-check validation didn't fail as expected diff --git a/roles/tls_everywhere/molecule/default/molecule.yml b/roles/tls_everywhere/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/tls_everywhere/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/tls_everywhere/molecule/default/prepare.yml b/roles/tls_everywhere/molecule/default/prepare.yml deleted file mode 100644 index 0d5b12436..000000000 --- a/roles/tls_everywhere/molecule/default/prepare.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Prepare - hosts: all - tasks: - - name: create directory /etc/ipa - file: - path: /etc/ipa - state: directory - - name: create fake ipa default.conf - copy: - dest: /etc/ipa/default.conf - mode: 0600 - content: | - [global] - basedn = dc=example,dc=com - realm = EXAMPLE.COM - domain = example.com - server = freeipa-0.example.com - host = undercloud-0.example.com - xmlrpc_uri = https://freeipa-0.example.com/ipa/xml - enable_ra = True diff --git a/roles/tls_everywhere/tasks/common.yaml b/roles/tls_everywhere/tasks/common.yaml deleted file mode 100644 index 9d5b89d78..000000000 --- a/roles/tls_everywhere/tasks/common.yaml +++ /dev/null @@ -1,278 +0,0 @@ ---- -# These tasks apply to all nodes. 
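A note on the prepare step above: the fake /etc/ipa/default.conf it writes is exactly what the common tasks below consume through validations_read_ini. As a hedged sketch (assuming the repo's validations_read_ini module is on the module path and the prepare step has already run), reading it back looks like this:

---
# Sketch: read the [global] section of the stubbed /etc/ipa/default.conf the
# same way the tls_everywhere tasks do, via the repo's validations_read_ini.
- hosts: all
  gather_facts: false
  tasks:
    - name: Read the IdM/FreeIPA realm
      validations_read_ini:
        path: /etc/ipa/default.conf
        section: global
        key: realm
      register: ipa_realm

    - name: Read the IdM/FreeIPA server
      validations_read_ini:
        path: /etc/ipa/default.conf
        section: global
        key: server
      register: ipa_server

    - name: Show what the checks will operate on
      debug:
        msg: "realm={{ ipa_realm.value }}, server={{ ipa_server.value }}"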
- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - network - -# variable for handlers to clean up -- name: set facts for handlers to clean up - set_fact: - temp_krb_caches: [] - -# DNS related tasks -- name: Try to ping ipa-ca - command: ping -c 3 ipa-ca - register: ipa_ca_ping_status - ignore_errors: true - changed_when: false - -- name: Set facts for ok DNS configuration - set_fact: - dns_status: "{{ helper_status_ok }}" - dns_reason: "DNS is configured correctly" - dns_recommendations: null - when: ipa_ca_ping_status.rc == 0 - -- name: Set facts for error in DNS configuration - set_fact: - dns_status: "{{ helper_status_error }}" - dns_reason: "DNS is NOT configured correctly" - dns_recommendations: - - Check that the DNS server for this node points to IdM/FreeIPA - - For the undercloud, you need to set the 'undercloud_nameservers' configuration parameter - - For the overcloud, you need to set the 'DnsServers' parameter - - Make sure that the relevant 'search' entry is in /etc/resolv.conf - when: ipa_ca_ping_status.rc != 0 - -- name: "DNS check" - reportentry: - report_status: "{{ dns_status }}" - report_reason: "{{ dns_reason }}" - report_recommendations: "{{ dns_recommendations }}" - -# Firewall related tasks -- name: Firewall validations - when: ipa_ca_ping_status.rc == 0 - block: - - name: Check all relevant ports for IdM/FreeIPA are accessible from {{ ansible_facts['hostname'] }} - wait_for: - host: ipa-ca - port: "{{ item }}" - state: started # Port should be open - delay: 0 # No wait before first check (sec) - timeout: 3 # Stop checking after timeout (sec) - register: port_status - ignore_errors: true - loop: - - 80 - - 443 - - 389 - - 636 - - 88 - - 464 - - 53 - - - name: Set facts for ok firewall settings - set_fact: - firewall_status: "{{ helper_status_ok }}" - firewall_reason: "The host {{ ansible_facts['hostname'] }} can access FreeIPA through all relevant ports" - firewall_recommendations: null - when: "'failed' not in port_status" - - - name: Set facts for issues in firewall settings - set_fact: - firewall_status: "{{ helper_status_error }}" - firewall_reason: "The host {{ ansible_facts['hostname'] }} could NOT access IdM/FreeIPA on some ports" - firewall_recommendations: - - "Please make sure that the following ports are open on the IdM/FreeIPA node: {{ firewall_query }}" - vars: - firewall_query: "{{ port_status.results | json_query('[?failed].item') | join(', ') }}" - when: - - "'failed' in port_status" - - port_status.failed|bool - -- name: Set facts for skipping firewall checks - set_fact: - firewall_status: "{{ helper_status_skipped }}" - firewall_reason: "skipped {{ ansible_facts['hostname'] }} firewall checks because DNS wasn't set correctly." 
- firewall_recommendations: null - when: ipa_ca_ping_status.rc != 0 - -- name: "Firewall check" - reportentry: - report_status: "{{ firewall_status }}" - report_reason: "{{ firewall_reason }}" - report_recommendations: "{{ firewall_recommendations }}" - -# IdM/FreeIPA related tasks -- name: Check for IdM/FreeIPA host configuration - stat: - path: /etc/ipa/default.conf - register: ipa_conf_stat - -- name: Set facts for IdM/FreeIPA configuration present - set_fact: - ipa_conf_status: "{{ helper_status_ok }}" - ipa_conf_reason: "The host {{ ansible_host }} has the file /etc/ipa/default.conf" - ipa_conf_recommendations: null - when: ipa_conf_stat.stat.exists - -- name: Set facts for IdM/FreeIPA configuration missing - set_fact: - ipa_conf_status: "{{ helper_status_error }}" - ipa_conf_reason: "The host {{ ansible_host }} is missing the file /etc/ipa/default.conf" - ipa_conf_recommendations: - - "The host {{ ansible_host }} needs to be enrolled to IdM/FreeIPA" - - If there were enrollment issues, you'll see them in /var/log/ipaclient-install.log - when: not ipa_conf_stat.stat.exists - -- name: "IdM/FreeIPA host configuration check" - reportentry: - report_status: "{{ ipa_conf_status }}" - report_reason: "{{ ipa_conf_reason }}" - report_recommendations: "{{ ipa_conf_recommendations }}" - -# NOTE(jaosorior): This currently does a lookup, which only runs on the host that's -# running the playbook. We assume that all of the hosts are in the same realm, so -# this is not a problem for now. -- name: Set fact for IdM/FreeIPA realm - validations_read_ini: - path: "/etc/ipa/default.conf" - section: global - key: realm - ignore_missing_file: false - register: ipa_realm - check_mode: false - -- name: Set fact for IdM/FreeIPA host entry - set_fact: - host_entry: "{{ ansible_facts['fqdn'] }}@{{ ipa_realm.value }}" - when: ipa_conf_stat.stat.exists - -- name: Set fact for IdM/FreeIPA host principal - set_fact: - host_principal: "host/{{ host_entry }}" - when: ipa_conf_stat.stat.exists - -# Kerberos keytab related tasks -- name: Check for kerberos host keytab - stat: - path: /etc/krb5.keytab - register: krb5_keytab_stat - -- name: Set facts for kerberos host keytab present - set_fact: - krb5_keytab_status: "{{ helper_status_ok }}" - krb5_keytab_reason: "The host {{ ansible_host }} has the file /etc/krb5.keytab" - krb5_keytab_recommendations: null - when: krb5_keytab_stat.stat.exists - -- name: Set facts for kerberos host keytab missing - set_fact: - krb5_keytab_status: "{{ helper_status_error }}" - krb5_keytab_reason: "The host {{ ansible_host }} is missing the file /etc/krb5.keytab" - krb5_keytab_recommendations: - - "The host {{ ansible_host }} needs to be enrolled to IdM/FreeIPA" - - If there were enrollment issues, you'll see them in /var/log/ipaclient-install.log - - alternatively, you can request the keytab for this host with the ipa-getkeytab command - when: not krb5_keytab_stat.stat.exists - -- name: "Kerberos host keytab check" - reportentry: - report_status: "{{ krb5_keytab_status }}" - report_reason: "{{ krb5_keytab_reason }}" - report_recommendations: "{{ krb5_keytab_recommendations }}" - -- name: List Kerberos principals in /etc/krb5.keytab - expect: - command: ktutil - responses: - ktutil: - - "rkt /etc/krb5.keytab" - - "list" - - "quit" - register: keytab_principal_list - changed_when: false - become: true - when: krb5_keytab_stat.stat.exists - check_mode: false - -- name: Set facts for host principals in /etc/krb5.keytab - set_fact: - principal_in_keytab_status: "{{ helper_status_ok }}" - 
principal_in_keytab_reason: "The principal {{ host_principal }} is in the keytab" - principal_in_keytab_recommendations: null - when: - - krb5_keytab_stat.stat.exists - - ipa_conf_stat.stat.exists - - "host_principal in keytab_principal_list.stdout" - -- name: Set facts for host principals NOT in /etc/krb5.keytab - set_fact: - principal_in_keytab_status: "{{ helper_status_error }}" - principal_in_keytab_reason: "The principal {{ host_principal }} is missing from the keytab" - principal_in_keytab_recommendations: - - You might have overwritten the keytab. Re-enroll or request the keytab using ipa-getkeytab - when: - - krb5_keytab_stat.stat.exists - - ipa_conf_stat.stat.exists - - "host_principal not in keytab_principal_list.stdout" - -- name: Set facts for skipping host principals check without IdM/FreeIPA config - set_fact: - principal_in_keytab_status: "{{ helper_status_skipped }}" - principal_in_keytab_reason: "skipped checking for the principal in the host's {{ ansible_host }} because there is no keytab file" - principal_in_keytab_recommendations: null - when: - - not ipa_conf_stat.stat.exists - - krb5_keytab_stat.stat.exists - -- name: Set facts for skipping host principals check without keytab - set_fact: - principal_in_keytab_status: "{{ helper_status_skipped }}" - principal_in_keytab_reason: "skipped checking for the principal in the host's {{ ansible_host }} because there is no keytab file" - principal_in_keytab_recommendations: null - when: not krb5_keytab_stat.stat.exists - -- name: "Kerberos principal in host keytab check" - reportentry: - report_status: "{{ principal_in_keytab_status }}" - report_reason: "{{ principal_in_keytab_reason }}" - report_recommendations: "{{ principal_in_keytab_recommendations }}" - -- name: Test if host principal in /etc/krb5.keytab is usable - command: kinit -kt /etc/krb5.keytab -c /tmp/my_krb5_ccache - become: true - register: principal_usable_result - ignore_errors: true - when: krb5_keytab_stat.stat.exists - -- name: Set facts for principal is usable skipped - set_fact: - principal_usable_status: "{{ helper_status_skipped }}" - principal_usable_reason: "skipped checking if the principal is usable for host {{ ansible_host }} because there is no keytab file" - principal_usable_recommendations: null - when: not krb5_keytab_stat.stat.exists - -- name: Set facts for principal is usable success - set_fact: - principal_usable_status: "{{ helper_status_ok }}" - principal_usable_reason: "The principal {{ host_principal }} is usable to obtain a kerberos ticket" - principal_usable_recommendations: null - temp_krb_caches: "{{ temp_krb_caches + [ '/tmp/my_krb5_ccache' ] }}" - changed_when: true - when: - - krb5_keytab_stat.stat.exists - - principal_usable_result is succeeded - notify: - - clean_up_temp_krb_caches - -- name: Set facts for principal is usable failure - set_fact: - principal_usable_status: "{{ helper_status_error }}" - principal_usable_reason: "The principal {{ host_principal }} is unable to obtain a kerberos ticket" - principal_usable_recommendations: null - when: - - krb5_keytab_stat.stat.exists - - principal_usable_result is failed - -- name: "Kerberos principal in host keytab is usable check" - reportentry: - report_status: "{{ principal_usable_status }}" - report_reason: "{{ principal_usable_reason }}" - report_recommendations: "{{ principal_usable_recommendations }}" diff --git a/roles/tls_everywhere/tasks/ipa-server-check.yaml b/roles/tls_everywhere/tasks/ipa-server-check.yaml deleted file mode 100644 index 1ab2fca35..000000000 --- 
a/roles/tls_everywhere/tasks/ipa-server-check.yaml +++ /dev/null @@ -1,107 +0,0 @@ ---- -# Copyright 2020 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Changes have been - and possibly will be introduced in future - that -# require changes in IPA permissions or ACLs. We do not have the permissions -# to automatically make these changes, but we can include checks here to make -# sure that they have occurred before attempting an overcloud or undercloud -# update in a TLS-E environment. This playbook is supposed to fail with -# an appropriate error message in case a requirement is not met. -# -# This playbook contains the following parameters -# - tls_everywhere_check_dns_aci - which determines if we want to check -# for the DNS ACI. This defaults to true. -# - tls_everywhere_undercloud_fqdn - which defaults to ansible_facts['fqdn'] - -- name: check if undercloud is an ipa client - stat: - path: /etc/ipa/default.conf - register: ipa_default_conf - -- name: perform ipa_server tests - when: ipa_default_conf.stat.exists - vars: - check_dns_aci: "{{ tls_everywhere_check_dns_aci | default(True)}}" - undercloud_fqdn: "{{ tls_everywhere_undercloud_fqdn | default(ansible_facts['fqdn']) }}" - ipa_server_aci_check_failures: [] - fail_1: >- - The IPA server does not have the required ACI to allow host - entities to view dns records. Please add the ACI. - fail_2: >- - The nova/{{undercloud_fqdn}} user does not have the - "System: Modify Realm Domains" privilege. Please add this privilege for - this user on the IPA server. 
- block: - - name: Get the ipa server hostname - validations_read_ini: - path: "/etc/ipa/default.conf" - section: global - key: server - register: ipa_server_fqdn - - - name: set dns zone and shortname - set_fact: - dns_zone: "{{ ipa_server_fqdn.value.split('.', 1)[1] }}" - short_hostname: "{{ ipa_server_fqdn.value.split('.')[0] }}" - - - name: kinit as the host entity - command: "{{ kinit_bin }} host/{{ undercloud_fqdn }} -k -t /etc/krb5.keytab" - register: kinit - become: true - changed_when: kinit.rc == 0 - - - name: check if ipa server has correct DNS ACI on host entries - when: check_dns_aci - block: - - name: try to view the dns record for the ipa server - become: true - command: "{{ ipa_bin }} dnsrecord-show {{dns_zone}} {{short_hostname}}" - register: dnsrecord_show - ignore_errors: true - - - name: add failure message when zone is not found - set_fact: - ipa_server_aci_check_failures: "{{ ipa_server_aci_check_failures + [fail_1] }}" - when: - "'DNS zone not found' in dnsrecord_show.stderr" - - - name: check if nova service has the added permissions - become: true - command: "{{ ipa_bin}} service-show nova/{{ undercloud_fqdn }} --all --raw" - register: service_show - - - name: parse service data and fail if permission not present - set_fact: - ipa_server_aci_check_failures: "{{ ipa_server_aci_check_failures + [fail_2] }}" - when: - - "'memberof: cn=System: Modify Realm Domains' not in service_show.stdout" - - - name: fail if failures detected - fail: - msg: "{{ ipa_server_aci_check_failures }}" - when: 'ipa_server_aci_check_failures|length > 0' - always: - - name: clean up the keytab - command: "{{ kdestroy_bin }} -A" - register: kdestroy - become: true - - - name: set output for molecule testing - set_fact: - ipa_server_aci_check_kdestroy_output: "{{ kdestroy.stdout }}" - tls_everywhere_aci_check_kinit_output: "{{ kinit.stdout }}" - tls_everywhere_aci_check_dns_record_show_output: "{{ dnsrecord_show.stdout }}" - tls_everywhere_aci_check_service_show_output: "{{ service_show.stdout }}" - when: not ansible_check_mode diff --git a/roles/tls_everywhere/tasks/overcloud-post-deployment.yaml b/roles/tls_everywhere/tasks/overcloud-post-deployment.yaml deleted file mode 100644 index 403bd958c..000000000 --- a/roles/tls_everywhere/tasks/overcloud-post-deployment.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -### certificate checks ### - -# Get all the certificates that certmonger is tracking. Given the -# command that's used, we can retrieve each entry using stdout_lines. 
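The certificate checks that follow rely on two shell pipelines to pull request IDs and statuses out of getcert. A self-contained way to sanity-check the first pipeline, without certmonger, is to run the same grep/sed against a canned listing; the sample text below is illustrative only, not real getcert output.

---
# Sketch: exercise the request-ID pipeline used below against an inline,
# illustrative getcert-style listing (no certmonger needed).
- hosts: localhost
  gather_facts: false
  vars:
    sample_getcert_output: |
      Request ID 'haproxy-external-cert':
              status: MONITORING
      Request ID 'mysql-cert':
              status: CA_UNREACHABLE
  tasks:
    - name: Extract the certificate names the way the role does
      shell: |
        set -o pipefail
        echo "{{ sample_getcert_output }}" | grep 'Request ID' | sed "s/Request ID '\(.*\)':/\1/"
      register: cert_names
      changed_when: false

    - name: Show the extracted names
      debug:
        var: cert_names.stdout_lines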
-- name: Get all certificate names in each node - shell: getcert list | grep 'Request ID' | sed "s/Request ID '\(.*\)':/\1/" - become: true - changed_when: false - register: all_certnames - check_mode: false - -# Get status of all certificates and trim the leading whitespaces -- name: Get status of all certificates - shell: getcert list -i {{ certname }} | grep "status:" | sed "s/^\s*//" - become: true - changed_when: false - loop: "{{ all_certnames.stdout_lines }}" - loop_control: - loop_var: certname - register: all_cert_status - check_mode: false - -- name: Gather certificates that are not in MONITORING status - set_fact: - failed_certs: "{{ all_cert_status.results | json_query(cert_status_query) }}" - vars: - cert_status_query: "[?stdout != 'status: MONITORING'].certname" - -- name: Set facts for certificates in failed status - set_fact: - cert_status_status: "{{ helper_status_error }}" - cert_status_reason: 'The following certificates are not in a healthy status: {{ failed_certs | join(", ") }}' - cert_status_recommendations: - - "Log into this host and do 'getcert list -i ' to verify the failure reason" - - "Verify that the service principal of the certificate is present in IdM/FreeIPA for this host" - when: failed_certs - -- name: Set facts for all certificates in monitoring status - set_fact: - cert_status_status: "{{ helper_status_ok }}" - cert_status_reason: "All of the certificates are a healthy status" - cert_status_recommendations: null - when: not failed_certs - -- name: Report on status of the certificates check - reportentry: - report_status: "{{ cert_status_status }}" - report_reason: "{{ cert_status_reason }}" - report_recommendations: "{{ cert_status_recommendations }}" diff --git a/roles/tls_everywhere/tasks/pre-deployment.yaml b/roles/tls_everywhere/tasks/pre-deployment.yaml deleted file mode 100644 index e8357492c..000000000 --- a/roles/tls_everywhere/tasks/pre-deployment.yaml +++ /dev/null @@ -1,63 +0,0 @@ ---- -- name: Set facts for undercloud handlers to clean up - set_fact: - novajoin_temp_krb_caches: [] - -### verify that the keytab and principal are usable ### -- name: Set the novajoin keytab path - set_fact: - novajoin_keytab_path: '/etc/novajoin/krb5.keytab' - -- name: Verify the novajoin keytab is usable (non-containerized) - become: true - block: - - name: Set fact for novajoin user principal - set_fact: - novajoin_principal: "nova/{{ host_entry }}" - - - name: Check for novajoin kerberos host keytab - stat: - path: "{{ novajoin_keytab_path }}" - register: non_containerized_novajoin_krb5_keytab_stat - - - name: Test if novajoin principal in novajoin keytab is usable - command: kinit -kt "{{ novajoin_keytab_path }}" -c /tmp/my_novajoin_krb5_ccache "{{ novajoin_principal }}" - become: true - register: non_containerized_novajoin_principal_usable_result - ignore_errors: true - when: non_containerized_novajoin_krb5_keytab_stat.stat.exists - - - name: Set facts for novajoin principal is usable skipped - set_fact: - principal_usable_status: "{{ helper_status_skipped }}" - principal_usable_reason: "skipped checking if the novajoin principal is usable for host {{ ansible_host }} because there is no keytab file" - principal_usable_recommendations: null - when: not non_containerized_novajoin_krb5_keytab_stat.stat.exists - - - name: Set facts for novajoin principal is usable success - set_fact: - principal_usable_status: "{{ helper_status_ok }}" - principal_usable_reason: "The principal {{ novajoin_principal }} is able to obtain a kerberos ticket" - 
principal_usable_recommendations: null - temp_krb_caches: "{{ novajoin_temp_krb_caches + [ '/tmp/my_novajoin_krb5_ccache' ] }}" - changed_when: true - when: - - non_containerized_novajoin_krb5_keytab_stat.stat.exists - - non_containerized_novajoin_principal_usable_result is succeeded - notify: # (hrybacki): novajoin server running on the undercloud -- it's okay to use this handler - - clean_up_temp_krb_caches - - - name: Set facts for principal is usable failure - set_fact: - principal_usable_status: "{{ helper_status_error }}" - principal_usable_reason: "Tho principal {{ novajoin_principal }} is unable to obtain a kerberos ticket" - principal_usable_recommendations: null - when: - - non_containerized_novajoin_krb5_keytab_stat.stat.exists - - non_containerized_novajoin_principal_usable_result is failed - - - name: Report on Kerberos principal in novajoin keytab is usable check - reportentry: - report_status: "{{ principal_usable_status }}" - report_reason: "{{ principal_usable_reason }}" - report_recommendations: "{{ principal_usable_recommendations }}" diff --git a/roles/tls_everywhere/tasks/prep.yaml b/roles/tls_everywhere/tasks/prep.yaml deleted file mode 100644 index 608d6b339..000000000 --- a/roles/tls_everywhere/tasks/prep.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Verify that nameservers are set in undercloud.conf - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: undercloud_nameservers - ignore_missing_file: false - register: undercloud_nameservers - check_mode: false - -- name: Check that nameservers point to IdM/FreeIPA - set_fact: - undercloud_conf_dns_query: "{{ lookup('dig', 'ipa-ca', '@{{ item }}') }}" - loop: "{{ undercloud_nameservers.value.split(',') }}" - when: undercloud_nameservers.value | length > 0 and undercloud_nameservers.value != [] - -- name: Set facts undercloud.conf DNS is not configured - set_fact: - undercloud_conf_dns_status: "{{ helper_status_error }}" - undercloud_conf_dns_reason: "DNS is not set up correctly in undercloud.conf" - undercloud_conf_dns_recommendations: - - "Please set the 'undercloud_nameservers' parameter to point to IdM/FreeIPA in undercloud.conf" - when: undercloud_conf_dns_query|default('NXDOMAIN') == "NXDOMAIN" - -- name: Set facts undercloud.conf DNS is configured - set_fact: - undercloud_conf_dns_status: "{{ helper_status_ok }}" - undercloud_conf_dns_reason: "DNS is set up correctly in undercloud.conf" - undercloud_conf_dns_recommendations: null - when: undercloud_conf_dns_query|default('NXDOMAIN') != "NXDOMAIN" - -- name: Report on DNS setup in undercloud.conf check - reportentry: - report_status: "{{ undercloud_conf_dns_status }}" - report_reason: "{{ undercloud_conf_dns_reason }}" - report_recommendations: "{{ undercloud_conf_dns_recommendations }}" diff --git a/roles/tripleo_haproxy/defaults/main.yml b/roles/tripleo_haproxy/defaults/main.yml deleted file mode 100644 index cc8d36527..000000000 --- a/roles/tripleo_haproxy/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# Path to the haproxy.cfg file -haproxy_config_file: '/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg' - -# Global mininum per-process number of concurrent connections -global_maxconn_min: 20480 - -# Defaults mininum per-process number of concurrent connections -defaults_maxconn_min: 4096 - -# Time to wait in the queue for a connection slot to be free 
-defaults_timeout_queue: '2m' - -# Inactivity time on the client side -defaults_timeout_client: '2m' - -# Inactivity time on the server side -defaults_timeout_server: '2m' - -# Additional check timeout -defaults_timeout_check: '10s' diff --git a/roles/tripleo_haproxy/molecule/default/converge.yml b/roles/tripleo_haproxy/molecule/default/converge.yml deleted file mode 100644 index cd34c19f3..000000000 --- a/roles/tripleo_haproxy/molecule/default/converge.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - vars: - haproxy_config_file: /haproxy.cfg - - tasks: - - name: create haproxy config file - copy: - dest: /haproxy.cfg - content: | - # This file managed by Puppet - global - daemon - group haproxy - log /dev/log local0 - maxconn 100 - pidfile /var/run/haproxy.pid - ssl-default-bind-ciphers !SSLv2:kEECDH:kRSA:kEDH:kPSK:+3DES:!aNULL:!eNULL:!MD5:!EXP:!RC4:!SEED:!IDEA:!DES - ssl-default-bind-options no-sslv3 no-tlsv10 - stats socket /var/lib/haproxy/stats mode 600 level user - stats timeout 1s - user haproxy - - defaults - log global - maxconn 100 - mode tcp - retries 1 - timeout http-request 1s - timeout queue 1s - timeout connect 1s - timeout client 1s - timeout server 2m - timeout check 10s - - block: - - include_role: - name: tripleo_haproxy - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The haproxy role should have detected issues within haproxy - configuration file! diff --git a/roles/tripleo_haproxy/molecule/default/molecule.yml b/roles/tripleo_haproxy/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/tripleo_haproxy/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
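For contrast with the deliberately non-compliant haproxy.cfg written by converge.yml above, a configuration that satisfies the thresholds in roles/tripleo_haproxy/defaults/main.yml would look roughly like the fragment below. This is a sketch for local experiments only; the values simply restate the role defaults (maxconn minimums of 20480/4096, 2m queue/client/server timeouts, 10s check timeout).

---
# Sketch: write a haproxy.cfg fragment that the tripleo_haproxy checks below
# would accept, using the minimums and timeout values from defaults/main.yml.
- name: Write a compliant haproxy.cfg for local experiments
  hosts: all
  gather_facts: false
  tasks:
    - name: Create passing haproxy config file
      copy:
        dest: /tmp/haproxy-passing.cfg
        content: |
          global
            daemon
            maxconn 20480
          defaults
            maxconn 4096
            timeout queue 2m
            timeout client 2m
            timeout server 2m
            timeout check 10s

Pointing haproxy_config_file at that path and including the tripleo_haproxy role should then report every check as PASSED.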
diff --git a/roles/tripleo_haproxy/tasks/main.yml b/roles/tripleo_haproxy/tasks/main.yml deleted file mode 100644 index 2fd1de48c..000000000 --- a/roles/tripleo_haproxy/tasks/main.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Gather the HAProxy configuration - become: true - tripleo_haproxy_conf: - path: "{{ haproxy_config_file }}" - -- name: Check the HAProxy configuration - fail: - msg: >- - {% if haproxy_conf.global.maxconn|int < global_maxconn_min %} - - [FAILED] 'global maxconn' value check - * Current value: {{ haproxy_conf.global.maxconn }}, Recommended value: > {{ global_maxconn_min }} - {% else %} - - [PASSED] 'global maxconn' value check - {% endif %} - {% if haproxy_conf.defaults.maxconn|int < defaults_maxconn_min %} - - [FAILED] 'defaults maxconn' value check - * Current value: {{ haproxy_conf.defaults.maxconn }}, Recommended Value: > {{ defaults_maxconn_min }} - {% else %} - - [PASSED] 'defaults maxconn' value check - {% endif %} - {% if haproxy_conf.defaults['timeout queue'] != defaults_timeout_queue %} - - [FAILED] 'timeout queue' option in 'defaults' check - * Current value: {{ haproxy_conf.defaults['timeout queue'] }} - * Recommended value: {{ defaults_timeout_queue }} - {% else %} - - [PASSED] 'timeout queue' option in 'defaults' check - {% endif %} - {% if haproxy_conf.defaults['timeout client'] != defaults_timeout_client %} - - [FAILED] 'timeout client' option in 'defaults' check - * Current value: {{ haproxy_conf.defaults['timeout client'] }} - * Recommended value: {{ defaults_timeout_client }} - {% else %} - - [PASSED] 'timeout client' option in 'defaults' check - {% endif %} - {% if haproxy_conf.defaults['timeout server'] != defaults_timeout_server %} - - [FAILED] 'timeout server' option in 'defaults' check - * Current value: {{ haproxy_conf.defaults['timeout server'] }} - * Recommended value: {{ defaults_timeout_server }} - {% else %} - - [PASSED] 'timeout server' option in 'defaults' check - {% endif %} - {% if haproxy_conf.defaults['timeout check'] != defaults_timeout_check %} - - [FAILED] 'timeout check' option in 'defaults' check - * Current value: {{ haproxy_conf.defaults['timeout check'] }} - * Recommended value: {{ defaults_timeout_check }} - {% else %} - - [PASSED] 'timeout check' option in 'defaults' check - {% endif %} - failed_when: > - (haproxy_conf.global.maxconn|int < global_maxconn_min) or - (haproxy_conf.defaults.maxconn|int < defaults_maxconn_min) or - (haproxy_conf.defaults['timeout queue'] != defaults_timeout_queue) or - (haproxy_conf.defaults['timeout client'] != defaults_timeout_client) or - (haproxy_conf.defaults['timeout server'] != defaults_timeout_server) or - (haproxy_conf.defaults['timeout check'] != defaults_timeout_check) diff --git a/roles/undercloud_debug/defaults/main.yml b/roles/undercloud_debug/defaults/main.yml deleted file mode 100644 index 706058834..000000000 --- a/roles/undercloud_debug/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Fail if the 'debug' key is set to value of 'debug_check' -debug_check: true - -# Parse following ini files, retrieving value of the 'debug' key -services_conf_files: - - /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf - - /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf - - /var/lib/config-data/puppet-generated/ceilometer/etc/ceilometer/ceilometer.conf - - /var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf - - /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf diff --git 
a/roles/undercloud_debug/molecule/default/converge.yml b/roles/undercloud_debug/molecule/default/converge.yml deleted file mode 100644 index 67ada326f..000000000 --- a/roles/undercloud_debug/molecule/default/converge.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - vars: - services_conf_files: - - /tmp/debug_true_1.conf - - tasks: - - name: prepare fake config file - copy: - dest: /tmp/debug_true_1.conf - content: | - [DEFAULT] - debug: true - - - name: Checking good value - include_role: - name: undercloud_debug - vars: - debug_check: true - - - name: Should fail due to bad value - block: - - include_role: - name: undercloud_debug - vars: - debug_check: false - - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The undercloud_debug should have detected a configuration issue diff --git a/roles/undercloud_debug/molecule/default/molecule.yml b/roles/undercloud_debug/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/undercloud_debug/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/undercloud_debug/tasks/main.yml b/roles/undercloud_debug/tasks/main.yml deleted file mode 100644 index 69d884475..000000000 --- a/roles/undercloud_debug/tasks/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Get the services for debug flag - become: true - validations_read_ini: - path: "{{ item }}" - section: DEFAULT - key: debug - ignore_missing_file: true - register: config_result - with_items: "{{ services_conf_files }}" - -- name: Check the services for debug flag - fail: - msg: > - debug_check is set to {{ debug_check }} and the result of - validation is {{ config_result.results[0].value }} - failed_when: "debug_check|bool != config_result.results[0].value|bool" diff --git a/roles/undercloud_debug/vars/main.yaml b/roles/undercloud_debug/vars/main.yaml deleted file mode 100644 index de6bb470e..000000000 --- a/roles/undercloud_debug/vars/main.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -metadata: - name: Undercloud Services Debug Check - description: > - The undercloud's openstack services should _not_ have debug enabled. - This will check if debug is enabled on undercloud services. - If debug is enabled, the root filesystem can fill up quickly, and - is not a good thing. - This role needs to be run against an installed Undercloud. - The tested services must use one of the specified configuration files - to set their debug status. 
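As a quick manual counterpart to the undercloud_debug role described here, the sketch below greps a subset of the same puppet-generated config files for an explicit debug flag. It is only a spot check (the role's validations_read_ini parsing is authoritative), and the 'undercloud' host group is an illustrative inventory name.

---
# Sketch: spot-check the debug flag in the same kind of config files the
# undercloud_debug role reads. grep output is indicative only.
- hosts: undercloud
  become: true
  gather_facts: false
  vars:
    services_conf_files:
      - /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf
      - /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf
      - /var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf
  tasks:
    - name: Show any explicit debug settings
      command: grep -iE '^debug\s*=' {{ item }}
      register: debug_lines
      failed_when: false
      changed_when: false
      loop: "{{ services_conf_files }}"

    - name: Report findings
      debug:
        msg: "{{ item.item }}: {{ item.stdout | default('no explicit debug setting', true) }}"
      loop: "{{ debug_lines.results }}"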
- groups: - - pre-deployment diff --git a/roles/undercloud_disabled_services/defaults/main.yml b/roles/undercloud_disabled_services/defaults/main.yml deleted file mode 100644 index 6b5919f44..000000000 --- a/roles/undercloud_disabled_services/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -undercloud_disabled_service_list: - - libvirtd.service - - libvirtd.socket diff --git a/roles/undercloud_disabled_services/tasks/main.yml b/roles/undercloud_disabled_services/tasks/main.yml deleted file mode 100644 index 80b7cd266..000000000 --- a/roles/undercloud_disabled_services/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Check Services are running - command: "/usr/bin/systemctl show {{ item }} --property ActiveState" - become: true - loop: "{{ undercloud_disabled_service_list }}" - register: "check_disabled_services" - changed_when: false - ignore_errors: true - -- name: Fail if services were running - fail: - msg: >- - One of the undercloud services was active. - Please check {{ item.item }} first and then confirm the status of - undercloud services in general before attempting to install, update - or upgrade the environment. - failed_when: "item.stdout == 'ActiveState=active'" - loop: "{{ check_disabled_services.results }}" diff --git a/roles/undercloud_disabled_services/vars/main.yaml b/roles/undercloud_disabled_services/vars/main.yaml deleted file mode 100644 index f934d5c10..000000000 --- a/roles/undercloud_disabled_services/vars/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Verify undercloud services state before running update or upgrade - description: > - Check undercloud status before running a stack update - especially minor update and major upgrade. - groups: - - post-upgrade - - pre-upgrade diff --git a/roles/undercloud_disk_space/defaults/main.yml b/roles/undercloud_disk_space/defaults/main.yml deleted file mode 100644 index 1484cfdb4..000000000 --- a/roles/undercloud_disk_space/defaults/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -volumes: - - {mount: /var/lib/docker, min_size: 10} - - {mount: /var/lib/config-data, min_size: 3} - - {mount: /var/log, min_size: 3} - - {mount: /usr, min_size: 5} - - {mount: /var, min_size: 20} - - {mount: /, min_size: 25} diff --git a/roles/undercloud_disk_space/molecule/default/converge.yml b/roles/undercloud_disk_space/molecule/default/converge.yml deleted file mode 100644 index 31826ae80..000000000 --- a/roles/undercloud_disk_space/molecule/default/converge.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - vars: - volumes: - - {mount: /var, min_size: 20} - - {mount: /, min_size: 150} - - tasks: - - block: - - include_role: - name: undercloud_disk_space - rescue: - - name: Clear host errors - meta: clear_host_errors - - - debug: - msg: The validation works! 
End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The validation did not detect a too small disk space diff --git a/roles/undercloud_disk_space/molecule/default/molecule.yml b/roles/undercloud_disk_space/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/undercloud_disk_space/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/undercloud_disk_space/tasks/main.yml b/roles/undercloud_disk_space/tasks/main.yml deleted file mode 100644 index 67c989bac..000000000 --- a/roles/undercloud_disk_space/tasks/main.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Set a constant defining number of Bytes in 1 GB - set_fact: - const_bytes_in_gb: 1073741824 - -- name: Stat volume directories - stat: - path: "{{ item.mount }}" - with_items: "{{ volumes }}" - register: volumes_stat - -- name: Initialize existing_volumes to an empty array - set_fact: - existing_volumes="{{ [] }}" - -- name: Filter out non-existing volumes - set_fact: - existing_volumes: "{{ existing_volumes +[item.item] }}" - with_items: "{{ volumes_stat.results }}" - when: item.stat.exists - loop_control: - label: "{{ item.item.mount }}" - -- name: Loop on volumes and gather available space - shell: df -B1 {{ item.mount }} --output=avail | sed 1d - register: volume_size - with_items: "{{ existing_volumes }}" - changed_when: false - -- name: Fail if any of the volumes are too small - fail: - msg: > - Minimum free space required for {{ item.item.mount }}: {{ item.item.min_size }}G - - current free space: {{ (item.stdout|int / const_bytes_in_gb|int) |round(1) }}G - when: > - item.stdout|int / const_bytes_in_gb|int < item.item.min_size|int - with_items: "{{ volume_size.results }}" - loop_control: - label: "{{ item.item.mount }}" diff --git a/roles/undercloud_disk_space/vars/main.yaml b/roles/undercloud_disk_space/vars/main.yaml deleted file mode 100644 index 69b5444e5..000000000 --- a/roles/undercloud_disk_space/vars/main.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -metadata: - name: Verify undercloud fits the disk space requirements - description: > - Make sure that the root partition on the undercloud node has enough - free space. - - http://tripleo.org/install/environments/baremetal.html#minimum-system-requirements - groups: - - prep - - pre-introspection diff --git a/roles/undercloud_heat_purge_deleted/defaults/main.yml b/roles/undercloud_heat_purge_deleted/defaults/main.yml deleted file mode 100644 index 80245e9c5..000000000 --- a/roles/undercloud_heat_purge_deleted/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -cron_check: "heat-manage purge_deleted" diff --git a/roles/undercloud_heat_purge_deleted/molecule/default/converge.yml b/roles/undercloud_heat_purge_deleted/molecule/default/converge.yml deleted file mode 100644 index 80e70da67..000000000 --- a/roles/undercloud_heat_purge_deleted/molecule/default/converge.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: working detection - include_role: - name: undercloud_heat_purge_deleted - - - name: Validate failure - block: - - name: Override container_cli - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - container_cli = docker - - - name: run validation - include_role: - name: undercloud_heat_purge_deleted - rescue: - - name: Clear host errors - meta: clear_host_errors - - - name: Test output - debug: - msg: The validation works! Ending play. - - - name: End play - meta: end_play - - - name: Fail the validation at this point - fail: - msg: | - The undercloud_heat_purge_deleted validation failed to detect - missing cron job. diff --git a/roles/undercloud_heat_purge_deleted/molecule/default/molecule.yml b/roles/undercloud_heat_purge_deleted/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/undercloud_heat_purge_deleted/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/undercloud_heat_purge_deleted/molecule/default/prepare.yml b/roles/undercloud_heat_purge_deleted/molecule/default/prepare.yml deleted file mode 100644 index ac3b4a329..000000000 --- a/roles/undercloud_heat_purge_deleted/molecule/default/prepare.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
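The prepare play that follows fakes the podman and docker CLIs so the crontab check has output to parse. As a standalone illustration, the same detection logic used in roles/undercloud_heat_purge_deleted/tasks/main.yml can be run against an inline sample crontab; this is a sketch, not part of the role.

---
# Sketch: the "is heat-manage purge_deleted scheduled?" test from the role,
# applied to an inline sample instead of a real heat_api_cron container.
- hosts: localhost
  gather_facts: false
  vars:
    sample_crontab: |
      # comment lines are dropped by the role's grep -v '^#'
      0 12 14 2 * heat-manage purge_deleted
  tasks:
    - name: Fail if purge_deleted is not scheduled
      fail:
        msg: >-
          heat-manage purge_deleted does not appear to be enabled via cron.
      when: sample_crontab.find('heat-manage purge_deleted') == -1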
- - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - - - name: populate undercloud.conf - copy: - dest: "{{ ansible_env.HOME }}/undercloud.conf" - content: | - [DEFAULT] - container_cli = podman - - - name: Populate successful podman CLI - copy: - dest: /usr/bin/podman - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$1" - shift - command="$@" - - case $container in - 'heat_api_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * heat-manage purge_deleted' - ;; - *) - echo "Unknown container ${container}" - ;; - esac - - - name: Populate buggy docker CLI - copy: - dest: /usr/bin/docker - mode: 0755 - content: | - #!/bin/bash - action="$1" - shift - container="$1" - shift - command="$@" - - case $container in - 'heat_api_cron') - echo '# This is a comment that should be ignored' - echo '0 12 14 2 * some-other command' - ;; - *) - echo "Unknown container ${container}" - ;; - esac diff --git a/roles/undercloud_heat_purge_deleted/tasks/main.yml b/roles/undercloud_heat_purge_deleted/tasks/main.yml deleted file mode 100644 index ac390b3e4..000000000 --- a/roles/undercloud_heat_purge_deleted/tasks/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Get the Container CLI from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: container_cli - ignore_missing_file: true - register: container_cli - -- name: Get heat crontab - become: true - shell: | - set -o pipefail - {{ container_cli.value|default('podman', true) }} exec heat_api_cron crontab -l -u heat |grep -v '^#' - register: cron_result - changed_when: false - -- name: Check heat crontab - fail: - msg: >- - heat-manage purge_deleted does not appear to be enabled via cron. You - should add ' heat-manage purge_deleted' to the heat - users crontab. - when: "cron_result.stdout.find('heat-manage purge_deleted') == -1" diff --git a/roles/undercloud_heat_purge_deleted/vars/main.yaml b/roles/undercloud_heat_purge_deleted/vars/main.yaml deleted file mode 100644 index 8beb1e030..000000000 --- a/roles/undercloud_heat_purge_deleted/vars/main.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -metadata: - name: Verify heat-manage purge_deleted is enabled in crontab - description: > - Without a purge_deleted crontab enabled, the - heat database can grow very large. This validation checks that - the purge_deleted crontab has been set up. - This role should only be used on systems using 'heat_api_cron'. - Starting from Wallaby, the 'heat_api_cron' is no longer installed by - default, and the role may behave upredictably. - groups: [] diff --git a/roles/undercloud_process_count/defaults/main.yml b/roles/undercloud_process_count/defaults/main.yml deleted file mode 100644 index 810f575b0..000000000 --- a/roles/undercloud_process_count/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -max_process_count: 8 diff --git a/roles/undercloud_process_count/molecule/default/converge.yml b/roles/undercloud_process_count/molecule/default/converge.yml deleted file mode 100644 index 6c61a39a2..000000000 --- a/roles/undercloud_process_count/molecule/default/converge.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - gather_facts: false - - tasks: - - name: Warn developers about the lack of molecule testing - fail: - msg: >- - This role needs molecule tests! diff --git a/roles/undercloud_process_count/molecule/default/molecule.yml b/roles/undercloud_process_count/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/undercloud_process_count/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/undercloud_process_count/tasks/main.yml b/roles/undercloud_process_count/tasks/main.yml deleted file mode 100644 index 7774e5749..000000000 --- a/roles/undercloud_process_count/tasks/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -- name: Ensure we get needed facts - setup: - gather_subset: - - '!all' - - '!any' - - '!min' - - env - -- name: Get the Container CLI from the undercloud.conf file - become: true - validations_read_ini: - path: "{{ ansible_env.HOME }}/undercloud.conf" - section: DEFAULT - key: container_cli - ignore_missing_file: true - register: container_cli - -- name: Collect the number of running processes per OpenStack service - command: "{{ container_cli.value|default('podman', true) }} exec {{ item.container }} pgrep -f -c {{ item.proc }}" - become: true - ignore_errors: true - register: "process_count" - changed_when: false - loop: - - {container: "heat_engine", proc: "heat-engine"} - - {container: "ironic_inspector", proc: "ironic-inspector"} - - {container: "ironic_conductor", proc: "ironic-conductor"} - - {container: "nova_api", proc: "nova_api"} - - {container: "nova_scheduler", proc: "nova-scheduler"} - - {container: "nova_conductor", proc: "nova-conductor"} - - {container: "glance_api", proc: "glance-api"} - -- name: Create warning messages - command: echo "There are {{ item.stdout }} {{ item.item }} processes running. Having more than {{ max_process_count }} risks running out of memory." - register: process_warnings - with_items: "{{ process_count.results }}" - when: "item.stdout|int > max_process_count" - -- name: Output warning message - warn: msg={{ warning_msg }} - when: "warning_msg|length > 0" - vars: - warning_msg: "{{ process_warnings.results|selectattr('changed')|map(attribute='stdout')|join('\n') }}" diff --git a/roles/undercloud_process_count/vars/main.yaml b/roles/undercloud_process_count/vars/main.yaml deleted file mode 100644 index ae40b8847..000000000 --- a/roles/undercloud_process_count/vars/main.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -metadata: - name: Check the number of OpenStack processes on undercloud - description: > - The default settings for OpenStack is to run one process (heat-engine, - keystone, etc.) per CPU core. On a machine with a lot of cores this is - both unnecessary and can consume a significant amount of RAM, leading - to crashes due to OOMKiller. 
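To spot-check a single service by hand, the per-container process count gathered by the undercloud_process_count tasks above can be reproduced with one podman exec, as sketched below. The 'undercloud' host group is an illustrative inventory name, and the example assumes podman is the container CLI and that a nova_api container exists.

---
# Sketch: count nova_api workers the same way the role does and compare
# against the default max_process_count of 8.
- hosts: undercloud
  become: true
  gather_facts: false
  vars:
    max_process_count: 8
  tasks:
    - name: Count nova_api processes
      command: podman exec nova_api pgrep -f -c nova_api
      register: nova_api_count
      failed_when: false
      changed_when: false

    - name: Warn when the count exceeds the threshold
      debug:
        msg: "nova_api is running {{ nova_api_count.stdout }} processes (threshold {{ max_process_count }})."
      when: nova_api_count.stdout | default('0', true) | int > max_process_count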
- groups: - - pre-deployment diff --git a/roles/undercloud_proxy_validation/defaults/main.yml b/roles/undercloud_proxy_validation/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/undercloud_proxy_validation/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/undercloud_proxy_validation/tasks/main.yml b/roles/undercloud_proxy_validation/tasks/main.yml deleted file mode 100644 index 822080714..000000000 --- a/roles/undercloud_proxy_validation/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: Fail if no_proxy is not set - fail: - msg: >- - http_proxy and/or https_proxy are set but no_proxy is not set. - no_proxy needs to contain 127.0.0.1 or any of the undercloud - public/private IPs otherwise deployment and/or upgrade will fail. - failed_when: "item.stdout == 'ActiveState=active'" - when: (http_proxy|length > 0 or https_proxy|length > 0 is defined) and no_proxy|length == 0 or - (HTTP_PROXY|length > 0 or HTTPS_PROXY|length > 0 is defined) and NO_PROXY|length == 0 or diff --git a/roles/undercloud_proxy_validation/vars/main.yaml b/roles/undercloud_proxy_validation/vars/main.yaml deleted file mode 100644 index 8c4df6ab7..000000000 --- a/roles/undercloud_proxy_validation/vars/main.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -metadata: - name: Verify undercloud proxy configuration - description: > - Check undercloud proxy configuration before a stack update - especially minor update and major upgrade. - groups: - - post-upgrade - - pre-upgrade - vars: - HTTP_PROXY: "{{ lookup('ansible.builtin.env', 'HTTP_PROXY', default=undef()) }}" - HTTPS_PROXY: "{{ lookup('ansible.builtin.env', 'HTTPS_PROXY', default=undef()) }}" - NO_PROXY: "{{ lookup('ansible.builtin.env', 'NO_PROXY', default=undef()) }}" - http_proxy: "{{ lookup('ansible.builtin.env', 'http_proxy', default=undef()) }}" - https_proxy: "{{ lookup('ansible.builtin.env', 'https_proxy', default=undef()) }}" - no_proxy: "{{ lookup('ansible.builtin.env', 'no_proxy', default=undef()) }}" diff --git a/roles/undercloud_service_status/defaults/main.yml b/roles/undercloud_service_status/defaults/main.yml deleted file mode 100644 index 3c70ac06e..000000000 --- a/roles/undercloud_service_status/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -undercloud_service_list: - - tripleo_heat_engine - - tripleo_ironic_conductor diff --git a/roles/undercloud_service_status/tasks/main.yml b/roles/undercloud_service_status/tasks/main.yml deleted file mode 100644 index 8291e81f4..000000000 --- a/roles/undercloud_service_status/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Check Services are running - command: "/usr/bin/systemctl show {{ item }} --property ActiveState" - become: true - with_items: "{{ undercloud_service_list }}" - register: "check_services" - changed_when: false - ignore_errors: true - -- name: Fail if services were not running - fail: - msg: >- - One of the undercloud services was not active. - Please check {{ item.item }} first and then confirm the status of - undercloud services in general before attempting to update or - upgrade the environment. 
- failed_when: "item.stdout != 'ActiveState=active'" - with_items: "{{ check_services.results }}" diff --git a/roles/undercloud_service_status/vars/main.yaml b/roles/undercloud_service_status/vars/main.yaml deleted file mode 100644 index f934d5c10..000000000 --- a/roles/undercloud_service_status/vars/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -metadata: - name: Verify undercloud services state before running update or upgrade - description: > - Check undercloud status before running a stack update - especially minor update and major upgrade. - groups: - - post-upgrade - - pre-upgrade diff --git a/roles/undercloud_sysctl/defaults/main.yaml b/roles/undercloud_sysctl/defaults/main.yaml deleted file mode 100644 index cbe7406bc..000000000 --- a/roles/undercloud_sysctl/defaults/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -undercloud_sysctl_options: - - net.ipv4.ip_forward - - net.ipv4.ip_nonlocal_bind - -undercloud_sysctl_ipv6_option: net.ipv6.ip_nonlocal_bind -missing_options: [] -fail_options: false diff --git a/roles/undercloud_sysctl/tasks/main.yaml b/roles/undercloud_sysctl/tasks/main.yaml deleted file mode 100644 index fa9ab0432..000000000 --- a/roles/undercloud_sysctl/tasks/main.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Check if ipv6 is enabled - become: true - register: stat_result - stat: - path: /proc/net/if_inet6 - -- name: Set ipv6 option if enabled - set_fact: - undercloud_sysctl_options: "{{ undercloud_sysctl_options + [undercloud_sysctl_ipv6_option] }}" - when: stat_result.stat.exists - -- name: Check sysctl options - become: true - register: option_result - stat: - path: "/proc/sys/{{ item | replace('.', '/') }}" - loop: "{{ undercloud_sysctl_options }}" - -- name: Set missing options - set_fact: - missing_options: "{{ missing_options + [item.invocation.module_args.path] }}" - when: not item.stat.exists - loop: "{{ option_result.results }}" - -- name: Clear missing options for fail message - set_fact: - fail_options: "{{ missing_options | join(', ') | replace('/proc/sys/', '') | replace('/', '.') }}" - -- name: Fail if some options are missing - fail: - msg: | - Required sysctl options are not available. Check - that your kernel is up to date. Missing: {{ fail_options }} - when: fail_options diff --git a/roles/validate_passwords_file/defaults/main.yaml b/roles/validate_passwords_file/defaults/main.yaml deleted file mode 100644 index 745f476ad..000000000 --- a/roles/validate_passwords_file/defaults/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -validate_passwords_file_name: 'tripleo-undercloud-passwords.yaml' -validate_passwords_file_output_dir: '/home/stack/tripleo-deploy/undercloud' diff --git a/roles/validate_passwords_file/tasks/main.yaml b/roles/validate_passwords_file/tasks/main.yaml deleted file mode 100644 index a9a9dedba..000000000 --- a/roles/validate_passwords_file/tasks/main.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# Copyright 2022 Red Hat, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Check if passwords file exists - register: stat_result - stat: - path: "{{ validate_passwords_file_output_dir + '/' + validate_passwords_file_name }}" - -- name: Fail when the passwords file is missing - fail: - msg: | - The {{ validate_passwords_file_name }} file is missing. - This will cause all service passwords to change and break the existing - undercloud. - when: not stat_result.stat.exists diff --git a/roles/validation_init/defaults/main.yml b/roles/validation_init/defaults/main.yml deleted file mode 100644 index 18d19f91f..000000000 --- a/roles/validation_init/defaults/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should be placed in this file. - -# All variables within this role should have a prefix of "validation_init" - -# Debugging mode -validation_init_debug: false - -# New role prefix -validation_init_prefix: "tripleo" - -# Absolute path of the directory where the skeleton will be deployed -validation_init_skeleton_role_dir: "/tmp" - -# Absolute/Relative path to the roles directory where the new role will be -# created -validation_init_roles_dir: "roles" - -# Absolute/Relative path to the CI molecule yaml file -validation_init_zuuld_molecule: "zuul.d/molecule.yaml" - -# Absolute/Relative path to the playbooks directory where the new playbook will -# be created -validation_init_playbooks_dir: "playbooks" - -# Absolute/Relative path to the documentation roles directory -validation_init_roles_doc_dir: "doc/source/roles" - -# If 'true', the documentation and CI configuration will be created, otherwise not. -# Will be used to create new custom validations through the CLI. -validation_init_enabling_ci: true - -# If set to 'true', the molecule directory created from the skeleton won't be deleted -# from the role and a CI job will be added in the zuul.d/molecule.yaml file. -# If set to 'false', the molecule directory will be deleted after the creation -# of the new role and no CI job will be added in the zuul.d/molecule.yaml file.
-validation_init_molecule: true diff --git a/roles/validation_init/files/_skeleton_role_/defaults/main.yml.j2 b/roles/validation_init/files/_skeleton_role_/defaults/main.yml.j2 deleted file mode 100644 index 638ccb9a6..000000000 --- a/roles/validation_init/files/_skeleton_role_/defaults/main.yml.j2 +++ /dev/null @@ -1,28 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# All variables intended for modification should place placed in this file. - -# !!!! IMPORTANT !!!! -# Add a comment above every variables describing them. -# This will be included in the sphinx role documentation -# !!!! IMPORTANT !!!! - -# All variables within this role should have a prefix of "{{ role_name | replace('-', '_') }}" - -# Debugging mode -{{ role_name | replace('-', '_') }}_debug: false diff --git a/roles/validation_init/files/_skeleton_role_/molecule/default/converge.yml.j2 b/roles/validation_init/files/_skeleton_role_/molecule/default/converge.yml.j2 deleted file mode 100644 index 15d924d2d..000000000 --- a/roles/validation_init/files/_skeleton_role_/molecule/default/converge.yml.j2 +++ /dev/null @@ -1,21 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: Converge - hosts: all - roles: - - role: "{{ role_name }}" diff --git a/roles/validation_init/files/_skeleton_role_/molecule/default/molecule.yml b/roles/validation_init/files/_skeleton_role_/molecule/default/molecule.yml deleted file mode 100644 index ba05cf07d..000000000 --- a/roles/validation_init/files/_skeleton_role_/molecule/default/molecule.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. diff --git a/roles/validation_init/files/_skeleton_role_/molecule/default/prepare.yml b/roles/validation_init/files/_skeleton_role_/molecule/default/prepare.yml deleted file mode 100644 index d8d2b1c40..000000000 --- a/roles/validation_init/files/_skeleton_role_/molecule/default/prepare.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
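For orientation, ansible-galaxy renders the `.j2` skeleton files above with the new role's name substituted in, so for a hypothetical role created as tripleo_my_check the defaults template collapses to a single namespaced flag:

```yaml
# Rendered sketch for a hypothetical "tripleo_my_check" role; only the
# variable prefix changes from one generated role to the next.
# Debugging mode
tripleo_my_check_debug: false
```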
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Add you own tasks to prepare the hosts for your tests - debug: - msg: >- - Installing package(s), create file(s), whatever you need here - in this file to prepare your test(s). diff --git a/roles/validation_init/files/_skeleton_role_/molecule/default/verify.yml b/roles/validation_init/files/_skeleton_role_/molecule/default/verify.yml deleted file mode 100644 index cb94e56a6..000000000 --- a/roles/validation_init/files/_skeleton_role_/molecule/default/verify.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Verify - hosts: all - gather_facts: false - tasks: - # This is an example playbook to execute your ansible tests. - - name: Example assertion - assert: - that: true diff --git a/roles/validation_init/files/_skeleton_role_/tasks/main.yml.j2 b/roles/validation_init/files/_skeleton_role_/tasks/main.yml.j2 deleted file mode 100644 index 167d93dcd..000000000 --- a/roles/validation_init/files/_skeleton_role_/tasks/main.yml.j2 +++ /dev/null @@ -1,18 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# "{{ role_name }}" tasks diff --git a/roles/validation_init/files/_skeleton_role_/vars/main.yml.j2 b/roles/validation_init/files/_skeleton_role_/vars/main.yml.j2 deleted file mode 100644 index 5381f2e92..000000000 --- a/roles/validation_init/files/_skeleton_role_/vars/main.yml.j2 +++ /dev/null @@ -1,27 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -# While options found within the vars/ path can be overridden using extra -# vars, items within this path are considered part of the role and not -# intended to be modified. - -# All variables within this role should have a prefix of "{{ role_name }}" - -# !!!! IMPORTANT !!!! -# Add a comment above every variables describing them. -# This will be included in the sphinx role documentation -# !!!! IMPORTANT !!!! diff --git a/roles/validation_init/molecule/default/converge.yml b/roles/validation_init/molecule/default/converge.yml deleted file mode 100644 index 923663646..000000000 --- a/roles/validation_init/molecule/default/converge.yml +++ /dev/null @@ -1,62 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Converge - hosts: all - gather_facts: false - tasks: - - name: Create new role - include_role: - name: validation_init - vars: - validation_init_role_name: skeleton_test - validation_init_prefix: "tripleo" - validation_init_skeleton_role_dir: "/tmp" - validation_init_roles_dir: "/tmp/roles" - validation_init_zuuld_molecule: "/tmp/zuul.d/molecule.yaml" - validation_init_playbooks_dir: "/tmp/playbooks" - validation_init_roles_doc_dir: "/tmp/doc/source/roles" - - - name: Create a new role with the same name - block: - - name: Run the validation_init role again - include_role: - name: validation_init - vars: - validation_init_role_name: skeleton_test - validation_init_prefix: "tripleo" - validation_init_skeleton_role_dir: "/tmp" - validation_init_roles_dir: "/tmp/roles" - validation_init_zuuld_molecule: "/tmp/zuul.d/molecule.yaml" - validation_init_playbooks_dir: "/tmp/playbooks" - validation_init_roles_doc_dir: "/tmp/doc/source/roles" - - rescue: - - name: Clear host error - meta: clear_host_errors - - - name: Role addition output - debug: - msg: The Role works! End the playbook run - - - name: End play - meta: end_play - - - name: Fail the test - fail: - msg: | - The validation_init role didn't properly detect that the role name was - already been taken! diff --git a/roles/validation_init/molecule/default/molecule.yml b/roles/validation_init/molecule/default/molecule.yml deleted file mode 100644 index 491cedf1d..000000000 --- a/roles/validation_init/molecule/default/molecule.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
- -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - log: true - options: - vvv: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" diff --git a/roles/validation_init/molecule/default/verify.yml b/roles/validation_init/molecule/default/verify.yml deleted file mode 100644 index 967a8aaae..000000000 --- a/roles/validation_init/molecule/default/verify.yml +++ /dev/null @@ -1,137 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Verify - hosts: all - gather_facts: false - vars: - new_role_path: "/tmp/roles/tripleo_skeleton_test" - new_zmol_path: "/tmp/zuul.d/molecule.yaml" - new_play_path: "/tmp/playbooks/tripleo-skeleton-test.yaml" - new_role_doc: "/tmp/doc/source/roles/role-tripleo_skeleton_test.rst" - tasks: - - name: Check new role directory is present - stat: - path: "{{ new_role_path }}" - register: roles_name_dir - - - assert: - that: - - roles_name_dir.stat.exists - - roles_name_dir.stat.isdir - fail_msg: | - {{ new_role_path }} role directory doesn't exist. - success_msg: | - {{ new_role_path }} role directory does exist. - - - name: Read zuul molecule file - slurp: - src: "{{ new_zmol_path }}" - register: molecule_yaml - - - name: Print returned information - debug: - msg: "{{ molecule_yaml['content'] | b64decode }}" - - - name: Check zuul molecule file content (gate/check definition) - lineinfile: - path: "{{ new_zmol_path }}" - regexp: '^.*- tripleo-validations-centos-8-molecule-skeleton' - insertbefore: '^.*name: tripleo-validations-molecule-tripleo_undercloud_conf' - line: ' - tripleo-validations-centos-8-molecule-skeleton_test' - state: present - check_mode: true - register: zuul_def - failed_when: (zuul_def is changed) or (zuul_def is failed) - - - name: Check zuul molecule file content (job definition) - lineinfile: - path: "{{ new_zmol_path }}" - regexp: '^.*name: tripleo-validations-centos-8-molecule-skeleton' - insertbefore: '^.*parent: tripleo-validations-centos-8-base$' - line: ' name: tripleo-validations-centos-8-molecule-skeleton_test' - state: present - check_mode: true - register: zuul_job_def - failed_when: (zuul_job_def is changed) or (zuul_job_def is failed) - - - name: Check new validation playbook is present - stat: - path: "{{ new_play_path }}" - register: play_name_path - - - assert: - that: - - play_name_path.stat.exists - - play_name_path.stat.isreg - fail_msg: | - {{ new_play_path }} playbook doesn't exist. - success_msg: | - {{ new_play_path }} playbook directory does exist. 
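The verify playbook above leans on a handy idiom: run lineinfile in check_mode and treat a changed (or failed) result as a broken assertion, since "changed" means the expected line was absent. The same pattern on a hypothetical file and marker line:

```yaml
# check_mode keeps lineinfile from writing anything; "changed" therefore
# means /tmp/example.conf did not already contain the marker line.
- name: Assert that /tmp/example.conf already contains the marker line
  lineinfile:
    path: /tmp/example.conf
    line: "marker = enabled"
    state: present
  check_mode: true
  register: marker_check
  failed_when: (marker_check is changed) or (marker_check is failed)
```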
- - - name: Read new playbook file - slurp: - src: "{{ new_play_path }}" - register: playbook_yaml - - - name: Check playbook content for new role - lineinfile: - path: "{{ new_play_path }}" - regexp: '.*- tripleo_skeleton: ' - insertafter: '^.*roles:$' - line: ' - tripleo_skeleton_test' - state: present - check_mode: true - register: include_role - failed_when: (include_role is changed) or (include_role is failed) - - - name: Print returned information - debug: - msg: "{{ playbook_yaml['content'] | b64decode }}" - - - name: Check new role doc is present - stat: - path: "{{ new_role_doc }}" - register: role_doc_file - - - assert: - that: - - role_doc_file.stat.exists - - role_doc_file.stat.isreg - fail_msg: | - {{ new_role_doc }} playbook doesn't exist. - success_msg: | - {{ new_role_doc }} playbook directory does exist. - - - name: Read new role documentation file - slurp: - src: "{{ new_role_doc }}" - register: doc_file - - - name: Print returned information - debug: - msg: "{{ doc_file['content'] | b64decode }}" - - - name: Check role documentation content - lineinfile: - path: "{{ new_role_doc }}" - regexp: '.*:role: ' - insertafter: '^\.\. ansibleautoplugin::$' - line: ' :role: /tmp/roles/tripleo_skeleton_test' - state: present - check_mode: true - register: doc - failed_when: (doc is changed) or (doc is failed) diff --git a/roles/validation_init/molecule/no_molecule_test/converge.yml b/roles/validation_init/molecule/no_molecule_test/converge.yml deleted file mode 100644 index bfe3531c7..000000000 --- a/roles/validation_init/molecule/no_molecule_test/converge.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Converge - Role without molecule tests - hosts: all - gather_facts: false - tasks: - - name: Create new role - include_role: - name: validation_init - vars: - validation_init_role_name: skeleton_test_no_molecule - validation_init_prefix: "tripleo" - validation_init_skeleton_role_dir: "/tmp" - validation_init_roles_dir: "/tmp/roles" - validation_init_zuuld_molecule: "/tmp/zuul.d/molecule.yaml" - validation_init_playbooks_dir: "/tmp/playbooks" - validation_init_roles_doc_dir: "/tmp/doc/source/roles" - validation_init_molecule: false diff --git a/roles/validation_init/molecule/no_molecule_test/molecule.yml b/roles/validation_init/molecule/no_molecule_test/molecule.yml deleted file mode 100644 index 491cedf1d..000000000 --- a/roles/validation_init/molecule/no_molecule_test/molecule.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# inherits tripleo-validations/.config/molecule/config.yml -# To override default values, please take a look at the config.yml. 
- -provisioner: - name: ansible - playbooks: - prepare: ../../resources/playbooks/prepare.yml - inventory: - hosts: - all: - hosts: - centos: - ansible_python_interpreter: /usr/bin/python3 - log: true - options: - vvv: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml - ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" - ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}" diff --git a/roles/validation_init/molecule/no_molecule_test/verify.yml b/roles/validation_init/molecule/no_molecule_test/verify.yml deleted file mode 100644 index 5de26dc6f..000000000 --- a/roles/validation_init/molecule/no_molecule_test/verify.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Verify with no molecule tests - hosts: all - gather_facts: false - vars: - new_role_path: "/tmp/roles/tripleo_skeleton_test_no_molecule" - new_role_name: "tripleo_skeleton_test_no_molecule" - new_zmol_path: "/tmp/zuul.d/molecule.yaml" - tasks: - - name: Check new role directory is present - stat: - path: "{{ new_role_path }}" - register: roles_name_dir - - - assert: - that: - - roles_name_dir.stat.exists | bool - - roles_name_dir.stat.isdir | bool - fail_msg: | - {{ new_role_path }} role directory doesn't exist. - success_msg: | - {{ new_role_path }} role directory does exist. - - - name: Check the molecule directory presence/absence - stat: - path: "{{ new_role_path }}/molecule" - register: roles_molecule_dir - - - name: Fail if the molecule directory if present - fail: - msg: | - {{ new_role_path }}/molecule directory has not been removed. - when: roles_molecule_dir.stat.exists | bool - - - name: Read zuul molecule file - slurp: - src: "{{ new_zmol_path }}" - register: molecule_yaml - - - name: Print returned information - debug: - msg: "{{ molecule_yaml['content'] | b64decode }}" - - - name: Check no molecule job has been added - command: >- - grep {{ new_role_name }} {{ new_zmol_path }} - changed_when: false - ignore_errors: true - register: is_molecule_ci_job_definition - - - assert: - that: - - "{{ is_molecule_ci_job_definition.rc | int }} > 0" - fail_msg: | - CI Job definition has been added! - success_msg: | - CI Job definition not found as expected! diff --git a/roles/validation_init/resources/playbooks/prepare.yml b/roles/validation_init/resources/playbooks/prepare.yml deleted file mode 100644 index 7ae2d2465..000000000 --- a/roles/validation_init/resources/playbooks/prepare.yml +++ /dev/null @@ -1,96 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
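The grep-based absence check above can also be folded into a single task by putting the expectation into failed_when; the role and path names are reused from the verify playbook.

```yaml
# grep exits 0 when it finds the role name, which is exactly the failure case
# here: a CI job was added even though validation_init_molecule was false.
- name: Check that no molecule CI job was added for the new role
  command: grep tripleo_skeleton_test_no_molecule /tmp/zuul.d/molecule.yaml
  register: job_grep
  changed_when: false
  failed_when: job_grep.rc == 0
```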
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Prepare - hosts: all - gather_facts: false - tasks: - - name: Install Ansible for getting ansible-galaxy - package: - name: ansible - state: installed - - - name: Create roles directory - file: - path: /tmp/roles - state: directory - mode: '0755' - - - name: Create playbooks directory - file: - path: /tmp/playbooks - state: directory - mode: '0755' - - - name: Create roles doc directory - file: - path: /tmp/doc/source/roles - state: directory - mode: '0755' - - - name: Create zuul.d directory - file: - path: /tmp/zuul.d/ - state: directory - mode: '0755' - - - name: Copy zuul.d/molecule.yaml - copy: - dest: /tmp/zuul.d/molecule.yaml - content: | - --- - - project-template: - check: - jobs: - - tripleo-validations-centos-8-molecule-role_one - - tripleo-validations-centos-8-molecule-role_two - - tripleo-validations-centos-8-molecule-tripleo_undercloud_conf - gate: - jobs: - - tripleo-validations-centos-8-molecule-role_one - - tripleo-validations-centos-8-molecule-role_two - - tripleo-validations-centos-8-molecule-tripleo_undercloud_conf - name: tripleo-validations-molecule-jobs - - job: - files: - - ^roles/role_one/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - name: tripleo-validations-centos-8-molecule-role_one - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: role_one - - job: - files: - - ^roles/role_two/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - name: tripleo-validations-centos-8-molecule-role_two - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: role_two - - job: - files: - - ^roles/tripleo_undercloud_conf/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - name: tripleo-validations-centos-8-molecule-tripleo_undercloud_conf - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: tripleo_undercloud_conf diff --git a/roles/validation_init/tasks/main.yml b/roles/validation_init/tasks/main.yml deleted file mode 100644 index 639e74f6e..000000000 --- a/roles/validation_init/tasks/main.yml +++ /dev/null @@ -1,243 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -- name: Check for role name - fail: - msg: >- - The required variable `role_name` is undefined. Check your settings. - when: - - validation_init_role_name is undefined - -- name: Ensure role name is not empty - fail: - msg: >- - The required variable `role_name` is empty. Check your settings. 
- when: - - validation_init_role_name is defined - - validation_init_role_name == '' - -- name: Normalize the role name - set_fact: - _role_name: "{{ validation_init_role_name | replace('-', '_' ) }}" - dashed_role_name: "{{ validation_init_role_name | replace('_', '-') }}" - -- name: Copy Skeleton role directory - copy: - src: _skeleton_role_ - dest: "{{ validation_init_skeleton_role_dir }}/" - mode: '0755' - -- name: Check if the role name is available - stat: - path: "{{ validation_init_roles_dir }}/{{ validation_init_prefix }}_{{ _role_name }}" - register: new_roles_name_dir - -- name: Fail if the new role name already exists in roles directory - assert: - that: - - not new_roles_name_dir.stat.exists|bool - fail_msg: | - {{ validation_init_prefix }}_{{ _role_name }} role does already exist :( - success_msg: | - {{ validation_init_prefix }}_{{ _role_name }} role doesn't exist! :) - -- name: Create role - command: >- - ansible-galaxy init - --role-skeleton={{ validation_init_skeleton_role_dir }}/_skeleton_role_ - --init-path="{{ validation_init_roles_dir }}/" - {{ validation_init_prefix }}_{{ _role_name }} - args: - creates: "{{ validation_init_roles_dir }}/{{ validation_init_prefix }}_{{ _role_name }}" - -- name: Remove molecule directory if not needed - file: - path: "{{ validation_init_roles_dir }}/{{ validation_init_prefix }}_{{ _role_name }}/molecule" - state: absent - when: not validation_init_molecule | default(true) | bool - -- name: Create the playbook - copy: - content: | - --- - - hosts: undercloud - gather_facts: false - vars: - metadata: - name: Brief and general description of the validation - description: | - The complete description of this validation should be here - groups: - - backup-and-restore - - no-op - - prep - - pre-introspection - - pre-deployment - - post-deployment - - openshift-on-openstack - - pre-upgrade - - post-upgrade - - pre-system-upgrade - - post-system-upgrade - - pre-undercloud-upgrade - - post-undercloud-upgrade - - pre-overcloud-prepare - - post-overcloud-prepare - - pre-overcloud-upgrade - - post-overcloud-upgrade - - pre-overcloud-converge - - post-overcloud-converge - - pre-ceph - - post-ceph - - pre-update - - pre-update-prepare - - pre-update-run - - pre-update-converge - - post-update - categories: - - networking - - compute - - baremetal - - provisioning - - database - - os - - system - - packaging - - kernel - - security - - webserver - - storage - - ha - - clustering - - undercloud-config - products: - - tripleo - {{ validation_init_prefix }}_{{ _role_name }}_debug: false - roles: - - {{ validation_init_prefix }}_{{ _role_name }} - dest: "{{ validation_init_playbooks_dir }}/{{ validation_init_prefix }}-{{ dashed_role_name }}.yaml" - -- when: validation_init_enabling_ci | default(true) | bool - block: - - name: Read zuul molecule file - slurp: - src: "{{ validation_init_zuuld_molecule }}" - register: molecule_yaml - - - name: Create molecule entry - copy: - content: |- - # Managed via ./role-addition.yml, do not edit manually without testing that - # new role addition does not reformat it. 
- --- - {% set items = molecule_yaml['content'] | b64decode | from_yaml %} - {% set job_index = [] %} - {% set new_job_name = "tripleo-validations-centos-8-molecule-" ~ _role_name %} - {% for item in items %} - {% if 'project-template' in item %} - {% if item['project-template']['name'] == "tripleo-validations-molecule-jobs" %} - {% if not (new_job_name in item['project-template']['check']['jobs']) %} - {% set _ = item['project-template']['check']['jobs'].append(new_job_name) %} - {% set check_jobs = (item['project-template']['check']['jobs'] | sort) %} - {% set _ = item['project-template']['check'].update({'jobs': check_jobs}) %} - {% endif %} - {% if not (new_job_name in item['project-template']['gate']['jobs']) %} - {% set _ = item['project-template']['gate']['jobs'].append(new_job_name) %} - {% set gate_jobs = (item['project-template']['gate']['jobs'] | sort) %} - {% set _ = item['project-template']['gate'].update({'jobs': gate_jobs}) %} - {% endif %} - {% endif %} - {% else %} - {% if item['job']['name'] == new_job_name %} - {% set _ = job_index.append(new_job_name) %} - {% endif %} - {% endif %} - {% endfor %} - {% if (job_index | length) < 1 %} - {% set new_job = { - "name": new_job_name, - "parent": "tripleo-validations-centos-8-base", - "files": [ - "^roles/" ~ validation_init_prefix ~ "_" ~ _role_name ~ "/.*", - "^tests/prepare-test-host.yml", - "^ci/playbooks/pre.yml", - "^ci/playbooks/run.yml", - "^molecule-requirements.txt" - ], - "vars": { - "tripleo_validations_role_name": validation_init_prefix ~ "_" ~ _role_name - } - } - %} - {% set _ = items.append({"job": new_job}) %} - {% endif %} - {% set project = items.pop(0) %} - {% set sorted_jobs = items | sort(attribute='job.name') %} - {% set _ = sorted_jobs.insert(0, project) %} - {{ sorted_jobs | to_nice_yaml(indent=2, width=1337) }} - dest: "{{ validation_init_zuuld_molecule }}" - when: validation_init_molecule | default(true) | bool - - - name: Create role documentation - copy: - content: | - {% set opening = _role_name %} - {{ '=' * (opening | length) }} - {{ opening }} - {{ '=' * (opening | length) }} - - -------------- - About The Role - -------------- - - - - Requirements - ============ - - - - Dependencies - ============ - - - - Example Playbook - ================ - - .. code-block:: yaml - - - hosts: localhost - gather_facts: false - roles: - - { role: {{ validation_init_prefix }}_{{ _role_name }} } - - Licence - ======= - - Apache - - Author Information - ================== - - **Red Hat TripleO DFG: Squad:** - - ---------------- - Full Description - ---------------- - - .. ansibleautoplugin:: - :role: {{ validation_init_roles_dir }}/{{ validation_init_prefix }}_{{ _role_name }} - dest: "{{ validation_init_roles_doc_dir }}/role-{{ validation_init_prefix }}_{{ _role_name }}.rst" diff --git a/scripts/bindep-install b/scripts/bindep-install deleted file mode 100755 index b9f123317..000000000 --- a/scripts/bindep-install +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
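To make the Jinja block above easier to follow: for the skeleton_test role exercised by the molecule scenario, the appended zuul job entry comes out roughly as below (key order may differ after to_nice_yaml; the values mirror what verify.yml checks for).

```yaml
# Approximate rendering of the job the template appends for "skeleton_test".
- job:
    files:
      - ^roles/tripleo_skeleton_test/.*
      - ^tests/prepare-test-host.yml
      - ^ci/playbooks/pre.yml
      - ^ci/playbooks/run.yml
      - ^molecule-requirements.txt
    name: tripleo-validations-centos-8-molecule-skeleton_test
    parent: tripleo-validations-centos-8-base
    vars:
      tripleo_validations_role_name: tripleo_skeleton_test
```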
See the -# License for the specific language governing permissions and limitations -# under the License. - - -## Shell Opts ---------------------------------------------------------------- - -set -o pipefail -set -xeuo - - -## Vars ---------------------------------------------------------------------- - -export BINDEP_FILE="${BINDEP_FILE:-$(dirname $(readlink -f ${BASH_SOURCE[0]}))/../bindep.txt}" - - -## Main ---------------------------------------------------------------------- - -# Source distribution information -source /etc/os-release || source /usr/lib/os-release -RHT_PKG_MGR=$(command -v dnf || command -v yum) - -# NOTE(cloudnull): Get a list of packages to install with bindep. If packages -# need to be installed, bindep exits with an exit code of 1. -BINDEP_PKGS=$(bindep -b -f "${BINDEP_FILE}" test || true) - -if [[ ${#BINDEP_PKGS} > 0 ]]; then - case "${ID,,}" in - amzn|rhel|centos|fedora) - sudo "${RHT_PKG_MGR}" install -y ${BINDEP_PKGS} - ;; - esac -fi diff --git a/scripts/run-local-test b/scripts/run-local-test deleted file mode 100755 index 799f9b08a..000000000 --- a/scripts/run-local-test +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
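The bindep-install helper above has a close Ansible equivalent; a minimal sketch, assuming bindep is already installed and that bindep.txt sits in the current working directory:

```yaml
# bindep -b prints only the missing package names, one per line, and exits
# non-zero when something is missing, hence failed_when: false.
- name: List missing binary dependencies
  command: bindep -b -f bindep.txt test
  register: bindep_missing
  changed_when: false
  failed_when: false

- name: Install the missing packages
  become: true
  package:
    name: "{{ bindep_missing.stdout_lines }}"
    state: present
  when: bindep_missing.stdout_lines | length > 0
```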
- - -## Functions ----------------------------------------------------------------- -function usage { - echo "Usage: ROLE_NAME=ROLE_NAME ${0##*/} or ${0##*/} ROLE_NAME" -} - -## Vars ---------------------------------------------------------------------- - -export PROJECT_DIR="$(dirname $(readlink -f ${BASH_SOURCE[0]}))/../" -if [ "${ROLE_NAME}x" = "x" -a "${1}x" = "x" ]; then - usage; - exit 2 -fi -export ROLE_NAME="${ROLE_NAME:-$1}" -export TRIPLEO_JOB_ANSIBLE_ARGS=${TRIPLEO_JOB_ANSIBLE_ARGS:-""} - -## Shell Opts ---------------------------------------------------------------- - -set -o pipefail -set -xeuo - -## Main ---------------------------------------------------------------------- - -# Source distribution information -source /etc/os-release || source /usr/lib/os-release -RHT_PKG_MGR=$(command -v dnf || command -v yum) - -# Install the one requirement we need to run any local test -case "${ID,,}" in - amzn|rhel|centos|fedora) - sudo "${RHT_PKG_MGR}" install -y python3 python*-virtualenv - ;; -esac - -# Ensure the required ci file is present -sudo mkdir -p /etc/ci -sudo touch /etc/ci/mirror_info.sh - -# Get Python Executable -PYTHON_EXEC=$(command -v python3 || command -v python) - -# Create a virtual env -"${PYTHON_EXEC}" -m virtualenv --system-site-packages "${HOME}/test-python" - -# Activate a virtual env -PS1="[\u@\h \W]\$" source "${HOME}/test-python/bin/activate" - -# Run bindep -"${HOME}/test-python/bin/pip" install "pip>=19.1.1" setuptools bindep --upgrade -"${PROJECT_DIR}/scripts/bindep-install" - -# Install local requirements -if [[ -d "${HOME}/.cache/pip/wheels" ]]; then - rm -rf "${HOME}/.cache/pip/wheels" -fi -"${HOME}/test-python/bin/pip" install \ - -r "${PROJECT_DIR}/requirements.txt" \ - -r "${PROJECT_DIR}/test-requirements.txt" \ - -r "${PROJECT_DIR}/molecule-requirements.txt" - -# Run local test -source "${PROJECT_DIR}/ansible-test-env.rc" -export ANSIBLE_ROLES_PATH="${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles" -ansible-galaxy install -fr "${PROJECT_DIR}/ansible-collections-requirements.yml" -ansible-playbook -i "${PROJECT_DIR}/tests/hosts.ini" \ - -e "tripleo_src=$(realpath --relative-to="${HOME}" "${PROJECT_DIR}")" \ - -e "tripleo_validations_role_name=${ROLE_NAME}" \ - -e "tripleo_job_ansible_args='${TRIPLEO_JOB_ANSIBLE_ARGS}'" \ - -e "ansible_user=${USER}" \ - -e "ansible_user_dir=${HOME}" \ - "${PROJECT_DIR}/tests/prepare-test-host.yml" \ - "${PROJECT_DIR}/ci/playbooks/run-local.yml" \ - -v diff --git a/scripts/tripleo-ansible-inventory b/scripts/tripleo-ansible-inventory deleted file mode 100755 index 017e2736a..000000000 --- a/scripts/tripleo-ansible-inventory +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import sys - - -def main(): - msg = ("DEPRECATED - This script has been deprecated." 
- "Use the inventory file tripleo-ansible-inventory.yaml located " - "under ~/tripleo-deploy/ for a Standalone/Undercloud " - "deployment or overcloud-deploy/ for the Overcloud. \n" - "To generate an inventory file, use the playbook in " - "tripleo-ansible: cli-config-download.yaml") - print(msg) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index cc30e6cb0..000000000 --- a/setup.cfg +++ /dev/null @@ -1,40 +0,0 @@ -[metadata] -name = tripleo-validations -summary = A collection of Ansible playbooks to detect and report potential issues during TripleO deployments -long_description = file: README.rst -long_description_content_type = text/x-rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/tripleo-validations/latest/ -classifier = - Development Status :: 5 - Production/Stable - Environment :: OpenStack - Framework :: Ansible - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - -[options] -python_requires = >=3.8 - -[files] -packages = - tripleo_validations - -scripts = - scripts/tripleo-ansible-inventory - -data_files = - share/ansible = hosts.sample - share/ansible = groups.yaml - share/ansible/roles = roles/* - share/ansible/validation-playbooks = playbooks/* - share/ansible/callback_plugins = callback_plugins/* - share/ansible/lookup_plugins = lookup_plugins/* - share/ansible/library = library/* diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c35..000000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index a4cccb8b3..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -ansible-core<2.12.0 # GPLv3+ -coverage!=4.4,>=4.0 # Apache-2.0 -python-subunit>=1.0.0 # Apache-2.0/BSD -sphinx>=2.0.0,!=2.1.0 # BSD -oslotest>=3.2.0 # Apache-2.0 -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=2.2.0 # MIT -reno>=3.1.0 # Apache-2.0 -netaddr>=0.7.18 # BSD -pre-commit>=2.3.0 # MIT -stestr>=3.0.1 # Apache-2.0 -mock>=4.0.3 # BSD -requests-mock>=1.8.0 # Apache-2.0 diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index b88a113d9..000000000 --- a/tests/conftest.py +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def pytest_addoption(parser): - parser.addoption('--scenario', help='scenario setting') - parser.addoption( - '--ansible-args', help='ansible args passed into test runner.') diff --git a/tests/hosts.ini b/tests/hosts.ini deleted file mode 100644 index 28280524b..000000000 --- a/tests/hosts.ini +++ /dev/null @@ -1 +0,0 @@ -test ansible_connection=local ansible_host=localhost diff --git a/tests/prepare-test-host.yml b/tests/prepare-test-host.yml deleted file mode 100644 index e017ccbf8..000000000 --- a/tests/prepare-test-host.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -- name: pre prepare - hosts: all - gather_facts: false - tasks: - - name: set basic user fact - fail: - msg: >- - The variable `ansible_user` set this option and try again. On the - CLI this can be defined with "-e ansible_user=${USER}" - when: - - ansible_user is undefined - - - name: set basic home fact - fail: - msg: >- - The variable `ansible_user_dir` set this option and try again. 
On - the CLI this can be defined with "-e ansible_user_dir=${HOME}" - when: - - ansible_user_dir is undefined - - - name: Ensure the user has a .ssh directory - file: - path: "{{ ansible_user_dir }}/.ssh" - state: directory - owner: "{{ ansible_user }}" - group: "{{ ansible_user }}" - mode: "0700" - - - name: Create ssh key pair - openssh_keypair: - path: "{{ ansible_user_dir }}/.ssh/id_rsa" - size: 2048 - - - name: Slurp pub key - slurp: - src: "{{ ansible_user_dir ~ '/.ssh/id_rsa.pub' }}" - register: pub_key - - - name: Ensure can ssh to can connect to localhost - authorized_key: - user: "{{ ansible_user }}" - key: "{{ pub_key['content'] | b64decode }}" - - - name: Get the zuul/zuul-jobs repo - git: - repo: https://opendev.org/zuul/zuul-jobs - dest: "{{ ansible_user_dir }}/zuul-jobs" - version: master - force: true diff --git a/tests/test_molecule.py b/tests/test_molecule.py deleted file mode 100644 index 25322249e..000000000 --- a/tests/test_molecule.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import subprocess - - -def test_molecule(pytestconfig): - cmd = ['python', '-m', 'molecule'] - scenario = pytestconfig.getoption("scenario") - ansible_args = pytestconfig.getoption("ansible_args") - - if ansible_args: - cmd.append('converge') - if scenario: - cmd.extend(['--scenario-name', scenario]) - cmd.append('--') - cmd.extend(ansible_args.split()) - else: - cmd.append('test') - if scenario: - cmd.extend(['--scenario-name', scenario]) - else: - cmd.append('--all') - - try: - assert subprocess.call(cmd) == 0 - finally: - if ansible_args: - cmd = ['python', '-m', 'molecule', 'destroy'] - if scenario: - cmd.extend(['--scenario-name', scenario]) - subprocess.call(cmd) diff --git a/tools/releasenotes_tox.sh b/tools/releasenotes_tox.sh deleted file mode 100755 index c37cf598e..000000000 --- a/tools/releasenotes_tox.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -rm -rf releasenotes/build - -sphinx-build -a -E -W \ - -d releasenotes/build/doctrees \ - -b html \ - releasenotes/source releasenotes/build/html -BUILD_RESULT=$? - -UNCOMMITTED_NOTES=$(git status --porcelain | \ - awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}') - -if [ "${UNCOMMITTED_NOTES}" ]; then - cat <' % sys.argv[0]) - sys.exit(1) - - -def validate_library_file(file_path): - with open(file_path, encoding='utf-8') as f: - file_content = f.read() - if 'DOCUMENTATION = ' not in file_content \ - or 'EXAMPLES = ' not in file_content: - if quiet < 1: - print('Missing ansible documentation in %s' % file_path) - return 1 - return 0 - - -def parse_args(): - p = argparse.ArgumentParser() - - p.add_argument('--quiet', '-q', - action='count', - # TODO(akrivoka): Python3 sets this default to None instead - # of 0. Remove this when this bug is fixed in Python3. 
- default=0, - help='output warnings and errors (-q) or only errors (-qq)') - - p.add_argument('path_args', - nargs='*', - default=['.']) - - return p.parse_args() - - -args = parse_args() -path_args = args.path_args -quiet = args.quiet -exit_val = 0 -failed_files = [] - -for base_path in path_args: - if os.path.isdir(base_path): - for subdir, dirs, files in os.walk(base_path): - if '.tox' in dirs: - dirs.remove('.tox') - if '.git' in dirs: - dirs.remove('.git') - for f in files: - if f.endswith('.py') \ - and not f == '__init__.py' \ - and subdir in [os.path.join(base_path, - 'validations', - 'library'), - os.path.join(base_path, - 'library')]: - file_path = os.path.join(subdir, f) - if quiet < 1: - print('Validating %s' % file_path) - failed = validate_library_file(file_path) - if failed: - failed_files.append(file_path) - exit_val |= failed - else: - print('Unexpected argument %s' % base_path) - exit_usage() - -if failed_files: - print('Validation failed on:') - for f in failed_files: - print(f) -else: - print('Validation successful!') -sys.exit(exit_val) diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 6d707eaff..000000000 --- a/tox.ini +++ /dev/null @@ -1,96 +0,0 @@ -[tox] -minversion = 4.0.0 -envlist = linters,docs,py -skipsdist = True - -# Automatic envs (pyXX) will only use the python version appropriate to that -# env and ignore basepython inherited from [testenv] if we set -# ignore_basepython_conflict. -ignore_basepython_conflict = True - -[testenv] -basepython = python3.10 -usedevelop = True -passenv = TERM -setenv = - ANSIBLE_CALLBACK_PLUGINS={toxinidir}/callback_plugins - ANSIBLE_LOOKUP_PLUGINS={toxinidir}/lookup_plugins - ANSIBLE_LIBRARY={toxinidir}/library - ANSIBLE_ROLES_PATH={toxinidir}/roles - ANSIBLE_NOCOWS=1 - ANSIBLE_RETRY_FILES_ENABLED=0 - ANSIBLE_STDOUT_CALLBACK=debug - ANSIBLE_LOG_PATH={envlogdir}/ansible-execution.log - # pip: Avoid 2020-01-01 warnings: https://github.com/pypa/pip/issues/6207 - # paramiko CryptographyDeprecationWarning: https://github.com/ansible/ansible/issues/52598 - PYTHONWARNINGS=ignore:DEPRECATION::pip._internal.cli.base_command,ignore::UserWarning - PIP_DISABLE_PIP_VERSION_CHECK=1 - PIP_+ VIRTUAL_ENV={envdir} - LANG=en_US.UTF-8 - LANGUAGE=en_US:en - LC_ALL=en_US.UTF-8 - HOME={envdir} -commands = - ansible-galaxy install -fr {toxinidir}/ansible-collections-requirements.yml - stestr run --slowest --color {posargs} -deps = - -c {env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r {toxinidir}/requirements.txt - -r {toxinidir}/test-requirements.txt -whitelist_externals = - bash - ansible-galaxy - -[testenv:bindep] -# Do not install any requirements. We want this to be fast and work even if -# system dependencies are missing, since it's used to tell you what system -# dependencies are missing! This also means that bindep must be installed -# separately, outside of the requirements files. -deps = bindep -commands = bindep test - -[testenv:debug] -commands = oslo_debug_helper {posargs} - -[testenv:linters] -deps = - -r {toxinidir}/requirements.txt - -r {toxinidir}/test-requirements.txt - -c {env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -commands = - ansible-galaxy install -fr {toxinidir}/ansible-collections-requirements.yml - python '{toxinidir}/tools/validate-files.py' . 
- python -m pre_commit run -a - -[testenv:releasenotes] -deps = -r{toxinidir}/doc/requirements.txt -commands = - sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html - -[testenv:cover] -setenv = - PYTHON=coverage run --parallel-mode - HOME={envdir} -commands = - coverage erase - stestr run {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report - -[testenv:docs] -deps = - -r {toxinidir}/doc/requirements.txt - -r {toxinidir}/molecule-requirements.txt - -c {env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r {toxinidir}/requirements.txt - -r {toxinidir}/test-requirements.txt -commands= - sphinx-build -a -E -W -d doc/build/doctrees --keep-going -b html doc/source doc/build/html -T - doc8 doc - -[doc8] -# Settings for doc8: -extensions = .rst -ignore = D001 diff --git a/tripleo_validations/__init__.py b/tripleo_validations/__init__.py deleted file mode 100644 index cd002bafe..000000000 --- a/tripleo_validations/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - - -__version__ = pbr.version.VersionInfo('tripleo-validations') diff --git a/tripleo_validations/tests/__init__.py b/tripleo_validations/tests/__init__.py deleted file mode 100644 index c47397cfb..000000000 --- a/tripleo_validations/tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""tripleo_validations tests. -""" diff --git a/tripleo_validations/tests/base.py b/tripleo_validations/tests/base.py deleted file mode 100644 index 1c30cdb56..000000000 --- a/tripleo_validations/tests/base.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslotest import base - - -class TestCase(base.BaseTestCase): - - """Test case base class for all unit tests.""" diff --git a/tripleo_validations/tests/fakes.py b/tripleo_validations/tests/fakes.py deleted file mode 100644 index 9e3d9a5b2..000000000 --- a/tripleo_validations/tests/fakes.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""This sub module provides various data structures and functions -useful for automated testing. Additional helpers should be placed here -if at all possible. This should help with reduction of redundancy and -isolation of potentially problematic testing code. -""" - -import sys -try: - from unittest import mock -except ImportError: - import mock - -sys.modules['uuidutils'] = mock.MagicMock() -sys.modules['xml.etree'] = mock.MagicMock() -sys.modules['glanceclient.exc'] = mock.MagicMock() -sys.modules['ironic_inspector_client'] = mock.MagicMock() -sys.modules['novaclient.exceptions'] = mock.MagicMock() - -MOCK_CPUS_RET_VALUE = ( - [0, 1], - [{'numa_node': 0, 'thread_siblings': [0, 2], 'cpu': 0}, - {'numa_node': 0, 'thread_siblings': [4, 6], 'cpu': 4}, - {'numa_node': 0, 'thread_siblings': [8, 10], 'cpu': 8}, - {'numa_node': 1, 'thread_siblings': [1, 3], 'cpu': 1}, - {'numa_node': 1, 'thread_siblings': [5, 7], 'cpu': 5}, - {'numa_node': 1, 'thread_siblings': [9, 11], 'cpu': 9} - ]) - -MOCK_ROLES_INFO = [ - { - 'name': 'foo', - 'flavor': 'bar', - 'count': 9999}] - -MOCK_FLAVORS = { - 'ok': { - 'bar': { - 'keys': { - 'resources:fizz': 'fizz', - 'resources:buzz': 'buzz', - 'resources:DISK_GB': 1, - 'MEMORY_MB': 10, - 'VCPU': 100 - } - } - }, - 'fail_NOVCPU': { - 'bar': { - 'keys': { - 'resources:fizz': 'fizz', - 'resources:buzz': 'buzz', - 'resources:DISK_GB': 1, - 'MEMORY_MB': 10 - } - } - } -} - -MOCK_FLAVORS_CHECK_EXPECTED = { - 'ok': ( - {'bar': ( - ({'keys': { - 'resources:fizz': 'fizz', - 'resources:buzz': 'buzz', - 'resources:DISK_GB': 1, - 'MEMORY_MB': 10, - 'VCPU': 100 - }}, - 9999) - )}, - [], - [ - 'Flavor bar does not have a custom resource class associated with it', - 'Flavor bar has to have scheduling based on standard properties disabled by setting resources:VCPU=0 resources:MEMORY_MB=0 resources:DISK_GB=0 in the flavor property' - ] - ), - 'fail_NOVCPU': ( - {'bar': ( - ({'keys': { - 'resources:fizz': 'fizz', - 'resources:buzz': 'buzz', - 'resources:DISK_GB': 1, - 'MEMORY_MB': 10, - }}, - 9999) - )}, - [], - [ - 'Flavor bar does not have a custom resource class associated with it', - 'Flavor bar has to have scheduling based on standard properties disabled by setting resources:VCPU=0 resources:MEMORY_MB=0 resources:DISK_GB=0 in the flavor property' - ] - ) -} - - -MOCK_NODES = [ - { - 'uuid': 'foo123', - 'provision_state': 'active', - 'properties': { - 'capabilities': { - 'foocap': 'foocapdesc' - } - } - }, - { - 'uuid': 'bar123', - 'provision_state': 'active', - 'properties': { - 'capabilities': { - 'barcap': 'bcapdesc' - } - } - } -] - - -MOCK_PROFILE_FLAVORS = { - 'fooflavor': (MOCK_FLAVORS['ok'], 1), -} - - -UUIDs = [ - '13c319a4-7704-4b44-bb2e-501951879f96', - '8201bb8e-be20-4a97-bcf4-91bcf7eeff86', - 'cc04effd-6bac-45ba-a0dc-83e6cd2c589d', - 'cbb12140-a088-4646-a873-73eeb055ccc2' -] - - -def node_helper(node_id, kernel_id, ram_id, arch=None, platform=None): - - node = { - "uuid": node_id, - "driver_info": { - "deploy_kernel": kernel_id, - "deploy_ramdisk": ram_id, - }, - "properties": {}, - "extra": {}, - } - if arch: - node["properties"]["cpu_arch"] = arch - 
if platform: - node["extra"]["tripleo_platform"] = platform - return node diff --git a/tripleo_validations/tests/library/__init__.py b/tripleo_validations/tests/library/__init__.py deleted file mode 100644 index ba4b667e1..000000000 --- a/tripleo_validations/tests/library/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""library tests -""" diff --git a/tripleo_validations/tests/library/test_ceph_pools_pg_protection.py b/tripleo_validations/tests/library/test_ceph_pools_pg_protection.py deleted file mode 100644 index 050c2ba47..000000000 --- a/tripleo_validations/tests/library/test_ceph_pools_pg_protection.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_ceph_pools_pg_protection ------------------------------ - -Tests for `ceph_pools_pg_protection` module. -""" -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.ceph_pools_pg_protection as validation - - -class TestCephPoolsPgProtection(base.TestCase): - - def test_module_init(self): - module_attributes = dir(validation) - - required_attributes = [ - 'DOCUMENTATION', - 'EXAMPLES' - ] - - self.assertTrue(set(required_attributes).issubset(module_attributes)) - - @mock.patch( - 'library.ceph_pools_pg_protection.run_module') - def test_module_main(self, mock_run_module): - - validation.main() - - mock_run_module.assert_called_once() - - @mock.patch( - 'library.ceph_pools_pg_protection.simulate_pool_creation', - return_value={'failed': False}) - @mock.patch( - 'library.ceph_pools_pg_protection.yaml_safe_load', - return_value={'options': 'bar'}) - @mock.patch( - 'library.ceph_pools_pg_protection.AnsibleModule') - def test_run_module_sim_success(self, mock_module, - mock_yaml_safe_load, - mock_simulate_pool_creation): - - mock_exit_json = mock.MagicMock() - - mock_module.return_value = mock.MagicMock( - check_mode=False, - exit_json=mock_exit_json) - - validation.run_module() - - mock_yaml_safe_load.assert_called_once() - - mock_module.assert_called_once_with( - argument_spec='bar', - supports_check_mode=False - ) - - mock_exit_json.assert_called_once() - - @mock.patch( - 'library.ceph_pools_pg_protection.simulate_pool_creation', - return_value={'failed': True, 'msg': 'fizz'}) - @mock.patch( - 'library.ceph_pools_pg_protection.yaml_safe_load', - return_value={'options': 'bar'}) - @mock.patch( - 'library.ceph_pools_pg_protection.AnsibleModule') - def test_run_module_sim_failed(self, mock_module, - mock_yaml_safe_load, - mock_simulate_pool_creation): - - mock_exit_json = mock.MagicMock() - - mock_module.return_value = mock.MagicMock( - check_mode=False, - exit_json=mock_exit_json) - - validation.run_module() - - mock_yaml_safe_load.assert_called_once() - - mock_module.assert_called_once_with( - argument_spec='bar', - supports_check_mode=False - ) - - mock_exit_json.assert_called_once() - - def test_check_pg_num_enough_osds(self): - '''Test adding one more pool to 
the existing pools with 36 OSDs''' - num_osds = 36 - pools = {'images': {'pg_num': 128, 'size': 3}, - 'vms': {'pg_num': 256, 'size': 3}, - 'volumes': {'pg_num': 512, 'size': 3}} - msg = validation.check_pg_num('backups', 128, 3, num_osds, 200, pools) - self.assertEqual(msg, "") - - def test_check_pg_num_not_enough_osds(self): - '''Test adding one more pool to the existing pools with 1 OSD''' - num_osds = 1 - error = "Cannot add pool: backups pg_num 128 size 3 " - error += "would mean 2688 total pgs, which exceeds max 200 " - error += "(mon_max_pg_per_osd 200 * num_in_osds 1)" - pools = {'images': {'pg_num': 128, 'size': 3}, - 'vms': {'pg_num': 256, 'size': 3}, - 'volumes': {'pg_num': 512, 'size': 3}} - msg = validation.check_pg_num('backups', 128, 3, num_osds, 200, pools) - self.assertEqual(msg, error) - - def test_simulate_pool_creation_enough_osds(self): - '''Test creating 3 pools with differing PGs with 36 OSDs''' - num_osds = 36 - pools = [{'name': 'images', 'pg_num': 128, 'size': 3}, - {'name': 'vms', 'pg_num': 256, 'size': 3}, - {'name': 'volumes', 'pg_num': 512, 'size': 3}] - sim = validation.simulate_pool_creation(num_osds, pools) - self.assertEqual(sim['failed'], False) - self.assertEqual(sim['msg'], "") - - def test_simulate_pool_creation_not_enough_osds(self): - '''Test creating 3 pools with differing PGs with 1 OSD''' - num_osds = 1 - pools = [{'name': 'images', 'pg_num': 128, 'size': 3}, - {'name': 'vms', 'pg_num': 256, 'size': 3}, - {'name': 'volumes', 'pg_num': 512, 'size': 3}] - sim = validation.simulate_pool_creation(num_osds, pools) - self.assertEqual(sim['failed'], True) - - error_head = "The following Ceph pools would be created (but no others):\n" - order0 = "{'images': {'size': 3}, 'pg_num': 128}\n" - order1 = "{'images': {'pg_num': 128, 'size': 3}}\n" - error_tail = "Pool creation would then fail with the following from Ceph:\n" - error_tail += "Cannot add pool: vms pg_num 256 size 3 would mean 384 total pgs, " - error_tail += "which exceeds max 200 (mon_max_pg_per_osd 200 * num_in_osds 1)\n" - error_tail += "Please use https://ceph.io/pgcalc and then update the " - error_tail += "CephPools parameter" - - self.assertTrue( - (sim['msg'] == error_head + order0 + error_tail) - or (sim['msg'] == error_head + order1 + error_tail)) diff --git a/tripleo_validations/tests/library/test_check_cpus_aligned_with_dpdk_nics.py b/tripleo_validations/tests/library/test_check_cpus_aligned_with_dpdk_nics.py deleted file mode 100644 index 9409c4057..000000000 --- a/tripleo_validations/tests/library/test_check_cpus_aligned_with_dpdk_nics.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
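[Editor's note] The ceph_pools_pg_protection tests above all reduce to one inequality: pool creation is refused once the cluster-wide placement group total (the sum of pg_num * size over the pools) exceeds mon_max_pg_per_osd multiplied by the number of in-service OSDs. The snippet below is only an illustrative sketch of that arithmetic with a made-up helper name, not the deleted library/ceph_pools_pg_protection.py implementation:

def pg_budget_exceeded(pools, num_osds, mon_max_pg_per_osd=200):
    """Sum pg_num * size over the pools and compare against the PG cap."""
    total_pgs = sum(pool['pg_num'] * pool['size'] for pool in pools)
    return total_pgs > mon_max_pg_per_osd * num_osds

# The three pre-existing pools used in the tests account for
# 128*3 + 256*3 + 512*3 = 2688 placement groups.
existing = [{'pg_num': 128, 'size': 3},
            {'pg_num': 256, 'size': 3},
            {'pg_num': 512, 'size': 3}]
assert pg_budget_exceeded(existing, num_osds=1)       # cap is 200 * 1 = 200, so adding 'backups' must fail
assert not pg_budget_exceeded(existing, num_osds=36)  # cap is 200 * 36 = 7200, leaving room for another pool

This is the arithmetic behind the expected error message "2688 total pgs, which exceeds max 200 (mon_max_pg_per_osd 200 * num_in_osds 1)" asserted in test_check_pg_num_not_enough_osds.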
- -from unittest.mock import MagicMock -from unittest.mock import patch - -import library.check_cpus_aligned_with_dpdk_nics as validation -from tripleo_validations.tests import base - - -class TestCpusAlignedWithDpdkNicsCheck(base.TestCase): - - def setUp(self): - super(TestCpusAlignedWithDpdkNicsCheck, self).setUp() - self.module = MagicMock() - - @patch('library.check_cpus_aligned_with_dpdk_nics.' - 'get_nodes_cpus_info') - def test_validate_valid_cpus_aligned_with_dpdk_nics_numa(self, mock_nodes_cpus): - mock_nodes_cpus.return_value = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]} - dpdk_nics_numa_info = [{"numa_node": 0, "mac": "mac1", "pci": "pci1"}] - cpus = "2,3" - numa_node = 0 - validation.check_cpus_aligned_with_dpdk_nics(self.module, cpus, - numa_node, dpdk_nics_numa_info) - self.module.exit_json.assert_called_with( - changed=False, - message="CPU's configured correctly: 2,3", - valid_cpus=True) - - @patch('library.check_cpus_aligned_with_dpdk_nics.' - 'get_nodes_cpus_info') - def test_validate_invalid_cpus_aligned_with_dpdk_nics_numa(self, mock_nodes_cpus): - mock_nodes_cpus.return_value = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]} - dpdk_nics_numa_info = [{"numa_node": 0, "mac": "mac1", "pci": "pci1"}] - cpus = "2,3,4,5" - numa_node = 0 - validation.check_cpus_aligned_with_dpdk_nics(self.module, cpus, - numa_node, dpdk_nics_numa_info) - self.module.fail_json.assert_called_with( - msg="CPU's are not aligned with DPDK NIC's NUMA, Invalid CPU's: 4,5") - - def test_valid_get_nodes_cpus_info(self): - lines = "# format\n0,0\n 0,2\n1,1\n1,3" - self.module.run_command.return_value = [0, lines, ""] - expected_value = {0: [0, 2], 1: [1, 3]} - result = validation.get_nodes_cpus_info(self.module) - self.assertEqual(result, expected_value) - - def test_invalid_missing_val_get_nodes_cpus_info(self): - lines = "# format\n,0\n0,2\n1,1\n1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cpus_info(self.module) - self.module.fail_json.assert_called_with( - msg="Unable to determine NUMA cpus") diff --git a/tripleo_validations/tests/library/test_check_flavors.py b/tripleo_validations/tests/library/test_check_flavors.py deleted file mode 100644 index e6f97a1a7..000000000 --- a/tripleo_validations/tests/library/test_check_flavors.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_check_flavors --------------- - -Tests for the `check_flavors` module. 
-""" -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes -from library import check_flavors - - -class TestCheckFlavors(base.TestCase): - - def setUp(self): - self.tested_module = check_flavors - return super().setUp() - - def test_module_init(self): - - expepect_attrs = set( - [ - 'DOCUMENTATION', - 'EXAMPLES', - 'main']) - - actual_attrs = set(dir(self.tested_module)) - - self.assertTrue(expepect_attrs.issubset(actual_attrs)) - - def test_validate_roles_and_flavors(self): - expected_values = fakes.MOCK_FLAVORS_CHECK_EXPECTED['ok'] - - return_value = self.tested_module.validate_roles_and_flavors( - fakes.MOCK_ROLES_INFO, - fakes.MOCK_FLAVORS['ok']) - self.assertEqual(expected_values, return_value) - - def test_validate_roles_and_flavors_nocpu(self): - """Tests situation when the 'VCPU' key doesn't have associated value. - 'DISK_GB' and 'MEMORY_MB' behave the same way, - so we don't need to test them. For now at least. - """ - expected_values = fakes.MOCK_FLAVORS_CHECK_EXPECTED['fail_NOVCPU'] - - return_value = self.tested_module.validate_roles_and_flavors( - fakes.MOCK_ROLES_INFO, - fakes.MOCK_FLAVORS['fail_NOVCPU']) - self.assertEqual(expected_values, return_value) diff --git a/tripleo_validations/tests/library/test_check_ironic_boot_config.py b/tripleo_validations/tests/library/test_check_ironic_boot_config.py deleted file mode 100644 index 2afb37214..000000000 --- a/tripleo_validations/tests/library/test_check_ironic_boot_config.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests of the check_ironic_boot_config submodule. -The initial try/except block is a safeguard against Python version -incompatibility and general confusion it can cause. -But worry not, it's barely used these days. 
-""" -try: - from unittest import mock -except ImportError: - import mock - -import tripleo_validations.tests.base as base -import tripleo_validations.tests.fakes as fakes -import library.check_ironic_boot_config as validation - - -class TestCheckIronicBootConfigModule(base.TestCase): - - def setUp(self): - super(TestCheckIronicBootConfigModule, self).setUp() - self.module = validation - - def test_module_init(self): - module_attributes = dir(self.module) - - required_attributes = [ - 'DOCUMENTATION', - 'EXAMPLES' - ] - - self.assertTrue(set(required_attributes).issubset(module_attributes)) - - @mock.patch( - 'library.check_ironic_boot_config.yaml_safe_load', - return_value={'options': 'fizz'}) - @mock.patch( - 'library.check_ironic_boot_config.validate_boot_config', - return_value=None) - @mock.patch('library.check_ironic_boot_config.AnsibleModule') - def test_module_main_success(self, mock_module, - mock_validate_boot_config, - mock_yaml_safe_load): - - module_calls = [ - mock.call(argument_spec='fizz'), - mock.call().params.get('nodes'), - mock.call().exit_json() - ] - - self.module.main() - - mock_validate_boot_config.assert_called_once() - mock_module.assert_has_calls(module_calls) - - @mock.patch( - 'library.check_ironic_boot_config.yaml_safe_load', - return_value={'options': 'fizz'}) - @mock.patch( - 'library.check_ironic_boot_config.validate_boot_config', - return_value=['foo', 'bar']) - @mock.patch('library.check_ironic_boot_config.AnsibleModule') - def test_module_main_fail(self, mock_module, - mock_validate_boot_config, - mock_yaml_safe_load): - - module_calls = [ - mock.call(argument_spec='fizz'), - mock.call().params.get('nodes'), - mock.call().fail_json('foobar') - ] - - self.module.main() - - mock_validate_boot_config.assert_called_once() - mock_module.assert_has_calls(module_calls) - - def test_too_diverse(self): - """Test if the function returns string without raising exception. - """ - - return_value = self.module._too_diverse( - 'foo', - [ - 'bar', - 'fizz', - 'buzz' - ], - '000') - - self.assertIsInstance(return_value, str) - - def test_invalid_image_entry(self): - """Test if the function returns string without raising exception. - """ - - return_value = self.module._invalid_image_entry( - 'foo', - [ - 'bar', - 'fizz', - 'buzz' - ], - '000') - - self.assertIsInstance(return_value, str) - - -class TestValidateBootConfig(base.TestCase): - """Tests for validate_boot_config function of the check_ironic_boot_config - submodule. Tests assert on returned value and calls made. - """ - - @mock.patch('library.check_ironic_boot_config._too_diverse') - @mock.patch('library.check_ironic_boot_config._invalid_image_entry') - def test_validate_boot_config_success(self, mock_image_entry_error, mock_diverse_error): - """As we are trying to verify functionality for multiple subsets - of various nodes, this test is slightly more complex. - List of nodes is sliced and individual slices are fed - to the validate_boot_config function we are testing. - However, this approach still doesn't test all the possibilities. - For example the order of original list is maintained, and number - of nodes is very, very limited. - Further improvement will require consultation. 
- """ - nodes = [ - fakes.node_helper(1, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(2, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(3, 'file://k.img', 'file://r.img', 'ppc64le', 'p9'), - fakes.node_helper(4, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le') - ] - - for node_slice in [nodes[::index] for index in range(1, len(nodes))]: - errors = validation.validate_boot_config(node_slice) - - mock_diverse_error.assert_not_called() - mock_image_entry_error.assert_not_called() - - self.assertIsInstance(errors, list) - self.assertEqual(len(errors), 0) - - @mock.patch('library.check_ironic_boot_config._too_diverse') - def test_validate_boot_config_fail_too_diverse_uuid(self, mock_error): - nodes = [ - fakes.node_helper(1, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(2, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(3, 'file://k.img', 'file://r.img', 'ppc64le', 'p9'), - fakes.node_helper(4, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le'), - fakes.node_helper(5, fakes.UUIDs[2], fakes.UUIDs[3], 'ppc64le', 'p9'), - ] - - validation.validate_boot_config(nodes) - mock_error.assert_called() - - @mock.patch('library.check_ironic_boot_config._too_diverse') - def test_validate_boot_config_fail_too_diverse_path(self, mock_error): - nodes = [ - fakes.node_helper(1, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(2, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(3, 'file://k.img', 'file://r.img', 'ppc64le', 'p9'), - fakes.node_helper(4, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le'), - fakes.node_helper(5, 'file://k2.img', 'file://r2.img', 'ppc64le', 'p9') - ] - - calls = [ - mock.call('file-based', ('kernel', 'ppc64le', 'p9'), {'file://k.img', 'file://k2.img'}), - mock.call('file-based', ('ramdisk', 'ppc64le', 'p9'), {'file://r2.img', 'file://r.img'}) - ] - - validation.validate_boot_config(nodes) - mock_error.assert_has_calls(calls) - - @mock.patch('library.check_ironic_boot_config._invalid_image_entry') - def test_validate_boot_config_fail_invalid_image_entry(self, mock_error): - nodes = [ - fakes.node_helper(1, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(2, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le', 'p9'), - fakes.node_helper(3, 'file://k.img', 'file://r.img', 'ppc64le', 'p9'), - fakes.node_helper(4, fakes.UUIDs[0], fakes.UUIDs[1], 'ppc64le'), - fakes.node_helper(5, 'not_uuid_or_path', 'not_uuid_or_path') - ] - - calls = [ - mock.call('kernel', 'not_uuid_or_path', 5), - mock.call('ramdisk', 'not_uuid_or_path', 5) - ] - - validation.validate_boot_config(nodes) - - mock_error.assert_has_calls(calls) diff --git a/tripleo_validations/tests/library/test_check_other_processes_pmd_usage.py b/tripleo_validations/tests/library/test_check_other_processes_pmd_usage.py deleted file mode 100644 index c69752c59..000000000 --- a/tripleo_validations/tests/library/test_check_other_processes_pmd_usage.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest.mock import MagicMock -from unittest.mock import patch - -import library.check_other_processes_pmd_usage as validation -from tripleo_validations.tests import base - - -class TestOtherProcessesPmdusageCheck(base.TestCase): - - def setUp(self): - super(TestOtherProcessesPmdusageCheck, self).setUp() - self.module = MagicMock() - - @patch('library.check_other_processes_pmd_usage.' - 'check_current_process_pmd_usage') - def test_validate_no_other_processes_pmd_usage(self, mock_number_list): - mock_number_list.side_effect = [[], []] - pmd_cpus = ["2", "3"] - current_processes = "21's 4-7\n22's 4-9\n24's 4-5" - pmd_processes = "22's 4-9" - self.module.run_command.side_effect = [[0, current_processes, ""], - [0, pmd_processes, ""]] - exclude_processes_pid = ["24"] - validation.check_other_processes_pmd_usage(self.module, pmd_cpus, - exclude_processes_pid) - self.module.exit_json.assert_called_with( - messages=[], - pmd_interrupts=False) - - @patch('library.check_other_processes_pmd_usage.' - 'check_current_process_pmd_usage') - def test_validate_with_no_current_processes(self, mock_number_list): - mock_number_list.side_effect = [[], []] - pmd_cpus = ["2", "3"] - current_processes = "" - pmd_processes = "22's 4-9" - self.module.run_command.side_effect = [None, - [0, pmd_processes, ""]] - exclude_processes_pid = ["24"] - validation.check_other_processes_pmd_usage(self.module, pmd_cpus, - exclude_processes_pid) - self.module.fail_json.assert_called_with( - msg="Unable to determine current processes") - - @patch('library.check_other_processes_pmd_usage.' - 'check_current_process_pmd_usage') - def test_validate_with_no_pmd_processes(self, mock_number_list): - mock_number_list.return_value = [] - pmd_cpus = ["2", "3"] - current_processes = "21's 2-5\n22's 4-9\n24's 4-5" - pmd_processes = "" - self.module.run_command.side_effect = [[0, current_processes, ""], - None] - exclude_processes_pid = ["24"] - validation.check_other_processes_pmd_usage(self.module, pmd_cpus, - exclude_processes_pid) - self.module.fail_json.assert_called_with( - msg="Unable to determine PMD threads processes") - - @patch('library.check_other_processes_pmd_usage.' 
- 'check_current_process_pmd_usage') - def test_validate_other_processes_pmd_usage(self, mock_number_list): - mock_number_list.side_effect = [["pmd threads: 2,3 used in process: 21"], []] - pmd_cpus = ["2", "3"] - current_processes = "21's 2-5\n22's 4-9\n24's 4-5" - pmd_processes = "22's 4-9" - self.module.run_command.side_effect = [[0, current_processes, ""], - [0, pmd_processes, ""]] - exclude_processes_pid = ["24"] - validation.check_other_processes_pmd_usage(self.module, pmd_cpus, - exclude_processes_pid) - self.module.exit_json.assert_called_with( - messages=["pmd threads: 2,3 used in process: 21"], - pmd_interrupts=True) - - def test_check_current_process_pmd_usage(self): - pmd_cpus = ["2", "3"] - process_id = "21" - range_list = "2-5,8-11" - expected_value = ["pmd threads: 2,3 used in process: 21"] - result = validation.check_current_process_pmd_usage(self.module, pmd_cpus, - process_id, range_list) - self.assertEqual(result, expected_value) - - def test_check_current_process_pmd_usage_with_exclude_value(self): - pmd_cpus = ["2", "3"] - process_id = "21" - range_list = "2-5,8-11,^8" - expected_value = ["pmd threads: 2,3 used in process: 21"] - result = validation.check_current_process_pmd_usage(self.module, pmd_cpus, - process_id, range_list) - self.assertEqual(result, expected_value) - - def test_check_current_process_pmd_usage_with_invalid_range(self): - pmd_cpus = ["2", "3"] - process_id = "21" - range_list = "2-5,-" - result = validation.check_current_process_pmd_usage(self.module, pmd_cpus, - process_id, range_list) - self.module.fail_json.assert_called_with( - msg="Invalid number in input param 'range_list': invalid literal for int() with base 10: ''") diff --git a/tripleo_validations/tests/library/test_convert_range_to_numbers_list.py b/tripleo_validations/tests/library/test_convert_range_to_numbers_list.py deleted file mode 100644 index 0027412e1..000000000 --- a/tripleo_validations/tests/library/test_convert_range_to_numbers_list.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
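[Editor's note] The PMD-usage tests above and the convert_range_to_numbers_list tests that follow rely on the same CPU range notation: comma-separated entries, 'A-B' for an inclusive range, and '^N' to exclude a single CPU. A minimal illustrative parser follows (the helper name is made up; the deleted library modules implement the real parsing and report bad input through fail_json instead of raising):

def expand_cpu_ranges(range_list):
    """Expand a string such as "2-5,8-11,^8" into a sorted list of CPU ids."""
    included, excluded = set(), set()
    for chunk in range_list.split(','):
        chunk = chunk.strip()
        if chunk.startswith('^'):          # '^N' removes a single CPU
            excluded.add(int(chunk[1:]))
        elif '-' in chunk:                 # 'A-B' is an inclusive range
            start, end = (int(part) for part in chunk.split('-'))
            included.update(range(start, end + 1))
        else:                              # bare CPU number
            included.add(int(chunk))
    return sorted(included - excluded)

# Mirrors the expected values asserted in the tests.
assert expand_cpu_ranges("2-5,8-11") == [2, 3, 4, 5, 8, 9, 10, 11]
assert expand_cpu_ranges("2-5,8-11,^8") == [2, 3, 4, 5, 9, 10, 11]

A malformed entry such as "2-5,-" raises ValueError in this sketch, whereas the deleted modules turn the same condition into the "Invalid number in input param 'range_list'" failure message asserted above.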
- -from unittest.mock import MagicMock -from unittest.mock import patch - -import library.convert_range_to_numbers_list as validation -from tripleo_validations.tests import base - - -class TestConvertRangeToNumbersList(base.TestCase): - - def setUp(self): - super(TestConvertRangeToNumbersList, self).setUp() - self.module = MagicMock() - - def test_valid_convert_range_to_numbers_list(self): - range_list = "2-5,8-11" - expected_value = [2, 3, 4, 5, 8, 9, 10, 11] - result = validation.convert_range_to_numbers_list(self.module, range_list) - self.assertEqual(result, expected_value) - - def test_valid_convert_range_to_numbers_list_with_exclude_value(self): - range_list = "2-5,8-11,^8" - expected_value = [2, 3, 4, 5, 9, 10, 11] - result = validation.convert_range_to_numbers_list(self.module, range_list) - self.assertEqual(result, expected_value) - - def test_invalid_convert_range_to_numbers_list(self): - range_list = "2-5,-" - validation.convert_range_to_numbers_list(self.module, range_list) - self.module.fail_json.assert_called_with( - msg="Invalid number in input param 'range_list': invalid literal for int() with base 10: ''") diff --git a/tripleo_validations/tests/library/test_get_dpdk_nics_numa_info.py b/tripleo_validations/tests/library/test_get_dpdk_nics_numa_info.py deleted file mode 100644 index 8566651bc..000000000 --- a/tripleo_validations/tests/library/test_get_dpdk_nics_numa_info.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest.mock import MagicMock -from unittest.mock import patch - -import library.get_dpdk_nics_numa_info as validation -from tripleo_validations.tests import base - - -class TestGetDpdkNicsNumaInfo(base.TestCase): - - def setUp(self): - super(TestGetDpdkNicsNumaInfo, self).setUp() - self.module = MagicMock() - - @patch('library.get_dpdk_nics_numa_info.' - 'get_dpdk_nics_info') - def test_get_dpdk_nics_numa_info(self, mock_dpdk_nics_info): - dpdk_nics_numa_info = [{"numa_node": 0, "mac": "mac1", "pci": "pci1"}] - mock_dpdk_nics_info.return_value = dpdk_nics_numa_info - dpdk_mapping_file = "/var/lib/os-net-config/dpdk_mapping.yaml" - validation.get_dpdk_nics_numa_info(self.module, dpdk_mapping_file) - self.module.exit_json.assert_called_with( - changed=False, - message="DPDK NIC's NUMA info", - dpdk_nics_numa_info=dpdk_nics_numa_info) - - @patch('library.get_dpdk_nics_numa_info.' 
- 'get_dpdk_nics_info') - def test_no_dpdk_nics_numa_info(self, mock_dpdk_nics_info): - mock_dpdk_nics_info.return_value = [] - dpdk_mapping_file = "/var/lib/os-net-config/dpdk_mapping.yaml" - validation.get_dpdk_nics_numa_info(self.module, dpdk_mapping_file) - self.module.fail_json.assert_called_with( - msg="Unable to determine DPDK NIC's NUMA info") diff --git a/tripleo_validations/tests/library/test_icmp_ping.py b/tripleo_validations/tests/library/test_icmp_ping.py deleted file mode 100644 index b60f52ffd..000000000 --- a/tripleo_validations/tests/library/test_icmp_ping.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_icmp_ping ----------------------------------- - -Tests for `icmp_ping` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.icmp_ping as validation - - -class TestIcmpPing(base.TestCase): - - def setUp(self): - self.tested_module = validation - return super().setUp() - - def test_module_init(self): - module_attributes = dir(self.tested_module) - - required_attributes = [ - 'DOCUMENTATION', - 'EXAMPLES'] - - self.assertTrue(set(required_attributes).issubset(module_attributes)) - - @mock.patch( - 'library.icmp_ping.yaml_safe_load', - return_value={'options': 'foo'}) - @mock.patch('library.icmp_ping.AnsibleModule') - def test_successful_ping(self, mock_module, mock_yaml_load): - """Test successful ping call with dummy stdout and stderr. - Calls to the code handling the actual network comms are mocked. - """ - ansible_module = mock.MagicMock( - run_command=mock.MagicMock( - return_value=[0, "fizz", "buzz"]), - autospec=True) - - mock_module.return_value = ansible_module - self.tested_module.main() - - ansible_module.exit_json.assert_called_once_with( - changed=False, - failed=False, - msg="fizz") - - @mock.patch( - 'library.icmp_ping.yaml_safe_load', - return_value={'options': 'foo'}) - @mock.patch('library.icmp_ping.AnsibleModule') - def test_success_no_stdout(self, mock_module, mock_yaml_load): - """Test successful ping call that didn't produce and stdout. - Calls to the code handling the actual network comms are mocked. - """ - - ansible_module = mock.MagicMock( - run_command=mock.MagicMock( - return_value=[0, None, "buzz"])) - - mock_module.return_value = ansible_module - self.tested_module.main() - - ansible_module.exit_json.assert_called_once_with( - changed=False, - failed=False, - msg="buzz") - - @mock.patch( - 'library.icmp_ping.yaml_safe_load', - return_value={'options': 'foo'}) - @mock.patch('library.icmp_ping.AnsibleModule') - def test_failure(self, mock_module, mock_yaml_load): - """Test failed ping call with dummy stdout and stderr. - Calls to the code handling the actual network comms are mocked. 
- """ - ansible_module = mock.MagicMock( - run_command=mock.MagicMock( - return_value=[1, "fizz", "buzz"])) - - mock_module.return_value = ansible_module - self.tested_module.main() - ansible_module.exit_json.assert_called_once_with( - changed=False, - failed=True, - msg="fizz") diff --git a/tripleo_validations/tests/library/test_ip_range.py b/tripleo_validations/tests/library/test_ip_range.py deleted file mode 100644 index 923b49679..000000000 --- a/tripleo_validations/tests/library/test_ip_range.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_ip_range ----------------------------------- - -Tests for `ip_range` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.ip_range as validation - - -class TestIPRange(base.TestCase): - - def test_check_arguments_non_IP(self): - '''Test ip_range when start is not an IP''' - errors = validation.check_arguments('something', '192.168.0.1', 1) - self.assertEqual(len(errors), 1) - self.assertEqual('Argument start (something) must be an IP', errors[0]) - - def test_check_arguments_IP_versions(self): - '''Test ip_range when start is IPv4 and end is IPv6''' - errors = validation.check_arguments('191.168.0.1', '::1', 2) - self.assertEqual(len(errors), 1) - self.assertEqual('Arguments start, end must share the same IP version', - errors[0]) - - def test_check_arguments_neg_range(self): - '''Test ip_range when min_size is a negative number''' - errors = validation.check_arguments('192.168.0.1', '192.168.0.2', -3) - self.assertEqual(len(errors), 1) - self.assertEqual('Argument min_size(-3) must be greater than 0', - errors[0]) - - def test_check_arguments_IPv4_ok(self): - '''Test ip_range on valid IPv4 arguments''' - errors = validation.check_arguments('192.168.0.1', '192.169.0.254', 5) - self.assertEqual(errors, []) - - def test_check_arguments_IPv6_ok(self): - '''Test ip_range on valid IPv6 arguments''' - errors = validation.check_arguments('2001:d8::1', '2001:d8::1:1', 120) - self.assertEqual(errors, []) - - def test_check_IP_range_too_small(self): - '''Test ip_range when range is less than minimal''' - errors = validation.check_IP_range('192.168.0.1', '192.168.0.5', 6) - self.assertEqual(len(errors), 2) - self.assertEqual( - 'The IP range 192.168.0.1 - 192.168.0.5 contains 5 addresses.', - errors[0] - ) - self.assertEqual( - 'This might not be enough for the deployment or later scaling.', - errors[1] - ) - - def test_check_lower_bound_greater_than_upper(self): - """Test ip_range when lower IP bound is greater than upper""" - errors = validation.check_arguments('192.168.0.10', '192.168.0.1', 5) - self.assertEqual(len(errors), 1) - self.assertEqual("Lower IP bound (192.168.0.10) must be smaller than " - "upper bound (192.168.0.1)", errors[0]) diff --git a/tripleo_validations/tests/library/test_network_environment.py 
b/tripleo_validations/tests/library/test_network_environment.py deleted file mode 100644 index 5cd662bd8..000000000 --- a/tripleo_validations/tests/library/test_network_environment.py +++ /dev/null @@ -1,925 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.network_environment as validation - - -class TestNicConfigs(base.TestCase): - - def test_non_dict(self): - errors = validation.check_nic_configs("controller.yaml", None) - self.assertEqual(len(errors), 1) - self.assertEqual('The nic_data parameter must be a dictionary.', - errors[0]) - - def _test_resources_invalid(self, nic_data): - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertEqual("The nic_data must contain the 'resources' key and it" - " must be a dictionary.", errors[0]) - - def test_resources_dict(self): - self._test_resources_invalid({}) - self._test_resources_invalid({'resources': None}) - - def test_resource_not_dict(self): - nic_data = {'resources': {'foo': None}} - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertEqual("'foo' is not a valid resource.", errors[0]) - - def test_resource_config_not_dict(self): - nic_data = {'resources': {'foo': {'properties': {'config': None}}}} - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertEqual("The 'config' property of 'foo' must be" - " a dictionary.", errors[0]) - - def nic_data(self, bridges): - return { - 'resources': { - 'foo': { - 'properties': { - 'config': { - 'str_replace': { - 'params': { - '$network_config': { - 'network_config': bridges - } - } - } - } - } - } - } - } - - def test_network_config_not_list(self): - nic_data = self.nic_data(None) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("'foo' must be a list", errors[0]) - - # See comment from 2018-11-22 in library/network_environment.py - """ - def test_bridge_has_type(self): - nic_data = self.nic_data([{ - 'name': 'storage', - 'members': [], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("'type' is a required property", errors[0]) - - def test_bridge_is_of_known_type(self): - nic_data = self.nic_data([{ - 'type': 'interface' - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("{'type': 'intreface'} is not valid", errors[0]) - - def test_bridge_has_name(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'members': [], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("'name' is a 
required property", errors[0]) - - def test_bridge_has_only_known_properties(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'name': 'storage', - 'member': [], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("Additional properties are not allowed" - " ('member' was unexpected)", errors[0]) - - def test_ovs_bridge_has_members(self): - nic_data = self.nic_data([{ - 'name': 'storage', - 'type': 'ovs_bridge', - 'members': None, - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("members/type: 'None' is not of type 'array'", - errors[0]) - - def test_ovs_bridge_members_dict(self): - nic_data = self.nic_data([{ - 'name': 'storage', - 'type': 'ovs_bridge', - 'members': [None], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("members/items/oneOf: None is not valid under any" - " of the given schemas", errors[0]) - - def test_bonds_have_known_type(self): - nic_data = self.nic_data([{ - 'type': 'magic_bridge', - 'name': 'storage', - 'members': [{}], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn("members/items/oneOf: {} is not valid under any" - " of the given schemas", errors[0]) - """ - - def test_more_than_one_bond(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'name': 'storage', - 'members': [ - { - 'type': 'ovs_bond', - 'name': 'bond0', - 'members': [ - {'type': 'interface', 'name': 'eth0'}, - {'type': 'interface', 'name': 'eth1'}, - ] - }, { - 'type': 'ovs_bond', - 'name': 'bond1', - 'members': [ - {'type': 'interface', 'name': 'eth2'}, - {'type': 'interface', 'name': 'eth3'}, - ] - }, - ], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn('Invalid bonding: There are >= 2 bonds for bridge ' - 'storage', errors[0]) - - def test_multiple_interfaces_without_bond(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'name': 'storage', - 'members': [ - {'type': 'interface', 'name': 'eth0'}, - {'type': 'interface', 'name': 'eth1'}, - ], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn('Invalid interface: When not using a bond, there can' - ' only be 1 interface for bridge storage', errors[0]) - - def test_one_interface_without_bond(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'name': 'storage', - 'members': [ - {'type': 'interface', 'name': 'eth0'}, - ], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual([], errors) - - # See comment from 2018-11-22 in library/network_environment.py - """ - def test_one_bond_no_interfaces(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'name': 'storage', - 'members': [ - {'type': 'ovs_bond', 'name': 'bond0', 'members': []}, - ], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual(len(errors), 1) - self.assertIn('members/minItems: [] is too short', errors[0]) - """ - - def test_one_bond_multiple_interfaces(self): - nic_data = self.nic_data([{ - 'type': 'ovs_bridge', - 'name': 'storage', - 'members': [ - { - 'type': 'ovs_bond', - 'name': 'bond0', - 'members': [ - {'type': 'interface', 'name': 'eth2'}, - {'type': 'interface', 'name': 'eth3'}, - ] - }, - 
{'type': 'interface', 'name': 'eth0'}, - {'type': 'interface', 'name': 'eth1'}, - ], - }]) - errors = validation.check_nic_configs("controller.yaml", nic_data) - self.assertEqual([], errors) - - -class TestCheckCidrOverlap(base.TestCase): - - def test_empty(self): - errors = validation.check_cidr_overlap([]) - self.assertEqual([], errors) - - def test_none(self): - errors = validation.check_cidr_overlap(None) - self.assertEqual(len(errors), 1) - self.assertEqual("The argument must be iterable.", errors[0]) - - def test_network_none(self): - errors = validation.check_cidr_overlap([None]) - self.assertEqual(len(errors), 1) - self.assertEqual("Invalid network: None", errors[0]) - - def test_single_network(self): - errors = validation.check_cidr_overlap(['172.16.0.0/24']) - self.assertEqual([], errors) - - def test_single_network_ipv6(self): - errors = validation.check_cidr_overlap(['fd00:fd00:fd00:2000::/64']) - self.assertEqual([], errors) - - def test_non_overlapping_networks(self): - networks = ['172.16.0.0/24', '172.17.0.0/24'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual([], errors) - - def test_identical_networks(self): - networks = ['172.16.0.0/24', '172.16.0.0/24'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 1) - self.assertEqual('Networks 172.16.0.0/24 and 172.16.0.0/24 overlap.', - errors[0]) - - def test_identical_networks_ipv6(self): - networks = ['fd00:fd00:fd00:2000::/64', 'fd00:fd00:fd00:2000::/64'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 1) - self.assertEqual('Networks fd00:fd00:fd00:2000::/64 and ' - 'fd00:fd00:fd00:2000::/64 overlap.', - errors[0]) - - def test_first_cidr_is_subset_of_second_ipv6(self): - networks = ['fd00:fd00:fd00:2000::/126', 'fd00:fd00:fd00:2000::/124'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 1) - self.assertEqual('Networks fd00:fd00:fd00:2000::/126 and ' - 'fd00:fd00:fd00:2000::/124 overlap.', - errors[0]) - - def test_first_cidr_is_subset_of_second(self): - networks = ['172.16.10.0/24', '172.16.0.0/16'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 1) - self.assertEqual('Networks 172.16.10.0/24 and 172.16.0.0/16 overlap.', - errors[0]) - - def test_second_cidr_is_subset_of_first(self): - networks = ['172.16.0.0/16', '172.16.10.0/24'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 1) - self.assertEqual('Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.', - errors[0]) - - def test_second_cidr_is_subset_of_first_ipv6(self): - networks = ['fd00:fd00:fd00:2000::/124', 'fd00:fd00:fd00:2000::/126'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 1) - self.assertEqual('Networks fd00:fd00:fd00:2000::/124 and ' - 'fd00:fd00:fd00:2000::/126 overlap.', - errors[0]) - - def test_multiple_overlapping_networks(self): - networks = ['172.16.0.0/16', '172.16.10.0/24', - '172.16.11.0/23', '172.17.0.0/24'] - errors = validation.check_cidr_overlap(networks) - self.assertEqual(len(errors), 3) - self.assertEqual('Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.', - errors[0]) - self.assertEqual('Networks 172.16.0.0/16 and 172.16.11.0/23 overlap.', - errors[1]) - self.assertEqual('Networks 172.16.10.0/24 and 172.16.11.0/23 overlap.', - errors[2]) - - -class TestCheckAllocationPoolsPairing(base.TestCase): - - def test_empty(self): - errors = validation.check_allocation_pools_pairing({}, {}) - self.assertEqual([], errors) 
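[Editor's note] The TestCheckCidrOverlap cases above reduce to a pairwise overlap test between CIDRs, where one network containing the other also counts as an overlap. A hedged sketch using the standard ipaddress module follows (the deleted library/network_environment.py may rely on a different IP library; the helper name is made up for illustration):

import ipaddress

def overlapping_pairs(cidrs):
    """Return one message per pair of overlapping networks, in first-to-last order."""
    errors = []
    for i, first in enumerate(cidrs):
        for second in cidrs[i + 1:]:
            # strict=False tolerates host bits being set, e.g. 172.16.11.0/23
            if ipaddress.ip_network(first, strict=False).overlaps(
                    ipaddress.ip_network(second, strict=False)):
                errors.append('Networks %s and %s overlap.' % (first, second))
    return errors

# The same three pairs that test_multiple_overlapping_networks expects to be reported.
assert overlapping_pairs(['172.16.0.0/16', '172.16.10.0/24',
                          '172.16.11.0/23', '172.17.0.0/24']) == [
    'Networks 172.16.0.0/16 and 172.16.10.0/24 overlap.',
    'Networks 172.16.0.0/16 and 172.16.11.0/23 overlap.',
    'Networks 172.16.10.0/24 and 172.16.11.0/23 overlap.']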
- - def test_non_dict(self): - errors = validation.check_allocation_pools_pairing(None, {}) - self.assertEqual(len(errors), 1) - self.assertEqual('The `filedata` argument must be a dictionary.', - errors[0]) - errors = validation.check_allocation_pools_pairing({}, None) - self.assertEqual(len(errors), 1) - self.assertEqual('The `pools` argument must be a dictionary.', - errors[0]) - - def test_pool_range_not_list(self): - pools = {'TestPools': None} - errors = validation.check_allocation_pools_pairing({}, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('The IP ranges in TestPools must form a list.', - errors[0]) - - def _test_pool_invalid_range(self, addr_range): - filedata = {'TestNetCidr': '172.18.0.0/24'} - pools = {'TestAllocationPools': [addr_range]} - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('Invalid format of the IP range in' - ' TestAllocationPools: {}'.format(addr_range), - errors[0]) - - def test_pool_invalid_range(self): - broken_ranges = [None, - {}, - {'start': 'foo', 'end': 'bar'}, - {'start': '10.0.0.1', 'end': '10.0.0.0'}, - ] - for addr_range in broken_ranges: - self._test_pool_invalid_range(addr_range) - - def test_pool_with_correct_range(self): - filedata = { - 'StorageNetCidr': '172.18.0.0/24', - } - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'} - ] - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual([], errors) - - def test_pool_without_cidr(self): - filedata = {} - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'} - ] - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('The StorageNetCidr CIDR is not specified for' - ' StorageAllocationPools.', errors[0]) - - def test_pool_with_invalid_cidr(self): - filedata = { - 'StorageNetCidr': 'breakit', - } - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'} - ] - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('Invalid IP network: breakit', errors[0]) - - def test_pool_outside_cidr(self): - filedata = { - 'StorageNetCidr': '172.18.0.0/25', - } - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'} - ] - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(len(errors), 1) - self.assertIn('outside of subnet StorageNetCidr', errors[0]) - - def test_pool_outside_cidr_ipv6(self): - filedata = { - 'StorageNetCidr': 'fd00:fd00:fd00:3000::10/125', - } - pools = { - 'StorageAllocationPools': [ - {'start': 'fd00:fd00:fd00:3000::10', - 'end': 'fd00:fd00:fd00:3000::18'} - ] - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual(len(errors), 1) - self.assertIn('outside of subnet StorageNetCidr', errors[0]) - - def test_multiple_ranges_and_pools(self): - filedata = { - 'StorageNetCidr': '172.18.0.0/24', - 'TenantNetCidr': '172.16.0.0/24', - } - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.20'}, - {'start': '172.18.0.100', 'end': '172.18.0.200'}, - ], - 'TenantAllocationPools': [ - {'start': '172.16.0.20', 'end': '172.16.0.30'}, - {'start': '172.16.0.70', 'end': '172.16.0.80'}, - ], - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual([], errors) - 
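[Editor's note] The pairing checks exercised in this class match each <Name>AllocationPools entry against its <Name>NetCidr value and reject ranges that fall outside the subnet. A minimal sketch of the per-pool containment test (hypothetical helper name; the deleted module additionally derives the CIDR key from the pool name, validates the range format, and detects overlapping pools):

import ipaddress

def pool_within_cidr(cidr, pool):
    """True when both ends of the pool sit inside the subnet and are ordered."""
    subnet = ipaddress.ip_network(cidr)
    start = ipaddress.ip_address(pool['start'])
    end = ipaddress.ip_address(pool['end'])
    return start in subnet and end in subnet and start <= end

# Mirrors test_pool_with_correct_range and test_pool_outside_cidr.
assert pool_within_cidr('172.18.0.0/24', {'start': '172.18.0.10', 'end': '172.18.0.200'})
assert not pool_within_cidr('172.18.0.0/25', {'start': '172.18.0.10', 'end': '172.18.0.200'})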
- def test_pool_very_large_range_ipv6(self): - filedata = { - 'StorageNetCidr': 'fd00:fd00:fd00:3000::/64', - } - pools = { - 'StorageAllocationPools': [ - {'start': 'fd00:fd00:fd00:3000::10', - 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'} - ] - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertEqual([], errors) - - def test_overlapping_pools(self): - filedata = { - 'StorageNetCidr': '172.18.0.0/24', - } - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.30'}, - {'start': '172.18.0.20', 'end': '172.18.0.200'}, - ], - } - errors = validation.check_allocation_pools_pairing(filedata, pools) - self.assertIn('Some pools in StorageAllocationPools are overlapping.', - errors[0]) - - -class TestStaticIpPoolCollision(base.TestCase): - - def test_empty(self): - errors = validation.check_static_ip_pool_collision({}, {}) - self.assertEqual([], errors) - - def test_non_dict(self): - errors = validation.check_static_ip_pool_collision(None, {}) - self.assertEqual(len(errors), 1) - self.assertEqual('The static IPs input must be a dictionary.', - errors[0]) - errors = validation.check_static_ip_pool_collision({}, None) - self.assertEqual(len(errors), 1) - self.assertEqual('The Pools input must be a dictionary.', - errors[0]) - - def test_pool_range_not_list(self): - pools = {'TestPools': None} - errors = validation.check_static_ip_pool_collision({}, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('The IP ranges in TestPools must form a list.', - errors[0]) - - def _test_pool_invalid_range(self, addr_range): - static_ips = {} - pools = {'TestAllocationPools': [addr_range]} - errors = validation.check_static_ip_pool_collision(static_ips, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('Invalid format of the IP range in' - ' TestAllocationPools: {}'.format(addr_range), - errors[0]) - - def test_pool_invalid_range(self): - broken_ranges = [None, - {}, - {'start': 'foo', 'end': 'bar'}, - {'start': '10.0.0.1', 'end': '10.0.0.0'}, - ] - for addr_range in broken_ranges: - self._test_pool_invalid_range(addr_range) - - def test_pool_with_correct_range(self): - static_ips = {} - pools = { - 'StorageAllocationPools': [ - {'start': '172.18.0.10', 'end': '172.18.0.200'} - ] - } - errors = validation.check_static_ip_pool_collision(static_ips, pools) - self.assertEqual([], errors) - - def test_static_ip_service_not_dict(self): - static_ips = {'ComputeIPs': None} - errors = validation.check_static_ip_pool_collision(static_ips, {}) - self.assertEqual(len(errors), 1) - self.assertEqual('The ComputeIPs must be a dictionary.', errors[0]) - - def test_static_ips_not_lists(self): - static_ips = { - 'ComputeIPs': { - 'internal_api': None - } - } - errors = validation.check_static_ip_pool_collision(static_ips, {}) - self.assertEqual(len(errors), 1) - self.assertEqual('The ComputeIPs->internal_api must be an array.', - errors[0]) - - def test_static_ips_not_parseable(self): - static_ips = { - 'ComputeIPs': { - 'internal_api': ['nonsense', None, '270.0.0.1'], - } - } - pools = {} - errors = validation.check_static_ip_pool_collision(static_ips, pools) - self.assertEqual(len(errors), 3) - self.assertIn('nonsense is not a valid IP address', errors[0]) - self.assertIn('None is not a valid IP address', errors[1]) - self.assertIn('270.0.0.1 is not a valid IP address', errors[2]) - - def test_static_ip_collide_with_pool(self): - static_ips = { - 'ControllerIps': { - 'internal_api': ['10.35.191.150', '10.35.191.60'] - } - } - pools = { - 
'InternalApiAllocationPools': [ - {'start': '10.35.191.150', 'end': '10.35.191.240'} - ] - } - errors = validation.check_static_ip_pool_collision(static_ips, pools) - self.assertEqual(len(errors), 1) - self.assertEqual('IP address 10.35.191.150 from ' - 'ControllerIps[internal_api] is in the ' - 'InternalApiAllocationPools pool.', errors[0]) - - def test_static_ip_no_collisions(self): - static_ips = { - 'ControllerIps': { - 'internal_api': ['10.35.191.50', '10.35.191.60'], - 'storage': ['192.168.100.20', '192.168.100.30'], - }, - 'ComputeIps': { - 'internal_api': ['10.35.191.100', '10.35.191.110'], - 'storage': ['192.168.100.45', '192.168.100.46'] - } - } - pools = { - 'InternalApiAllocationPools': [ - {'start': '10.35.191.150', 'end': '10.35.191.240'} - ] - } - errors = validation.check_static_ip_pool_collision(static_ips, pools) - self.assertEqual([], errors) - - -class TestVlanIds(base.TestCase): - - def test_empty(self): - errors = validation.check_vlan_ids({}) - self.assertEqual([], errors) - - def test_non_dict(self): - errors = validation.check_vlan_ids(None) - self.assertEqual(len(errors), 1) - errors = validation.check_vlan_ids(42) - self.assertEqual(len(errors), 1) - errors = validation.check_vlan_ids("Ceci n'est pas un dict.") - self.assertEqual(len(errors), 1) - - def test_id_collision(self): - vlans = { - 'TenantNetworkVlanID': 204, - 'StorageMgmtNetworkVlanID': 203, - 'StorageNetworkVlanID': 202, - 'ExternalNetworkVlanID': 100, - 'InternalApiNetworkVlanID': 202, - } - errors = validation.check_vlan_ids(vlans) - self.assertEqual(len(errors), 1) - self.assertIn('Vlan ID 202', errors[0]) - self.assertIn('already exists', errors[0]) - - def test_id_no_collisions(self): - vlans = { - 'TenantNetworkVlanID': 204, - 'StorageMgmtNetworkVlanID': 203, - 'StorageNetworkVlanID': 202, - 'ExternalNetworkVlanID': 100, - 'InternalApiNetworkVlanID': 201, - } - errors = validation.check_vlan_ids(vlans) - self.assertEqual([], errors) - - -class TestStaticIpInCidr(base.TestCase): - - def test_empty(self): - errors = validation.check_static_ip_in_cidr({}, {}) - self.assertEqual([], errors) - - def test_non_dict(self): - errors = validation.check_static_ip_in_cidr(None, {}) - self.assertEqual(len(errors), 1) - self.assertEqual('The networks argument must be a dictionary.', - errors[0]) - errors = validation.check_static_ip_in_cidr({}, None) - self.assertEqual(len(errors), 1) - self.assertEqual('The static_ips argument must be a dictionary.', - errors[0]) - - def test_invalid_cidr(self): - errors = validation.check_static_ip_in_cidr( - {'StorageNetCidr': 'breakit'}, {}) - self.assertEqual(len(errors), 1) - self.assertEqual("Network 'StorageNetCidr' has an invalid CIDR:" - " 'breakit'", errors[0]) - - def test_service_not_a_dict(self): - static_ips = {'ControllerIps': None} - errors = validation.check_static_ip_in_cidr({}, static_ips) - self.assertEqual(len(errors), 1) - self.assertEqual('The ControllerIps must be a dictionary.', errors[0]) - - def test_static_ips_not_a_list(self): - networks = { - 'InternalApiNetCidr': '10.35.191.0/24', - } - static_ips = { - 'ControllerIps': { - 'internal_api': None, - } - } - errors = validation.check_static_ip_in_cidr(networks, static_ips) - self.assertEqual(len(errors), 1) - self.assertEqual('The ControllerIps->internal_api must be a list.', - errors[0]) - - def test_missing_cidr(self): - static_ips = { - 'ControllerIps': { - 'storage': ['192.168.100.120'] - } - } - errors = validation.check_static_ip_in_cidr({}, static_ips) - self.assertEqual(len(errors), 1) - 
self.assertEqual("Service 'storage' does not have a corresponding" - " range: 'StorageNetCidr'.", errors[0]) - - def test_address_not_within_cidr(self): - networks = { - 'StorageNetCidr': '192.168.100.0/24', - } - static_ips = { - 'ControllerIps': { - 'storage': ['192.168.100.120', '192.168.101.0'] - } - } - errors = validation.check_static_ip_in_cidr(networks, static_ips) - self.assertEqual(len(errors), 1) - self.assertEqual('The IP address 192.168.101.0 is outside of the' - ' StorageNetCidr range: 192.168.100.0/24', errors[0]) - - def test_addresses_within_cidr(self): - networks = { - 'StorageNetCidr': '192.168.100.0/24', - 'InternalApiNetCidr': '10.35.191.0/24', - } - static_ips = { - 'ControllerIps': { - 'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'], - 'internal_api': ['10.35.191.60', '10.35.191.70'] - }, - 'ComputeIps': { - 'storage': ['192.168.100.125', '192.168.100.135'], - 'internal_api': ['10.35.191.100', '10.35.191.110'], - } - } - errors = validation.check_static_ip_in_cidr(networks, static_ips) - self.assertEqual([], errors) - - -class TestDuplicateStaticIps(base.TestCase): - - def test_empty(self): - errors = validation.duplicate_static_ips({}) - self.assertEqual([], errors) - - def test_not_a_dict(self): - errors = validation.duplicate_static_ips(None) - self.assertEqual(len(errors), 1) - self.assertEqual('The static_ips argument must be a dictionary.', - errors[0]) - - def test_service_not_a_dict(self): - static_ips = { - 'ControllerIps': None, - } - errors = validation.duplicate_static_ips(static_ips) - self.assertEqual(len(errors), 1) - self.assertEqual('The ControllerIps must be a dictionary.', - errors[0]) - - def test_static_ips_not_a_list(self): - static_ips = { - 'ControllerIps': { - 'internal_api': None, - } - } - errors = validation.duplicate_static_ips(static_ips) - self.assertEqual(len(errors), 1) - self.assertEqual('The ControllerIps->internal_api must be a list.', - errors[0]) - - def test_duplicate_ips_within_service(self): - static_ips = { - 'ControllerIps': { - 'internal_api': ['10.35.191.60', '10.35.191.60'] - }, - } - errors = validation.duplicate_static_ips(static_ips) - self.assertEqual(len(errors), 1) - self.assertIn('The 10.35.191.60 IP address was entered multiple times', - errors[0]) - - def test_duplicate_ips_across_services(self): - static_ips = { - 'ControllerIps': { - 'internal_api': ['10.35.191.60', '10.35.191.70'], - 'storage': ['192.168.100.1', '10.35.191.60', '192.168.100.3'], - }, - } - errors = validation.duplicate_static_ips(static_ips) - self.assertEqual(len(errors), 1) - self.assertIn('The 10.35.191.60 IP address was entered multiple times', - errors[0]) - - def test_duplicate_ips_across_roles(self): - static_ips = { - 'ControllerIps': { - 'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'], - 'internal_api': ['10.35.191.60', '10.35.191.70'] - }, - 'ComputeIps': { - 'storage': ['192.168.100.125', '192.168.100.135'], - 'internal_api': ['10.35.191.60', '10.35.191.110'], - } - } - errors = validation.duplicate_static_ips(static_ips) - self.assertEqual(len(errors), 1) - self.assertIn('The 10.35.191.60 IP address was entered multiple times', - errors[0]) - - def test_no_duplicate_ips(self): - static_ips = { - 'ControllerIps': { - 'storage': ['192.168.100.1', '192.168.100.2', '192.168.100.3'], - 'internal_api': ['10.35.191.60', '10.35.191.70'] - }, - 'ComputeIps': { - 'storage': ['192.168.100.125', '192.168.100.135'], - 'internal_api': ['10.35.191.100', '10.35.191.110'], - } - } - errors = 
validation.duplicate_static_ips(static_ips) - self.assertEqual([], errors) - - -class TestNodePoolSize(base.TestCase): - def test_pool_size_ok(self): - - plan_env_path = 'plan-environment.yaml' - ip_pools_path = 'environments/ips-from-pool-all.yaml' - plan_env_content = """parameter_defaults: - BlockStorageCount: 0 - CephStorageCount: 0 - ComputeCount: 1 - ControllerCount: 1 - ObjectStorageCount: 0""" - ip_pools_content = """parameter_defaults: - ControllerIPs: - external: - - 10.0.0.251 - internal_api: - - 172.16.2.251 - storage: - - 172.16.1.251 - storage_mgmt: - - 172.16.3.251 - tenant: - - 172.16.0.251 - ComputeIPs: - internal_api: - - 172.16.2.252 - storage: - - 172.16.1.252 - tenant: - - 172.16.0.252""" - template_files = { - plan_env_path: plan_env_content, - ip_pools_path: ip_pools_content - } - warnings = validation.validate_node_pool_size( - plan_env_path, ip_pools_path, template_files) - self.assertEqual(len(warnings), 0) - - def test_pool_size_pool_too_small(self): - plan_env_path = 'plan-environment.yaml' - ip_pools_path = 'environments/ips-from-pool-all.yaml' - plan_env_content = """parameter_defaults: - BlockStorageCount: 0 - CephStorageCount: 0 - ComputeCount: 2 - ControllerCount: 1 - ObjectStorageCount: 0""" - ip_pools_content = """parameter_defaults: - ControllerIPs: - external: - - 10.0.0.251 - internal_api: - - 172.16.2.251 - storage: - - 172.16.1.251 - storage_mgmt: - - 172.16.3.251 - tenant: - - 172.16.0.251 - ComputeIPs: - internal_api: - - 172.16.2.252 - storage: - - 172.16.1.252 - tenant: - - 172.16.0.252""" - template_files = { - plan_env_path: plan_env_content, - ip_pools_path: ip_pools_content - } - warnings = validation.validate_node_pool_size( - plan_env_path, ip_pools_path, template_files) - self.assertEqual(len(warnings), 3) - self.assertEqual(set(warnings), { - "Insufficient number of IPs in 'internal_api' pool for 'Compute' " - "role: 1 IP(s) found in pool, but 2 nodes assigned to role.", - "Insufficient number of IPs in 'storage' pool for 'Compute' " - "role: 1 IP(s) found in pool, but 2 nodes assigned to role.", - "Insufficient number of IPs in 'tenant' pool for 'Compute' " - "role: 1 IP(s) found in pool, but 2 nodes assigned to role." - }) - - def test_pool_size_pool_missing(self): - plan_env_path = 'plan-environment.yaml' - ip_pools_path = 'environments/ips-from-pool-all.yaml' - plan_env_content = """parameter_defaults: - BlockStorageCount: 0 - CephStorageCount: 0 - ComputeCount: 1 - ControllerCount: 1 - ObjectStorageCount: 0""" - ip_pools_content = """parameter_defaults: - ControllerIPs: - external: - - 10.0.0.251 - internal_api: - - 172.16.2.251 - storage: - - 172.16.1.251 - storage_mgmt: - - 172.16.3.251 - tenant: - - 172.16.0.251""" - template_files = { - plan_env_path: plan_env_content, - ip_pools_path: ip_pools_content - } - warnings = validation.validate_node_pool_size( - plan_env_path, ip_pools_path, template_files) - self.assertEqual(len(warnings), 1) - self.assertEqual(warnings, [ - "Found 1 node(s) assigned to 'Compute' role, but no static IP " - "pools defined." - ]) diff --git a/tripleo_validations/tests/library/test_ovs_dpdk_pmd_cpus_check.py b/tripleo_validations/tests/library/test_ovs_dpdk_pmd_cpus_check.py deleted file mode 100644 index eb3a415c0..000000000 --- a/tripleo_validations/tests/library/test_ovs_dpdk_pmd_cpus_check.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. 
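The CIDR membership rule exercised by TestStaticIpInCidr above (for example, 192.168.101.0 being rejected against 192.168.100.0/24 while 192.168.100.120 is accepted) can be reproduced with the standard ipaddress module. The helper below is only an illustrative sketch with a hypothetical name, not the check_static_ip_in_cidr implementation those tests target:

import ipaddress

def ip_outside_cidr(ip, cidr):
    # True when the address does not belong to the given network.
    return ipaddress.ip_address(ip) not in ipaddress.ip_network(cidr)

assert ip_outside_cidr('192.168.101.0', '192.168.100.0/24')
assert not ip_outside_cidr('192.168.100.120', '192.168.100.0/24')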
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_ovs_dpdk_pmd_cpus_check ------------------------------ - -Tests for `ovs_dpdk_pmd_cpus_check` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.ovs_dpdk_pmd_cpus_check as validation - - -class TestOvsDpdkPmdCpusCheck(base.TestCase): - - def setUp(self): - super(TestOvsDpdkPmdCpusCheck, self).setUp() - self.module = mock.MagicMock() - - def test_module_init(self): - module_attributes = dir(validation) - - required_attributes = [ - 'DOCUMENTATION', - 'EXAMPLES' - ] - - self.assertTrue(set(required_attributes).issubset(module_attributes)) - - @mock.patch( - 'library.ovs_dpdk_pmd_cpus_check.yaml_safe_load', - return_value={'options': 'fizz'}) - @mock.patch( - 'library.ovs_dpdk_pmd_cpus_check.validate_pmd_cpus', - return_value=None) - @mock.patch('library.ovs_dpdk_pmd_cpus_check.AnsibleModule') - def test_module_main(self, mock_module, - mock_validate_pmd_cpus, - mock_yaml_safe_load): - - module_calls = [ - mock.call(argument_spec='fizz'), - mock.call().params.get('pmd_cpu_mask') - ] - - validation.main() - - mock_validate_pmd_cpus.assert_called_once() - mock_module.assert_has_calls(module_calls) - - @mock.patch( - 'library.ovs_dpdk_pmd_cpus_check.' - 'get_nodes_cores_info') - @mock.patch( - 'library.ovs_dpdk_pmd_cpus_check.' - 'get_cpus_list_from_mask_value') - def test_validate_valid_pmd_cpus(self, mock_pmd_cpus, mock_cpus): - mock_pmd_cpus.return_value = '0,1' - mock_cpus.return_value = fakes.MOCK_CPUS_RET_VALUE - - validation.validate_pmd_cpus(self.module, '"3"') - self.module.exit_json.assert_called_once_with( - msg="PMD CPU's configured correctly.") - - @mock.patch( - 'library.ovs_dpdk_pmd_cpus_check.' - 'get_nodes_cores_info') - @mock.patch( - 'library.ovs_dpdk_pmd_cpus_check.' - 'get_cpus_list_from_mask_value') - def test_validate_invalid_pmd_cpus(self, mock_pmd_cpus, mock_cpus): - mock_pmd_cpus.return_value = '0,2' - mock_cpus.return_value = fakes.MOCK_CPUS_RET_VALUE - - validation.validate_pmd_cpus(self.module, '"5"') - self.module.fail_json.assert_called_once_with( - msg="Invalid PMD CPU's, cpu is not used from NUMA node(s): 1.") - - def test_get_cpus_list_from_mask_value(self): - cpu_mask_val = '"3"' - expected_value = "0,1" - result = validation.get_cpus_list_from_mask_value(cpu_mask_val) - self.assertEqual(result, expected_value) - - def test_get_cpus_list_from_mask_value_zero_mask(self): - """In this scenario the pmd-cpu-mask has value of zero. - Meaning that no cores are selected. 
- """ - cpu_mask_val = '"0"' - expected_value = "" - result = validation.get_cpus_list_from_mask_value(cpu_mask_val) - self.assertEqual(result, expected_value) - - def test_valid_get_nodes_cores_info(self): - lines = "# format\n0,0,0\n 0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - - expected_value = ( - [0, 1], - [{'numa_node': 0, 'thread_siblings': [0, 2], 'cpu': 0}, - {'numa_node': 1, 'thread_siblings': [1, 3], 'cpu': 1}]) - result = validation.get_nodes_cores_info(self.module) - self.assertListEqual(result[0], expected_value[0]) - self.assertListEqual(result[1], expected_value[1]) - - def test_valid_get_nodes_cores_info_duplicate_thread(self): - lines = "# format\n0,0,0\n 0,0,0\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - - expected_value = ( - [0, 1], - [{'numa_node': 0, 'thread_siblings': [0], 'cpu': 0}, - {'numa_node': 1, 'thread_siblings': [1, 3], 'cpu': 1}]) - - result = validation.get_nodes_cores_info(self.module) - self.assertListEqual(result[0], expected_value[0]) - self.assertListEqual(result[1], expected_value[1]) - - def test_invalid_missing_val_get_nodes_cores_info(self): - lines = "# format\n,0,0\n 0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_once_with( - msg="Unable to determine physical and logical cpus.") - - def test_invalid_missing_field_get_nodes_cores_info(self): - lines = "# format\n0,0\n 0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_once_with( - msg="Unable to determine physical and logical cpus.") - - def test_invalid_incorrect_value_get_nodes_cores_info(self): - lines = "# format\nab,0,0\n0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_once_with( - msg="Unable to determine physical and logical cpus.") - - def test_invalid_command_result_get_nodes_cores_info(self): - self.module.run_command.return_value = [] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_once_with( - msg="Unable to determine physical and logical cpus.") diff --git a/tripleo_validations/tests/library/test_pacemaker.py b/tripleo_validations/tests/library/test_pacemaker.py deleted file mode 100644 index e6395d2da..000000000 --- a/tripleo_validations/tests/library/test_pacemaker.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_pacemaker --------------- - -Tests for the `pacemaker` module. 
-""" -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes -from library import pacemaker - - -class TestPacemaker(base.TestCase): - def setUp(self): - self.tested_module = pacemaker - return super().setUp() - - def test_module_init(self): - - expepect_attrs = set( - [ - 'DOCUMENTATION', - 'EXAMPLES', - 'main']) - - actual_attrs = set(dir(pacemaker)) - - self.assertTrue(expepect_attrs.issubset(actual_attrs)) - - @mock.patch('library.pacemaker.ElementTree.fromstring') - def test_parse_pcs_status(self, mock_fromstring): - test_xml = "" - return_value = self.tested_module.parse_pcs_status(test_xml) - mock_fromstring.assert_called_once_with(test_xml) - - def test_format_failure_empty(self): - - return_value = self.tested_module.format_failure({}) - expected_value = ( - "Task None None failed on node None. Exit reason: " - "'None'. Exit status: 'None'.") - self.assertEqual(expected_value, return_value) diff --git a/tripleo_validations/tests/library/test_pmd_threads_siblings_check.py b/tripleo_validations/tests/library/test_pmd_threads_siblings_check.py deleted file mode 100644 index 26280e3a1..000000000 --- a/tripleo_validations/tests/library/test_pmd_threads_siblings_check.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest.mock import MagicMock -from unittest.mock import patch - -import library.pmd_threads_siblings_check as validation -from tripleo_validations.tests import base - - -class TestPmdThreadsSiblingsCheck(base.TestCase): - - def setUp(self): - super(TestPmdThreadsSiblingsCheck, self).setUp() - self.module = MagicMock() - - @patch('library.pmd_threads_siblings_check.' - 'get_cpus_list_from_mask_value') - @patch('library.pmd_threads_siblings_check.' - 'get_nodes_cores_info') - @patch('library.pmd_threads_siblings_check.' - 'get_thread_siblings') - def test_validate_valid_pmd_cpus(self, mock_pmd_cpus, mock_cpus, - mock_threads_siblings): - mock_pmd_cpus.return_value = ['0', '2'] - mock_cpus.return_value = ( - [0], - [{'numa_node': 0, 'thread_siblings': [0, 2], 'cpu': 0}, - {'numa_node': 0, 'thread_siblings': [4, 6], 'cpu': 4}, - {'numa_node': 0, 'thread_siblings': [8, 10], 'cpu': 8}, - {'numa_node': 1, 'thread_siblings': [1, 3], 'cpu': 1}, - {'numa_node': 1, 'thread_siblings': [5, 7], 'cpu': 5}, - {'numa_node': 1, 'thread_siblings': [9, 11], 'cpu': 9}]) - mock_threads_siblings.return_value = ['0', '2'] - - validation.validate_pmd_cpus(self.module, '"3"') - self.module.exit_json.assert_called_with( - changed=False, - message="PMD CPU's configured correctly: 0,2", - pmd_cpus_list=['0', '2']) - - @patch('library.pmd_threads_siblings_check.' - 'get_cpus_list_from_mask_value') - @patch('library.pmd_threads_siblings_check.' - 'get_nodes_cores_info') - @patch('library.pmd_threads_siblings_check.' 
- 'get_thread_siblings') - def test_validate_invalid_pmd_cpus(self, mock_pmd_cpus, mock_cpus, - mock_threads_siblings): - mock_pmd_cpus.return_value = ['0', '1'] - mock_cpus.return_value = ( - [0], - [{'numa_node': 0, 'thread_siblings': [0, 2], 'cpu': 0}, - {'numa_node': 0, 'thread_siblings': [4, 6], 'cpu': 4}, - {'numa_node': 0, 'thread_siblings': [8, 10], 'cpu': 8}, - {'numa_node': 1, 'thread_siblings': [1, 3], 'cpu': 1}, - {'numa_node': 1, 'thread_siblings': [5, 7], 'cpu': 5}, - {'numa_node': 1, 'thread_siblings': [9, 11], 'cpu': 9}]) - mock_threads_siblings.return_value = ['0', '2'] - - validation.validate_pmd_cpus(self.module, '"5"') - self.module.fail_json.assert_called_with( - msg="Invalid PMD CPU's, thread siblings missed") - - def test_get_cpus_list_from_mask_value(self): - cpu_mask_val = '"3"' - expected_value = ['0', '1'] - result = validation.get_cpus_list_from_mask_value(cpu_mask_val) - self.assertEqual(result, expected_value) - - def test_valid_get_nodes_cores_info(self): - lines = "# format\n0,0,0\n 0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - - expected_value = ( - [0, 1], - [{'numa_node': 0, 'thread_siblings': [0, 2], 'cpu': 0}, - {'numa_node': 1, 'thread_siblings': [1, 3], 'cpu': 1}]) - result = validation.get_nodes_cores_info(self.module) - self.assertListEqual(result[0], expected_value[0]) - self.assertListEqual(result[1], expected_value[1]) - - def test_invalid_missing_val_get_nodes_cores_info(self): - lines = "# format\n,0,0\n 0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_with( - msg="Unable to determine physical and logical cpus.") - - def test_invalid_missing_field_get_nodes_cores_info(self): - lines = "# format\n0,0\n 0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_with( - msg="Unable to determine physical and logical cpus.") - - def test_invalid_incorrect_value_get_nodes_cores_info(self): - lines = "# format\nab,0,0\n0,0,2\n1,1,1\n1,1,3" - self.module.run_command.return_value = [0, lines, ""] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_with( - msg="Unable to determine physical and logical cpus.") - - def test_invalid_command_result_get_nodes_cores_info(self): - self.module.run_command.return_value = [] - validation.get_nodes_cores_info(self.module) - self.module.fail_json.assert_called_with( - msg="Unable to determine physical and logical cpus.") diff --git a/tripleo_validations/tests/library/test_switch_vlans.py b/tripleo_validations/tests/library/test_switch_vlans.py deleted file mode 100644 index decc5c11f..000000000 --- a/tripleo_validations/tests/library/test_switch_vlans.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
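Both PMD checks deleted above derive a CPU list from the pmd-cpu-mask value, with mask "3" expanding to CPUs 0 and 1, mask "5" to 0 and 2, and mask "0" to an empty list. A minimal, self-contained sketch of that expansion, assuming the mask is read as hexadecimal (which is how OVS interprets pmd-cpu-mask) and using an illustrative helper name rather than the modules' own get_cpus_list_from_mask_value:

def cpus_from_mask(mask_value):
    # Strip the quoting seen in ovs-vsctl output and treat the value as hex.
    mask = int(mask_value.strip('"'), 16)
    # Every set bit position corresponds to one CPU id.
    return [bit for bit in range(mask.bit_length()) if mask & (1 << bit)]

assert cpus_from_mask('"3"') == [0, 1]
assert cpus_from_mask('"5"') == [0, 2]
assert cpus_from_mask('"0"') == []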
- -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.switch_vlans as validation - - -class TestSwitchVlans(base.TestCase): - - def __init__(self, display=None): - super(TestSwitchVlans, self).__init__(display) - - self.introspect_data = { - "inspector_data-8c3faec8-bc05-401c-8956-99c40cdea97d": { - "all_interfaces": { - "em1": { - "mac": "00:11:22:33:44:55", - "lldp_processed": { - "switch_port_id": "555", - "switch_port_vlans": [ - {"id": 101, "name": "vlan101"}, - {"id": 104, "name": "vlan104"}, - {"id": 203, "name": "vlan203"} - ] - } - }, - "em2": { - "mac": "00:11:22:33:44:66", - "lldp_processed": { - "switch_port_id": "557", - "switch_port_vlans": [ - {"id": 101, "name": "vlan101"}, - {"id": 105, "name": "vlan105"}, - {"id": 204, "name": "vlan204"} - ] - } - } - } - }, - "inspector_data-c0d2568e-1825-4d34-96ec-f08bbf0ba7ae": { - "all_interfaces": { - "em1": { - "mac": "00:66:77:88:99:aa", - "lldp_processed": { - "switch_port_id": "559", - "switch_port_vlans": [ - {"id": 101, "name": "vlan101"}, - {"id": 201, "name": "vlan201"}, - {"id": 222, "name": "vlan222"} - ] - } - } - } - } - } - - def test_valid_vlan_first_node(self): - msg, result = validation.vlan_exists_on_switch(104, - self.introspect_data) - self.assertEqual(result, True) - self.assertEqual(msg, []) - - def test_valid_vlan_first_node_second_int(self): - msg, result = validation.vlan_exists_on_switch(105, - self.introspect_data) - self.assertEqual(result, True) - self.assertEqual(msg, []) - - def test_valid_vlan_second_node(self): - msg, result = validation.vlan_exists_on_switch(222, - self.introspect_data) - self.assertEqual(result, True) - self.assertEqual(msg, []) - - def test_vlan_not_found(self): - msg, result = validation.vlan_exists_on_switch( - 111, self.introspect_data) - self.assertEqual(result, False) - self.assertEqual(msg, []) - - def test_no_lldp_data(self): - local_data = { - "inspector_data-8c3faec8-bc05-401c-8956-99c40cdea97d": { - "all_interfaces": { - "em1": { - "mac": "00:11:22:33:44:55" - } - } - } - } - - msg, result = validation.vlan_exists_on_switch( - 104, local_data) - self.assertEqual(result, False) - self.assertEqual(msg, ["LLDP data not available for node " - "8c3faec8-bc05-401c-8956-99c40cdea97d"]) - - def test_vlans_with_network_data(self): - # End-to-end test using template files. 
One VLAN - - # TenantNetworkVlanID, is not configured in the introspection - # data for switch - - self.network_data = { - "network_environment.yaml": - "resource_registry:\n" - " OS::TripleO::Compute::Net::SoftwareConfig: " - "nic-configs/compute.yaml\n" - " OS::TripleO::Controller::Net::SoftwareConfig: " - "nic-configs/controller.yaml\n\n" - "parameter_defaults:\n " - "InternalApiNetworkVlanID: 201 \n " - "StorageNetworkVlanID: 204 \n " - "StorageMgmtNetworkVlanID: 203 \n " - "TenantNetworkVlanID: 107 \n " - "ExternalNetworkVlanID: 101 \n" - "", - "nic-configs/controller.yaml": - "resources:\n\ - OsNetConfigImpl:\n\ - properties:\n\ - config:\n\ - params:\n\ - $network_config:\n\ - network_config:\n\ - - type: ovs_bridge\n\ - name: bridge_name\n\ - members:\n\ - - type: interface\n\ - name: nic2\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: ExternalNetworkVlanID\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: InternalApiNetworkVlanID\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: StorageNetworkVlanID\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: StorageMgmtNetworkVlanID\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: TenantNetworkVlanID\n", - "nic-configs/compute.yaml": - "resources:\n\ - OsNetConfigImpl:\n\ - properties:\n\ - config:\n\ - params:\n\ - $network_config:\n\ - network_config:\n\ - - type: ovs_bridge\n\ - name: bridge_name\n\ - members:\n\ - - type: interface\n\ - name: nic2\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: InternalApiNetworkVlanID\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: StorageNetworkVlanID\n\ - - type: vlan\n\ - vlan_id:\n\ - get_param: TenantNetworkVlanID\n" - } - - netenv_path = "network_environment.yaml" - warnings, errors = validation.validate_switch_vlans( - netenv_path, self.network_data, self.introspect_data) - self.assertEqual(warnings, set([])) - self.assertEqual(errors, set(['VLAN ID 107 not on attached switch', - 'VLAN ID 107 not on attached switch'])) diff --git a/tripleo_validations/tests/library/test_tripleo_haproxy_conf.py b/tripleo_validations/tests/library/test_tripleo_haproxy_conf.py deleted file mode 100644 index a18165969..000000000 --- a/tripleo_validations/tests/library/test_tripleo_haproxy_conf.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- - - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -from library import tripleo_haproxy_conf - - -class TestHaproxyConf(base.TestCase): - def setUp(self): - super(TestHaproxyConf, self).setUp() - self.h_conf = tripleo_haproxy_conf - - @mock.patch('library.tripleo_haproxy_conf.generic_ini_style_conf_parser') - def test_parse_haproxy_conf(self, mock_generic_ini_style_conf_parser): - """ Despite the appearences this test is not using regex at all. - These are merely raw strings, that it asserts are passed to the `generic_ini_style_conf_parser`. - From the pov of the test it is irrelevant what form they have. 
- It's the `generic_ini_style_conf_parser` function that is supposed to receive these strings as arguments. - Test is merely checking that the code immediately preceding it's call does what it should do. - The regexes are finally used for parsing haproxy.cfg, which has a rather vague syntax. - In short: The regexes are supposed to match all possibilities described here, and some more: - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/load_balancer_administration/ch-haproxy-setup-vsa - """ - - file_path = './foo/bar' - - args = { - 'file_path': file_path, - 'section_regex': r'^(\w+)', - 'option_regex': r'^(?:\s+)(\w+(?:\s+\w+)*?)\s+([\w/]*)$' - } - - self.h_conf.parse_haproxy_conf(file_path) - mock_generic_ini_style_conf_parser.assert_called_once_with( - args['file_path'], - args['section_regex'], - args['option_regex'] - ) diff --git a/tripleo_validations/tests/library/test_verify_profiles.py b/tripleo_validations/tests/library/test_verify_profiles.py deleted file mode 100644 index 484c7d08b..000000000 --- a/tripleo_validations/tests/library/test_verify_profiles.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_verify_profiles ----------------------------------- - -Tests for `verify_profiles` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from logging import warning -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -import library.verify_profiles as validation - - -class TestVerifyProfiles(base.TestCase): - - def setUp(self): - self.tested_module = validation - return super().setUp() - - def test_module_init(self): - module_attributes = dir(self.tested_module) - - required_attributes = [ - 'DOCUMENTATION', - 'EXAMPLES'] - - self.assertTrue(set(required_attributes).issubset(module_attributes)) - - def test_caps_to_dict(self): - """Test various use cases of the '_capabilities_to_dict' function. - """ - test_dict = {"foo": "bar", "fizz": "buzz"} - #Test None => dict "conversion" - self.assertEqual({}, self.tested_module._capabilities_to_dict(None)) - - #Test dict => dict. This isn't a conversion at all, but an identity op - self.assertEqual( - test_dict, - self.tested_module._capabilities_to_dict(test_dict)) - - self.assertEqual( - test_dict, - self.tested_module._capabilities_to_dict('foo:bar,fizz:buzz')) - - @mock.patch('library.verify_profiles._capabilities_to_dict') - def test_node_get_capabilities(self, mock_dict_conv): - """Test 'node_get_capabilities' function. - Not much to test, just a call to the '_capabilities_to_dict'. 
- """ - - self.tested_module._node_get_capabilities(fakes.MOCK_NODES[0]) - - mock_dict_conv.assert_called_once_with( - fakes.MOCK_NODES[0]['properties']['capabilities']) - - @mock.patch( - 'library.verify_profiles.yaml_safe_load', - return_value={'options': 'foo'}) - @mock.patch( - 'library.verify_profiles.AnsibleModule') - @mock.patch( - 'library.verify_profiles.verify_profiles', - return_value=(None, None)) - def test_main_success(self, mock_verify, mock_module, mock_yaml): - """Test if the module properly sends information about successful run, - to the 'exit_json' method of the 'AnsibleModule' object. - """ - - returned_module = mock.MagicMock( - params={ - 'nodes': 'fizz', - 'flavors': 'buzz'}) - mock_module.return_value = returned_module - self.tested_module.main() - - mock_yaml.assert_called_once() - mock_verify.assert_called_once_with('fizz', 'buzz') - returned_module.assert_has_calls([ - mock.call.exit_json(msg="No profile errors detected.")]) - - @mock.patch( - 'library.verify_profiles.yaml_safe_load', - return_value={'options': 'foo'}) - @mock.patch( - 'library.verify_profiles.AnsibleModule') - @mock.patch( - 'library.verify_profiles.verify_profiles', - return_value=(None, ['HCF'])) - def test_main_errors(self, mock_verify, mock_module, mock_yaml): - """Test if the module properly sends information about error, - such as the catastrophic temperature increase, to the 'fail_json' - method of the 'AnsibleModule' object. - """ - - returned_module = mock.MagicMock( - params={ - 'nodes': 'fizz', - 'flavors': 'buzz'}) - mock_module.return_value = returned_module - self.tested_module.main() - - mock_yaml.assert_called_once() - mock_verify.assert_called_once_with('fizz', 'buzz') - returned_module.assert_has_calls([ - mock.call.fail_json(msg="HCF")]) - - @mock.patch( - 'library.verify_profiles.yaml_safe_load', - return_value={'options': 'foo'}) - @mock.patch( - 'library.verify_profiles.AnsibleModule') - @mock.patch( - 'library.verify_profiles.verify_profiles', - return_value=(['HCF imminent'], None)) - def test_main_warnings(self, mock_verify, mock_module, mock_yaml): - """Test if the module properly sends information about warning, - such as incoming catastrophic temperature increase, - to the 'exit_json' method of the 'AnsibleModule' object. - """ - - returned_module = mock.MagicMock( - params={ - 'nodes': 'fizz', - 'flavors': 'buzz'}) - - mock_module.return_value = returned_module - self.tested_module.main() - - mock_yaml.assert_called_once() - mock_verify.assert_called_once_with('fizz', 'buzz') - - returned_module.assert_has_calls([ - mock.call.exit_json(warnings="HCF imminent")]) - - def test_verify_profiles_success(self): - - return_value = self.tested_module.verify_profiles( - [fakes.MOCK_NODES[0]], - fakes.MOCK_PROFILE_FLAVORS) - - self.assertEqual(return_value, ([], [])) - - def test_verify_profiles_warning_overcount(self): - warn_msg = "1 nodes with profile None won't be used for deployment now" - return_value = self.tested_module.verify_profiles( - fakes.MOCK_NODES, - fakes.MOCK_PROFILE_FLAVORS) - - self.assertEqual(([warn_msg], []), return_value) diff --git a/tripleo_validations/tests/lookup_plugins/__init__.py b/tripleo_validations/tests/lookup_plugins/__init__.py deleted file mode 100644 index 9b28bc41d..000000000 --- a/tripleo_validations/tests/lookup_plugins/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Lookup plugins tests. 
-""" diff --git a/tripleo_validations/tests/lookup_plugins/test_glance_images.py b/tripleo_validations/tests/lookup_plugins/test_glance_images.py deleted file mode 100644 index 3b3abd16c..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_glance_images.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_glance_images ------------------------------ - -Tests for `glance_images` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.glance_images as plugin - - -class TestGlanceImages(base.TestCase): - - def setUp(self): - super(TestGlanceImages, self).setUp() - - def test_module_init(self): - """Verifying that the lookup plugin is instantiated properly. - """ - - lookup = plugin.LookupModule() - - module_attributes = dir(plugin) - required_attributes = [ - 'DOCUMENTATION', - 'EXAMPLES' - ] - - self.assertTrue(set(required_attributes).issubset(module_attributes)) - - self.assertIsInstance(plugin.DOCUMENTATION, str) - self.assertIsInstance(plugin.EXAMPLES, str) - - self.assertIn('run', dir(lookup)) diff --git a/tripleo_validations/tests/lookup_plugins/test_introspection_data.py b/tripleo_validations/tests/lookup_plugins/test_introspection_data.py deleted file mode 100644 index 41b0f36c4..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_introspection_data.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_introspection_data ------------------------------ - -Tests for `introspection_data` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base -import lookup_plugins.introspection_data as plugin - - -class TestIntrospectionData(base.TestCase): - - def setUp(self): - super(TestIntrospectionData, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_ironic_nodes.py b/tripleo_validations/tests/lookup_plugins/test_ironic_nodes.py deleted file mode 100644 index 92b6271b1..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_ironic_nodes.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_ironic_nodes ------------------------------ - -Tests for `ironic_nodes` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.ironic_nodes as plugin - - -class TestIronicNodes(base.TestCase): - - def setUp(self): - super(TestIronicNodes, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_nova_flavors.py b/tripleo_validations/tests/lookup_plugins/test_nova_flavors.py deleted file mode 100644 index a8cf3e162..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_nova_flavors.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_nova_flavors ------------------------------ - -Tests for `nova_flavors` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.nova_flavors as plugin - - -class TestNovaFlavors(base.TestCase): - - def setUp(self): - super(TestNovaFlavors, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_nova_hypervisor_statistics.py b/tripleo_validations/tests/lookup_plugins/test_nova_hypervisor_statistics.py deleted file mode 100644 index 34f938f01..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_nova_hypervisor_statistics.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_nova_hypervisor_statistics ------------------------------ - -Tests for `nova_hypervisor_statistics` module. 
-""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.nova_hypervisor_statistics as plugin - - -class TestNovaHypersvisorStatistics(base.TestCase): - - def setUp(self): - super(TestNovaHypersvisorStatistics, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_nova_servers.py b/tripleo_validations/tests/lookup_plugins/test_nova_servers.py deleted file mode 100644 index 6a1d9a3c5..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_nova_servers.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_nova_servers ------------------------------ - -Tests for `nova_servers` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.nova_servers as plugin - - -class TestNovaServers(base.TestCase): - - def setUp(self): - super(TestNovaServers, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_roles_info.py b/tripleo_validations/tests/lookup_plugins/test_roles_info.py deleted file mode 100644 index 69f32af26..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_roles_info.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_roles_info ------------------------------ - -Tests for `roles_info` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.roles_info as plugin - - -class TestRolesInfo(base.TestCase): - - def setUp(self): - super(TestRolesInfo, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_stack_resources.py b/tripleo_validations/tests/lookup_plugins/test_stack_resources.py deleted file mode 100644 index 44c5d9cb4..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_stack_resources.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_stack_resources ------------------------------ - -Tests for `stack_resources` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.stack_resources as plugin - - -class TestStackResources(base.TestCase): - - def setUp(self): - super(TestStackResources, self).setUp() diff --git a/tripleo_validations/tests/lookup_plugins/test_tht.py b/tripleo_validations/tests/lookup_plugins/test_tht.py deleted file mode 100644 index efa1fdb56..000000000 --- a/tripleo_validations/tests/lookup_plugins/test_tht.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_tht ------------------------------ - -Tests for `tht` module. -""" - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import fakes -from tripleo_validations.tests import base - -import lookup_plugins.tht as plugin - - -class TestTht(base.TestCase): - - def setUp(self): - super(TestTht, self).setUp() diff --git a/tripleo_validations/tests/test_utils.py b/tripleo_validations/tests/test_utils.py deleted file mode 100644 index 15a6dc99a..000000000 --- a/tripleo_validations/tests/test_utils.py +++ /dev/null @@ -1,164 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections.abc - -try: - from unittest import mock -except ImportError: - import mock - -from tripleo_validations.tests import base -from tripleo_validations.tests import fakes - -from tripleo_validations import utils - -PATH = [ - ('properties', collections.abc.Mapping, 'dictionary'), - ('config', collections.abc.Mapping, 'dictionary'), - ('network_config', collections.abc.Iterable, 'list'), -] - - -class TestGetNested(base.TestCase): - - def test_get_nested(self): - # Test config lookup using current format (t-h-t >= Ocata) - resources = { - 'properties': { - 'config': { - 'str_replace': { - 'params': { - '$network_config': { - 'network_config': [ - 'current' - ] - } - } - } - } - } - } - self.assertEqual( - utils.get_nested(resources, 'foo', PATH[:])[0], - 'current') - - def test_get_nested_returns_none_if_not_found(self): - # get_nested should return None if - # any of the keys cannot be found in the resources tree: - # `properties`, `config`, `network_config` - no_properties = { - 'bar': { - 'config': { - 'str_replace': { - 'params': { - '$network_config': { - 'network_config': [ - 'current' - ] - } - } - } - } - } - } - no_config = { - 'properties': { - 'bar': { - 'str_replace': { - 'params': { - '$network_config': { - 'network_config': [ - 'current' - ] - } - } - } - } - } - } - no_network_config = { - 'properties': { - 'config': { - 'str_replace': { - 'params': { - '$network_config': { - 'bar': { - 'some': 'val' - } - } - } - } - } - } - } - self.assertEqual( - utils.get_nested(no_properties, 'foo', PATH[:]), None) - self.assertEqual(utils.get_nested(no_config, 'foo', PATH[:]), None) - self.assertEqual( - utils.get_nested(no_network_config, 'foo', PATH[:]), None) - - def test_get_nested_old_format(self): - # Test config lookup using format used in t-h-t <= Newton - resources = { - 'properties': { - 'config': { - 'os_net_config': { - 'network_config': [ - 'old' - ] - } - } - } - } - self.assertEqual( - utils.get_nested(resources, 'foo', PATH[:])[0], - 'old') - - -class TestGetAuthSession(base.TestCase): - """Tests for tripleo_validations.utils.get_auth_session function. - """ - - @mock.patch('keystoneauth1.session.Session') - @mock.patch('keystoneauth1.identity.generic.Token') - def test_get_auth_session_token(self, mock_token, mock_session): - - fake_auth_vars = { - 'auth_url': 'http://www.fizz.bar/auth', - 'username': 'buzz', - 'project_name': 'project_foo', - 'os_auth_token': 'token', - 'password': 'password', - 'cacert': 'fizz_buzz_cert', - 'timeout': '999' - } - - utils.get_auth_session(fake_auth_vars) - - @mock.patch('keystoneauth1.session.Session') - @mock.patch('keystoneauth1.identity.generic.Password') - def test_get_auth_session_password(self, mock_pass, mock_session): - - fake_auth_vars = {} - - utils.get_auth_session(fake_auth_vars) - - @mock.patch('keystoneauth1.session.Session') - @mock.patch('keystoneauth1.identity.generic.Password') - def test_get_auth_session_empty_vars(self, mock_pass, mock_session): - - fake_auth_vars = {} - - utils.get_auth_session(fake_auth_vars) diff --git a/tripleo_validations/utils.py b/tripleo_validations/utils.py deleted file mode 100644 index 79a68251a..000000000 --- a/tripleo_validations/utils.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections.abc as collectionsAbc -from glanceclient import client as glance_client -from heatclient import client as heat_client -from heatclient import exc as heat_exc -from ironicclient import client as ironic_client -from keystoneauth1 import session as ks_session -from keystoneauth1.exceptions import catalog as catalog_exc -from keystoneauth1.identity import generic as ks_id -from novaclient import client as nova_client -from swiftclient.client import Connection -from swiftclient import exceptions as swiftexceptions - - -def get_auth_session(auth_variables): - auth_url = auth_variables.get('auth_url') - username = auth_variables.get('username') - project_name = auth_variables.get('project_name') - auth_token = auth_variables.get('os_auth_token') - password = auth_variables.get('password') - cacert = auth_variables.get('cacert') - timeout = auth_variables.get('timeout') - - if auth_token: - auth = ks_id.Token(auth_url=auth_url, - token=auth_token, - project_name=project_name, - project_domain_id='default') - else: - auth = ks_id.Password(auth_url=auth_url, - username=username, - password=password, - project_name=project_name, - user_domain_id='default', - project_domain_id='default') - return ks_session.Session(auth=auth, verify=cacert, timeout=timeout) - - -def get_swift_client(auth_variables): - return Connection(authurl=auth_variables.get('auth_url'), - user=auth_variables.get('username'), - key=auth_variables.get('password'), - auth_version='3', - tenant_name=auth_variables.get('project_name')) - - -def get_nova_client(auth_variables): - return nova_client.Client(2, session=get_auth_session(auth_variables)) - - -def get_glance_client(auth_variables): - return glance_client.Client(2, session=get_auth_session(auth_variables)) - - -def get_heat_client(auth_variables): - return heat_client.Client('1', session=get_auth_session(auth_variables)) - - -def get_ironic_client(auth_variables): - return ironic_client.get_client( - 1, - session=get_auth_session(auth_variables) - ) - - -def filtered(obj): - """Only return properties of obj whose value can be properly serialized.""" - return {k: v for k, v in obj.__dict__.items() - if isinstance(v, (str, int, list, dict, type(None)))} - - -def get_nested(data, name, path): - # Finds and returns a property from a nested dictionary by - # following a path of a defined set of property names and types. - - def deep_find_key(key_data, data, name): - key, instance_type, instance_name = key_data - if key in data: - if not isinstance(data[key], instance_type): - raise ValueError("The '{}' property of '{}' must be a {}." 
- "".format(key, name, instance_name)) - return data[key] - for k, v in sorted(data.items()): - if isinstance(v, collectionsAbc.Mapping): - return deep_find_key(key_data, v, name) - return None - - if not isinstance(data, collectionsAbc.Mapping): - raise ValueError( - "'{}' is not a valid resource.".format(name)) - - current_value = data - while len(path) > 0: - key_data = path.pop(0) - current_value = deep_find_key(key_data, current_value, name) - if current_value is None: - break - - return current_value diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml deleted file mode 100644 index f8becad37..000000000 --- a/zuul.d/base.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- job: - description: Base tripleo-validations job - name: tripleo-validations-centos-8-base - nodeset: centos-8-stream - parent: base - success-url: "reports.html" - failure-url: "reports.html" - pre-run: - - tests/prepare-test-host.yml - - ci/playbooks/pre.yml - run: - - ci/playbooks/run.yml - timeout: 1800 - voting: true - -- job: - name: tripleo-ci-centos-9-standalone-tv-validation - parent: tripleo-ci-centos-9-standalone - vars: - featureset: '052' - validate_services: true - enable_validation: true - validation_component: validation diff --git a/zuul.d/layout.yaml b/zuul.d/layout.yaml deleted file mode 100644 index 638e63506..000000000 --- a/zuul.d/layout.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- project: - templates: - - check-requirements - check: - jobs: - - openstack-tox-linters - - openstack-tox-py38 - - openstack-tox-py39 - gate: - jobs: - - openstack-tox-linters - - openstack-tox-py38 - - openstack-tox-py39 diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml deleted file mode 100644 index 9ee8932ef..000000000 --- a/zuul.d/molecule.yaml +++ /dev/null @@ -1,625 +0,0 @@ -# Managed via ./role-addition.yml, do not edit manually without testing that -# new role addition does not reformat it. 
---- -- project-template: - check: - jobs: - - tripleo-validations-centos-8-molecule-ceph - - tripleo-validations-centos-8-molecule-check_fips_status - - tripleo-validations-centos-8-molecule-check_manila_policy_file - - tripleo-validations-centos-8-molecule-check_for_dangling_images - - tripleo-validations-centos-8-molecule-check_kernel_version - - tripleo-validations-centos-8-molecule-check_network_gateway - - tripleo-validations-centos-8-molecule-check_rhsm_version - - tripleo-validations-centos-8-molecule-check_uc_hostname - - tripleo-validations-centos-8-molecule-check_undercloud_conf - - tripleo-validations-centos-8-molecule-compute_tsx - - tripleo-validations-centos-8-molecule-controller_token - - tripleo-validations-centos-8-molecule-controller_ulimits - - tripleo-validations-centos-8-molecule-ctlplane_ip_range - - tripleo-validations-centos-8-molecule-frr_status - - tripleo-validations-centos-8-molecule-image_serve - - tripleo-validations-centos-8-molecule-nova_status - - tripleo-validations-centos-8-molecule-nova_svirt - - tripleo-validations-centos-8-molecule-oslo_config_validator - - tripleo-validations-centos-8-molecule-overcloud_service_status - - tripleo-validations-centos-8-molecule-package_version - - tripleo-validations-centos-8-molecule-rabbitmq_limits - - tripleo-validations-centos-8-molecule-repos - - tripleo-validations-centos-8-molecule-stonith_exists - - tripleo-validations-centos-8-molecule-system_encoding - - tripleo-validations-centos-8-molecule-tls_everywhere - - tripleo-validations-centos-8-molecule-tripleo_haproxy - - tripleo-validations-centos-8-molecule-undercloud_debug - - tripleo-validations-centos-8-molecule-undercloud_heat_purge_deleted - - tripleo-validations-centos-8-molecule-validation_init - gate: - jobs: - - tripleo-validations-centos-8-molecule-ceph - - tripleo-validations-centos-8-molecule-check_fips_status - - tripleo-validations-centos-8-molecule-check_manila_policy_file - - tripleo-validations-centos-8-molecule-check_for_dangling_images - - tripleo-validations-centos-8-molecule-check_kernel_version - - tripleo-validations-centos-8-molecule-check_network_gateway - - tripleo-validations-centos-8-molecule-check_rhsm_version - - tripleo-validations-centos-8-molecule-check_uc_hostname - - tripleo-validations-centos-8-molecule-check_undercloud_conf - - tripleo-validations-centos-8-molecule-compute_tsx - - tripleo-validations-centos-8-molecule-controller_token - - tripleo-validations-centos-8-molecule-controller_ulimits - - tripleo-validations-centos-8-molecule-ctlplane_ip_range - - tripleo-validations-centos-8-molecule-frr_status - - tripleo-validations-centos-8-molecule-nova_status - - tripleo-validations-centos-8-molecule-nova_svirt - - tripleo-validations-centos-8-molecule-oslo_config_validator - - tripleo-validations-centos-8-molecule-overcloud_service_status - - tripleo-validations-centos-8-molecule-package_version - - tripleo-validations-centos-8-molecule-rabbitmq_limits - - tripleo-validations-centos-8-molecule-repos - - tripleo-validations-centos-8-molecule-stonith_exists - - tripleo-validations-centos-8-molecule-system_encoding - - tripleo-validations-centos-8-molecule-tls_everywhere - - tripleo-validations-centos-8-molecule-tripleo_haproxy - - tripleo-validations-centos-8-molecule-undercloud_debug - - tripleo-validations-centos-8-molecule-undercloud_heat_purge_deleted - - tripleo-validations-centos-8-molecule-validation_init - name: tripleo-validations-molecule-jobs -- job: - files: - - ^roles/ceph/.* - - ^tests/prepare-test-host.yml - - 
^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-ceph - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: ceph -- job: - files: - - ^roles/fips_enabled/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_fips_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: fips_enabled -- job: - files: - - ^roles/check_manila_policy_file/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_manila_policy_file - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_manila_policy_file -- job: - files: - - ^roles/check_for_dangling_images/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_for_dangling_images - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_for_dangling_images -- job: - files: - - ^roles/check_kernel_version/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_kernel_version - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_kernel_version -- job: - files: - - ^roles/check_network_gateway/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_network_gateway - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_network_gateway -- job: - files: - - ^roles/check_rhsm_version/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_rhsm_version - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_rhsm_version -- job: - files: - - ^roles/check_uc_hostname/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_uc_hostname - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_uc_hostname -- job: - files: - - ^roles/check_undercloud_conf/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-check_undercloud_conf - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: check_undercloud_conf -- job: - files: - - ^roles/collect_flavors_and_verify_profiles/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-collect_flavors_and_verify_profiles - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: collect_flavors_and_verify_profiles - voting: false -- job: - files: - - ^roles/compute_tsx/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: 
tripleo-validations-centos-8-molecule-compute_tsx - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: compute_tsx -- job: - files: - ^roles/container_status/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-container_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: container_status - voting: false -- job: - files: - ^roles/controller_token/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-controller_token - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: controller_token -- job: - files: - ^roles/controller_ulimits/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-controller_ulimits - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: controller_ulimits -- job: - files: - ^roles/ctlplane_ip_range/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-ctlplane_ip_range - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: ctlplane_ip_range -- job: - files: - ^roles/dhcp_validations/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-dhcp_validations - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: dhcp_validations - voting: false -- job: - files: - ^roles/frr_status/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-frr_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: frr_status -- job: - files: - ^roles/healthcheck_service_status/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-healthcheck_service_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: healthcheck_service_status - voting: false -- job: - files: - ^roles/image_serve/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-image_serve - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: image_serve - voting: false -- job: - files: - ^roles/ironic_boot_configuration/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-ironic_boot_configuration - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: ironic_boot_configuration - voting: false -- job: - files: -
- ^roles/mysql_open_files_limit/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-mysql_open_files_limit - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: mysql_open_files_limit - voting: false -- job: - files: - - ^roles/network_environment/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-network_environment - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: network_environment - voting: false -- job: - files: - - ^roles/neutron_sanity_check/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-neutron_sanity_check - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: neutron_sanity_check - voting: false -- job: - files: - - ^roles/node_disks/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-node_disks - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: node_disks - voting: false -- job: - files: - - ^roles/node_health/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-node_health - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: node_health - voting: false -- job: - files: - - ^roles/nova_event_callback/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-nova_event_callback - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: nova_event_callback - voting: false -- job: - files: - - ^roles/nova_status/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-nova_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: nova_status -- job: - files: - - ^roles/nova_svirt/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-nova_svirt - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: nova_svirt -- job: - files: - - ^roles/openshift_on_openstack/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-openshift_on_openstack - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: openshift_on_openstack - voting: false -- job: - files: - - ^roles/openstack_endpoints/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-openstack_endpoints - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: openstack_endpoints - voting: false -- job: - files: - - ^roles/oslo_config_validator/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - 
^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-oslo_config_validator - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: oslo_config_validator -- job: - files: - ^roles/overcloud_service_status/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-overcloud_service_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: overcloud_service_status -- job: - files: - ^roles/ovs_dpdk_pmd/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-ovs_dpdk_pmd - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: ovs_dpdk_pmd - voting: false -- job: - files: - ^roles/pacemaker_status/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-pacemaker_status - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: pacemaker_status - voting: false -- job: - files: - ^roles/package_version/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-package_version - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: package_version -- job: - files: - ^roles/rabbitmq_limits/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-rabbitmq_limits - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: rabbitmq_limits -- job: - files: - ^roles/repos/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-repos - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: repos -- job: - files: - ^roles/stack_health/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-stack_health - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: stack_health - voting: false -- job: - files: - ^roles/stonith_exists/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-stonith_exists - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: stonith_exists -- job: - files: - ^roles/switch_vlans/.* - ^tests/prepare-test-host.yml - ^ci/playbooks/pre.yml - ^ci/playbooks/run.yml - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-switch_vlans - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: switch_vlans - voting: false -- job: - files: - -
^roles/system_encoding/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-system_encoding - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: system_encoding -- job: - files: - - ^roles/tls_everywhere/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-tls_everywhere - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: tls_everywhere - voting: true -- job: - files: - - ^roles/tripleo_haproxy/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-tripleo_haproxy - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: tripleo_haproxy -- job: - files: - - ^roles/undercloud_debug/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-undercloud_debug - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: undercloud_debug -- job: - files: - - ^roles/undercloud_heat_purge_deleted/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-undercloud_heat_purge_deleted - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: undercloud_heat_purge_deleted -- job: - files: - - ^roles/undercloud_process_count/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-undercloud_process_count - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: undercloud_process_count - voting: false -- job: - files: - - ^roles/validation_init/.* - - ^tests/prepare-test-host.yml - - ^ci/playbooks/pre.yml - - ^ci/playbooks/run.yml - - ^molecule-requirements.txt - name: tripleo-validations-centos-8-molecule-validation_init - parent: tripleo-validations-centos-8-base - vars: - tripleo_validations_role_name: validation_init
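Note: the removed Python fragment at the top of this hunk walks a list of keys (`path`) through nested mappings, doing a depth-first search for each key before descending into the value it finds. The sketch below is a minimal, standalone illustration of that pattern; the names `deep_find_key` and `walk_path` and the sample data are illustrative only, the early-return-on-None behaviour is simplified, and this is not the removed plugin's actual implementation.

```python
from collections import abc


def deep_find_key(key, data):
    """Return the value for `key` found at any depth of `data`, else None."""
    if key in data:
        return data[key]
    for value in data.values():
        # Recurse into nested mappings until the key is found somewhere.
        if isinstance(value, abc.Mapping):
            found = deep_find_key(key, value)
            if found is not None:
                return found
    return None


def walk_path(data, path):
    """Follow a list of keys through nested mappings, deep-searching each key."""
    current = data
    for key in path:
        if not isinstance(current, abc.Mapping):
            return None
        current = deep_find_key(key, current)
        if current is None:
            break
    return current


if __name__ == "__main__":
    # Hypothetical resource-like structure for demonstration only.
    resource = {"attributes": {"config": {"step": 3, "extra": {"mode": "x"}}}}
    print(walk_path(resource, ["config", "mode"]))  # prints: x
```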