diff --git a/centos_iso_image.inc b/centos_iso_image.inc
index 808bee3d..d10f00fc 100644
--- a/centos_iso_image.inc
+++ b/centos_iso_image.inc
@@ -23,7 +23,3 @@ pxe-network-installer
 # platform-kickstarts
 platform-kickstarts
-
-# inventory
-inventory
-python-inventoryclient
diff --git a/centos_pkg_dirs b/centos_pkg_dirs
index f461cecc..00c335f7 100644
--- a/centos_pkg_dirs
+++ b/centos_pkg_dirs
@@ -5,6 +5,4 @@ mtce-control
 mtce-storage
 installer/pxe-network-installer
 kickstart
-inventory
-python-inventoryclient
 tools/rvmc
diff --git a/devstack/lib/metal b/devstack/lib/metal
index 79e6e599..c463ed43 100644
--- a/devstack/lib/metal
+++ b/devstack/lib/metal
@@ -103,22 +103,6 @@ function build_mtce_common {
     popd
 }
 
-function build_inventory {
-    pushd ${STX_METAL_DIR}/inventory/inventory
-
-    python setup.py build
-
-    popd
-}
-
-function build_inventory_client {
-    pushd ${STX_METAL_DIR}/python-inventoryclient/inventoryclient
-
-    python setup.py build
-
-    popd
-}
-
 function install_metal {
     install_mtce_common
     # components could be seperately installed if
@@ -134,13 +118,6 @@ function install_metal {
     if is_service_enabled mtce-storage; then
         install_mtce_storage
     fi
-
-    if is_service_enabled inventory-api || is_service_enabled inventory-conductor || is_service_enabled inventory-agent; then
-        install_inventory
-    fi
-    if is_service_enabled inventory-client; then
-        install_inventory_client
-    fi
 }
 
 function install_mtce_common {
@@ -255,64 +232,6 @@ function install_mtce_control {
     popd
 }
 
-function install_inventory {
-    local lib_dir=${PREFIX}/lib
-    local unit_dir=${PREFIX}/lib/systemd/system
-    local lib64_dir=${PREFIX}/lib64
-    local pythonroot=${lib64_dir}/python2.7/site-packages
-
-    local sysconf_dir=${SYSCONFDIR}
-    local local_etc_goenabledd=${SYSCONFDIR}/goenabled.d
-    local local_etc_inventory=${SYSCONFDIR}/inventory
-    local local_etc_motdd=${SYSCONFDIR}/motd.d
-
-    build_inventory
-
-    pushd ${STX_METAL_DIR}/inventory/inventory
-
-    sudo python setup.py install \
-        --root=/ \
-        --install-lib=$PYTHON_SITE_DIR \
-        --prefix=/usr \
-        --install-data=/usr/share \
-        --single-version-externally-managed
-
-    sudo install -d -m 755 ${local_etc_goenabledd}
-    sudo install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh ${local_etc_goenabledd}/inventory_goenabled_check.sh
-
-    sudo install -d -m 755 ${local_etc_inventory}
-    sudo install -p -D -m 755 etc/inventory/policy.json ${local_etc_inventory}/policy.json
-
-    sudo install -d -m 755 ${local_etc_motdd}
-    sudo install -p -D -m 755 etc/inventory/motd-system ${local_etc_motdd}/10-system-config
-
-    sudo install -m 755 -p -D scripts/inventory-api ${lib_dir}/ocf/resource.d/platform/inventory-api
-    sudo install -m 755 -p -D scripts/inventory-conductor ${lib_dir}/ocf/resource.d/platform/inventory-conductor
-
-    sudo install -m 644 -p -D scripts/inventory-api.service ${unit_dir}/inventory-api.service
-    sudo install -m 644 -p -D scripts/inventory-conductor.service ${unit_dir}/inventory-conductor.service
-
-    popd
-}
-
-function install_inventory_client {
-    pushd ${STX_METAL_DIR}/python-inventoryclient/inventoryclient
-
-    build_inventory_client
-
-    sudo python setup.py install \
-        --root=/ \
-        --install-lib=$PYTHON_SITE_DIR \
-        --prefix=/usr \
-        --install-data=/usr/share \
-        --single-version-externally-managed
-
-    sudo install -d -m 755 /etc/bash_completion.d/
-    sudo install -p -D -m 664 tools/inventory.bash_completion /etc/bash_completion.d/inventory.bash_completion
-
-    popd
-}
-
 function install_mtce_storage {
     local sysconf_dir=${SYSCONFDIR}
     local unit_dir=${SYSCONFDIR}/systemd/system
@@ -972,40 +891,6 @@ function cleanup_metal {
         sudo rm -rf ${sysconf_dir}/init.d/goenabledStorage
     fi
-
-    if is_service_enabled inventory-api || is_service_enabled inventory-conductor || is_service_enabled inventory-agent; then
-        cleanup_inventory
-    fi
-    if is_service_enabled inventory-client; then
-        cleanup_inventory_client
-    fi
-}
-
-function cleanup_inventory {
-    local lib_dir=${PREFIX}/lib
-    local unit_dir=${PREFIX}/lib/systemd/system
-    local lib64_dir=${PREFIX}/lib64
-    local pythonroot=${lib64_dir}/python2.7/site-packages
-
-    local sysconf_dir=${SYSCONFDIR}
-    local local_etc_goenabledd=${SYSCONFDIR}/goenabled.d
-    local local_etc_inventory=${SYSCONFDIR}/inventory
-    local local_etc_motdd=${SYSCONFDIR}/motd.d
-
-    sudo pip uninstall -y inventory
-
-    sudo rm -rf ${local_etc_goenabledd}/inventory_goenabled_check.sh
-    sudo rm -rf ${local_etc_inventory}/policy.json
-    sudo rm -rf ${local_etc_motdd}/10-system-config
-    sudo rm -rf ${lib_dir}/ocf/resource.d/platform/inventory-api
-    sudo rm -rf ${lib_dir}/ocf/resource.d/platform/inventory-conductor
-    sudo rm -rf ${unit_dir}/inventory-api.service
-    sudo rm -rf ${unit_dir}/inventory-conductor.service
-}
-
-function cleanup_inventory_client {
-    sudo pip uninstall -y inventoryclient
-
-    sudo rm -rf /etc/bash_completion.d/inventory.bash_completion
 }
 
 function uninstall_files {
diff --git a/inventory/PKG-INFO b/inventory/PKG-INFO
deleted file mode 100644
index 242f667a..00000000
--- a/inventory/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 1.1
-Name: inventory
-Version: 1.0
-Summary: Inventory
-Home-page: https://wiki.openstack.org/wiki/StarlingX
-Author: StarlingX
-Author-email: starlingx-discuss@lists.starlingx.io
-License: Apache-2.0
-
-Description: Inventory Service
-
-
-Platform: UNKNOWN
diff --git a/inventory/centos/build_srpm.data b/inventory/centos/build_srpm.data
deleted file mode 100644
index 291ca575..00000000
--- a/inventory/centos/build_srpm.data
+++ /dev/null
@@ -1,2 +0,0 @@
-SRC_DIR="inventory"
-TIS_PATCH_VER=3
diff --git a/inventory/centos/inventory.spec b/inventory/centos/inventory.spec
deleted file mode 100644
index 5ae6b632..00000000
--- a/inventory/centos/inventory.spec
+++ /dev/null
@@ -1,195 +0,0 @@
-Summary: Inventory
-Name: inventory
-Version: 1.0
-Release: %{tis_patch_ver}%{?_tis_dist}
-License: Apache-2.0
-Group: base
-Packager: Wind River
-URL: unknown
-Source0: %{name}-%{version}.tar.gz
-
-BuildRequires: cgts-client
-BuildRequires: python-setuptools
-BuildRequires: python-jsonpatch
-BuildRequires: python-keystoneauth1
-BuildRequires: python-keystonemiddleware
-BuildRequires: python-mock
-BuildRequires: python-neutronclient
-BuildRequires: python-oslo-concurrency
-BuildRequires: python-oslo-config
-BuildRequires: python-oslo-context
-BuildRequires: python-oslo-db
-BuildRequires: python-oslo-db-tests
-BuildRequires: python-oslo-i18n
-BuildRequires: python-oslo-log
-BuildRequires: python-oslo-messaging
-BuildRequires: python-oslo-middleware
-BuildRequires: python-oslo-policy
-BuildRequires: python-oslo-rootwrap
-BuildRequires: python-oslo-serialization
-BuildRequires: python-oslo-service
-BuildRequires: python-oslo-utils
-BuildRequires: python-oslo-versionedobjects
-BuildRequires: python-oslotest
-BuildRequires: python-osprofiler
-BuildRequires: python-os-testr
-BuildRequires: python-pbr
-BuildRequires: python-pecan
-BuildRequires: python-psutil
-BuildRequires: python-requests
-BuildRequires: python-retrying
-BuildRequires: python-six
-BuildRequires: python-sqlalchemy
-BuildRequires: python-stevedore
-BuildRequires: python-webob
-BuildRequires: python-wsme
-BuildRequires: systemd
-BuildRequires: systemd-devel
-
-
-Requires: python-pyudev
-Requires: pyparted
-Requires: python-ipaddr
-Requires: python-paste
-Requires: python-eventlet
-Requires: python-futurist >= 0.11.0
-Requires: python-jsonpatch
-Requires: python-keystoneauth1 >= 3.1.0
-Requires: python-keystonemiddleware >= 4.12.0
-Requires: python-neutronclient >= 6.3.0
-Requires: python-oslo-concurrency >= 3.8.0
-Requires: python-oslo-config >= 2:4.0.0
-Requires: python-oslo-context >= 2.14.0
-Requires: python-oslo-db >= 4.24.0
-Requires: python-oslo-i18n >= 2.1.0
-Requires: python-oslo-log >= 3.22.0
-Requires: python-oslo-messaging >= 5.24.2
-Requires: python-oslo-middleware >= 3.27.0
-Requires: python-oslo-policy >= 1.23.0
-Requires: python-oslo-rootwrap >= 5.0.0
-Requires: python-oslo-serialization >= 1.10.0
-Requires: python-oslo-service >= 1.10.0
-Requires: python-oslo-utils >= 3.20.0
-Requires: python-oslo-versionedobjects >= 1.17.0
-Requires: python-osprofiler >= 1.4.0
-Requires: python-pbr
-Requires: python-pecan
-Requires: python-psutil
-Requires: python-requests
-Requires: python-retrying
-Requires: python-six
-Requires: python-sqlalchemy
-Requires: python-stevedore >= 1.20.0
-Requires: python-webob >= 1.7.1
-Requires: python-wsme
-
-%description
-Inventory Service
-
-%define local_bindir /usr/bin/
-%define local_etc_goenabledd /etc/goenabled.d/
-%define local_etc_inventory /etc/inventory/
-%define local_etc_motdd /etc/motd.d/
-%define pythonroot /usr/lib64/python2.7/site-packages
-%define ocf_resourced /usr/lib/ocf/resource.d
-
-%define local_etc_initd /etc/init.d/
-%define local_etc_pmond /etc/pmon.d/
-
-%define debug_package %{nil}
-
-%prep
-%setup
-
-# Remove bundled egg-info
-rm -rf *.egg-info
-
-%build
-echo "Start inventory build"
-export PBR_VERSION=%{version}
-%{__python} setup.py build
-PYTHONPATH=. oslo-config-generator --config-file=inventory/config-generator.conf
-
-%install
-echo "Start inventory install"
-export PBR_VERSION=%{version}
-%{__python} setup.py install --root=%{buildroot} \
-                             --install-lib=%{pythonroot} \
-                             --prefix=/usr \
-                             --install-data=/usr/share \
-                             --single-version-externally-managed
-
-install -d -m 755 %{buildroot}%{local_etc_goenabledd}
-install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh %{buildroot}%{local_etc_goenabledd}/inventory_goenabled_check.sh
-
-install -d -m 755 %{buildroot}%{local_etc_inventory}
-install -p -D -m 755 etc/inventory/policy.json %{buildroot}%{local_etc_inventory}/policy.json
-
-install -d -m 755 %{buildroot}%{local_etc_motdd}
-install -p -D -m 755 etc/inventory/motd-system %{buildroot}%{local_etc_motdd}/10-system-config
-
-install -m 755 -p -D scripts/inventory-api %{buildroot}/usr/lib/ocf/resource.d/platform/inventory-api
-install -m 755 -p -D scripts/inventory-conductor %{buildroot}/usr/lib/ocf/resource.d/platform/inventory-conductor
-
-install -m 644 -p -D scripts/inventory-api.service %{buildroot}%{_unitdir}/inventory-api.service
-install -m 644 -p -D scripts/inventory-conductor.service %{buildroot}%{_unitdir}/inventory-conductor.service
-
-# TODO(jkung) activate inventory-agent with puppet integration)
-# install -d -m 755 %{buildroot}%{local_etc_initd}
-# install -p -D -m 755 scripts/inventory-agent-initd %{buildroot}%{local_etc_initd}/inventory-agent
-
-# install -d -m 755 %{buildroot}%{local_etc_pmond}
-# install -p -D -m 644 etc/inventory/inventory-agent-pmond.conf %{buildroot}%{local_etc_pmond}/inventory-agent-pmond.conf
-# install -p -D -m 644 scripts/inventory-agent.service %{buildroot}%{_unitdir}/inventory-agent.service
-
-# Install sql migration
-install -m 644 inventory/db/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{pythonroot}/inventory/db/sqlalchemy/migrate_repo/migrate.cfg
-
-# install default config files
-cd %{_builddir}/%{name}-%{version} && oslo-config-generator --config-file inventory/config-generator.conf --output-file %{_builddir}/%{name}-%{version}/inventory.conf.sample
-# install -p -D -m 644 %{_builddir}/%{name}-%{version}/inventory.conf.sample %{buildroot}%{_sysconfdir}/inventory/inventory.conf
-
-
-# TODO(jkung) activate inventory-agent
-# %post
-# /usr/bin/systemctl enable inventory-agent.service >/dev/null 2>&1
-
-
-%clean
-echo "CLEAN CALLED"
-rm -rf $RPM_BUILD_ROOT
-
-%files
-%defattr(-,root,root,-)
-%doc LICENSE
-
-%{local_bindir}/*
-
-%{pythonroot}/%{name}
-
-%{pythonroot}/%{name}-%{version}*.egg-info
-
-%{local_etc_goenabledd}/*
-
-%{local_etc_inventory}/*
-
-%{local_etc_motdd}/*
-
-# SM OCF Start/Stop/Monitor Scripts
-%{ocf_resourced}/platform/inventory-api
-%{ocf_resourced}/platform/inventory-conductor
-
-# systemctl service files
-%{_unitdir}/inventory-api.service
-%{_unitdir}/inventory-conductor.service
-
-# %{_bindir}/inventory-agent
-%{_bindir}/inventory-api
-%{_bindir}/inventory-conductor
-%{_bindir}/inventory-dbsync
-%{_bindir}/inventory-dnsmasq-lease-update
-
-# inventory-agent files
-# %{local_etc_initd}/inventory-agent
-# %{local_etc_pmond}/inventory-agent-pmond.conf
-# %{_unitdir}/inventory-agent.service
diff --git a/inventory/inventory/.coveragerc b/inventory/inventory/.coveragerc
deleted file mode 100644
index 07c3d91c..00000000
--- a/inventory/inventory/.coveragerc
+++ /dev/null
@@ -1,6 +0,0 @@
-[run]
-branch = True
-source = inventory
-
-[report]
-ignore_errors = True
diff --git a/inventory/inventory/.gitignore b/inventory/inventory/.gitignore
deleted file mode 100644
index 59b35f50..00000000
--- a/inventory/inventory/.gitignore
+++ /dev/null
@@ -1,59 +0,0 @@
-*.py[cod]
-
-# C extensions
-*.so
-
-# Packages
-*.egg*
-*.egg-info
-dist
-build
-eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-lib
-lib64
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-cover/
-.coverage*
-!.coveragerc
-.tox
-nosetests.xml
-.testrepository
-.stestr
-.venv
-
-# Translations
-*.mo
-
-# Mr Developer
-.mr.developer.cfg
-.project
-.pydevproject
-
-# Complexity
-output/*.html
-output/*/index.html
-
-# Sphinx
-doc/build
-
-# pbr generates these
-AUTHORS
-ChangeLog
-
-# Editors
-*~
-.*.swp
-.*sw?
-
-# Files created by releasenotes build
-releasenotes/build
diff --git a/inventory/inventory/.mailmap b/inventory/inventory/.mailmap
deleted file mode 100644
index 516ae6fe..00000000
--- a/inventory/inventory/.mailmap
+++ /dev/null
@@ -1,3 +0,0 @@
-# Format is:
-# <preferred e-mail> <other e-mail 1>
-# <preferred e-mail> <other e-mail 2>
diff --git a/inventory/inventory/.stestr.conf b/inventory/inventory/.stestr.conf
deleted file mode 100644
index cf348572..00000000
--- a/inventory/inventory/.stestr.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-test_path=./inventory/tests
-top_dir=./
diff --git a/inventory/inventory/CONTRIBUTING.rst b/inventory/inventory/CONTRIBUTING.rst
deleted file mode 100644
index 14e6c9a9..00000000
--- a/inventory/inventory/CONTRIBUTING.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-If you would like to contribute to the development of StarlingX, you must
-follow the steps in this page:
-
-   https://wiki.openstack.org/wiki/StarlingX/Contribution_Guidelines
-
-If you already have a good understanding of how the system works and your
-StarlingX accounts are set up, you can skip to the development workflow
-section of this documentation to learn how changes to StarlingX should be
-submitted for review via the Gerrit tool:
-
-   http://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Pull requests submitted through GitHub will be ignored.
-
-Bugs should be filed on Launchpad:
-   https://bugs.launchpad.net/starlingx
-
-Storyboard:
-   https://storyboard.openstack.org/#!/story/2002950
diff --git a/inventory/inventory/HACKING.rst b/inventory/inventory/HACKING.rst
deleted file mode 100644
index 6f58ce44..00000000
--- a/inventory/inventory/HACKING.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-inventory Style Commandments
-============================
-
-Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
diff --git a/inventory/inventory/LICENSE b/inventory/inventory/LICENSE
deleted file mode 100644
index 68c771a0..00000000
--- a/inventory/inventory/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
diff --git a/inventory/inventory/README.rst b/inventory/inventory/README.rst
deleted file mode 100644
index 9d282cc5..00000000
--- a/inventory/inventory/README.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-Placeholder to allow setup.py to work.
-Removing this requires modifying the
-setup.py manifest.
diff --git a/inventory/inventory/babel.cfg b/inventory/inventory/babel.cfg
deleted file mode 100644
index 15cd6cb7..00000000
--- a/inventory/inventory/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/inventory/inventory/doc/requirements.txt b/inventory/inventory/doc/requirements.txt
deleted file mode 100644
index afd3597a..00000000
--- a/inventory/inventory/doc/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
-openstackdocstheme>=1.18.1 # Apache-2.0
-# releasenotes
-reno>=2.5.0 # Apache-2.0
diff --git a/inventory/inventory/doc/source/admin/index.rst b/inventory/inventory/doc/source/admin/index.rst
deleted file mode 100644
index 1c8c7ae4..00000000
--- a/inventory/inventory/doc/source/admin/index.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-====================
-Administrators guide
-====================
-
-Administrators guide of inventory.
diff --git a/inventory/inventory/doc/source/cli/index.rst b/inventory/inventory/doc/source/cli/index.rst
deleted file mode 100644
index df79ad2e..00000000
--- a/inventory/inventory/doc/source/cli/index.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-================================
-Command line interface reference
-================================
-
-CLI reference of inventory.
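[Reviewer note, not part of the patch] The devstack hunks above remove the build/install/cleanup hooks and the is_service_enabled checks for inventory-api, inventory-conductor, inventory-agent and inventory-client. A local.conf that still enables those services would now reference undefined plugin functions, so it should stop enabling them. A minimal sketch; the service names are taken from the deleted code, and disable_service is standard devstack usage:

    # local.conf: drop the services deleted by this patch
    disable_service inventory-api inventory-conductor inventory-agent
    disable_service inventory-client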
diff --git a/inventory/inventory/doc/source/conf.py b/inventory/inventory/doc/source/conf.py
deleted file mode 100755
index f0f029ff..00000000
--- a/inventory/inventory/doc/source/conf.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('../..'))
-# -- General configuration ----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
-    'sphinx.ext.autodoc',
-    'openstackdocstheme',
-    #'sphinx.ext.intersphinx',
-]
-
-# autodoc generation is a bit aggressive and a nuisance when doing heavy
-# text edit cycles.
-# execute "export SPHINX_DEBUG=1" in your terminal to disable
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'inventory'
-copyright = u'2018, StarlingX'
-
-# openstackdocstheme options
-repository_name = 'stx-metal'
-bug_project = '22952'
-bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = True
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# -- Options for HTML output --------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-# html_theme_path = ["."]
-# html_theme = '_theme'
-# html_static_path = ['static']
-html_theme = 'starlingxdocs'
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = '%sdoc' % project
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass
-# [howto/manual]).
-latex_documents = [
-    ('index',
-     '%s.tex' % project,
-     u'%s Documentation' % project,
-     u'OpenStack Developers', 'manual'),
-]
-
-# Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/inventory/inventory/doc/source/configuration/index.rst b/inventory/inventory/doc/source/configuration/index.rst
deleted file mode 100644
index a16a8a47..00000000
--- a/inventory/inventory/doc/source/configuration/index.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-=============
-Configuration
-=============
-
-Configuration of inventory.
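[Reviewer note, not part of the patch] A quick way to confirm the removal leaves no dangling references in the packaging and plugin files touched here; the paths are the ones modified by this patch and the invocation is plain POSIX grep:

    grep -n 'inventory' centos_iso_image.inc centos_pkg_dirs devstack/lib/metal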
diff --git a/inventory/inventory/doc/source/contributor/contributing.rst b/inventory/inventory/doc/source/contributor/contributing.rst deleted file mode 100644 index 2aa07077..00000000 --- a/inventory/inventory/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,4 +0,0 @@ -============ -Contributing -============ -.. include:: ../../../CONTRIBUTING.rst diff --git a/inventory/inventory/doc/source/contributor/index.rst b/inventory/inventory/doc/source/contributor/index.rst deleted file mode 100644 index 38cef815..00000000 --- a/inventory/inventory/doc/source/contributor/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -========================= -Contributor Documentation -========================= - -.. toctree:: - :maxdepth: 2 - - contributing - diff --git a/inventory/inventory/doc/source/index.rst b/inventory/inventory/doc/source/index.rst deleted file mode 100644 index 2b5e6fee..00000000 --- a/inventory/inventory/doc/source/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. inventory documentation master file, created by - sphinx-quickstart on Tue Jul 9 22:26:36 2013. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -========================================= -Welcome to the documentation of inventory -========================================= - -Contents: - -.. toctree:: - :maxdepth: 2 - - readme - install/index - library/index - contributor/index - configuration/index - cli/index - user/index - admin/index - reference/index - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/inventory/inventory/doc/source/install/common_configure.rst b/inventory/inventory/doc/source/install/common_configure.rst deleted file mode 100644 index 9e4b639e..00000000 --- a/inventory/inventory/doc/source/install/common_configure.rst +++ /dev/null @@ -1,10 +0,0 @@ -2. Edit the ``/etc/inventory/inventory.conf`` file and complete the following - actions: - - * In the ``[database]`` section, configure database access: - - .. code-block:: ini - - [database] - ... - connection = mysql+pymysql://inventory:INVENTORY_DBPASS@controller/inventory diff --git a/inventory/inventory/doc/source/install/common_prerequisites.rst b/inventory/inventory/doc/source/install/common_prerequisites.rst deleted file mode 100644 index 2496034b..00000000 --- a/inventory/inventory/doc/source/install/common_prerequisites.rst +++ /dev/null @@ -1,75 +0,0 @@ -Prerequisites -------------- - -Before you install and configure the inventory service, -you must create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - * Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - * Create the ``inventory`` database: - - .. code-block:: none - - CREATE DATABASE inventory; - - * Grant proper access to the ``inventory`` database: - - .. code-block:: none - - GRANT ALL PRIVILEGES ON inventory.* TO 'inventory'@'localhost' \ - IDENTIFIED BY 'INVENTORY_DBPASS'; - GRANT ALL PRIVILEGES ON inventory.* TO 'inventory'@'%' \ - IDENTIFIED BY 'INVENTORY_DBPASS'; - - Replace ``INVENTORY_DBPASS`` with a suitable password. - - * Exit the database access client. - - .. code-block:: none - - exit; - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. 
To create the service credentials, complete these steps: - - * Create the ``inventory`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt inventory - - * Add the ``admin`` role to the ``inventory`` user: - - .. code-block:: console - - $ openstack role add --project service --user inventory admin - - * Create the inventory service entities: - - .. code-block:: console - - $ openstack service create --name inventory --description "inventory" inventory - -#. Create the inventory service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - inventory public http://controller:XXXX/vY/%\(tenant_id\)s - $ openstack endpoint create --region RegionOne \ - inventory internal http://controller:XXXX/vY/%\(tenant_id\)s - $ openstack endpoint create --region RegionOne \ - inventory admin http://controller:XXXX/vY/%\(tenant_id\)s diff --git a/inventory/inventory/doc/source/install/get_started.rst b/inventory/inventory/doc/source/install/get_started.rst deleted file mode 100644 index dbba1d78..00000000 --- a/inventory/inventory/doc/source/install/get_started.rst +++ /dev/null @@ -1,9 +0,0 @@ -========================== -inventory service overview -========================== -The inventory service provides host inventory of resources on the host. - -The inventory service consists of the following components: - -``inventory-api`` service - Accepts and responds to end user API calls... diff --git a/inventory/inventory/doc/source/install/index.rst b/inventory/inventory/doc/source/install/index.rst deleted file mode 100644 index eefb242f..00000000 --- a/inventory/inventory/doc/source/install/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -==================================== -inventory service installation guide -==================================== - -.. toctree:: - :maxdepth: 2 - - get_started.rst - install.rst - verify.rst - next-steps.rst - -The inventory service (inventory) provides... - -This chapter assumes a working setup of StarlingX following the -`StarlingX Installation Guide -`_. diff --git a/inventory/inventory/doc/source/install/install-obs.rst b/inventory/inventory/doc/source/install/install-obs.rst deleted file mode 100644 index c7c97a19..00000000 --- a/inventory/inventory/doc/source/install/install-obs.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. _install-obs: - - -Install and configure for openSUSE and SUSE Linux Enterprise -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the inventory service -for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1. - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # zypper --quiet --non-interactive install - -.. include:: common_configure.rst - - -Finalize installation ---------------------- - -Start the inventory services and configure them to start when -the system boots: - -.. code-block:: console - - # systemctl enable openstack-inventory-api.service - - # systemctl start openstack-inventory-api.service diff --git a/inventory/inventory/doc/source/install/install-rdo.rst b/inventory/inventory/doc/source/install/install-rdo.rst deleted file mode 100644 index 30bc134b..00000000 --- a/inventory/inventory/doc/source/install/install-rdo.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. 
_install-rdo: - -Install and configure for Red Hat Enterprise Linux and CentOS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -This section describes how to install and configure the inventory service -for Red Hat Enterprise Linux 7 and CentOS 7. - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # yum install - -.. include:: common_configure.rst - -Finalize installation ---------------------- - -Start the inventory services and configure them to start when -the system boots: - -.. code-block:: console - - # systemctl enable openstack-inventory-api.service - - # systemctl start openstack-inventory-api.service diff --git a/inventory/inventory/doc/source/install/install-ubuntu.rst b/inventory/inventory/doc/source/install/install-ubuntu.rst deleted file mode 100644 index 0b36a42b..00000000 --- a/inventory/inventory/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _install-ubuntu: - -Install and configure for Ubuntu -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the inventory -service for Ubuntu 14.04 (LTS). - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # apt-get update - - # apt-get install - -.. include:: common_configure.rst - -Finalize installation ---------------------- - -Restart the inventory services: - -.. code-block:: console - - # service openstack-inventory-api restart diff --git a/inventory/inventory/doc/source/install/install.rst b/inventory/inventory/doc/source/install/install.rst deleted file mode 100644 index 2d7c01d5..00000000 --- a/inventory/inventory/doc/source/install/install.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _install: - -Install and configure -~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the -inventory service, code-named inventory, on the controller node. - -This section assumes that you already have a working OpenStack -environment with at least the following components installed: -.. (add the appropriate services here and further notes) - -Note that installation and configuration vary by distribution. - -.. toctree:: - :maxdepth: 2 - - install-obs.rst - install-rdo.rst - install-ubuntu.rst diff --git a/inventory/inventory/doc/source/install/next-steps.rst b/inventory/inventory/doc/source/install/next-steps.rst deleted file mode 100644 index 435afbe0..00000000 --- a/inventory/inventory/doc/source/install/next-steps.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the inventory service. - -To add additional services, see -https://docs.openstack.org/project-install-guide/ocata/. diff --git a/inventory/inventory/doc/source/install/verify.rst b/inventory/inventory/doc/source/install/verify.rst deleted file mode 100644 index 91b2f528..00000000 --- a/inventory/inventory/doc/source/install/verify.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the inventory service. - -.. note:: - - Perform these commands on the controller node. - -#. Source the ``admin`` project credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. List service components to verify successful launch and registration - of each process: - - .. 
code-block:: console - - $ openstack inventory service list diff --git a/inventory/inventory/doc/source/library/index.rst b/inventory/inventory/doc/source/library/index.rst deleted file mode 100644 index ad7fb71b..00000000 --- a/inventory/inventory/doc/source/library/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -===== -Usage -===== - -To use inventory in a project: - - import inventory diff --git a/inventory/inventory/doc/source/readme.rst b/inventory/inventory/doc/source/readme.rst deleted file mode 100644 index a6210d3d..00000000 --- a/inventory/inventory/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst diff --git a/inventory/inventory/doc/source/reference/index.rst b/inventory/inventory/doc/source/reference/index.rst deleted file mode 100644 index 96df64c6..00000000 --- a/inventory/inventory/doc/source/reference/index.rst +++ /dev/null @@ -1,5 +0,0 @@ -========== -References -========== - -References of inventory. diff --git a/inventory/inventory/doc/source/user/index.rst b/inventory/inventory/doc/source/user/index.rst deleted file mode 100644 index 3dc33bf0..00000000 --- a/inventory/inventory/doc/source/user/index.rst +++ /dev/null @@ -1,5 +0,0 @@ -=========== -Users guide -=========== - -Users guide of inventory. diff --git a/inventory/inventory/etc/inventory/delete_load.sh b/inventory/inventory/etc/inventory/delete_load.sh deleted file mode 100644 index a0d0155d..00000000 --- a/inventory/inventory/etc/inventory/delete_load.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Copyright (c) 2015-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# This script removes a load from a controller. -# The load version is passed in as the first variable. - -: ${1?"Usage $0 VERSION"} -VERSION=$1 - -FEED_DIR=/www/pages/feed/rel-$VERSION - -rm -f /pxeboot/pxelinux.cfg.files/*-$VERSION -rm -rf /pxeboot/rel-$VERSION - -rm -f /usr/sbin/pxeboot-update-$VERSION.sh - -rm -rf $FEED_DIR diff --git a/inventory/inventory/etc/inventory/inventory-agent-pmond.conf b/inventory/inventory/etc/inventory/inventory-agent-pmond.conf deleted file mode 100644 index fd20eda5..00000000 --- a/inventory/inventory/etc/inventory/inventory-agent-pmond.conf +++ /dev/null @@ -1,9 +0,0 @@ -[process] -process = inventory-agent -pidfile = /var/run/inventory-agent.pid -script = /etc/init.d/inventory-agent -style = lsb ; ocf or lsb -severity = major ; minor, major, critical -restarts = 3 ; restarts before error assertion -interval = 5 ; number of seconds to wait between restarts -debounce = 20 ; number of seconds to wait before degrade clear diff --git a/inventory/inventory/etc/inventory/inventory_goenabled_check.sh b/inventory/inventory/etc/inventory/inventory_goenabled_check.sh deleted file mode 100644 index 2e57a594..00000000 --- a/inventory/inventory/etc/inventory/inventory_goenabled_check.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# Inventory "goenabled" check. -# Wait for inventory information to be posted prior to allowing goenabled. - -NAME=$(basename $0) -INVENTORY_READY_FLAG=/var/run/.inventory_ready - -# logfile=/var/log/platform.log - -function LOG { - logger "$NAME: $*" - # echo "`date "+%FT%T"`: $NAME: $*" >> $logfile -} - -count=0 -while [ $count -le 45 ]; do - if [ -f $INVENTORY_READY_FLAG ]; then - LOG "Inventory is ready. Passing goenabled check." 
- echo "Inventory goenabled iterations PASS $count" - LOG "Inventory goenabled iterations PASS $count" - exit 0 - fi - sleep 1 - count=$(($count+1)) -done - -echo "Inventory goenabled iterations FAIL $count" - -LOG "Inventory is not ready. Continue." -exit 0 diff --git a/inventory/inventory/etc/inventory/motd-system b/inventory/inventory/etc/inventory/motd-system deleted file mode 100644 index 7ccde4e1..00000000 --- a/inventory/inventory/etc/inventory/motd-system +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# update inventory MOTD if motd.system content present - -[ -f /etc/inventory/motd.system ] && cat /etc/inventory/motd.system || true diff --git a/inventory/inventory/etc/inventory/policy.json b/inventory/inventory/etc/inventory/policy.json deleted file mode 100644 index 94ac3a5b..00000000 --- a/inventory/inventory/etc/inventory/policy.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "admin": "role:admin or role:administrator", - "admin_api": "is_admin:True", - "default": "rule:admin_api" -} diff --git a/inventory/inventory/inventory/__init__.py b/inventory/inventory/inventory/__init__.py deleted file mode 100644 index 2a16b190..00000000 --- a/inventory/inventory/inventory/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import pbr.version - - -__version__ = pbr.version.VersionInfo( - 'inventory').version_string() diff --git a/inventory/inventory/inventory/agent/__init__.py b/inventory/inventory/inventory/agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/agent/base_manager.py b/inventory/inventory/inventory/agent/base_manager.py deleted file mode 100644 index 88ac8eb8..00000000 --- a/inventory/inventory/inventory/agent/base_manager.py +++ /dev/null @@ -1,114 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -"""Base agent manager functionality.""" - -import futurist -from futurist import periodics -from futurist import rejection -import inspect -from inventory.common import exception -from inventory.common.i18n import _ -from oslo_config import cfg -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class BaseAgentManager(object): - - def __init__(self, host, topic): - super(BaseAgentManager, self).__init__() - if not host: - host = cfg.CONF.host - self.host = host - self.topic = topic - self._started = False - - def init_host(self, admin_context=None): - """Initialize the agent host. - - :param admin_context: the admin context to pass to periodic tasks. - :raises: RuntimeError when agent is already running. - """ - if self._started: - raise RuntimeError(_('Attempt to start an already running ' - 'agent manager')) - - rejection_func = rejection.reject_when_reached(64) - # CONF.conductor.workers_pool_size) - self._executor = futurist.GreenThreadPoolExecutor( - 64, check_and_reject=rejection_func) - # JK max_workers=CONF.conductor.workers_pool_size, - """Executor for performing tasks async.""" - - # Collect driver-specific periodic tasks. 
- # Conductor periodic tasks accept context argument, - LOG.info('Collecting periodic tasks') - self._periodic_task_callables = [] - self._collect_periodic_tasks(self, (admin_context,)) - - self._periodic_tasks = periodics.PeriodicWorker( - self._periodic_task_callables, - executor_factory=periodics.ExistingExecutor(self._executor)) - - # Start periodic tasks - self._periodic_tasks_worker = self._executor.submit( - self._periodic_tasks.start, allow_empty=True) - self._periodic_tasks_worker.add_done_callback( - self._on_periodic_tasks_stop) - - self._started = True - - def del_host(self, deregister=True): - # Conductor deregistration fails if called on non-initialized - # agent (e.g. when rpc server is unreachable). - if not hasattr(self, 'agent'): - return - - self._periodic_tasks.stop() - self._periodic_tasks.wait() - self._executor.shutdown(wait=True) - self._started = False - - def _collect_periodic_tasks(self, obj, args): - """Collect periodic tasks from a given object. - - Populates self._periodic_task_callables with tuples - (callable, args, kwargs). - - :param obj: object containing periodic tasks as methods - :param args: tuple with arguments to pass to every task - """ - for name, member in inspect.getmembers(obj): - if periodics.is_periodic(member): - LOG.debug('Found periodic task %(owner)s.%(member)s', - {'owner': obj.__class__.__name__, - 'member': name}) - self._periodic_task_callables.append((member, args, {})) - - def _on_periodic_tasks_stop(self, fut): - try: - fut.result() - except Exception as exc: - LOG.critical('Periodic tasks worker has failed: %s', exc) - else: - LOG.info('Successfully shut down periodic tasks') - - def _spawn_worker(self, func, *args, **kwargs): - - """Create a greenthread to run func(*args, **kwargs). - - Spawns a greenthread if there are free slots in pool, otherwise raises - exception. Execution control returns immediately to the caller. - - :returns: Future object. - :raises: NoFreeConductorWorker if worker pool is currently full. - - """ - try: - return self._executor.submit(func, *args, **kwargs) - except futurist.RejectedSubmission: - raise exception.NoFreeConductorWorker() diff --git a/inventory/inventory/inventory/agent/disk.py b/inventory/inventory/inventory/agent/disk.py deleted file mode 100644 index 05f2a3f9..00000000 --- a/inventory/inventory/inventory/agent/disk.py +++ /dev/null @@ -1,369 +0,0 @@ -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
-# - -""" inventory idisk Utilities and helper functions.""" - -import os -import pyudev -import re -import subprocess -import sys - -from inventory.common import constants -from inventory.common import context -from inventory.common import utils -from inventory.conductor import rpcapi as conductor_rpcapi -from oslo_log import log - - -LOG = log.getLogger(__name__) - - -class DiskOperator(object): - '''Class to encapsulate Disk operations for System Inventory''' - - def __init__(self): - - self.num_cpus = 0 - self.num_nodes = 0 - self.float_cpuset = 0 - self.default_hugepage_size_kB = 0 - self.total_memory_MiB = 0 - self.free_memory_MiB = 0 - self.total_memory_nodes_MiB = [] - self.free_memory_nodes_MiB = [] - self.topology = {} - - def convert_range_string_to_list(self, s): - olist = [] - s = s.strip() - if s: - for part in s.split(','): - if '-' in part: - a, b = part.split('-') - a, b = int(a), int(b) - olist.extend(range(a, b + 1)) - else: - a = int(part) - olist.append(a) - olist.sort() - return olist - - def get_rootfs_node(self): - cmdline_file = '/proc/cmdline' - device = None - - with open(cmdline_file, 'r') as f: - for line in f: - for param in line.split(): - params = param.split("=", 1) - if params[0] == "root": - if "UUID=" in params[1]: - key, uuid = params[1].split("=") - symlink = "/dev/disk/by-uuid/%s" % uuid - device = os.path.basename(os.readlink(symlink)) - else: - device = os.path.basename(params[1]) - - if device is not None: - if constants.DEVICE_NAME_NVME in device: - re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)') - else: - re_line = re.compile(r'^(\D*)') - match = re_line.search(device) - if match: - return os.path.join("/dev", match.group(1)) - - return - - @utils.skip_udev_partition_probe - def get_disk_available_mib(self, device_node): - # Check that partition table format is GPT. - # Return 0 if not. - if not utils.disk_is_gpt(device_node=device_node): - LOG.debug("Format of disk node %s is not GPT." % device_node) - return 0 - - pvs_command = '{} {}'.format('pvs | grep -w ', device_node) - pvs_process = subprocess.Popen(pvs_command, stdout=subprocess.PIPE, - shell=True) - pvs_output = pvs_process.stdout.read() - - if pvs_output: - LOG.debug("Disk %s is completely used by a PV => 0 available mib." - % device_node) - return 0 - - # Get sector size command. - sector_size_bytes_cmd = '{} {}'.format('blockdev --getss', device_node) - - # Get total free space in sectors command. - avail_space_sectors_cmd = '{} {} {}'.format( - 'sgdisk -p', device_node, "| grep \"Total free space\"") - - # Get the sector size. - sector_size_bytes_process = subprocess.Popen( - sector_size_bytes_cmd, stdout=subprocess.PIPE, shell=True) - sector_size_bytes = sector_size_bytes_process.stdout.read().rstrip() - - # Get the free space. - avail_space_sectors_process = subprocess.Popen( - avail_space_sectors_cmd, stdout=subprocess.PIPE, shell=True) - avail_space_sectors_output = avail_space_sectors_process.stdout.read() - avail_space_sectors = re.findall( - '\d+', avail_space_sectors_output)[0].rstrip() - - # Free space in MiB. - avail_space_mib = (int(sector_size_bytes) * int(avail_space_sectors) / - (1024 ** 2)) - - # Keep 2 MiB for partition table. 
- if avail_space_mib >= 2: - avail_space_mib = avail_space_mib - 2 - else: - avail_space_mib = 0 - - return avail_space_mib - - def disk_format_gpt(self, host_uuid, idisk_dict, is_cinder_device): - disk_node = idisk_dict.get('device_path') - - utils.disk_wipe(disk_node) - utils.execute('parted', disk_node, 'mklabel', 'gpt') - - if is_cinder_device: - LOG.debug("Removing .node_cinder_lvm_config_complete_file") - try: - os.remove(constants.NODE_CINDER_LVM_CONFIG_COMPLETE_FILE) - except OSError: - LOG.error(".node_cinder_lvm_config_complete_file not present.") - pass - - # On SX ensure wipe succeeds before DB is updated. - # Flag file is used to mark wiping in progress. - try: - os.remove(constants.DISK_WIPE_IN_PROGRESS_FLAG) - except OSError: - # it's ok if file is not present. - pass - - # We need to send the updated info about the host disks back to - # the conductor. - idisk_update = self.idisk_get() - ctxt = context.get_admin_context() - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - rpcapi.idisk_update_by_ihost(ctxt, - host_uuid, - idisk_update) - - def handle_exception(self, e): - traceback = sys.exc_info()[-1] - LOG.error("%s @ %s:%s" % ( - e, traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)) - - def is_rotational(self, device_name): - """Find out if a certain disk is rotational or not. Mostly used for - determining if disk is HDD or SSD. - """ - - # Obtain the path to the rotational file for the current device. - device = device_name['DEVNAME'].split('/')[-1] - rotational_path = "/sys/block/{device}/queue/rotational"\ - .format(device=device) - - rotational = None - # Read file and remove trailing whitespaces. - if os.path.isfile(rotational_path): - with open(rotational_path, 'r') as rot_file: - rotational = rot_file.read() - rotational = rotational.rstrip() - - return rotational - - def get_device_id_wwn(self, device): - """Determine the ID and WWN of a disk from the value of the DEVLINKS - attribute. - - Note: This data is not currently being used for anything. We are - gathering this information so conductor can store for future use. - """ - # The ID and WWN default to None. - device_id = None - device_wwn = None - - # If there is no DEVLINKS attribute, return None. - if 'DEVLINKS' not in device: - return device_id, device_wwn - - # Extract the ID and the WWN. - LOG.debug("[DiskEnum] get_device_id_wwn: devlinks= %s" % - device['DEVLINKS']) - devlinks = device['DEVLINKS'].split() - for devlink in devlinks: - if "by-id" in devlink: - if "wwn" not in devlink: - device_id = devlink.split('/')[-1] - LOG.debug("[DiskEnum] by-id: %s id: %s" % (devlink, - device_id)) - else: - device_wwn = devlink.split('/')[-1] - LOG.debug("[DiskEnum] by-wwn: %s wwn: %s" % (devlink, - device_wwn)) - - return device_id, device_wwn - - def idisk_get(self): - """Enumerate disk topology based on: - - :param self - :returns list of disk and attributes - """ - idisk = [] - context = pyudev.Context() - - for device in context.list_devices(DEVTYPE='disk'): - if not utils.is_system_usable_block_device(device): - continue - - if device['MAJOR'] in constants.VALID_MAJOR_LIST: - if 'ID_PATH' in device: - device_path = "/dev/disk/by-path/" + device['ID_PATH'] - LOG.debug("[DiskEnum] device_path: %s ", device_path) - else: - # We should always have a udev supplied /dev/disk/by-path - # value as a matter of normal operation. We do not expect - # this to occur, thus the error. 
- # - # The kickstart files for the host install require the - # by-path value also to be present or the host install will - # fail. Since the installer and the runtime share the same - # kernel/udev we should not see this message on an - # installed system. - device_path = None - LOG.error("Device %s does not have an ID_PATH value " - "provided by udev" % device.device_node) - - size_mib = 0 - available_mib = 0 - model_num = '' - serial_id = '' - - # Can merge all try/except in one block but this allows - # at least attributes with no exception to be filled - try: - size_mib = utils.get_disk_capacity_mib(device.device_node) - except Exception as e: - self.handle_exception("Could not retrieve disk size - %s " - % e) - - try: - available_mib = self.get_disk_available_mib( - device_node=device.device_node) - except Exception as e: - self.handle_exception( - "Could not retrieve disk %s free space" % e) - - try: - # ID_MODEL received from udev is not correct for disks that - # are used entirely for LVM. LVM replaced the model ID with - # its own identifier that starts with "LVM PV".For this - # reason we will attempt to retrieve the correct model ID - # by using 2 different commands: hdparm and lsblk and - # hdparm. If one of them fails, the other one can attempt - # to retrieve the information. Else we use udev. - - # try hdparm command first - hdparm_command = 'hdparm -I %s |grep Model' % ( - device.get('DEVNAME')) - hdparm_process = subprocess.Popen( - hdparm_command, - stdout=subprocess.PIPE, - shell=True) - hdparm_output = hdparm_process.communicate()[0] - if hdparm_process.returncode == 0: - second_half = hdparm_output.split(':')[1] - model_num = second_half.strip() - else: - # try lsblk command - lsblk_command = 'lsblk -dn --output MODEL %s' % ( - device.get('DEVNAME')) - lsblk_process = subprocess.Popen( - lsblk_command, - stdout=subprocess.PIPE, - shell=True) - lsblk_output = lsblk_process.communicate()[0] - if lsblk_process.returncode == 0: - model_num = lsblk_output.strip() - else: - # both hdparm and lsblk commands failed, try udev - model_num = device.get('ID_MODEL') - if not model_num: - model_num = constants.DEVICE_MODEL_UNKNOWN - except Exception as e: - self.handle_exception("Could not retrieve disk model " - "for disk %s. Exception: %s" % - (device.get('DEVNAME'), e)) - try: - if 'ID_SCSI_SERIAL' in device: - serial_id = device['ID_SCSI_SERIAL'] - else: - serial_id = device['ID_SERIAL_SHORT'] - except Exception as e: - self.handle_exception("Could not retrieve disk " - "serial ID - %s " % e) - - capabilities = dict() - if model_num: - capabilities.update({'model_num': model_num}) - - if self.get_rootfs_node() == device.device_node: - capabilities.update({'stor_function': 'rootfs'}) - - rotational = self.is_rotational(device) - device_type = device.device_type - - rotation_rate = constants.DEVICE_TYPE_UNDETERMINED - if rotational is '1': - device_type = constants.DEVICE_TYPE_HDD - if 'ID_ATA_ROTATION_RATE_RPM' in device: - rotation_rate = device['ID_ATA_ROTATION_RATE_RPM'] - elif rotational is '0': - if constants.DEVICE_NAME_NVME in device.device_node: - device_type = constants.DEVICE_TYPE_NVME - else: - device_type = constants.DEVICE_TYPE_SSD - rotation_rate = constants.DEVICE_TYPE_NA - - # TODO(sc) else: what are other possible stor_function value? - # or do we just use pair { 'is_rootfs': True } instead? - # Obtain device ID and WWN. 
- device_id, device_wwn = self.get_device_id_wwn(device) - - attr = { - 'device_node': device.device_node, - 'device_num': device.device_number, - 'device_type': device_type, - 'device_path': device_path, - 'device_id': device_id, - 'device_wwn': device_wwn, - 'size_mib': size_mib, - 'available_mib': available_mib, - 'serial_id': serial_id, - 'capabilities': capabilities, - 'rpm': rotation_rate, - } - - idisk.append(attr) - - LOG.debug("idisk= %s" % idisk) - - return idisk diff --git a/inventory/inventory/inventory/agent/lldp/__init__.py b/inventory/inventory/inventory/agent/lldp/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/agent/lldp/config.py b/inventory/inventory/inventory/agent/lldp/config.py deleted file mode 100644 index 40e63c58..00000000 --- a/inventory/inventory/inventory/agent/lldp/config.py +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -from oslo_config import cfg -from oslo_utils._i18n import _ - -INVENTORY_LLDP_OPTS = [ - cfg.ListOpt('drivers', - default=['lldpd'], - help=_("An ordered list of inventory LLDP driver " - "entrypoints to be loaded from the " - "inventory.agent namespace.")), -] - -cfg.CONF.register_opts(INVENTORY_LLDP_OPTS, group="lldp") diff --git a/inventory/inventory/inventory/agent/lldp/drivers/__init__.py b/inventory/inventory/inventory/agent/lldp/drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/agent/lldp/drivers/base.py b/inventory/inventory/inventory/agent/lldp/drivers/base.py deleted file mode 100644 index f69157d1..00000000 --- a/inventory/inventory/inventory/agent/lldp/drivers/base.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class InventoryLldpDriverBase(object): - """Inventory LLDP Driver Base Class.""" - - @abc.abstractmethod - def lldp_has_neighbour(self, name): - pass - - @abc.abstractmethod - def lldp_update(self): - pass - - @abc.abstractmethod - def lldp_agents_list(self): - pass - - @abc.abstractmethod - def lldp_neighbours_list(self): - pass - - @abc.abstractmethod - def lldp_agents_clear(self): - pass - - @abc.abstractmethod - def lldp_neighbours_clear(self): - pass - - @abc.abstractmethod - def lldp_update_systemname(self, systemname): - pass diff --git a/inventory/inventory/inventory/agent/lldp/drivers/lldpd/__init__.py b/inventory/inventory/inventory/agent/lldp/drivers/lldpd/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/agent/lldp/drivers/lldpd/driver.py b/inventory/inventory/inventory/agent/lldp/drivers/lldpd/driver.py deleted file mode 100644 index 3f8d538a..00000000 --- a/inventory/inventory/inventory/agent/lldp/drivers/lldpd/driver.py +++ /dev/null @@ -1,321 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
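InventoryLldpDriverBase above is the complete contract a driver must satisfy before it can be listed in the [lldp]/drivers option and loaded from the inventory.agent.lldp.drivers entry-point namespace. A minimal sketch of a conforming driver, assuming only the interface shown in base.py; the class name NullLldpDriver and its no-op bodies are illustrative, not part of the removed code:

    from inventory.agent.lldp.drivers import base

    class NullLldpDriver(base.InventoryLldpDriverBase):
        """Stub driver that reports no LLDP data (useful for testing)."""

        def lldp_has_neighbour(self, name):
            return False

        def lldp_update(self):
            pass

        def lldp_agents_list(self):
            return []

        def lldp_neighbours_list(self):
            return []

        def lldp_agents_clear(self):
            pass

        def lldp_neighbours_clear(self):
            pass

        def lldp_update_systemname(self, systemname):
            pass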
-# - -from oslo_log import log as logging - -import simplejson as json -import subprocess - -from inventory.agent.lldp.drivers import base -from inventory.agent.lldp import plugin -from inventory.common import k_lldp - -LOG = logging.getLogger(__name__) - - -class InventoryLldpdAgentDriver(base.InventoryLldpDriverBase): - - def __init__(self, **kwargs): - self.client = "" - self.agents = [] - self.neighbours = [] - self.current_neighbours = [] - self.previous_neighbours = [] - self.current_agents = [] - self.previous_agents = [] - self.agent_audit_count = 0 - self.neighbour_audit_count = 0 - - def initialize(self): - self.__init__() - - @staticmethod - def _lldpd_get_agent_status(): - json_obj = json - p = subprocess.Popen(["lldpcli", "-f", "json", "show", - "configuration"], - stdout=subprocess.PIPE) - data = json_obj.loads(p.communicate()[0]) - - configuration = data['configuration'][0] - config = configuration['config'][0] - rx_only = config['rx-only'][0] - - if rx_only.get("value") == "no": - return "rx=enabled,tx=enabled" - else: - return "rx=enabled,tx=disabled" - - @staticmethod - def _lldpd_get_attrs(iface): - name_or_uuid = None - chassis_id = None - system_name = None - system_desc = None - capability = None - management_address = None - port_desc = None - dot1_lag = None - dot1_port_vid = None - dot1_vid_digest = None - dot1_mgmt_vid = None - dot1_vlan_names = None - dot1_proto_vids = None - dot1_proto_ids = None - dot3_mac_status = None - dot3_max_frame = None - dot3_power_mdi = None - ttl = None - attrs = {} - - # Note: dot1_vid_digest, dot1_mgmt_vid are not currently supported - # by the lldpd daemon - - name_or_uuid = iface.get("name") - chassis = iface.get("chassis")[0] - port = iface.get("port")[0] - - if not chassis.get('id'): - return attrs - chassis_id = chassis['id'][0].get("value") - - if not port.get('id'): - return attrs - port_id = port["id"][0].get("value") - - if not port.get('ttl'): - return attrs - ttl = port['ttl'][0].get("value") - - if chassis.get("name"): - system_name = chassis['name'][0].get("value") - - if chassis.get("descr"): - system_desc = chassis['descr'][0].get("value") - - if chassis.get("capability"): - capability = "" - for cap in chassis["capability"]: - if cap.get("enabled"): - if capability: - capability += ", " - capability += cap.get("type").lower() - - if chassis.get("mgmt-ip"): - management_address = "" - for addr in chassis["mgmt-ip"]: - if management_address: - management_address += ", " - management_address += addr.get("value").lower() - - if port.get("descr"): - port_desc = port["descr"][0].get("value") - - if port.get("link-aggregation"): - dot1_lag_supported = port["link-aggregation"][0].get("supported") - dot1_lag_enabled = port["link-aggregation"][0].get("enabled") - dot1_lag = "capable=" - if dot1_lag_supported: - dot1_lag += "y," - else: - dot1_lag += "n," - dot1_lag += "enabled=" - if dot1_lag_enabled: - dot1_lag += "y" - else: - dot1_lag += "n" - - if port.get("auto-negotiation"): - port_auto_neg_support = port["auto-negotiation"][0].get( - "supported") - port_auto_neg_enabled = port["auto-negotiation"][0].get("enabled") - dot3_mac_status = "auto-negotiation-capable=" - if port_auto_neg_support: - dot3_mac_status += "y," - else: - dot3_mac_status += "n," - dot3_mac_status += "auto-negotiation-enabled=" - if port_auto_neg_enabled: - dot3_mac_status += "y," - else: - dot3_mac_status += "n," - advertised = "" - if port.get("auto-negotiation")[0].get("advertised"): - for adv in port["auto-negotiation"][0].get("advertised"): - if 
advertised: - advertised += ", " - type = adv.get("type").lower() - if adv.get("hd") and not adv.get("fd"): - type += "hd" - elif adv.get("fd"): - type += "fd" - advertised += type - dot3_mac_status += advertised - - if port.get("mfs"): - dot3_max_frame = port["mfs"][0].get("value") - - if port.get("power"): - power_mdi_support = port["power"][0].get("supported") - power_mdi_enabled = port["power"][0].get("enabled") - power_mdi_devicetype = port["power"][0].get("device-type")[0].get( - "value") - power_mdi_pairs = port["power"][0].get("pairs")[0].get("value") - power_mdi_class = port["power"][0].get("class")[0].get("value") - dot3_power_mdi = "power-mdi-supported=" - if power_mdi_support: - dot3_power_mdi += "y," - else: - dot3_power_mdi += "n," - dot3_power_mdi += "power-mdi-enabled=" - if power_mdi_enabled: - dot3_power_mdi += "y," - else: - dot3_power_mdi += "n," - if power_mdi_support and power_mdi_enabled: - dot3_power_mdi += "device-type=" + power_mdi_devicetype - dot3_power_mdi += ",pairs=" + power_mdi_pairs - dot3_power_mdi += ",class=" + power_mdi_class - - vlans = None - if iface.get("vlan"): - vlans = iface.get("vlan") - - if vlans: - dot1_vlan_names = "" - for vlan in vlans: - if vlan.get("pvid"): - dot1_port_vid = vlan.get("vlan-id") - continue - if dot1_vlan_names: - dot1_vlan_names += ", " - dot1_vlan_names += vlan.get("value") - - ppvids = None - if iface.get("ppvids"): - ppvids = iface.get("ppvid") - - if ppvids: - dot1_proto_vids = "" - for ppvid in ppvids: - if dot1_proto_vids: - dot1_proto_vids += ", " - dot1_proto_vids += ppvid.get("value") - - pids = None - if iface.get("pi"): - pids = iface.get('pi') - dot1_proto_ids = "" - for id in pids: - if dot1_proto_ids: - dot1_proto_ids += ", " - dot1_proto_ids += id.get("value") - - msap = chassis_id + "," + port_id - - attrs = {"name_or_uuid": name_or_uuid, - k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: chassis_id, - k_lldp.LLDP_TLV_TYPE_PORT_ID: port_id, - k_lldp.LLDP_TLV_TYPE_TTL: ttl, - "msap": msap, - k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: system_name, - k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: system_desc, - k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: capability, - k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: management_address, - k_lldp.LLDP_TLV_TYPE_PORT_DESC: port_desc, - k_lldp.LLDP_TLV_TYPE_DOT1_LAG: dot1_lag, - k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID: dot1_port_vid, - k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST: dot1_vid_digest, - k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID: dot1_mgmt_vid, - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: dot1_vlan_names, - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS: dot1_proto_vids, - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS: dot1_proto_ids, - k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS: dot3_mac_status, - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: dot3_max_frame, - k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI: dot3_power_mdi} - - return attrs - - def lldp_has_neighbour(self, name): - p = subprocess.check_output(["lldpcli", "-f", "keyvalue", "show", - "neighbors", "summary", "ports", name]) - return len(p) > 0 - - def lldp_update(self): - subprocess.call(['lldpcli', 'update']) - - def lldp_agents_list(self): - json_obj = json - lldp_agents = [] - - p = subprocess.Popen(["lldpcli", "-f", "json", "show", "interface", - "detail"], stdout=subprocess.PIPE) - data = json_obj.loads(p.communicate()[0]) - - lldp = data['lldp'][0] - - if not lldp.get('interface'): - return lldp_agents - - for iface in lldp['interface']: - agent_attrs = self._lldpd_get_attrs(iface) - status = self._lldpd_get_agent_status() - agent_attrs.update({"status": status}) - agent = plugin.Agent(**agent_attrs) - 
lldp_agents.append(agent) - - return lldp_agents - - def lldp_agents_clear(self): - self.current_agents = [] - self.previous_agents = [] - - def lldp_neighbours_list(self): - json_obj = json - lldp_neighbours = [] - p = subprocess.Popen(["lldpcli", "-f", "json", "show", "neighbor", - "detail"], stdout=subprocess.PIPE) - data = json_obj.loads(p.communicate()[0]) - - lldp = data['lldp'][0] - - if not lldp.get('interface'): - return lldp_neighbours - - for iface in lldp['interface']: - neighbour_attrs = self._lldpd_get_attrs(iface) - neighbour = plugin.Neighbour(**neighbour_attrs) - lldp_neighbours.append(neighbour) - - return lldp_neighbours - - def lldp_neighbours_clear(self): - self.current_neighbours = [] - self.previous_neighbours = [] - - def lldp_update_systemname(self, systemname): - p = subprocess.Popen(["lldpcli", "-f", "json", "show", "chassis"], - stdout=subprocess.PIPE) - data = json.loads(p.communicate()[0]) - - local_chassis = data['local-chassis'][0] - chassis = local_chassis['chassis'][0] - name = chassis.get('name', None) - if name is None or not name[0].get("value"): - return - name = name[0] - - hostname = name.get("value").partition(':')[0] - - newname = hostname + ":" + systemname - - p = subprocess.Popen(["lldpcli", "configure", "system", "hostname", - newname], stdout=subprocess.PIPE) diff --git a/inventory/inventory/inventory/agent/lldp/drivers/ovs/__init__.py b/inventory/inventory/inventory/agent/lldp/drivers/ovs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/agent/lldp/drivers/ovs/driver.py b/inventory/inventory/inventory/agent/lldp/drivers/ovs/driver.py deleted file mode 100644 index 70e71c27..00000000 --- a/inventory/inventory/inventory/agent/lldp/drivers/ovs/driver.py +++ /dev/null @@ -1,167 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
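The lldpd driver above shells out to lldpcli and parses its JSON output instead of scraping text. A condensed sketch of that pattern, assuming the same lldpd JSON schema the driver targets (top-level values wrapped in single-element lists); lldp_neighbour_names is a hypothetical helper, not part of the removed driver:

    import json
    import subprocess

    def lldp_neighbour_names():
        """Return names of interfaces that currently see an LLDP neighbour."""
        p = subprocess.Popen(
            ["lldpcli", "-f", "json", "show", "neighbor", "detail"],
            stdout=subprocess.PIPE)
        data = json.loads(p.communicate()[0])
        lldp = data["lldp"][0]
        # lldpd omits the 'interface' key when no neighbours have been heard
        return [iface.get("name") for iface in lldp.get("interface", [])]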
-# - -import simplejson as json -import subprocess - -from oslo_log import log as logging - -from inventory.agent.lldp.drivers.lldpd import driver as lldpd_driver -from inventory.common import k_lldp - -LOG = logging.getLogger(__name__) - - -class InventoryOVSAgentDriver(lldpd_driver.InventoryLldpdAgentDriver): - - def run_cmd(self, cmd): - p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - p.wait() - output, error = p.communicate() - if p.returncode != 0: - LOG.error("Failed to run command %s: error: %s", cmd, error) - return None - return output - - def lldp_ovs_get_interface_port_map(self): - interface_port_map = {} - - cmd = "ovs-vsctl --timeout 10 --format json "\ - "--columns name,_uuid,interfaces list Port" - - output = self.run_cmd(cmd) - if not output: - return - - ports = json.loads(output) - ports = ports['data'] - - for port in ports: - port_uuid = port[1][1] - interfaces = port[2][1] - - if isinstance(interfaces, list): - for interface in interfaces: - interface_uuid = interface[1] - interface_port_map[interface_uuid] = port_uuid - else: - interface_uuid = interfaces - interface_port_map[interface_uuid] = port_uuid - - return interface_port_map - - def lldp_ovs_get_port_bridge_map(self): - port_bridge_map = {} - - cmd = "ovs-vsctl --timeout 10 --format json "\ - "--columns name,ports list Bridge" - output = self.run_cmd(cmd) - if not output: - return - - bridges = json.loads(output) - bridges = bridges['data'] - - for bridge in bridges: - bridge_name = bridge[0] - port_set = bridge[1][1] - for port in port_set: - value = port[1] - port_bridge_map[value] = bridge_name - - return port_bridge_map - - def lldp_ovs_lldp_flow_exists(self, brname, in_port): - - cmd = "ovs-ofctl dump-flows {} in_port={},dl_dst={},dl_type={}".format( - brname, in_port, k_lldp.LLDP_MULTICAST_ADDRESS, - k_lldp.LLDP_ETHER_TYPE) - output = self.run_cmd(cmd) - if not output: - return None - - return (output.count("\n") > 1) - - def lldp_ovs_add_flows(self, brname, in_port, out_port): - - cmd = ("ovs-ofctl add-flow {} in_port={},dl_dst={},dl_type={}," - "actions=output:{}".format( - brname, in_port, k_lldp.LLDP_MULTICAST_ADDRESS, - k_lldp.LLDP_ETHER_TYPE, out_port)) - output = self.run_cmd(cmd) - if not output: - return - - cmd = ("ovs-ofctl add-flow {} in_port={},dl_dst={},dl_type={}," - "actions=output:{}".format( - brname, out_port, k_lldp.LLDP_MULTICAST_ADDRESS, - k_lldp.LLDP_ETHER_TYPE, in_port)) - output = self.run_cmd(cmd) - if not output: - return - - def lldp_ovs_update_flows(self): - - port_bridge_map = self.lldp_ovs_get_port_bridge_map() - if not port_bridge_map: - return - - interface_port_map = self.lldp_ovs_get_interface_port_map() - if not interface_port_map: - return - - cmd = "ovs-vsctl --timeout 10 --format json "\ - "--columns name,_uuid,type,other_config list Interface" - - output = self.run_cmd(cmd) - if not output: - return - - data = json.loads(output) - data = data['data'] - - for interface in data: - name = interface[0] - uuid = interface[1][1] - type = interface[2] - other_config = interface[3] - - if type != 'internal': - continue - - config_map = other_config[1] - for config in config_map: - key = config[0] - value = config[1] - if key != 'lldp_phy_peer': - continue - - phy_peer = value - brname = port_bridge_map[interface_port_map[uuid]] - if not self.lldp_ovs_lldp_flow_exists(brname, name): - LOG.info("Adding missing LLDP flow from %s to %s", - name, phy_peer) - self.lldp_ovs_add_flows(brname, name, phy_peer) - - if not 
self.lldp_ovs_lldp_flow_exists(brname, value): - LOG.info("Adding missing LLDP flow from %s to %s", - phy_peer, name) - self.lldp_ovs_add_flows(brname, phy_peer, name) - - def lldp_agents_list(self): - self.lldp_ovs_update_flows() - return lldpd_driver.InventoryLldpdAgentDriver.lldp_agents_list(self) - - def lldp_neighbours_list(self): - self.lldp_ovs_update_flows() - return lldpd_driver.InventoryLldpdAgentDriver.lldp_neighbours_list( - self) diff --git a/inventory/inventory/inventory/agent/lldp/manager.py b/inventory/inventory/inventory/agent/lldp/manager.py deleted file mode 100644 index 1133fb54..00000000 --- a/inventory/inventory/inventory/agent/lldp/manager.py +++ /dev/null @@ -1,176 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -from inventory.common import exception -from oslo_config import cfg -from oslo_log import log -from stevedore.named import NamedExtensionManager - -LOG = log.getLogger(__name__) -cfg.CONF.import_opt('drivers', - 'inventory.agent.lldp.config', - group='lldp') - - -class InventoryLldpDriverManager(NamedExtensionManager): - """Implementation of Inventory LLDP drivers.""" - - def __init__(self, namespace='inventory.agent.lldp.drivers'): - - # Registered inventory lldp agent drivers, keyed by name. - self.drivers = {} - - # Ordered list of inventory lldp agent drivers, defining - # the order in which the drivers are called. - self.ordered_drivers = [] - - names = cfg.CONF.lldp.drivers - LOG.info("Configured inventory LLDP agent drivers: %s", names) - - super(InventoryLldpDriverManager, self).__init__( - namespace, - names, - invoke_on_load=True, - name_order=True) - - LOG.info("Loaded inventory LLDP agent drivers: %s", self.names()) - self._register_drivers() - - def _register_drivers(self): - """Register all inventory LLDP agent drivers. - - This method should only be called once in the - InventoryLldpDriverManager constructor. - """ - for ext in self: - self.drivers[ext.name] = ext - self.ordered_drivers.append(ext) - LOG.info("Registered inventory LLDP agent drivers: %s", - [driver.name for driver in self.ordered_drivers]) - - def _call_drivers_and_return_array(self, method_name, attr=None, - raise_orig_exc=False): - """Helper method for calling a method across all drivers. - - :param method_name: name of the method to call - :param attr: an optional attribute to provide to the drivers - :param raise_orig_exc: whether or not to raise the original - driver exception, or use a general one - """ - ret = [] - for driver in self.ordered_drivers: - try: - method = getattr(driver.obj, method_name) - if attr: - ret = ret + method(attr) - else: - ret = ret + method() - except Exception as e: - LOG.exception(e) - LOG.error( - "Inventory LLDP agent driver '%(name)s' " - "failed in %(method)s", - {'name': driver.name, 'method': method_name} - ) - if raise_orig_exc: - raise - else: - raise exception.LLDPDriverError( - method=method_name - ) - return list(set(ret)) - - def _call_drivers(self, method_name, attr=None, raise_orig_exc=False): - """Helper method for calling a method across all drivers. 
- - :param method_name: name of the method to call - :param attr: an optional attribute to provide to the drivers - :param raise_orig_exc: whether or not to raise the original - driver exception, or use a general one - """ - for driver in self.ordered_drivers: - try: - method = getattr(driver.obj, method_name) - if attr: - method(attr) - else: - method() - except Exception as e: - LOG.exception(e) - LOG.error( - "Inventory LLDP agent driver '%(name)s' " - "failed in %(method)s", - {'name': driver.name, 'method': method_name} - ) - if raise_orig_exc: - raise - else: - raise exception.LLDPDriverError( - method=method_name - ) - - def lldp_has_neighbour(self, name): - try: - return self._call_drivers("lldp_has_neighbour", - attr=name, - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return [] - - def lldp_update(self): - try: - return self._call_drivers("lldp_update", - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return [] - - def lldp_agents_list(self): - try: - return self._call_drivers_and_return_array("lldp_agents_list", - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return [] - - def lldp_neighbours_list(self): - try: - return self._call_drivers_and_return_array("lldp_neighbours_list", - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return [] - - def lldp_agents_clear(self): - try: - return self._call_drivers("lldp_agents_clear", - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return - - def lldp_neighbours_clear(self): - try: - return self._call_drivers("lldp_neighbours_clear", - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return - - def lldp_update_systemname(self, systemname): - try: - return self._call_drivers("lldp_update_systemname", - attr=systemname, - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - return diff --git a/inventory/inventory/inventory/agent/lldp/plugin.py b/inventory/inventory/inventory/agent/lldp/plugin.py deleted file mode 100644 index 6a3fca3a..00000000 --- a/inventory/inventory/inventory/agent/lldp/plugin.py +++ /dev/null @@ -1,246 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
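InventoryLldpDriverManager above delegates driver discovery to stevedore: each driver class is advertised as a setuptools entry point in the inventory.agent.lldp.drivers namespace, and the [lldp]/drivers option selects which ones load and in what order. A minimal sketch of the same loading pattern outside the manager class (the names list would normally come from CONF.lldp.drivers):

    from stevedore.named import NamedExtensionManager

    mgr = NamedExtensionManager(
        namespace='inventory.agent.lldp.drivers',
        names=['lldpd'],        # driver entry-point names, in call order
        invoke_on_load=True,    # instantiate each driver class on load
        name_order=True)        # iterate in the order given by 'names'

    neighbours = []
    for ext in mgr:             # ext.obj is the instantiated driver
        neighbours.extend(ext.obj.lldp_neighbours_list())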
-# - -from oslo_log import log -from oslo_utils import excutils - -from inventory.agent.lldp import manager -from inventory.common import exception -from inventory.common import k_lldp -from inventory.common.utils import compare as cmp - -LOG = log.getLogger(__name__) - - -class Key(object): - def __init__(self, chassisid, portid, portname): - self.chassisid = chassisid - self.portid = portid - self.portname = portname - - def __hash__(self): - return hash((self.chassisid, self.portid, self.portname)) - - def __cmp__(self, rhs): - return (cmp(self.chassisid, rhs.chassisid) or - cmp(self.portid, rhs.portid) or - cmp(self.portname, rhs.portname)) - - def __eq__(self, rhs): - return (self.chassisid == rhs.chassisid and - self.portid == rhs.portid and - self.portname == rhs.portname) - - def __ne__(self, rhs): - return (self.chassisid != rhs.chassisid or - self.portid != rhs.portid or - self.portname != rhs.portname) - - def __str__(self): - return "%s [%s] [%s]" % (self.portname, self.chassisid, self.portid) - - def __repr__(self): - return "<Key: %s>" % str(self) - - -class Agent(object): - '''Class to encapsulate LLDP agent data for System Inventory''' - - def __init__(self, **kwargs): - '''Construct an Agent object with the given values.''' - self.key = Key(kwargs.get(k_lldp.LLDP_TLV_TYPE_CHASSIS_ID), - kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_ID), - kwargs.get("name_or_uuid")) - self.status = kwargs.get('status') - self.ttl = kwargs.get(k_lldp.LLDP_TLV_TYPE_TTL) - self.system_name = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME) - self.system_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC) - self.port_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_DESC) - self.capabilities = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP) - self.mgmt_addr = kwargs.get(k_lldp.LLDP_TLV_TYPE_MGMT_ADDR) - self.dot1_lag = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_LAG) - self.dot1_vlan_names = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES) - self.dot3_max_frame = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME) - self.state = None - - def __hash__(self): - return self.key.__hash__() - - def __eq__(self, rhs): - return (self.key == rhs.key) - - def __ne__(self, rhs): - return (self.key != rhs.key or - self.status != rhs.status or - self.ttl != rhs.ttl or - self.system_name != rhs.system_name or - self.system_desc != rhs.system_desc or - self.port_desc != rhs.port_desc or - self.capabilities != rhs.capabilities or - self.mgmt_addr != rhs.mgmt_addr or - self.dot1_lag != rhs.dot1_lag or - self.dot1_vlan_names != rhs.dot1_vlan_names or - self.dot3_max_frame != rhs.dot3_max_frame or - self.state != rhs.state) - - def __str__(self): - return "%s: [%s] [%s] [%s], [%s], [%s], [%s], [%s], [%s]" % ( - self.key, self.status, self.system_name, self.system_desc, - self.port_desc, self.capabilities, - self.mgmt_addr, self.dot1_lag, - self.dot3_max_frame) - - def __repr__(self): - return "<Agent: %s>" % str(self) - - -class Neighbour(object): - '''Class to encapsulate LLDP neighbour data for System Inventory''' - - def __init__(self, **kwargs): - '''Construct a Neighbour object with the given values.''' - self.key = Key(kwargs.get(k_lldp.LLDP_TLV_TYPE_CHASSIS_ID), - kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_ID), - kwargs.get("name_or_uuid")) - self.msap = kwargs.get('msap') - self.ttl = kwargs.get(k_lldp.LLDP_TLV_TYPE_TTL) - self.system_name = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME) - self.system_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC) - self.port_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_DESC) - self.capabilities = 
kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP) - self.mgmt_addr = kwargs.get(k_lldp.LLDP_TLV_TYPE_MGMT_ADDR) - self.dot1_port_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID) - self.dot1_vid_digest = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST) - self.dot1_mgmt_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID) - self.dot1_vid_digest = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST) - self.dot1_mgmt_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID) - self.dot1_lag = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_LAG) - self.dot1_vlan_names = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES) - self.dot1_proto_vids = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS) - self.dot1_proto_ids = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS) - self.dot3_mac_status = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS) - self.dot3_max_frame = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME) - self.dot3_power_mdi = kwargs.get( - k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI) - - self.state = None - - def __hash__(self): - return self.key.__hash__() - - def __eq__(self, rhs): - return (self.key == rhs.key) - - def __ne__(self, rhs): - return (self.key != rhs.key or - self.msap != rhs.msap or - self.system_name != rhs.system_name or - self.system_desc != rhs.system_desc or - self.port_desc != rhs.port_desc or - self.capabilities != rhs.capabilities or - self.mgmt_addr != rhs.mgmt_addr or - self.dot1_port_vid != rhs.dot1_port_vid or - self.dot1_vid_digest != rhs.dot1_vid_digest or - self.dot1_mgmt_vid != rhs.dot1_mgmt_vid or - self.dot1_vid_digest != rhs.dot1_vid_digest or - self.dot1_mgmt_vid != rhs.dot1_mgmt_vid or - self.dot1_lag != rhs.dot1_lag or - self.dot1_vlan_names != rhs.dot1_vlan_names or - self.dot1_proto_vids != rhs.dot1_proto_vids or - self.dot1_proto_ids != rhs.dot1_proto_ids or - self.dot3_mac_status != rhs.dot3_mac_status or - self.dot3_max_frame != rhs.dot3_max_frame or - self.dot3_power_mdi != rhs.dot3_power_mdi) - - def __str__(self): - return "%s [%s] [%s] [%s], [%s]" % ( - self.key, self.system_name, self.system_desc, - self.port_desc, self.capabilities) - - def __repr__(self): - return "<Neighbour: %s>" % str(self) - - -class InventoryLldpPlugin(object): - - """Implementation of the Plugin.""" - - def __init__(self): - self.manager = manager.InventoryLldpDriverManager() - - def lldp_has_neighbour(self, name): - try: - return self.manager.lldp_has_neighbour(name) - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP has neighbour failed") - - def lldp_update(self): - try: - self.manager.lldp_update() - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP update failed") - - def lldp_agents_list(self): - try: - agents = self.manager.lldp_agents_list() - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP agents list failed") - - return agents - - def lldp_agents_clear(self): - try: - self.manager.lldp_agents_clear() - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP agents clear failed") - - def lldp_neighbours_list(self): - try: - neighbours = self.manager.lldp_neighbours_list() - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP neighbours list failed") - - return neighbours - - def lldp_neighbours_clear(self): - try: - 
self.manager.lldp_neighbours_clear() - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP neighbours clear failed") - - def lldp_update_systemname(self, systemname): - try: - self.manager.lldp_update_systemname(systemname) - except exception.LLDPDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("LLDP update systemname failed") diff --git a/inventory/inventory/inventory/agent/manager.py b/inventory/inventory/inventory/agent/manager.py deleted file mode 100644 index b9955562..00000000 --- a/inventory/inventory/inventory/agent/manager.py +++ /dev/null @@ -1,973 +0,0 @@ -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -""" Perform activity related to local inventory. - -A single instance of :py:class:`inventory.agent.manager.AgentManager` is -created within the *inventory-agent* process, and is responsible for -performing all actions for this host managed by inventory . - -On start, collect and post inventory. - -Commands (from conductors) are received via RPC calls. - -""" - -import errno -import fcntl -import os -import oslo_messaging as messaging -import socket -import subprocess -import time - -from futurist import periodics -from oslo_config import cfg -from oslo_log import log - -# from inventory.agent import partition -from inventory.agent import base_manager -from inventory.agent.lldp import plugin as lldp_plugin -from inventory.agent import node -from inventory.agent import pci -from inventory.common import constants -from inventory.common import context as mycontext -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common import k_lldp -from inventory.common import utils -from inventory.conductor import rpcapi as conductor_rpcapi -import tsconfig.tsconfig as tsc - -MANAGER_TOPIC = 'inventory.agent_manager' - -LOG = log.getLogger(__name__) - -agent_opts = [ - cfg.StrOpt('api_url', - default=None, - help=('Url of Inventory API service. 
If not set Inventory can ' - 'get current value from Keystone service catalog.')), - cfg.IntOpt('audit_interval', - default=60, - help='Maximum time since the last check-in of a agent'), -] - -CONF = cfg.CONF -CONF.register_opts(agent_opts, 'agent') - -MAXSLEEP = 300 # 5 minutes - -INVENTORY_READY_FLAG = os.path.join(tsc.VOLATILE_PATH, ".inventory_ready") - - -FIRST_BOOT_FLAG = os.path.join( - tsc.PLATFORM_CONF_PATH, ".first_boot") - - -class AgentManager(base_manager.BaseAgentManager): - """Inventory Agent service main class.""" - - # Must be in sync with rpcapi.AgentAPI's - RPC_API_VERSION = '1.0' - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, host, topic): - super(AgentManager, self).__init__(host, topic) - - self._report_to_conductor = False - self._report_to_conductor_iplatform_avail_flag = False - self._ipci_operator = pci.PCIOperator() - self._inode_operator = node.NodeOperator() - self._lldp_operator = lldp_plugin.InventoryLldpPlugin() - self._ihost_personality = None - self._ihost_uuid = "" - self._agent_throttle = 0 - self._subfunctions = None - self._subfunctions_configured = False - self._notify_subfunctions_alarm_clear = False - self._notify_subfunctions_alarm_raise = False - self._first_grub_update = False - - @property - def report_to_conductor_required(self): - return self._report_to_conductor - - @report_to_conductor_required.setter - def report_to_conductor_required(self, val): - if not isinstance(val, bool): - raise ValueError("report_to_conductor_required not bool %s" % - val) - self._report_to_conductor = val - - def start(self): - # Do not collect inventory and report to conductor at startup in - # order to eliminate two inventory reports - # (one from here and one from audit) being sent to the conductor - - super(AgentManager, self).start() - - if os.path.isfile('/etc/inventory/inventory.conf'): - LOG.info("inventory-agent started, " - "inventory to be reported by audit") - else: - LOG.info("No config file for inventory-agent found.") - - if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX: - utils.touch(INVENTORY_READY_FLAG) - - def init_host(self, admin_context=None): - super(AgentManager, self).init_host(admin_context) - if os.path.isfile('/etc/inventory/inventory.conf'): - LOG.info(_("inventory-agent started, " - "system config to be reported by audit")) - else: - LOG.info(_("No config file for inventory-agent found.")) - - if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX: - utils.touch(INVENTORY_READY_FLAG) - - def del_host(self, deregister=True): - return - - def periodic_tasks(self, context, raise_on_error=False): - """Periodic tasks are run at pre-specified intervals. 
""" - return self.run_periodic_tasks(context, - raise_on_error=raise_on_error) - - def _report_to_conductor_iplatform_avail(self): - utils.touch(INVENTORY_READY_FLAG) - time.sleep(1) # give time for conductor to process - self._report_to_conductor_iplatform_avail_flag = True - - def _update_ttys_dcd_status(self, context, host_id): - # Retrieve the serial line carrier detect flag - ttys_dcd = None - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - try: - ttys_dcd = rpcapi.get_host_ttys_dcd(context, host_id) - except exception.InventoryException: - LOG.exception("Inventory Agent exception getting host ttys_dcd.") - pass - if ttys_dcd is not None: - self._config_ttys_login(ttys_dcd) - else: - LOG.debug("ttys_dcd is not configured") - - @staticmethod - def _get_active_device(): - # the list of currently configured console devices, - # like 'tty1 ttyS0' or just 'ttyS0' - # The last entry in the file is the active device connected - # to /dev/console. - active_device = 'ttyS0' - try: - cmd = 'cat /sys/class/tty/console/active | grep ttyS' - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) - output = proc.stdout.read().strip() - proc.communicate()[0] - if proc.returncode != 0: - LOG.info("Cannot find the current configured serial device, " - "return default %s" % active_device) - return active_device - # if more than one devices are found, take the last entry - if ' ' in output: - devs = output.split(' ') - active_device = devs[len(devs) - 1] - else: - active_device = output - except subprocess.CalledProcessError as e: - LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode) - except OSError as e: - LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno) - - return active_device - - @staticmethod - def _is_local_flag_disabled(device): - """ - :param device: - :return: boolean: True if the local flag is disabled 'i.e. -clocal is - set'. 
This means the serial data carrier detect - signal is significant - """ - try: - # uses -o for only-matching and -e for a pattern beginning with a - # hyphen (-), the following command returns 0 if the local flag - # is disabled - cmd = 'stty -a -F /dev/%s | grep -o -e -clocal' % device - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) - proc.communicate()[0] - return proc.returncode == 0 - except subprocess.CalledProcessError as e: - LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode) - return False - except OSError as e: - LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno) - return False - - def _config_ttys_login(self, ttys_dcd): - # agetty is now enabled by systemd - # we only need to disable the local flag to enable carrier detection - # and enable the local flag when the feature is turned off - toggle_flag = None - active_device = self._get_active_device() - local_flag_disabled = self._is_local_flag_disabled(active_device) - if str(ttys_dcd) in ['True', 'true']: - LOG.info("ttys_dcd is enabled") - # check if the local flag is disabled - if not local_flag_disabled: - LOG.info("Disable (%s) local line" % active_device) - toggle_flag = 'stty -clocal -F /dev/%s' % active_device - else: - if local_flag_disabled: - # enable local flag to ignore the carrier detection - LOG.info("Enable local flag for device :%s" % active_device) - toggle_flag = 'stty clocal -F /dev/%s' % active_device - - if toggle_flag: - try: - subprocess.Popen(toggle_flag, stdout=subprocess.PIPE, - shell=True) - # restart serial-getty - restart_cmd = ('systemctl restart serial-getty@%s.service' - % active_device) - subprocess.check_call(restart_cmd, shell=True) - except subprocess.CalledProcessError as e: - LOG.error("subprocess error: (%d)", e.returncode) - - def _force_grub_update(self): - """Force update the grub on the first AIO controller after the initial - config is completed - """ - if (not self._first_grub_update and - os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG)): - self._first_grub_update = True - return True - return False - - def host_lldp_get_and_report(self, context, rpcapi, host_uuid): - neighbour_dict_array = [] - agent_dict_array = [] - neighbours = [] - agents = [] - - try: - neighbours = self._lldp_operator.lldp_neighbours_list() - except Exception as e: - LOG.error("Failed to get LLDP neighbours: %s", str(e)) - - for neighbour in neighbours: - neighbour_dict = { - 'name_or_uuid': neighbour.key.portname, - 'msap': neighbour.msap, - 'state': neighbour.state, - k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: neighbour.key.chassisid, - k_lldp.LLDP_TLV_TYPE_PORT_ID: neighbour.key.portid, - k_lldp.LLDP_TLV_TYPE_TTL: neighbour.ttl, - k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: neighbour.system_name, - k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: neighbour.system_desc, - k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: neighbour.capabilities, - k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: neighbour.mgmt_addr, - k_lldp.LLDP_TLV_TYPE_PORT_DESC: neighbour.port_desc, - k_lldp.LLDP_TLV_TYPE_DOT1_LAG: neighbour.dot1_lag, - k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID: neighbour.dot1_port_vid, - k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST: - neighbour.dot1_vid_digest, - k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID: neighbour.dot1_mgmt_vid, - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS: - neighbour.dot1_proto_vids, - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS: - neighbour.dot1_proto_ids, - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: - neighbour.dot1_vlan_names, - k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS: - neighbour.dot3_mac_status, - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: - 
neighbour.dot3_max_frame, - k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI: - neighbour.dot3_power_mdi, - } - neighbour_dict_array.append(neighbour_dict) - - if neighbour_dict_array: - try: - rpcapi.lldp_neighbour_update_by_host(context, - host_uuid, - neighbour_dict_array) - except exception.InventoryException: - LOG.exception("Inventory Agent exception updating " - "lldp neighbours.") - self._lldp_operator.lldp_neighbours_clear() - pass - - try: - agents = self._lldp_operator.lldp_agents_list() - except Exception as e: - LOG.error("Failed to get LLDP agents: %s", str(e)) - - for agent in agents: - agent_dict = { - 'name_or_uuid': agent.key.portname, - 'state': agent.state, - 'status': agent.status, - k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: agent.key.chassisid, - k_lldp.LLDP_TLV_TYPE_PORT_ID: agent.key.portid, - k_lldp.LLDP_TLV_TYPE_TTL: agent.ttl, - k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: agent.system_name, - k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: agent.system_desc, - k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: agent.capabilities, - k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: agent.mgmt_addr, - k_lldp.LLDP_TLV_TYPE_PORT_DESC: agent.port_desc, - k_lldp.LLDP_TLV_TYPE_DOT1_LAG: agent.dot1_lag, - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: agent.dot1_vlan_names, - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: agent.dot3_max_frame, - } - agent_dict_array.append(agent_dict) - - if agent_dict_array: - try: - rpcapi.lldp_agent_update_by_host(context, - host_uuid, - agent_dict_array) - except exception.InventoryException: - LOG.exception("Inventory Agent exception updating " - "lldp agents.") - self._lldp_operator.lldp_agents_clear() - pass - - def synchronized_network_config(func): - """Synchronization decorator to acquire and release - network_config_lock. - """ - - def wrap(self, *args, **kwargs): - try: - # Get lock to avoid conflict with apply_network_config.sh - lockfd = self._acquire_network_config_lock() - return func(self, *args, **kwargs) - finally: - self._release_network_config_lock(lockfd) - - return wrap - - @synchronized_network_config - def _lldp_enable_and_report(self, context, rpcapi, host_uuid): - """Temporarily enable interfaces and get lldp neighbor information. - This method should only be called before - INITIAL_CONFIG_COMPLETE_FLAG is set. - """ - links_down = [] - try: - # Turn on interfaces, so that lldpd can show all neighbors - for interface in self._ipci_operator.pci_get_net_names(): - flag = self._ipci_operator.pci_get_net_flags(interface) - # If administrative state is down, bring it up momentarily - if not (flag & pci.IFF_UP): - subprocess.call(['ip', 'link', 'set', interface, 'up']) - links_down.append(interface) - LOG.info('interface %s enabled to receive LLDP PDUs' % - interface) - self._lldp_operator.lldp_update() - - # delay maximum 30 seconds for lldpd to receive LLDP PDU - timeout = 0 - link_wait_for_lldp = True - while timeout < 30 and link_wait_for_lldp and links_down: - time.sleep(5) - timeout = timeout + 5 - link_wait_for_lldp = False - - for link in links_down: - if not self._lldp_operator.lldp_has_neighbour(link): - link_wait_for_lldp = True - break - self.host_lldp_get_and_report(context, rpcapi, host_uuid) - except Exception as e: - LOG.exception(e) - pass - finally: - # restore interface administrative state - for interface in links_down: - subprocess.call(['ip', 'link', 'set', interface, 'down']) - LOG.info('interface %s disabled after querying LLDP neighbors' - % interface) - - def platform_update_by_host(self, rpcapi, context, host_uuid, msg_dict): - """Update host platform information. 
- If this is the first boot (kickstart), then also update the Host - Action State to reinstalled, and remove the flag. - """ - if os.path.exists(FIRST_BOOT_FLAG): - msg_dict.update({k_host.HOST_ACTION_STATE: - k_host.HAS_REINSTALLED}) - - try: - rpcapi.platform_update_by_host(context, - host_uuid, - msg_dict) - if os.path.exists(FIRST_BOOT_FLAG): - os.remove(FIRST_BOOT_FLAG) - LOG.info("Removed %s" % FIRST_BOOT_FLAG) - except exception.InventoryException: - LOG.warn("platform_update_by_host exception " - "host_uuid=%s msg_dict=%s." % - (host_uuid, msg_dict)) - pass - - LOG.info("Inventory Agent platform update by host: %s" % msg_dict) - - def _acquire_network_config_lock(self): - """Synchronization with apply_network_config.sh - - This method is to acquire the lock to avoid - conflict with execution of apply_network_config.sh - during puppet manifest application. - - :returns: fd of the lock, if successful. 0 on error. - """ - lock_file_fd = os.open( - constants.NETWORK_CONFIG_LOCK_FILE, os.O_CREAT | os.O_RDONLY) - count = 1 - delay = 5 - max_count = 5 - while count <= max_count: - try: - fcntl.flock(lock_file_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - return lock_file_fd - except IOError as e: - # raise on unrelated IOErrors - if e.errno != errno.EAGAIN: - raise - else: - LOG.info("Could not acquire lock({}): {} ({}/{}), " - "will retry".format(lock_file_fd, str(e), - count, max_count)) - time.sleep(delay) - count += 1 - LOG.error("Failed to acquire lock (fd={})".format(lock_file_fd)) - return 0 - - def _release_network_config_lock(self, lockfd): - """Release the lock guarding apply_network_config.sh """ - if lockfd: - fcntl.flock(lockfd, fcntl.LOCK_UN) - os.close(lockfd) - - def ihost_inv_get_and_report(self, icontext): - """Collect data for an ihost. - - This method allows an ihost data to be collected. - - :param: icontext: an admin context - :returns: updated ihost object, including all fields. 
- """ - - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - - ihost = None - - # find list of network related inics for this ihost - inics = self._ipci_operator.inics_get() - - # create an array of ports for each net entry of the NIC device - iports = [] - for inic in inics: - lockfd = 0 - try: - # Get lock to avoid conflict with apply_network_config.sh - lockfd = self._acquire_network_config_lock() - pci_net_array = \ - self._ipci_operator.pci_get_net_attrs(inic.pciaddr) - finally: - self._release_network_config_lock(lockfd) - for net in pci_net_array: - iports.append(pci.Port(inic, **net)) - - # find list of pci devices for this host - pci_devices = self._ipci_operator.pci_devices_get() - - # create an array of pci_devs for each net entry of the device - pci_devs = [] - for pci_dev in pci_devices: - pci_dev_array = \ - self._ipci_operator.pci_get_device_attrs(pci_dev.pciaddr) - for dev in pci_dev_array: - pci_devs.append(pci.PCIDevice(pci_dev, **dev)) - - # create a list of MAC addresses that will be used to identify the - # inventoried host (one of the MACs should be the management MAC) - host_macs = [port.mac for port in iports if port.mac] - - # get my ihost record which should be avail since booted - - LOG.debug('Inventory Agent iports={}, host_macs={}'.format( - iports, host_macs)) - - slept = 0 - while slept < MAXSLEEP: - # wait for controller to come up first may be a DOR - try: - ihost = rpcapi.get_host_by_macs(icontext, host_macs) - except messaging.MessagingTimeout: - LOG.info("get_host_by_macs Messaging Timeout.") - except Exception as ex: - LOG.warn("Conductor RPC get_host_by_macs exception " - "response %s" % ex) - - if not ihost: - hostname = socket.gethostname() - if hostname != k_host.LOCALHOST_HOSTNAME: - try: - ihost = rpcapi.get_host_by_hostname(icontext, - hostname) - except messaging.MessagingTimeout: - LOG.info("get_host_by_hostname Messaging Timeout.") - return # wait for next audit cycle - except Exception as ex: - LOG.warn("Conductor RPC get_host_by_hostname " - "exception response %s" % ex) - - if ihost and ihost.get('personality'): - self.report_to_conductor_required = True - self._ihost_uuid = ihost['uuid'] - self._ihost_personality = ihost['personality'] - - if os.path.isfile(tsc.PLATFORM_CONF_FILE): - # read the platform config file and check for UUID - found = False - with open(tsc.PLATFORM_CONF_FILE, "r") as fd: - for line in fd: - if line.find("UUID=") == 0: - found = True - if not found: - # the UUID is not found, append it - with open(tsc.PLATFORM_CONF_FILE, "a") as fd: - fd.write("UUID=" + self._ihost_uuid + "\n") - - # Report host install status - msg_dict = {} - self.platform_update_by_host(rpcapi, - icontext, - self._ihost_uuid, - msg_dict) - LOG.info("Agent found matching ihost: %s" % ihost['uuid']) - break - - time.sleep(30) - slept += 30 - - if not self.report_to_conductor_required: - # let the audit take care of it instead - LOG.info("Inventory no matching ihost found... 
await Audit") - return - - subfunctions = self.subfunctions_get() - try: - rpcapi.subfunctions_update_by_host(icontext, - ihost['uuid'], - subfunctions) - except exception.InventoryException: - LOG.exception("Inventory Agent exception updating " - "subfunctions conductor.") - pass - - # post to inventory db by ihost['uuid'] - iport_dict_array = [] - for port in iports: - inic_dict = {'pciaddr': port.ipci.pciaddr, - 'pclass': port.ipci.pclass, - 'pvendor': port.ipci.pvendor, - 'pdevice': port.ipci.pdevice, - 'prevision': port.ipci.prevision, - 'psvendor': port.ipci.psvendor, - 'psdevice': port.ipci.psdevice, - 'pname': port.name, - 'numa_node': port.numa_node, - 'sriov_totalvfs': port.sriov_totalvfs, - 'sriov_numvfs': port.sriov_numvfs, - 'sriov_vfs_pci_address': port.sriov_vfs_pci_address, - 'driver': port.driver, - 'mac': port.mac, - 'mtu': port.mtu, - 'speed': port.speed, - 'link_mode': port.link_mode, - 'dev_id': port.dev_id, - 'dpdksupport': port.dpdksupport} - - LOG.debug('Inventory Agent inic {}'.format(inic_dict)) - - iport_dict_array.append(inic_dict) - try: - # may get duplicate key if already sent on earlier init - rpcapi.port_update_by_host(icontext, - ihost['uuid'], - iport_dict_array) - except messaging.MessagingTimeout: - LOG.info("pci_device_update_by_host Messaging Timeout.") - self.report_to_conductor_required = False - return # wait for next audit cycle - - # post to inventory db by ihost['uuid'] - pci_device_dict_array = [] - for dev in pci_devs: - pci_dev_dict = {'name': dev.name, - 'pciaddr': dev.pci.pciaddr, - 'pclass_id': dev.pclass_id, - 'pvendor_id': dev.pvendor_id, - 'pdevice_id': dev.pdevice_id, - 'pclass': dev.pci.pclass, - 'pvendor': dev.pci.pvendor, - 'pdevice': dev.pci.pdevice, - 'prevision': dev.pci.prevision, - 'psvendor': dev.pci.psvendor, - 'psdevice': dev.pci.psdevice, - 'numa_node': dev.numa_node, - 'sriov_totalvfs': dev.sriov_totalvfs, - 'sriov_numvfs': dev.sriov_numvfs, - 'sriov_vfs_pci_address': dev.sriov_vfs_pci_address, - 'driver': dev.driver, - 'enabled': dev.enabled, - 'extra_info': dev.extra_info} - LOG.debug('Inventory Agent dev {}'.format(pci_dev_dict)) - - pci_device_dict_array.append(pci_dev_dict) - try: - # may get duplicate key if already sent on earlier init - rpcapi.pci_device_update_by_host(icontext, - ihost['uuid'], - pci_device_dict_array) - except messaging.MessagingTimeout: - LOG.info("pci_device_update_by_host Messaging Timeout.") - self.report_to_conductor_required = True - - # Find list of numa_nodes and cpus for this ihost - inumas, icpus = self._inode_operator.inodes_get_inumas_icpus() - - try: - # may get duplicate key if already sent on earlier init - rpcapi.numas_update_by_host(icontext, - ihost['uuid'], - inumas) - except messaging.RemoteError as e: - LOG.error("numas_update_by_host RemoteError exc_type=%s" % - e.exc_type) - except messaging.MessagingTimeout: - LOG.info("pci_device_update_by_host Messaging Timeout.") - self.report_to_conductor_required = True - except Exception as e: - LOG.exception("Inventory Agent exception updating inuma e=%s." 
% e) - pass - - force_grub_update = self._force_grub_update() - try: - # may get duplicate key if already sent on earlier init - rpcapi.cpus_update_by_host(icontext, - ihost['uuid'], - icpus, - force_grub_update) - except messaging.RemoteError as e: - LOG.error("cpus_update_by_host RemoteError exc_type=%s" % - e.exc_type) - except messaging.MessagingTimeout: - LOG.info("cpus_update_by_host Messaging Timeout.") - self.report_to_conductor_required = True - except Exception as e: - LOG.exception("Inventory exception updating cpus e=%s." % e) - self.report_to_conductor_required = True - pass - except exception.InventoryException: - LOG.exception("Inventory exception updating cpus conductor.") - pass - - imemory = self._inode_operator.inodes_get_imemory() - if imemory: - try: - # may get duplicate key if already sent on earlier init - rpcapi.memory_update_by_host(icontext, - ihost['uuid'], - imemory) - except messaging.MessagingTimeout: - LOG.info("memory_update_by_host Messaging Timeout.") - except messaging.RemoteError as e: - LOG.error("memory_update_by_host RemoteError exc_type=%s" % - e.exc_type) - except exception.InventoryException: - LOG.exception("Inventory Agent exception updating imemory " - "conductor.") - - if self._ihost_uuid and \ - os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG): - if not self._report_to_conductor_iplatform_avail_flag: - # and not self._wait_for_nova_lvg() - imsg_dict = {'availability': k_host.AVAILABILITY_AVAILABLE} - - iscsi_initiator_name = self.get_host_iscsi_initiator_name() - if iscsi_initiator_name is not None: - imsg_dict.update({'iscsi_initiator_name': - iscsi_initiator_name}) - - # Before setting the host to AVAILABILITY_AVAILABLE make - # sure that nova_local aggregates are correctly set - self.platform_update_by_host(rpcapi, - icontext, - self._ihost_uuid, - imsg_dict) - - self._report_to_conductor_iplatform_avail() - - def subfunctions_get(self): - """returns subfunctions on this host. - """ - - self._subfunctions = ','.join(tsc.subfunctions) - - return self._subfunctions - - @staticmethod - def subfunctions_list_get(): - """returns list of subfunctions on this host. - """ - subfunctions = ','.join(tsc.subfunctions) - subfunctions_list = subfunctions.split(',') - - return subfunctions_list - - def subfunctions_configured(self, subfunctions_list): - """Determines whether subfunctions configuration is completed. - return: Bool whether subfunctions configuration is completed. - """ - if (k_host.CONTROLLER in subfunctions_list and - k_host.COMPUTE in subfunctions_list): - if not os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE): - self._subfunctions_configured = False - return False - - self._subfunctions_configured = True - return True - - @staticmethod - def _wait_for_nova_lvg(icontext, rpcapi, ihost_uuid, nova_lvgs=None): - """See if we wait for a provisioned nova-local volume group - - This method queries the conductor to see if we are provisioning - a nova-local volume group on this boot cycle. This check is used - to delay sending the platform availability to the conductor. - - :param: icontext: an admin context - :param: rpcapi: conductor rpc api - :param: ihost_uuid: an admin context - :returns: True if we are provisioning false otherwise - """ - - return True - LOG.info("TODO _wait_for_nova_lvg from systemconfig") - - def _is_config_complete(self): - """Check if this node has completed config - - This method queries node's config flag file to see if it has - complete config. 
- :return: True if the complete flag file exists false otherwise - """ - if not os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG): - return False - subfunctions = self.subfunctions_list_get() - if k_host.CONTROLLER in subfunctions: - if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE): - return False - if k_host.COMPUTE in subfunctions: - if not os.path.isfile(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE): - return False - if k_host.STORAGE in subfunctions: - if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE): - return False - return True - - @periodics.periodic(spacing=CONF.agent.audit_interval, - run_immediately=True) - def _agent_audit(self, context): - # periodically, perform inventory audit - self.agent_audit(context, host_uuid=self._ihost_uuid, - force_updates=None) - - def agent_audit(self, context, - host_uuid, force_updates, cinder_device=None): - # perform inventory audit - if self._ihost_uuid != host_uuid: - # The function call is not for this host agent - return - - icontext = mycontext.get_admin_context() - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - - if not self.report_to_conductor_required: - LOG.info("Inventory Agent audit running inv_get_and_report.") - self.ihost_inv_get_and_report(icontext) - - if self._ihost_uuid and os.path.isfile( - tsc.INITIAL_CONFIG_COMPLETE_FLAG): - if (not self._report_to_conductor_iplatform_avail_flag and - not self._wait_for_nova_lvg( - icontext, rpcapi, self._ihost_uuid)): - imsg_dict = {'availability': k_host.AVAILABILITY_AVAILABLE} - - iscsi_initiator_name = self.get_host_iscsi_initiator_name() - if iscsi_initiator_name is not None: - imsg_dict.update({'iscsi_initiator_name': - iscsi_initiator_name}) - - # Before setting the host to AVAILABILITY_AVAILABLE make - # sure that nova_local aggregates are correctly set - self.platform_update_by_host(rpcapi, - icontext, - self._ihost_uuid, - imsg_dict) - - self._report_to_conductor_iplatform_avail() - - if (self._ihost_personality == k_host.CONTROLLER and - not self._notify_subfunctions_alarm_clear): - - subfunctions_list = self.subfunctions_list_get() - if ((k_host.CONTROLLER in subfunctions_list) and - (k_host.COMPUTE in subfunctions_list)): - if self.subfunctions_configured(subfunctions_list) and \ - not self._wait_for_nova_lvg( - icontext, rpcapi, self._ihost_uuid): - - ihost_notify_dict = {'subfunctions_configured': True} - rpcapi.notify_subfunctions_config(icontext, - self._ihost_uuid, - ihost_notify_dict) - self._notify_subfunctions_alarm_clear = True - else: - if not self._notify_subfunctions_alarm_raise: - ihost_notify_dict = {'subfunctions_configured': - False} - rpcapi.notify_subfunctions_config( - icontext, self._ihost_uuid, ihost_notify_dict) - self._notify_subfunctions_alarm_raise = True - else: - self._notify_subfunctions_alarm_clear = True - - if self._ihost_uuid: - LOG.debug("Inventory Agent Audit running.") - - if force_updates: - LOG.debug("Inventory Agent Audit force updates: (%s)" % - (', '.join(force_updates))) - - self._update_ttys_dcd_status(icontext, self._ihost_uuid) - if self._agent_throttle > 5: - # throttle updates - self._agent_throttle = 0 - imemory = self._inode_operator.inodes_get_imemory() - rpcapi.memory_update_by_host(icontext, - self._ihost_uuid, - imemory) - if self._is_config_complete(): - self.host_lldp_get_and_report( - icontext, rpcapi, self._ihost_uuid) - else: - self._lldp_enable_and_report( - icontext, rpcapi, self._ihost_uuid) - self._agent_throttle += 1 - - if os.path.isfile(tsc.PLATFORM_CONF_FILE): - # 
read the platform config file and check for UUID - if 'UUID' not in open(tsc.PLATFORM_CONF_FILE).read(): - # the UUID is not found, append it - with open(tsc.PLATFORM_CONF_FILE, "a") as fd: - fd.write("UUID=" + self._ihost_uuid) - - def configure_lldp_systemname(self, context, systemname): - """Configure the systemname into the lldp agent with the supplied data. - - :param context: an admin context. - :param systemname: the systemname - """ - - # TODO(sc): This becomes an inventory-api call via - # systemconfig: configure_isystemname - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - # Update the lldp agent - self._lldp_operator.lldp_update_systemname(systemname) - # Trigger an audit to ensure the db is up to date - self.host_lldp_get_and_report(context, rpcapi, self._ihost_uuid) - - def configure_ttys_dcd(self, context, uuid, ttys_dcd): - """Configure the getty on the serial device. - - :param context: an admin context. - :param uuid: the host uuid - :param ttys_dcd: the flag to enable/disable dcd - """ - - LOG.debug("AgentManager.configure_ttys_dcd: %s %s" % (uuid, ttys_dcd)) - if self._ihost_uuid and self._ihost_uuid == uuid: - LOG.debug("AgentManager configure getty on serial console") - self._config_ttys_login(ttys_dcd) - return - - def execute_command(self, context, host_uuid, command): - """Execute a command on behalf of inventory-conductor - - :param context: request context - :param host_uuid: the host uuid - :param command: the command to execute - """ - - LOG.debug("AgentManager.execute_command: (%s)" % command) - if self._ihost_uuid and self._ihost_uuid == host_uuid: - LOG.info("AgentManager execute_command: (%s)" % command) - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call(command, stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError as e: - LOG.error("Failed to execute (%s) (%d)", - command, e.returncode) - except OSError as e: - LOG.error("Failed to execute (%s), OS error: (%d)", - command, e.errno) - - LOG.info("(%s) executed.", command) - - def get_host_iscsi_initiator_name(self): - iscsi_initiator_name = None - try: - stdout, __ = utils.execute('cat', '/etc/iscsi/initiatorname.iscsi', - run_as_root=True) - if stdout: - stdout = stdout.strip() - iscsi_initiator_name = stdout.split('=')[-1] - LOG.info("iscsi initiator name = %s" % iscsi_initiator_name) - except Exception: - LOG.error("Failed retrieving iscsi initiator name") - - return iscsi_initiator_name - - def update_host_memory(self, context, host_uuid): - """update the host memory - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :return: None - """ - if self._ihost_uuid and self._ihost_uuid == host_uuid: - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - memory = self._inode_operator.inodes_get_imemory() - rpcapi.memory_update_by_host(context, - self._ihost_uuid, - memory, - force_update=True) diff --git a/inventory/inventory/inventory/agent/node.py b/inventory/inventory/inventory/agent/node.py deleted file mode 100644 index 8552a116..00000000 --- a/inventory/inventory/inventory/agent/node.py +++ /dev/null @@ -1,608 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved.
-# - -""" inventory numa node Utilities and helper functions.""" - -import os -from os import listdir -from os.path import isfile -from os.path import join -from oslo_log import log -import re -import subprocess -import tsconfig.tsconfig as tsc - -LOG = log.getLogger(__name__) - -# Defines per-socket vswitch memory requirements (in MB) -VSWITCH_MEMORY_MB = 1024 - -# Defines the size of one kilobyte -SIZE_KB = 1024 - -# Defines the size of 2 megabytes in kilobyte units -SIZE_2M_KB = 2048 - -# Defines the size of 1 gigabyte in kilobyte units -SIZE_1G_KB = 1048576 - -# Defines the size of 2 megabytes in megabyte units -SIZE_2M_MB = int(SIZE_2M_KB / SIZE_KB) - -# Defines the size of 1 gigabyte in megabyte units -SIZE_1G_MB = int(SIZE_1G_KB / SIZE_KB) - -# Defines the minimum size of memory for a controller node in megabyte units -CONTROLLER_MIN_MB = 6000 - -# Defines the minimum size of memory for a compute node in megabyte units -COMPUTE_MIN_MB = 1600 - -# Defines the minimum size of memory for a secondary compute node in megabyte -# units -COMPUTE_MIN_NON_0_MB = 500 - - -class CPU(object): - '''Class to encapsulate CPU data for System Inventory''' - - def __init__(self, cpu, numa_node, core, thread, - cpu_family=None, cpu_model=None, revision=None): - '''Construct a cpu object with the given values.''' - - self.cpu = cpu - self.numa_node = numa_node - self.core = core - self.thread = thread - self.cpu_family = cpu_family - self.cpu_model = cpu_model - self.revision = revision - # self.allocated_functions = mgmt (usu. 0), vswitch - - def __eq__(self, rhs): - return (self.cpu == rhs.cpu and - self.numa_node == rhs.numa_node and - self.core == rhs.core and - self.thread == rhs.thread) - - def __ne__(self, rhs): - return (self.cpu != rhs.cpu or - self.numa_node != rhs.numa_node or - self.core != rhs.core or - self.thread != rhs.thread) - - def __str__(self): - return "%s [%s] [%s] [%s]" % (self.cpu, self.numa_node, - self.core, self.thread) - - def __repr__(self): - return "" % str(self) - - -class NodeOperator(object): - '''Class to encapsulate CPU operations for System Inventory''' - - def __init__(self): - - self.num_cpus = 0 - self.num_nodes = 0 - self.float_cpuset = 0 - self.total_memory_mb = 0 - self.free_memory_mb = 0 - self.total_memory_nodes_mb = [] - self.free_memory_nodes_mb = [] - self.topology = {} - - # self._get_cpu_topology() - # self._get_total_memory_mb() - # self._get_total_memory_nodes_mb() - # self._get_free_memory_mb() - # self._get_free_memory_nodes_mb() - - def _is_strict(self): - with open(os.devnull, "w") as fnull: - try: - output = subprocess.check_output( - ["cat", "/proc/sys/vm/overcommit_memory"], - stderr=fnull) - if int(output) == 2: - return True - except subprocess.CalledProcessError as e: - LOG.info("Failed to check for overcommit, error (%s)", - e.output) - return False - - def convert_range_string_to_list(self, s): - olist = [] - s = s.strip() - if s: - for part in s.split(','): - if '-' in part: - a, b = part.split('-') - a, b = int(a), int(b) - olist.extend(range(a, b + 1)) - else: - a = int(part) - olist.append(a) - olist.sort() - return olist - - def inodes_get_inumas_icpus(self): - '''Enumerate logical cpu topology based on parsing /proc/cpuinfo - as function of socket_id, core_id, and thread_id. This updates - topology. 
- - :param self - :updates self.num_cpus- number of logical cpus - :updates self.num_nodes- number of sockets;maps to number of numa nodes - :updates self.topology[socket_id][core_id][thread_id] = cpu - :returns None - ''' - self.num_cpus = 0 - self.num_nodes = 0 - self.topology = {} - - thread_cnt = {} - cpu = socket_id = core_id = thread_id = -1 - re_processor = re.compile(r'^[Pp]rocessor\s+:\s+(\d+)') - re_socket = re.compile(r'^physical id\s+:\s+(\d+)') - re_core = re.compile(r'^core id\s+:\s+(\d+)') - re_cpu_family = re.compile(r'^cpu family\s+:\s+(\d+)') - re_cpu_model = re.compile(r'^model name\s+:\s+(\w+)') - - inumas = [] - icpus = [] - sockets = [] - - with open('/proc/cpuinfo', 'r') as infile: - icpu_attrs = {} - - for line in infile: - match = re_processor.search(line) - if match: - cpu = int(match.group(1)) - socket_id = -1 - core_id = -1 - thread_id = -1 - self.num_cpus += 1 - continue - - match = re_cpu_family.search(line) - if match: - name_value = [s.strip() for s in line.split(':', 1)] - name, value = name_value - icpu_attrs.update({'cpu_family': value}) - continue - - match = re_cpu_model.search(line) - if match: - name_value = [s.strip() for s in line.split(':', 1)] - name, value = name_value - icpu_attrs.update({'cpu_model': value}) - continue - - match = re_socket.search(line) - if match: - socket_id = int(match.group(1)) - if socket_id not in sockets: - sockets.append(socket_id) - attrs = { - 'numa_node': socket_id, - 'capabilities': {}, - } - inumas.append(attrs) - continue - - match = re_core.search(line) - if match: - core_id = int(match.group(1)) - - if socket_id not in thread_cnt: - thread_cnt[socket_id] = {} - if core_id not in thread_cnt[socket_id]: - thread_cnt[socket_id][core_id] = 0 - else: - thread_cnt[socket_id][core_id] += 1 - thread_id = thread_cnt[socket_id][core_id] - - if socket_id not in self.topology: - self.topology[socket_id] = {} - if core_id not in self.topology[socket_id]: - self.topology[socket_id][core_id] = {} - - self.topology[socket_id][core_id][thread_id] = cpu - attrs = { - 'cpu': cpu, - 'numa_node': socket_id, - 'core': core_id, - 'thread': thread_id, - 'capabilities': {}, - } - icpu_attrs.update(attrs) - icpus.append(icpu_attrs) - icpu_attrs = {} - continue - - self.num_nodes = len(self.topology.keys()) - - # In the case topology not detected, hard-code structures - if self.num_nodes == 0: - n_sockets, n_cores, n_threads = (1, int(self.num_cpus), 1) - self.topology = {} - for socket_id in range(n_sockets): - self.topology[socket_id] = {} - if socket_id not in sockets: - sockets.append(socket_id) - attrs = { - 'numa_node': socket_id, - 'capabilities': {}, - } - inumas.append(attrs) - for core_id in range(n_cores): - self.topology[socket_id][core_id] = {} - for thread_id in range(n_threads): - self.topology[socket_id][core_id][thread_id] = 0 - attrs = { - 'cpu': cpu, - 'numa_node': socket_id, - 'core': core_id, - 'thread': thread_id, - 'capabilities': {}, - } - icpus.append(attrs) - - # Define Thread-Socket-Core order for logical cpu enumeration - cpu = 0 - for thread_id in range(n_threads): - for core_id in range(n_cores): - for socket_id in range(n_sockets): - if socket_id not in sockets: - sockets.append(socket_id) - attrs = { - 'numa_node': socket_id, - 'capabilities': {}, - } - inumas.append(attrs) - self.topology[socket_id][core_id][thread_id] = cpu - attrs = { - 'cpu': cpu, - 'numa_node': socket_id, - 'core': core_id, - 'thread': thread_id, - 'capabilities': {}, - } - icpus.append(attrs) - cpu += 1 - self.num_nodes = 
len(self.topology.keys()) - - LOG.debug("inumas= %s, cpus = %s" % (inumas, icpus)) - - return inumas, icpus - - def _get_immediate_subdirs(self, dir): - return [name for name in listdir(dir) - if os.path.isdir(join(dir, name))] - - def _inode_get_memory_hugepages(self): - """Collect hugepage info, including vswitch, and vm. - Collect platform reserved memory if configured. - :param self - :returns list of memory nodes and attributes - """ - - imemory = [] - - initial_compute_config_completed = \ - os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE) - - # check if it is initial report before the huge pages are allocated - initial_report = not initial_compute_config_completed - - # do not send report if the initial compute config is completed and - # compute config has not finished, i.e. during subsequent - # reboot before the manifest allocates the huge pages - compute_config_completed = \ - os.path.exists(tsc.VOLATILE_COMPUTE_CONFIG_COMPLETE) - if (initial_compute_config_completed and - not compute_config_completed): - return imemory - - for node in range(self.num_nodes): - attr = {} - total_hp_mb = 0 # Total memory (MB) currently configured in HPs - free_hp_mb = 0 - - # Check vswitch and libvirt memory - # Loop through configured hugepage sizes of this node and record - # total number and number free - hugepages = "/sys/devices/system/node/node%d/hugepages" % node - - try: - subdirs = self._get_immediate_subdirs(hugepages) - - for subdir in subdirs: - hp_attr = {} - sizesplit = subdir.split('-') - if sizesplit[1].startswith("1048576kB"): - size = SIZE_1G_MB - else: - size = SIZE_2M_MB - - nr_hugepages = 0 - free_hugepages = 0 - - mydir = hugepages + '/' + subdir - files = [f for f in listdir(mydir) - if isfile(join(mydir, f))] - - if files: - for file in files: - with open(mydir + '/' + file, 'r') as f: - if file.startswith("nr_hugepages"): - nr_hugepages = int(f.readline()) - if file.startswith("free_hugepages"): - free_hugepages = int(f.readline()) - - total_hp_mb = total_hp_mb + int(nr_hugepages * size) - free_hp_mb = free_hp_mb + int(free_hugepages * size) - - # Libvirt hugepages can be 1G and 2M - if size == SIZE_1G_MB: - vswitch_hugepages_nr = VSWITCH_MEMORY_MB / size - hp_attr = { - 'vswitch_hugepages_size_mib': size, - 'vswitch_hugepages_nr': vswitch_hugepages_nr, - 'vswitch_hugepages_avail': 0, - 'vm_hugepages_nr_1G': - (nr_hugepages - vswitch_hugepages_nr), - 'vm_hugepages_avail_1G': free_hugepages, - 'vm_hugepages_use_1G': 'True' - } - else: - if len(subdirs) == 1: - # No 1G hugepage support. - vswitch_hugepages_nr = VSWITCH_MEMORY_MB / size - hp_attr = { - 'vswitch_hugepages_size_mib': size, - 'vswitch_hugepages_nr': vswitch_hugepages_nr, - 'vswitch_hugepages_avail': 0, - } - hp_attr.update({'vm_hugepages_use_1G': 'False'}) - else: - # vswitch will use 1G hugepages - vswitch_hugepages_nr = 0 - - hp_attr.update({ - 'vm_hugepages_avail_2M': free_hugepages, - 'vm_hugepages_nr_2M': - (nr_hugepages - vswitch_hugepages_nr) - }) - - attr.update(hp_attr) - - except IOError: - # silently ignore IO errors (eg.
file missing) - pass - - # Get the free and total memory from meminfo for this node - re_node_memtotal = re.compile(r'^Node\s+\d+\s+MemTotal:\s+(\d+)') - re_node_memfree = re.compile(r'^Node\s+\d+\s+MemFree:\s+(\d+)') - re_node_filepages = \ - re.compile(r'^Node\s+\d+\s+FilePages:\s+(\d+)') - re_node_sreclaim = \ - re.compile(r'^Node\s+\d+\s+SReclaimable:\s+(\d+)') - re_node_commitlimit = \ - re.compile(r'^Node\s+\d+\s+CommitLimit:\s+(\d+)') - re_node_committed_as = \ - re.compile(r'^Node\s+\d+\s+Committed_AS:\s+(\d+)') - - free_kb = 0 # Free Memory (KB) available - total_kb = 0 # Total Memory (KB) - limit = 0 # only used in strict accounting - committed = 0 # only used in strict accounting - - meminfo = "/sys/devices/system/node/node%d/meminfo" % node - try: - with open(meminfo, 'r') as infile: - for line in infile: - match = re_node_memtotal.search(line) - if match: - total_kb += int(match.group(1)) - continue - match = re_node_memfree.search(line) - if match: - free_kb += int(match.group(1)) - continue - match = re_node_filepages.search(line) - if match: - free_kb += int(match.group(1)) - continue - match = re_node_sreclaim.search(line) - if match: - free_kb += int(match.group(1)) - continue - match = re_node_commitlimit.search(line) - if match: - limit = int(match.group(1)) - continue - match = re_node_committed_as.search(line) - if match: - committed = int(match.group(1)) - continue - - if self._is_strict(): - free_kb = limit - committed - - except IOError: - # silently ignore IO errors (eg. file missing) - pass - - # Calculate PSS - pss_mb = 0 - if node == 0: - cmd = 'cat /proc/*/smaps 2>/dev/null | awk \'/^Pss:/ ' \ '{a += $2;} END {printf "%d\\n", a/1024.0;}\'' - try: - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - shell=True) - result = proc.stdout.read().strip() - pss_mb = int(result) - except subprocess.CalledProcessError as e: - LOG.error("Cannot calculate PSS (%s) (%d)", cmd, - e.returncode) - except OSError as e: - LOG.error("Failed to execute (%s) OS error (%d)", cmd, - e.errno) - - # need to multiply total_mb by 1024 to match compute_huge - node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB - - # Read base memory from compute_reserved.conf - base_mem_mb = 0 - with open('/etc/nova/compute_reserved.conf', 'r') as infile: - for line in infile: - if "COMPUTE_BASE_RESERVED" in line: - val = line.split("=") - base_reserves = val[1].strip('\n')[1:-1] - for reserve in base_reserves.split(): - reserve = reserve.split(":") - if reserve[0].strip('"') == "node%d" % node: - base_mem_mb = int(reserve[1].strip('MB')) - - # On small systems, clip memory overhead to more reasonable minimal - # settings - if (total_kb / SIZE_KB - base_mem_mb) < 1000: - if node == 0: - base_mem_mb = COMPUTE_MIN_MB - if tsc.nodetype == 'controller': - base_mem_mb += CONTROLLER_MIN_MB - else: - base_mem_mb = COMPUTE_MIN_NON_0_MB - - eng_kb = node_total_kb - base_mem_mb * SIZE_KB - - vswitch_mem_kb = (attr.get('vswitch_hugepages_size_mib', 0) * - attr.get('vswitch_hugepages_nr', 0) * SIZE_KB) - - vm_kb = (eng_kb - vswitch_mem_kb) - - max_vm_pages_2mb = vm_kb / SIZE_2M_KB - max_vm_pages_1gb = vm_kb / SIZE_1G_KB - - attr.update({ - 'vm_hugepages_possible_2M': max_vm_pages_2mb, - 'vm_hugepages_possible_1G': max_vm_pages_1gb, - }) - - # calculate 90% 2M pages if it is initial report and the huge - # pages have not been allocated - if initial_report: - max_vm_pages_2mb = max_vm_pages_2mb * 0.9 - total_hp_mb += int(max_vm_pages_2mb * (SIZE_2M_KB / SIZE_KB)) - free_hp_mb = total_hp_mb
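# [Editorial sketch, not part of the original change] The initial-report
# branch above reserves 90% of the possible 2M pages before the compute
# manifest has allocated any hugepages. A standalone illustration of that
# arithmetic, assuming 8 GiB of VM-available memory (all names below are
# local to this sketch and hypothetical):
sketch_size_kb = 1024
sketch_size_2m_kb = 2048
sketch_vm_kb = 8 * 1024 * 1024                       # assumed: 8 GiB for VMs
sketch_pages_2m = sketch_vm_kb // sketch_size_2m_kb  # 4096 possible 2M pages
sketch_pages_2m = int(sketch_pages_2m * 0.9)         # 3686 pages on 1st audit
sketch_total_hp_mb = sketch_pages_2m * (sketch_size_2m_kb // sketch_size_kb)
assert sketch_total_hp_mb == 7372                    # MB reported configured
# The 10% headroom appears intended to keep the first audit from
# over-reporting configured hugepage memory before allocation happens.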
- attr.update({ - 'vm_hugepages_nr_2M': max_vm_pages_2mb, - 'vm_hugepages_avail_2M': max_vm_pages_2mb, - 'vm_hugepages_nr_1G': 0 - }) - - attr.update({ - 'numa_node': node, - 'memtotal_mib': total_hp_mb, - 'memavail_mib': free_hp_mb, - 'hugepages_configured': 'True', - 'node_memtotal_mib': node_total_kb / 1024, - }) - - imemory.append(attr) - - return imemory - - def _inode_get_memory_nonhugepages(self): - '''Collect nonhugepage info, including platform reserved memory if - configured. - :param self - :returns list of memory nodes and attributes - ''' - - imemory = [] - self.total_memory_mb = 0 - - re_node_memtotal = re.compile(r'^Node\s+\d+\s+MemTotal:\s+(\d+)') - re_node_memfree = re.compile(r'^Node\s+\d+\s+MemFree:\s+(\d+)') - re_node_filepages = re.compile(r'^Node\s+\d+\s+FilePages:\s+(\d+)') - re_node_sreclaim = re.compile(r'^Node\s+\d+\s+SReclaimable:\s+(\d+)') - - for node in range(self.num_nodes): - attr = {} - total_mb = 0 - free_mb = 0 - - meminfo = "/sys/devices/system/node/node%d/meminfo" % node - try: - with open(meminfo, 'r') as infile: - for line in infile: - match = re_node_memtotal.search(line) - if match: - total_mb += int(match.group(1)) - continue - - match = re_node_memfree.search(line) - if match: - free_mb += int(match.group(1)) - continue - match = re_node_filepages.search(line) - if match: - free_mb += int(match.group(1)) - continue - match = re_node_sreclaim.search(line) - if match: - free_mb += int(match.group(1)) - continue - - except IOError: - # silently ignore IO errors (eg. file missing) - pass - - total_mb /= 1024 - free_mb /= 1024 - self.total_memory_nodes_mb.append(total_mb) - attr = { - 'numa_node': node, - 'memtotal_mib': total_mb, - 'memavail_mib': free_mb, - 'hugepages_configured': 'False', - } - - imemory.append(attr) - - return imemory - - def inodes_get_imemory(self): - '''Enumerate logical memory topology based on: - if CONF.compute_hugepages: - self._inode_get_memory_hugepages() - else: - self._inode_get_memory_nonhugepages() - - :param self - :returns list of memory nodes and attributes - ''' - imemory = [] - - # if CONF.compute_hugepages: - if os.path.isfile("/etc/nova/compute_reserved.conf"): - imemory = self._inode_get_memory_hugepages() - else: - imemory = self._inode_get_memory_nonhugepages() - - LOG.debug("imemory= %s" % imemory) - - return imemory diff --git a/inventory/inventory/inventory/agent/pci.py b/inventory/inventory/inventory/agent/pci.py deleted file mode 100644 index db9daa4a..00000000 --- a/inventory/inventory/inventory/agent/pci.py +++ /dev/null @@ -1,621 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" inventory pci Utilities and helper functions.""" - -import glob -import os -import shlex -import subprocess - -from inventory.common import k_pci -from inventory.common import utils -from oslo_log import log - -LOG = log.getLogger(__name__) - -# Look for PCI class 0x0200 and 0x0280 so that we get generic ethernet -# controllers and those that may report as "other" network controllers. -ETHERNET_PCI_CLASSES = ['ethernet controller', 'network controller'] - -# Look for other devices we may want to inventory.
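# [Editorial note, not part of the original change] Each entry in the list
# below matches either on class_id alone (the GPU entry) or on the
# vendor_id/device_id pair (the QAT PF entries); pci_get_enabled_attr()
# later in this file applies exactly that test. A minimal standalone sketch
# of the matching rule (hypothetical helper, not in the original source):
def _sketch_is_known_device(known_devices, class_id, vendor_id, device_id):
    """Return True if the ids match any known-device entry."""
    for known in known_devices:
        # a class_id match is sufficient on its own; otherwise both the
        # vendor_id and device_id must match the entry
        if (class_id == known.get("class_id") or
                (vendor_id == known.get("vendor_id") and
                 device_id == known.get("device_id"))):
            return True
    return False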
-KNOWN_PCI_DEVICES = [ - {"vendor_id": k_pci.NOVA_PCI_ALIAS_QAT_PF_VENDOR, - "device_id": k_pci.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE, - "class_id": k_pci.NOVA_PCI_ALIAS_QAT_CLASS}, - {"vendor_id": k_pci.NOVA_PCI_ALIAS_QAT_PF_VENDOR, - "device_id": k_pci.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE, - "class_id": k_pci.NOVA_PCI_ALIAS_QAT_CLASS}, - {"class_id": k_pci.NOVA_PCI_ALIAS_GPU_CLASS}] - - # PCI-SIG 0x06 bridge devices to not inventory. - IGNORE_BRIDGE_PCI_CLASSES = ['bridge', 'isa bridge', 'host bridge'] - - # PCI-SIG 0x08 generic peripheral devices to not inventory. - IGNORE_PERIPHERAL_PCI_CLASSES = ['system peripheral', 'pic', 'dma controller', - 'iommu', 'rtc'] - - # PCI-SIG 0x11 signal processing devices to not inventory. - IGNORE_SIGNAL_PROCESSING_PCI_CLASSES = ['performance counters'] - - # Blacklist of devices we do not want to inventory, because they are dealt - # with separately (ie. Ethernet devices), or do not make sense to expose - # to a guest. - IGNORE_PCI_CLASSES = ETHERNET_PCI_CLASSES + IGNORE_BRIDGE_PCI_CLASSES + \ - IGNORE_PERIPHERAL_PCI_CLASSES + IGNORE_SIGNAL_PROCESSING_PCI_CLASSES - - pciaddr = 0 - pclass = 1 - pvendor = 2 - pdevice = 3 - prevision = 4 - psvendor = 5 - psdevice = 6 - - VALID_PORT_SPEED = ['10', '100', '1000', '10000', '40000', '100000'] - - # Network device flags (from include/uapi/linux/if.h) - IFF_UP = 1 << 0 - IFF_BROADCAST = 1 << 1 - IFF_DEBUG = 1 << 2 - IFF_LOOPBACK = 1 << 3 - IFF_POINTOPOINT = 1 << 4 - IFF_NOTRAILERS = 1 << 5 - IFF_RUNNING = 1 << 6 - IFF_NOARP = 1 << 7 - IFF_PROMISC = 1 << 8 - IFF_ALLMULTI = 1 << 9 - IFF_MASTER = 1 << 10 - IFF_SLAVE = 1 << 11 - IFF_MULTICAST = 1 << 12 - IFF_PORTSEL = 1 << 13 - IFF_AUTOMEDIA = 1 << 14 - IFF_DYNAMIC = 1 << 15 - - - class PCI(object): - '''Class to encapsulate PCI data for System Inventory''' - - def __init__(self, pciaddr, pclass, pvendor, pdevice, prevision, - psvendor, psdevice): - '''Construct a pci object with the given values.''' - - self.pciaddr = pciaddr - self.pclass = pclass - self.pvendor = pvendor - self.pdevice = pdevice - self.prevision = prevision - self.psvendor = psvendor - self.psdevice = psdevice - - def __eq__(self, rhs): - return (self.pvendor == rhs.pvendor and - self.pdevice == rhs.pdevice) - - def __ne__(self, rhs): - return (self.pvendor != rhs.pvendor or - self.pdevice != rhs.pdevice) - - def __str__(self): - return "%s [%s] [%s]" % (self.pciaddr, self.pvendor, self.pdevice) - - def __repr__(self): - return "<PCI '%s'>" % str(self) - - - class Port(object): - '''Class to encapsulate PCI data for System Inventory''' - - def __init__(self, ipci, **kwargs): - '''Construct a port object with the given values.''' - self.ipci = ipci - self.name = kwargs.get('name') - self.mac = kwargs.get('mac') - self.mtu = kwargs.get('mtu') - self.speed = kwargs.get('speed') - self.link_mode = kwargs.get('link_mode') - self.numa_node = kwargs.get('numa_node') - self.dev_id = kwargs.get('dev_id') - self.sriov_totalvfs = kwargs.get('sriov_totalvfs') - self.sriov_numvfs = kwargs.get('sriov_numvfs') - self.sriov_vfs_pci_address = kwargs.get('sriov_vfs_pci_address') - self.driver = kwargs.get('driver') - self.dpdksupport = kwargs.get('dpdksupport') - - def __str__(self): - return "%s %s: [%s] [%s] [%s], [%s], [%s], [%s], [%s]" % ( - self.ipci, self.name, self.mac, self.mtu, self.speed, - self.link_mode, self.numa_node, self.dev_id, self.dpdksupport) - - def __repr__(self): - return "<Port '%s'>" % str(self) - - - class PCIDevice(object): - '''Class to encapsulate extended PCI data for System Inventory''' - - def __init__(self, pci, **kwargs): - '''Construct a
PciDevice object with the given values.''' - self.pci = pci - self.name = kwargs.get('name') - self.pclass_id = kwargs.get('pclass_id') - self.pvendor_id = kwargs.get('pvendor_id') - self.pdevice_id = kwargs.get('pdevice_id') - self.numa_node = kwargs.get('numa_node') - self.sriov_totalvfs = kwargs.get('sriov_totalvfs') - self.sriov_numvfs = kwargs.get('sriov_numvfs') - self.sriov_vfs_pci_address = kwargs.get('sriov_vfs_pci_address') - self.driver = kwargs.get('driver') - self.enabled = kwargs.get('enabled') - self.extra_info = kwargs.get('extra_info') - - def __str__(self): - return "%s %s: [%s]" % ( - self.pci, self.numa_node, self.driver) - - def __repr__(self): - return "<PCIDevice '%s'>" % str(self) - - - class PCIOperator(object): - '''Class to encapsulate PCI operations for System Inventory''' - - def format_lspci_output(self, device): - # hack for now - if device[prevision].strip() == device[pvendor].strip(): - # no revision info - device.append(device[psvendor]) - device[psvendor] = device[prevision] - device[prevision] = "0" - elif len(device) <= 6: # one less entry, no revision - LOG.debug("update psdevice length=%s" % len(device)) - device.append(device[psvendor]) - return device - - def get_pci_numa_node(self, pciaddr): - fnuma_node = '/sys/bus/pci/devices/' + pciaddr + '/numa_node' - try: - with open(fnuma_node, 'r') as f: - numa_node = f.readline().strip() - LOG.debug("ATTR numa_node: %s " % numa_node) - except Exception: - LOG.debug("ATTR numa_node unknown for: %s " % pciaddr) - numa_node = None - return numa_node - - def get_pci_sriov_totalvfs(self, pciaddr): - fsriov_totalvfs = '/sys/bus/pci/devices/' + pciaddr + '/sriov_totalvfs' - try: - with open(fsriov_totalvfs, 'r') as f: - sriov_totalvfs = f.readline() - LOG.debug("ATTR sriov_totalvfs: %s " % sriov_totalvfs) - f.close() - except Exception: - LOG.debug("ATTR sriov_totalvfs unknown for: %s " % pciaddr) - sriov_totalvfs = None - pass - return sriov_totalvfs - - def get_pci_sriov_numvfs(self, pciaddr): - fsriov_numvfs = '/sys/bus/pci/devices/' + pciaddr + '/sriov_numvfs' - try: - with open(fsriov_numvfs, 'r') as f: - sriov_numvfs = f.readline() - LOG.debug("ATTR sriov_numvfs: %s " % sriov_numvfs) - f.close() - except Exception: - LOG.debug("ATTR sriov_numvfs unknown for: %s " % pciaddr) - sriov_numvfs = 0 - pass - LOG.debug("sriov_numvfs: %s" % sriov_numvfs) - return sriov_numvfs - - def get_pci_sriov_vfs_pci_address(self, pciaddr, sriov_numvfs): - dirpcidev = '/sys/bus/pci/devices/' + pciaddr - sriov_vfs_pci_address = [] - i = 0 - while i < int(sriov_numvfs): - lvf = dirpcidev + '/virtfn' + str(i) - try: - sriov_vfs_pci_address.append( - os.path.basename(os.readlink(lvf))) - except Exception: - LOG.warning("virtfn link %s non-existent (sriov_numvfs=%s)" - % (lvf, sriov_numvfs)) - pass - i += 1 - LOG.debug("sriov_vfs_pci_address: %s" % sriov_vfs_pci_address) - return sriov_vfs_pci_address - - def get_pci_driver_name(self, pciaddr): - ddriver = '/sys/bus/pci/devices/' + pciaddr + '/driver/module/drivers' - try: - drivers = [ - os.path.basename(os.readlink(ddriver + '/' + d)) - for d in os.listdir(ddriver)] - driver = str(','.join(str(d) for d in drivers)) - - except Exception: - LOG.debug("ATTR driver unknown for: %s " % pciaddr) - driver = None - pass - LOG.debug("driver: %s" % driver) - return driver - - def pci_devices_get(self): - - p = subprocess.Popen(["lspci", "-Dm"], stdout=subprocess.PIPE) - - pci_devices = [] - for line in p.stdout: - pci_device = shlex.split(line.strip()) - pci_device = self.format_lspci_output(pci_device) - -
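# [Editorial illustration, not part of the original change] lspci -Dm emits
# one quoted record per device, so after shlex.split() the list indexes line
# up with the pciaddr..psdevice constants above. A representative record
# (hypothetical device) might split as follows:
#   0000:02:00.0 "Ethernet controller" "Intel Corporation" "Device 1521"
#       -r01 "Intel Corporation" "Device 0000"
# giving pci_device[pciaddr] == '0000:02:00.0', pci_device[pclass] ==
# 'Ethernet controller', and the revision token at pci_device[prevision].
# When lspci omits the revision field, format_lspci_output() above shifts
# the subsystem fields and substitutes "0" so the indexes stay valid.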
if any(x in pci_device[pclass].lower() for x in - IGNORE_PCI_CLASSES): - continue - - dirpcidev = '/sys/bus/pci/devices/' - physfn = dirpcidev + pci_device[pciaddr] + '/physfn' - if not os.path.isdir(physfn): - # Do not report VFs - pci_devices.append(PCI(pci_device[pciaddr], - pci_device[pclass], - pci_device[pvendor], - pci_device[pdevice], - pci_device[prevision], - pci_device[psvendor], - pci_device[psdevice])) - - p.wait() - - return pci_devices - - def inics_get(self): - - p = subprocess.Popen(["lspci", "-Dm"], stdout=subprocess.PIPE) - - pci_inics = [] - for line in p.stdout: - inic = shlex.split(line.strip()) - if any(x in inic[pclass].lower() for x in ETHERNET_PCI_CLASSES): - # hack for now - if inic[prevision].strip() == inic[pvendor].strip(): - # no revision info - inic.append(inic[psvendor]) - inic[psvendor] = inic[prevision] - inic[prevision] = "0" - elif len(inic) <= 6: # one less entry, no revision - LOG.debug("update psdevice length=%s" % len(inic)) - inic.append(inic[psvendor]) - - dirpcidev = '/sys/bus/pci/devices/' - physfn = dirpcidev + inic[pciaddr] + '/physfn' - if os.path.isdir(physfn): - # Do not report VFs - continue - pci_inics.append(PCI(inic[pciaddr], inic[pclass], - inic[pvendor], inic[pdevice], - inic[prevision], inic[psvendor], - inic[psdevice])) - - p.wait() - - return pci_inics - - def pci_get_enabled_attr(self, class_id, vendor_id, product_id): - for known_device in KNOWN_PCI_DEVICES: - if (class_id == known_device.get("class_id", None) or - (vendor_id == known_device.get("vendor_id", None) and - product_id == known_device.get("device_id", None))): - return True - return False - - def pci_get_device_attrs(self, pciaddr): - """For this pciaddr, build a list of device attributes """ - pci_attrs_array = [] - - dirpcidev = '/sys/bus/pci/devices/' - pciaddrs = os.listdir(dirpcidev) - - for a in pciaddrs: - if ((a == pciaddr) or (a == ("0000:" + pciaddr))): - LOG.debug("Found device pci bus: %s " % a) - - dirpcideva = dirpcidev + a - - numa_node = self.get_pci_numa_node(a) - sriov_totalvfs = self.get_pci_sriov_totalvfs(a) - sriov_numvfs = self.get_pci_sriov_numvfs(a) - sriov_vfs_pci_address = \ - self.get_pci_sriov_vfs_pci_address(a, sriov_numvfs) - driver = self.get_pci_driver_name(a) - - fclass = dirpcideva + '/class' - fvendor = dirpcideva + '/vendor' - fdevice = dirpcideva + '/device' - try: - with open(fvendor, 'r') as f: - pvendor_id = f.readline().strip('0x').strip() - except Exception: - LOG.debug("ATTR vendor unknown for: %s " % a) - pvendor_id = None - - try: - with open(fdevice, 'r') as f: - pdevice_id = f.readline().replace('0x', '').strip() - except Exception: - LOG.debug("ATTR device unknown for: %s " % a) - pdevice_id = None - - try: - with open(fclass, 'r') as f: - pclass_id = f.readline().replace('0x', '').strip() - except Exception: - LOG.debug("ATTR class unknown for: %s " % a) - pclass_id = None - - name = "pci_" + a.replace(':', '_').replace('.', '_') - - attrs = { - "name": name, - "pci_address": a, - "pclass_id": pclass_id, - "pvendor_id": pvendor_id, - "pdevice_id": pdevice_id, - "numa_node": numa_node, - "sriov_totalvfs": sriov_totalvfs, - "sriov_numvfs": sriov_numvfs, - "sriov_vfs_pci_address": - ','.join(str(x) for x in sriov_vfs_pci_address), - "driver": driver, - "enabled": self.pci_get_enabled_attr( - pclass_id, pvendor_id, pdevice_id), - } - - pci_attrs_array.append(attrs) - - return pci_attrs_array - - def get_pci_net_directory(self, pciaddr): - device_directory = '/sys/bus/pci/devices/' + pciaddr - # Look for the standard device 
'net' directory - net_directory = device_directory + '/net/' - if os.path.exists(net_directory): - return net_directory - # Otherwise check whether this is a virtio based device - net_pattern = device_directory + '/virtio*/net/' - results = glob.glob(net_pattern) - if not results: - return None - if len(results) > 1: - LOG.warning("PCI device {} has multiple virtio " - "sub-directories".format(pciaddr)) - return results[0] - - def _read_flags(self, fflags): - try: - with open(fflags, 'r') as f: - hex_str = f.readline().rstrip() - flags = int(hex_str, 16) - except Exception: - flags = None - return flags - - def _get_netdev_flags(self, dirpcinet, pci): - fflags = dirpcinet + pci + '/flags' - return self._read_flags(fflags) - - def pci_get_net_flags(self, name): - fflags = '/sys/class/net/' + name + '/flags' - return self._read_flags(fflags) - - def pci_get_net_names(self): - '''build a list of network device names.''' - names = [] - for name in os.listdir('/sys/class/net/'): - if os.path.isdir('/sys/class/net/' + name): - names.append(name) - return names - - def pci_get_net_attrs(self, pciaddr): - """For this pciaddr, build a list of network attributes per port""" - pci_attrs_array = [] - - dirpcidev = '/sys/bus/pci/devices/' - pciaddrs = os.listdir(dirpcidev) - - for a in pciaddrs: - if ((a == pciaddr) or (a == ("0000:" + pciaddr))): - # Look inside net; expect to find address, speed, mtu, etc. - # There may be more than 1 net device for this NIC. - LOG.debug("Found NIC pci bus: %s " % a) - - dirpcideva = dirpcidev + a - - numa_node = self.get_pci_numa_node(a) - sriov_totalvfs = self.get_pci_sriov_totalvfs(a) - sriov_numvfs = self.get_pci_sriov_numvfs(a) - sriov_vfs_pci_address = \ - self.get_pci_sriov_vfs_pci_address(a, sriov_numvfs) - driver = self.get_pci_driver_name(a) - - # Determine DPDK support - dpdksupport = False - fvendor = dirpcideva + '/vendor' - fdevice = dirpcideva + '/device' - try: - with open(fvendor, 'r') as f: - vendor = f.readline().strip() - except Exception: - LOG.debug("ATTR vendor unknown for: %s " % a) - vendor = None - - try: - with open(fdevice, 'r') as f: - device = f.readline().strip() - except Exception: - LOG.debug("ATTR device unknown for: %s " % a) - device = None - - try: - with open(os.devnull, "w") as fnull: - subprocess.check_call( - ["query_pci_id", "-v " + str(vendor), - "-d " + str(device)], - stdout=fnull, stderr=fnull) - dpdksupport = True - LOG.debug("DPDK does support NIC " - "(vendor: %s device: %s)", - vendor, device) - except subprocess.CalledProcessError as e: - dpdksupport = False - if e.returncode == 1: - # NIC is not supported - LOG.debug("DPDK does not support NIC " - "(vendor: %s device: %s)", - vendor, device) - else: - # command failed, default DPDK support to False - LOG.info("Could not determine DPDK support for " - "NIC (vendor %s device: %s), defaulting " - "to False", vendor, device) - - # determine the net directory for this device - dirpcinet = self.get_pci_net_directory(a) - if dirpcinet is None: - LOG.warning("no /net for PCI device: %s " % a) - continue # go to next PCI device - - # determine which netdevs are associated to this device - netdevs = os.listdir(dirpcinet) - for n in netdevs: - mac = None - fmac = dirpcinet + n + '/' + "address" - fmaster = dirpcinet + n + '/' + "master" - # if a port is a member of a bond the port MAC address - # must be retrieved from /proc/net/bonding/ - if os.path.exists(fmaster): - dirmaster = os.path.realpath(fmaster) - master_name = os.path.basename(dirmaster) - procnetbonding =
'/proc/net/bonding/' + master_name - found_interface = False - - try: - with open(procnetbonding, 'r') as f: - for line in f: - if 'Slave Interface: ' + n in line: - found_interface = True - if (found_interface and - 'Permanent HW addr:' in line): - mac = line.split(': ')[1].rstrip() - mac = utils.validate_and_normalize_mac( - mac) - break - if not mac: - LOG.info("ATTR mac could not be determined" - " for slave interface %s" % n) - except Exception: - LOG.info("ATTR mac could not be determined, " - "could not open %s" % procnetbonding) - else: - try: - with open(fmac, 'r') as f: - mac = f.readline().rstrip() - mac = utils.validate_and_normalize_mac(mac) - except Exception: - LOG.info("ATTR mac unknown for: %s " % n) - - fmtu = dirpcinet + n + '/' + "mtu" - try: - with open(fmtu, 'r') as f: - mtu = f.readline().rstrip() - except Exception: - LOG.debug("ATTR mtu unknown for: %s " % n) - mtu = None - - # Check the administrative state before reading the speed - flags = self._get_netdev_flags(dirpcinet, n) - - # If administrative state is down, bring it up momentarily - if not(flags & IFF_UP): - LOG.warning("Enabling device %s to query link speed" % - n) - cmd = 'ip link set dev %s up' % n - subprocess.Popen(cmd, stdout=subprocess.PIPE, - shell=True) - # Read the speed - fspeed = dirpcinet + n + '/' + "speed" - try: - with open(fspeed, 'r') as f: - speed = f.readline().rstrip() - if speed not in VALID_PORT_SPEED: - LOG.error("Invalid port speed = %s for %s " % - (speed, n)) - speed = None - except Exception: - LOG.warning("ATTR speed unknown for: %s " - "(flags: %s)" % (n, hex(flags))) - speed = None - # If the administrative state was down, take it back down - if not(flags & IFF_UP): - LOG.warning("Disabling device %s after querying " - "link speed" % n) - cmd = 'ip link set dev %s down' % n - subprocess.Popen(cmd, stdout=subprocess.PIPE, - shell=True) - - flink_mode = dirpcinet + n + '/' + "link_mode" - try: - with open(flink_mode, 'r') as f: - link_mode = f.readline().rstrip() - except Exception: - LOG.debug("ATTR link_mode unknown for: %s " % n) - link_mode = None - - fdevport = dirpcinet + n + '/' + "dev_port" - try: - with open(fdevport, 'r') as f: - dev_port = int(f.readline().rstrip(), 0) - except Exception: - LOG.debug("ATTR dev_port unknown for: %s " % n) - # Kernel versions older than 3.15 used dev_id - # (incorrectly) to identify the network devices, - # therefore support the fallback if dev_port is not - # available - try: - fdevid = dirpcinet + n + '/' + "dev_id" - with open(fdevid, 'r') as f: - dev_port = int(f.readline().rstrip(), 0) - except Exception: - LOG.debug("ATTR dev_id unknown for: %s " % n) - dev_port = 0 - - attrs = { - "name": n, - "numa_node": numa_node, - "sriov_totalvfs": sriov_totalvfs, - "sriov_numvfs": sriov_numvfs, - "sriov_vfs_pci_address": - ','.join(str(x) for x in sriov_vfs_pci_address), - "driver": driver, - "pci_address": a, - "mac": mac, - "mtu": mtu, - "speed": speed, - "link_mode": link_mode, - "dev_id": dev_port, - "dpdksupport": dpdksupport - } - - pci_attrs_array.append(attrs) - - return pci_attrs_array diff --git a/inventory/inventory/inventory/agent/rpcapi.py b/inventory/inventory/inventory/agent/rpcapi.py deleted file mode 100644 index bd836f20..00000000 --- a/inventory/inventory/inventory/agent/rpcapi.py +++ /dev/null @@ -1,161 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 - -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -""" -Client side of the agent RPC API. -""" - -from oslo_log import log -import oslo_messaging as messaging - -from inventory.common import rpc -from inventory.objects import base as objects_base - - -LOG = log.getLogger(__name__) - -MANAGER_TOPIC = 'inventory.agent_manager' - - -class AgentAPI(object): - """Client side of the agent RPC API. - - API version history: - - 1.0 - Initial version. - """ - - RPC_API_VERSION = '1.0' - - def __init__(self, topic=None): - - super(AgentAPI, self).__init__() - self.topic = topic - if self.topic is None: - self.topic = MANAGER_TOPIC - target = messaging.Target(topic=self.topic, - version='1.0') - serializer = objects_base.InventoryObjectSerializer() - version_cap = self.RPC_API_VERSION - self.client = rpc.get_client(target, - version_cap=version_cap, - serializer=serializer) - - def host_inventory(self, context, values, topic=None): - """Synchronously, have an agent collect inventory for this host. - - Collect ihost inventory and report to conductor. - - :param context: request context. - :param values: dictionary with initial values for new host object - :returns: created ihost object, including all fields. - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'host_inventory', - values=values) - - def configure_ttys_dcd(self, context, uuid, ttys_dcd, topic=None): - """Asynchronously, have the agent configure the getty on the serial - console. - - :param context: request context. - :param uuid: the host uuid - :param ttys_dcd: the flag to enable/disable dcd - :returns: none ... uses asynchronous cast(). - """ - # fanout / broadcast message to all inventory agents - LOG.debug("AgentApi.configure_ttys_dcd: fanout_cast: sending " - "dcd update to agent: (%s) (%s)" % (uuid, ttys_dcd)) - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0', - fanout=True) - retval = cctxt.cast(context, - 'configure_ttys_dcd', - uuid=uuid, - ttys_dcd=ttys_dcd) - - return retval - - def execute_command(self, context, host_uuid, command, topic=None): - """Asynchronously, have the agent execute a command - - :param context: request context. - :param host_uuid: the host uuid - :param command: the command to execute - :returns: none ... uses asynchronous cast().
- """ - # fanout / broadcast message to all inventory agents - LOG.debug("AgentApi.update_cpu_config: fanout_cast: sending " - "host uuid: (%s) " % host_uuid) - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0', - fanout=True) - retval = cctxt.cast(context, - 'execute_command', - host_uuid=host_uuid, - command=command) - return retval - - def agent_update(self, context, host_uuid, force_updates, - cinder_device=None, - topic=None): - """ - Asynchronously, have the agent update partitions, ipv and ilvg state - - :param context: request context - :param host_uuid: the host uuid - :param force_updates: list of inventory objects to update - :param cinder_device: device by path of cinder volumes - :return: none ... uses asynchronous cast(). - """ - - # fanout / broadcast message to all inventory agents - LOG.info("AgentApi.agent_update: fanout_cast: sending " - "update request to agent for: (%s)" % - (', '.join(force_updates))) - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0', - fanout=True) - retval = cctxt.cast(context, - 'agent_audit', - host_uuid=host_uuid, - force_updates=force_updates, - cinder_device=cinder_device) - return retval - - def disk_format_gpt(self, context, host_uuid, idisk_dict, - is_cinder_device, topic=None): - """Asynchronously, GPT format a disk. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param idisk_dict: values for disk object - :param is_cinder_device: bool value tells if the idisk is for cinder - :returns: pass or fail - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0', - fanout=True) - - return cctxt.cast(context, - 'disk_format_gpt', - host_uuid=host_uuid, - idisk_dict=idisk_dict, - is_cinder_device=is_cinder_device) diff --git a/inventory/inventory/inventory/api/__init__.py b/inventory/inventory/inventory/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/api/app.py b/inventory/inventory/inventory/api/app.py deleted file mode 100644 index 8d27ffd2..00000000 --- a/inventory/inventory/inventory/api/app.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service -from oslo_service import wsgi -import pecan - -from inventory.api import config -from inventory.api import middleware -from inventory.common.i18n import _ -from inventory.common import policy - -CONF = cfg.CONF - -LOG = log.getLogger(__name__) - -_launcher = None -_launcher_pxe = None - - -def get_pecan_config(): - # Set up the pecan configuration - filename = config.__file__.replace('.pyc', '.py') - return pecan.configuration.conf_from_file(filename) - - -def setup_app(config=None): - policy.init_enforcer() - - if not config: - config = get_pecan_config() - - pecan.configuration.set_config(dict(config), overwrite=True) - app_conf = dict(config.app) - - app = pecan.make_app( - app_conf.pop('root'), - debug=CONF.debug, - logging=getattr(config, 'logging', {}), - force_canonical=getattr(config.app, 'force_canonical', True), - guess_content_type_from_ext=False, - wrap_app=middleware.ParsableErrorMiddleware, - **app_conf - ) - return app - - -def load_paste_app(app_name=None): - """Loads a WSGI app from a paste config file.""" - if app_name is None: - app_name = cfg.CONF.prog - - loader = wsgi.Loader(cfg.CONF) - app = loader.load_app(app_name) - return app - - -def app_factory(global_config, **local_conf): - return setup_app() - - -def serve(api_service, conf, workers=1): - global _launcher - - if _launcher: - raise RuntimeError(_('serve() _launcher can only be called once')) - - _launcher = service.launch(conf, api_service, workers=workers) - - -def serve_pxe(api_service, conf, workers=1): - global _launcher_pxe - - if _launcher_pxe: - raise RuntimeError(_('serve() _launcher_pxe can only be called once')) - - _launcher_pxe = service.launch(conf, api_service, workers=workers) - - -def wait(): - _launcher.wait() - - -def wait_pxe(): - _launcher_pxe.wait() diff --git a/inventory/inventory/inventory/api/config.py b/inventory/inventory/inventory/api/config.py deleted file mode 100644 index 4f2aaecc..00000000 --- a/inventory/inventory/inventory/api/config.py +++ /dev/null @@ -1,73 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from inventory.api import hooks -from inventory.common import config -from inventory import objects -from keystoneauth1 import loading as ks_loading -from oslo_config import cfg -from oslo_log import log as logging -import pbr.version -import sys - -LOG = logging.getLogger(__name__) - -sysinv_group = cfg.OptGroup( - 'sysinv', - title='Sysinv Options', - help="Configuration options for the platform service") - -sysinv_opts = [ - cfg.StrOpt('catalog_info', - default='platform:sysinv:internalURL', - help="Service catalog Look up info."), - cfg.StrOpt('os_region_name', - default='RegionOne', - help="Region name of this node. 
It is used for catalog lookup"), -] - -version_info = pbr.version.VersionInfo('inventory') - -# Pecan Application Configurations -app = { - 'root': 'inventory.api.controllers.root.RootController', - 'modules': ['inventory.api'], - 'hooks': [ - hooks.DBHook(), - hooks.ContextHook(), - hooks.RPCHook(), - hooks.SystemConfigHook(), - ], - 'acl_public_routes': [ - '/', - '/v1', - ], -} - - -def init(args, **kwargs): - cfg.CONF.register_group(sysinv_group) - cfg.CONF.register_opts(sysinv_opts, group=sysinv_group) - ks_loading.register_session_conf_options(cfg.CONF, - sysinv_group.name) - logging.register_options(cfg.CONF) - - cfg.CONF(args=args, project='inventory', - version='%%(prog)s %s' % version_info.release_string(), - **kwargs) - objects.register_all() - config.parse_args(args) - - -def setup_logging(): - """Sets up the logging options for a log with supplied name.""" - logging.setup(cfg.CONF, "inventory") - LOG.debug("Logging enabled!") - LOG.debug("%(prog)s version %(version)s", - {'prog': sys.argv[0], - 'version': version_info.release_string()}) - LOG.debug("command line: %s", " ".join(sys.argv)) diff --git a/inventory/inventory/inventory/api/controllers/__init__.py b/inventory/inventory/inventory/api/controllers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/api/controllers/root.py b/inventory/inventory/inventory/api/controllers/root.py deleted file mode 100644 index 01c26715..00000000 --- a/inventory/inventory/inventory/api/controllers/root.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers import v1 -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import link - -ID_VERSION = 'v1' - - -def expose(*args, **kwargs): - """Ensure that only JSON, and not XML, is supported.""" - if 'rest_content_types' not in kwargs: - kwargs['rest_content_types'] = ('json',) - return wsme_pecan.wsexpose(*args, **kwargs) - - -class Version(base.APIBase): - """An API version representation. - - This class represents an API version, including the minimum and - maximum minor versions that are supported within the major version. 
- """ - - id = wtypes.text - """The ID of the (major) version, also acts as the release number""" - - links = [link.Link] - """A Link that point to a specific version of the API""" - - @classmethod - def convert(cls, vid): - version = Version() - version.id = vid - version.links = [link.Link.make_link('self', pecan.request.host_url, - vid, '', bookmark=True)] - return version - - -class Root(base.APIBase): - - name = wtypes.text - """The name of the API""" - - description = wtypes.text - """Some information about this API""" - - versions = [Version] - """Links to all the versions available in this API""" - - default_version = Version - """A link to the default version of the API""" - - @staticmethod - def convert(): - root = Root() - root.name = "Inventory API" - root.description = ("Inventory is an OpenStack project which " - "provides REST API services for " - "system configuration.") - root.default_version = Version.convert(ID_VERSION) - root.versions = [root.default_version] - return root - - -class RootController(rest.RestController): - - _versions = [ID_VERSION] - """All supported API versions""" - - _default_version = ID_VERSION - """The default API version""" - - v1 = v1.Controller() - - @expose(Root) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return Root.convert() - - @pecan.expose() - def _route(self, args, request=None): - """Overrides the default routing behavior. - - It redirects the request to the default version of the Inventory API - if the version number is not specified in the url. - """ - - if args[0] and args[0] not in self._versions: - args = [self._default_version] + args - return super(RootController, self)._route(args, request) diff --git a/inventory/inventory/inventory/api/controllers/v1/__init__.py b/inventory/inventory/inventory/api/controllers/v1/__init__.py deleted file mode 100644 index 74b752df..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/__init__.py +++ /dev/null @@ -1,198 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import pecan -from pecan import rest - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import cpu -from inventory.api.controllers.v1 import ethernet_port -from inventory.api.controllers.v1 import host -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import lldp_agent -from inventory.api.controllers.v1 import lldp_neighbour -from inventory.api.controllers.v1 import memory -from inventory.api.controllers.v1 import node -from inventory.api.controllers.v1 import pci_device -from inventory.api.controllers.v1 import port -from inventory.api.controllers.v1 import sensor -from inventory.api.controllers.v1 import sensorgroup - -from inventory.api.controllers.v1 import system -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - - -class MediaType(base.APIBase): - """A media type representation.""" - - base = wtypes.text - type = wtypes.text - - def __init__(self, base, type): - self.base = base - self.type = type - - -class V1(base.APIBase): - """The representation of the version 1 of the API.""" - - id = wtypes.text - "The ID of the version, also acts as the release number" - - media_types = [MediaType] - "An array of supported media types for this version" - - links = [link.Link] - "Links that point to a specific URL for this version and documentation" - - systems = [link.Link] - "Links to the system resource" - - hosts = [link.Link] - "Links to the host resource" - - lldp_agents = [link.Link] - "Links to the lldp agents resource" - - lldp_neighbours = [link.Link] - "Links to the lldp neighbours resource" - - @classmethod - def convert(self): - v1 = V1() - v1.id = "v1" - v1.links = [link.Link.make_link('self', pecan.request.host_url, - 'v1', '', bookmark=True), - link.Link.make_link('describedby', - 'http://www.starlingx.io/', - 'developer/inventory/dev', - 'api-spec-v1.html', - bookmark=True, type='text/html') - ] - v1.media_types = [MediaType('application/json', - 'application/vnd.openstack.inventory.v1+json')] - - v1.systems = [link.Link.make_link('self', pecan.request.host_url, - 'systems', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'systems', '', - bookmark=True) - ] - - v1.hosts = [link.Link.make_link('self', pecan.request.host_url, - 'hosts', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', '', - bookmark=True) - ] - - v1.nodes = [link.Link.make_link('self', pecan.request.host_url, - 'nodes', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'nodes', '', - bookmark=True) - ] - - v1.cpus = [link.Link.make_link('self', pecan.request.host_url, - 'cpus', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'cpus', '', - bookmark=True) - ] - - v1.memory = [link.Link.make_link('self', pecan.request.host_url, - 'memory', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'memory', '', - bookmark=True) - ] - - v1.ports = [link.Link.make_link('self', - pecan.request.host_url, - 'ports', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'ports', '', - bookmark=True) - ] - - v1.ethernet_ports = [link.Link.make_link('self', - pecan.request.host_url, - 'ethernet_ports', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'ethernet_ports', '', - bookmark=True) - ] - - v1.lldp_agents = [link.Link.make_link('self', - pecan.request.host_url, - 'lldp_agents', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'lldp_agents', '', - 
bookmark=True) - ] - - v1.lldp_neighbours = [link.Link.make_link('self', - pecan.request.host_url, - 'lldp_neighbours', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'lldp_neighbours', '', - bookmark=True) - ] - - v1.sensors = [link.Link.make_link('self', - pecan.request.host_url, - 'sensors', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'sensors', '', - bookmark=True) - ] - - v1.sensorgroups = [link.Link.make_link('self', - pecan.request.host_url, - 'sensorgroups', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'sensorgroups', '', - bookmark=True) - ] - - return v1 - - -class Controller(rest.RestController): - """Version 1 API controller root.""" - - systems = system.SystemController() - hosts = host.HostController() - nodes = node.NodeController() - cpus = cpu.CPUController() - memorys = memory.MemoryController() - ports = port.PortController() - ethernet_ports = ethernet_port.EthernetPortController() - lldp_agents = lldp_agent.LLDPAgentController() - lldp_neighbours = lldp_neighbour.LLDPNeighbourController() - pci_devices = pci_device.PCIDeviceController() - sensors = sensor.SensorController() - sensorgroups = sensorgroup.SensorGroupController() - - @wsme_pecan.wsexpose(V1) - def get(self): - return V1.convert() - - -__all__ = ('Controller',) diff --git a/inventory/inventory/inventory/api/controllers/v1/base.py b/inventory/inventory/inventory/api/controllers/v1/base.py deleted file mode 100644 index f795e93a..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/base.py +++ /dev/null @@ -1,130 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import datetime -import functools -from oslo_utils._i18n import _ -from webob import exc -import wsme -from wsme import types as wtypes - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. 
- - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) - - @classmethod - def from_rpc_object(cls, m, fields=None): - """Convert a RPC object to an API object.""" - obj_dict = m.as_dict() - # Unset non-required fields so they do not appear - # in the message body - obj_dict.update(dict((k, wsme.Unset) - for k in obj_dict.keys() - if fields and k not in fields)) - return cls(**obj_dict) - - -@functools.total_ordering -class Version(object): - """API Version object.""" - - string = 'X-OpenStack-Inventory-API-Version' - """HTTP Header string carrying the requested version""" - - min_string = 'X-OpenStack-Inventory-API-Minimum-Version' - """HTTP response header""" - - max_string = 'X-OpenStack-Inventory-API-Maximum-Version' - """HTTP response header""" - - def __init__(self, headers, default_version, latest_version): - """Create an API Version object from the supplied headers. - - :param headers: webob headers - :param default_version: version to use if not specified in headers - :param latest_version: version to use if latest is requested - :raises: webob.HTTPNotAcceptable - """ - (self.major, self.minor) = Version.parse_headers( - headers, default_version, latest_version) - - def __repr__(self): - return '%s.%s' % (self.major, self.minor) - - @staticmethod - def parse_headers(headers, default_version, latest_version): - """Determine the API version requested based on the headers supplied. - - :param headers: webob headers - :param default_version: version to use if not specified in headers - :param latest_version: version to use if latest is requested - :returns: a tupe of (major, minor) version numbers - :raises: webob.HTTPNotAcceptable - """ - version_str = headers.get(Version.string, default_version) - - if version_str.lower() == 'latest': - parse_str = latest_version - else: - parse_str = version_str - - try: - version = tuple(int(i) for i in parse_str.split('.')) - except ValueError: - version = () - - if len(version) != 2: - raise exc.HTTPNotAcceptable(_( - "Invalid value for %s header") % Version.string) - return version - - def __gt__(self, other): - return (self.major, self.minor) > (other.major, other.minor) - - def __eq__(self, other): - return (self.major, self.minor) == (other.major, other.minor) - - def __ne__(self, other): - return not self.__eq__(other) diff --git a/inventory/inventory/inventory/api/controllers/v1/collection.py b/inventory/inventory/inventory/api/controllers/v1/collection.py deleted file mode 100644 index 63745352..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/collection.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import pecan -from wsme import types as wtypes - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import link - - -class Collection(base.APIBase): - - next = wtypes.text - "A link to retrieve the next subset of the collection" - - @property - def collection(self): - return getattr(self, self._type) - - def has_next(self, limit): - """Return whether collection has more items.""" - return len(self.collection) and len(self.collection) == limit - - def get_next(self, limit, url=None, **kwargs): - """Return a link to the next subset of the collection.""" - if not self.has_next(limit): - return wtypes.Unset - - resource_url = url or self._type - q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) - next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { - 'args': q_args, 'limit': limit, - 'marker': self.collection[-1].uuid} - - return link.Link.make_link('next', pecan.request.host_url, - resource_url, next_args).href diff --git a/inventory/inventory/inventory/api/controllers/v1/cpu.py b/inventory/inventory/inventory/api/controllers/v1/cpu.py deleted file mode 100644 index df4cdfc2..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/cpu.py +++ /dev/null @@ -1,303 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import six - -import pecan -from pecan import rest - -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory import objects - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class CPUPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class CPU(base.APIBase): - """API representation of a host CPU. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a cpu. 
- """ - - uuid = types.uuid - "Unique UUID for this cpu" - - cpu = int - "Represent the cpu id cpu" - - core = int - "Represent the core id cpu" - - thread = int - "Represent the thread id cpu" - - cpu_family = wtypes.text - "Represent the cpu family of the cpu" - - cpu_model = wtypes.text - "Represent the cpu model of the cpu" - - function = wtypes.text - "Represent the function of the cpu" - - num_cores_on_processor0 = wtypes.text - "The number of cores on processors 0" - - num_cores_on_processor1 = wtypes.text - "The number of cores on processors 1" - - num_cores_on_processor2 = wtypes.text - "The number of cores on processors 2" - - num_cores_on_processor3 = wtypes.text - "The number of cores on processors 3" - - numa_node = int - "The numa node or zone the cpu. API only attribute" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "This cpu's meta data" - - host_id = int - "The hostid that this cpu belongs to" - - node_id = int - "The nodeId that this cpu belongs to" - - host_uuid = types.uuid - "The UUID of the host this cpu belongs to" - - node_uuid = types.uuid - "The UUID of the node this cpu belongs to" - - links = [link.Link] - "A list containing a self link and associated cpu links" - - def __init__(self, **kwargs): - self.fields = objects.CPU.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - # API only attributes - self.fields.append('function') - setattr(self, 'function', kwargs.get('function', None)) - self.fields.append('num_cores_on_processor0') - setattr(self, 'num_cores_on_processor0', - kwargs.get('num_cores_on_processor0', None)) - self.fields.append('num_cores_on_processor1') - setattr(self, 'num_cores_on_processor1', - kwargs.get('num_cores_on_processor1', None)) - self.fields.append('num_cores_on_processor2') - setattr(self, 'num_cores_on_processor2', - kwargs.get('num_cores_on_processor2', None)) - self.fields.append('num_cores_on_processor3') - setattr(self, 'num_cores_on_processor3', - kwargs.get('num_cores_on_processor3', None)) - - @classmethod - def convert_with_links(cls, rpc_port, expand=True): - cpu = CPU(**rpc_port.as_dict()) - if not expand: - cpu.unset_fields_except( - ['uuid', 'cpu', 'core', 'thread', - 'cpu_family', 'cpu_model', - 'numa_node', 'host_uuid', 'node_uuid', - 'host_id', 'node_id', - 'capabilities', - 'created_at', 'updated_at']) - - # never expose the id attribute - cpu.host_id = wtypes.Unset - cpu.node_id = wtypes.Unset - - cpu.links = [link.Link.make_link('self', pecan.request.host_url, - 'cpus', cpu.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'cpus', cpu.uuid, - bookmark=True) - ] - return cpu - - -class CPUCollection(collection.Collection): - """API representation of a collection of cpus.""" - - cpus = [CPU] - "A list containing cpu objects" - - def __init__(self, **kwargs): - self._type = 'cpus' - - @classmethod - def convert_with_links(cls, rpc_ports, limit, url=None, - expand=False, **kwargs): - collection = CPUCollection() - collection.cpus = [ - CPU.convert_with_links(p, expand) for p in rpc_ports] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class CPUController(rest.RestController): - """REST controller for cpus.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_node=False): - self._from_hosts = from_hosts - self._from_node = from_node - - def _get_cpus_collection(self, i_uuid, node_uuid, marker, - limit, sort_key, sort_dir, - expand=False, 
resource_url=None): - - if self._from_hosts and not i_uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - if self._from_node and not i_uuid: - raise exception.InvalidParameterValue(_( - "Node id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.CPU.get_by_uuid(pecan.request.context, - marker) - - if self._from_hosts: - # cpus = pecan.request.dbapi.cpu_get_by_host( - cpus = objects.CPU.get_by_host( - pecan.request.context, - i_uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - elif self._from_node: - # cpus = pecan.request.dbapi.cpu_get_by_node( - cpus = objects.CPU.get_by_node( - pecan.request.context, - i_uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - if i_uuid and not node_uuid: - # cpus = pecan.request.dbapi.cpu_get_by_host( - cpus = objects.CPU.get_by_host( - pecan.request.context, - i_uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - elif i_uuid and node_uuid: - # cpus = pecan.request.dbapi.cpu_get_by_host_node( - cpus = objects.CPU.get_by_host_node( - pecan.request.context, - i_uuid, - node_uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - elif node_uuid: - # cpus = pecan.request.dbapi.cpu_get_by_node( - cpus = objects.CPU.get_by_node( - pecan.request.context, - node_uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - else: - # cpus = pecan.request.dbapi.icpu_get_list( - cpus = objects.CPU.list( - pecan.request.context, - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return CPUCollection.convert_with_links(cpus, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(CPUCollection, types.uuid, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, host_uuid=None, node_uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of cpus.""" - return self._get_cpus_collection(host_uuid, node_uuid, - marker, limit, - sort_key, sort_dir) - - @wsme_pecan.wsexpose(CPUCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, host_uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of cpus with detail.""" - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "cpus": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['cpus', 'detail']) - return self._get_cpus_collection(host_uuid, None, marker, limit, - sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(CPU, types.uuid) - def get_one(self, cpu_uuid): - """Retrieve information about the given cpu.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_port = objects.CPU.get_by_uuid(pecan.request.context, cpu_uuid) - return CPU.convert_with_links(rpc_port) diff --git a/inventory/inventory/inventory/api/controllers/v1/cpu_utils.py b/inventory/inventory/inventory/api/controllers/v1/cpu_utils.py deleted file mode 100644 index b378ca64..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/cpu_utils.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright (c) 2013-2018 Wind River Systems, Inc.
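# Editor's note: a minimal sketch of the marker/limit pagination contract
# shared by Collection.get_next and the collection controllers removed
# above: a "next" link is produced only when a page came back full, and
# the last item's uuid becomes the marker of the following request. The
# resource name, uuids and URL shape below are illustrative only.

def get_next_url(host_url, resource, items, limit, **kwargs):
    if not (len(items) and len(items) == limit):
        return None  # short page: this was the last one
    q_args = ''.join('%s=%s&' % (k, v) for k, v in kwargs.items())
    return '%s/%s?%slimit=%d&marker=%s' % (
        host_url, resource, q_args, limit, items[-1]['uuid'])

page = [{'uuid': 'a1'}, {'uuid': 'b2'}]
assert get_next_url('http://api', 'cpus', page, 2) == \
    'http://api/cpus?limit=2&marker=b2'
assert get_next_url('http://api', 'cpus', page[:1], 2) is None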
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import pecan - -from inventory.common import constants -from inventory.common import k_host -from oslo_log import log - -LOG = log.getLogger(__name__) - -CORE_FUNCTIONS = [ - constants.PLATFORM_FUNCTION, - constants.VSWITCH_FUNCTION, - constants.SHARED_FUNCTION, - constants.VM_FUNCTION, - constants.NO_FUNCTION -] - -VSWITCH_MIN_CORES = 1 -VSWITCH_MAX_CORES = 8 - - -class CpuProfile(object): - class CpuConfigure(object): - def __init__(self): - self.platform = 0 - self.vswitch = 0 - self.shared = 0 - self.vms = 0 - self.numa_node = 0 - - # cpus is a list of cpu sorted by numa_node, core and thread - # if not, provide a node list sorted by numa_node - # (id might not be reliable) - def __init__(self, cpus, nodes=None): - if nodes is not None: - cpus = CpuProfile.sort_cpu_by_numa_node(cpus, nodes) - cores = [] - - self.number_of_cpu = 0 - self.cores_per_cpu = 0 - self.hyper_thread = False - self.processors = [] - cur_processor = None - - for cpu in cpus: - key = '{0}-{1}'.format(cpu.numa_node, cpu.core) - if key not in cores: - cores.append(key) - else: - self.hyper_thread = True - continue - - if (cur_processor is None or - cur_processor.numa_node != cpu.numa_node): - cur_processor = CpuProfile.CpuConfigure() - cur_processor.numa_node = cpu.numa_node - self.processors.append(cur_processor) - - if cpu.allocated_function == constants.PLATFORM_FUNCTION: - cur_processor.platform += 1 - elif cpu.allocated_function == constants.VSWITCH_FUNCTION: - cur_processor.vswitch += 1 - elif cpu.allocated_function == constants.SHARED_FUNCTION: - cur_processor.shared += 1 - elif cpu.allocated_function == constants.VM_FUNCTION: - cur_processor.vms += 1 - - self.number_of_cpu = len(self.processors) - self.cores_per_cpu = len(cores) / self.number_of_cpu - - @staticmethod - def sort_cpu_by_numa_node(cpus, nodes): - newlist = [] - for node in nodes: - for cpu in cpus: - if cpu.node_id == node.id: - cpu.numa_node = node.numa_node - newlist.append(cpu) - return newlist - - -class HostCpuProfile(CpuProfile): - def __init__(self, subfunctions, cpus, nodes=None): - super(HostCpuProfile, self).__init__(cpus, nodes) - self.subfunctions = subfunctions - - # see if a cpu profile is applicable to this host - def profile_applicable(self, profile): - if self.number_of_cpu == profile.number_of_cpu and \ - self.cores_per_cpu == profile.cores_per_cpu: - return self.check_profile_core_functions(profile) - return False # Profile is not applicable to host - - def check_profile_core_functions(self, profile): - platform_cores = 0 - vswitch_cores = 0 - shared_cores = 0 - vm_cores = 0 - for cpu in profile.processors: - platform_cores += cpu.platform - vswitch_cores += cpu.vswitch - shared_cores += cpu.shared - vm_cores += cpu.vms - - error_string = "" - if platform_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.PLATFORM_FUNCTION - elif k_host.COMPUTE in self.subfunctions and vswitch_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VSWITCH_FUNCTION - elif k_host.COMPUTE in self.subfunctions and vm_cores == 0: - error_string = "There must be at least one core for %s." 
% \ - constants.VM_FUNCTION - return error_string - - -def lookup_function(s): - for f in CORE_FUNCTIONS: - if s.lower() == f.lower(): - return f - return s - - -def check_profile_core_functions(personality, profile): - - platform_cores = 0 - vswitch_cores = 0 - shared_cores = 0 - vm_cores = 0 - for cpu in profile.processors: - platform_cores += cpu.platform - vswitch_cores += cpu.vswitch - shared_cores += cpu.shared - vm_cores += cpu.vms - - error_string = "" - if platform_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.PLATFORM_FUNCTION - elif k_host.COMPUTE in personality and vswitch_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VSWITCH_FUNCTION - elif k_host.COMPUTE in personality and vm_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VM_FUNCTION - return error_string - - -def check_core_functions(personality, icpus): - platform_cores = 0 - vswitch_cores = 0 - shared_cores = 0 - vm_cores = 0 - for cpu in icpus: - allocated_function = cpu.allocated_function - if allocated_function == constants.PLATFORM_FUNCTION: - platform_cores += 1 - elif allocated_function == constants.VSWITCH_FUNCTION: - vswitch_cores += 1 - elif allocated_function == constants.SHARED_FUNCTION: - shared_cores += 1 - elif allocated_function == constants.VM_FUNCTION: - vm_cores += 1 - - error_string = "" - if platform_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.PLATFORM_FUNCTION - elif k_host.COMPUTE in personality and vswitch_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VSWITCH_FUNCTION - elif k_host.COMPUTE in personality and vm_cores == 0: - error_string = "There must be at least one core for %s." % \ - constants.VM_FUNCTION - return error_string - - -def get_default_function(host): - """Return the default function to be assigned to cpus on this host""" - if k_host.COMPUTE in host.subfunctions: - return constants.VM_FUNCTION - return constants.PLATFORM_FUNCTION - - -def get_cpu_function(host, cpu): - """Return the function that is assigned to the specified cpu""" - for s in range(0, len(host.nodes)): - functions = host.cpu_functions[s] - for f in CORE_FUNCTIONS: - if cpu.cpu in functions[f]: - return f - return constants.NO_FUNCTION - - -def get_cpu_counts(host): - """Return the CPU counts for this host by socket and function.""" - counts = {} - for s in range(0, len(host.nodes)): - counts[s] = {} - for f in CORE_FUNCTIONS: - counts[s][f] = len(host.cpu_functions[s][f]) - return counts - - -def init_cpu_counts(host): - """Create empty data structures to track CPU assignments by socket and - function. - """ - host.cpu_functions = {} - host.cpu_lists = {} - for s in range(0, len(host.nodes)): - host.cpu_functions[s] = {} - for f in CORE_FUNCTIONS: - host.cpu_functions[s][f] = [] - host.cpu_lists[s] = [] - - -def _sort_by_coreid(cpu): - """Sort a list of cpu database objects such that threads of the same core - are adjacent in the list with the lowest thread number appearing first. - """ - return (int(cpu.core), int(cpu.thread)) - - -def restructure_host_cpu_data(host): - """Reorganize the cpu list by socket and function so that it can more - easily be consumed by other utilities. 
- """ - init_cpu_counts(host) - host.sockets = len(host.nodes or []) - host.hyperthreading = False - host.physical_cores = 0 - if not host.cpus: - return - host.cpu_model = host.cpus[0].cpu_model - cpu_list = sorted(host.cpus, key=_sort_by_coreid) - for cpu in cpu_list: - inode = pecan.request.dbapi.inode_get(inode_id=cpu.node_id) - cpu.numa_node = inode.numa_node - if cpu.thread == 0: - host.physical_cores += 1 - elif cpu.thread > 0: - host.hyperthreading = True - function = cpu.allocated_function or get_default_function(host) - host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu)) - host.cpu_lists[cpu.numa_node].append(int(cpu.cpu)) - - -def check_core_allocations(host, cpu_counts, func): - """Check that minimum and maximum core values are respected.""" - total_platform_cores = 0 - total_vswitch_cores = 0 - total_shared_cores = 0 - for s in range(0, len(host.nodes)): - available_cores = len(host.cpu_lists[s]) - platform_cores = cpu_counts[s][constants.PLATFORM_FUNCTION] - vswitch_cores = cpu_counts[s][constants.VSWITCH_FUNCTION] - shared_cores = cpu_counts[s][constants.SHARED_FUNCTION] - requested_cores = platform_cores + vswitch_cores + shared_cores - if requested_cores > available_cores: - return ("More total logical cores requested than present on " - "'Processor %s' (%s cores)." % (s, available_cores)) - total_platform_cores += platform_cores - total_vswitch_cores += vswitch_cores - total_shared_cores += shared_cores - if func.lower() == constants.PLATFORM_FUNCTION.lower(): - if ((k_host.CONTROLLER in host.subfunctions) and - (k_host.COMPUTE in host.subfunctions)): - if total_platform_cores < 2: - return "%s must have at least two cores." % \ - constants.PLATFORM_FUNCTION - elif total_platform_cores == 0: - return "%s must have at least one core." % \ - constants.PLATFORM_FUNCTION - if k_host.COMPUTE in (host.subfunctions or host.personality): - if func.lower() == constants.VSWITCH_FUNCTION.lower(): - if host.hyperthreading: - total_physical_cores = total_vswitch_cores / 2 - else: - total_physical_cores = total_vswitch_cores - if total_physical_cores < VSWITCH_MIN_CORES: - return ("The %s function must have at least %s core(s)." % - (constants.VSWITCH_FUNCTION.lower(), - VSWITCH_MIN_CORES)) - elif total_physical_cores > VSWITCH_MAX_CORES: - return ("The %s function can only be assigned up to %s cores." - % (constants.VSWITCH_FUNCTION.lower(), - VSWITCH_MAX_CORES)) - reserved_for_vms = \ - len(host.cpus) - total_platform_cores - total_vswitch_cores - if reserved_for_vms <= 0: - return "There must be at least one unused core for %s." % \ - constants. VM_FUNCTION - else: - if total_platform_cores != len(host.cpus): - return "All logical cores must be reserved for platform use" - return "" - - -def update_core_allocations(host, cpu_counts): - """Update the per socket/function cpu list based on the newly requested - counts. 
- """ - # Remove any previous assignments - for s in range(0, len(host.nodes)): - for f in CORE_FUNCTIONS: - host.cpu_functions[s][f] = [] - # Set new assignments - for s in range(0, len(host.nodes)): - cpu_list = host.cpu_lists[s] if s in host.cpu_lists else [] - # Reserve for the platform first - for i in range(0, cpu_counts[s][constants.PLATFORM_FUNCTION]): - host.cpu_functions[s][constants.PLATFORM_FUNCTION].append( - cpu_list.pop(0)) - # Reserve for the vswitch next - for i in range(0, cpu_counts[s][constants.VSWITCH_FUNCTION]): - host.cpu_functions[s][constants.VSWITCH_FUNCTION].append( - cpu_list.pop(0)) - # Reserve for the shared next - for i in range(0, cpu_counts[s][constants.SHARED_FUNCTION]): - host.cpu_functions[s][constants.SHARED_FUNCTION].append( - cpu_list.pop(0)) - # Assign the remaining cpus to the default function for this host - host.cpu_functions[s][get_default_function(host)] += cpu_list - return diff --git a/inventory/inventory/inventory/api/controllers/v1/ethernet_port.py b/inventory/inventory/inventory/api/controllers/v1/ethernet_port.py deleted file mode 100644 index f397923e..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/ethernet_port.py +++ /dev/null @@ -1,310 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import six - -import pecan -from pecan import rest - -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory import objects - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class EthernetPortPatchType(types.JsonPatchType): - @staticmethod - def mandatory_attrs(): - return [] - - -class EthernetPort(base.APIBase): - """API representation of an Ethernet port - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - Ethernet port. - """ - - uuid = types.uuid - "Unique UUID for this port" - - type = wtypes.text - "Represent the type of port" - - name = wtypes.text - "Represent the name of the port. Unique per host" - - namedisplay = wtypes.text - "Represent the display name of the port. 
Unique per host" - - pciaddr = wtypes.text - "Represent the pci address of the port" - - dev_id = int - "The unique identifier of PCI device" - - pclass = wtypes.text - "Represent the pci class of the port" - - pvendor = wtypes.text - "Represent the pci vendor of the port" - - pdevice = wtypes.text - "Represent the pci device of the port" - - psvendor = wtypes.text - "Represent the pci svendor of the port" - - psdevice = wtypes.text - "Represent the pci sdevice of the port" - - numa_node = int - "Represent the numa node or zone sdevice of the port" - - sriov_totalvfs = int - "The total number of available SR-IOV VFs" - - sriov_numvfs = int - "The number of configured SR-IOV VFs" - - sriov_vfs_pci_address = wtypes.text - "The PCI Addresses of the VFs" - - driver = wtypes.text - "The kernel driver for this device" - - mac = wsme.wsattr(types.macaddress, mandatory=False) - "Represent the MAC Address of the port" - - mtu = int - "Represent the MTU size (bytes) of the port" - - speed = int - "Represent the speed (MBytes/sec) of the port" - - link_mode = int - "Represent the link mode of the port" - - duplex = wtypes.text - "Represent the duplex mode of the port" - - autoneg = wtypes.text - "Represent the auto-negotiation mode of the port" - - bootp = wtypes.text - "Represent the bootp port of the host" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "Represent meta data of the port" - - host_id = int - "Represent the host_id the port belongs to" - - bootif = wtypes.text - "Represent whether the port is a boot port" - - dpdksupport = bool - "Represent whether or not the port supports DPDK acceleration" - - host_uuid = types.uuid - "Represent the UUID of the host the port belongs to" - - node_uuid = types.uuid - "Represent the UUID of the node the port belongs to" - - links = [link.Link] - "Represent a list containing a self link and associated port links" - - def __init__(self, **kwargs): - self.fields = objects.EthernetPort.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_port, expand=True): - port = EthernetPort(**rpc_port.as_dict()) - if not expand: - port.unset_fields_except(['uuid', 'host_id', 'node_id', - 'type', 'name', - 'namedisplay', 'pciaddr', 'dev_id', - 'pclass', 'pvendor', 'pdevice', - 'psvendor', 'psdevice', 'numa_node', - 'mac', 'sriov_totalvfs', 'sriov_numvfs', - 'sriov_vfs_pci_address', 'driver', - 'mtu', 'speed', 'link_mode', - 'duplex', 'autoneg', 'bootp', - 'capabilities', - 'host_uuid', - 'node_uuid', 'dpdksupport', - 'created_at', 'updated_at']) - - # never expose the id attribute - port.host_id = wtypes.Unset - port.node_id = wtypes.Unset - - port.links = [link.Link.make_link('self', pecan.request.host_url, - 'ethernet_ports', port.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'ethernet_ports', port.uuid, - bookmark=True) - ] - return port - - -class EthernetPortCollection(collection.Collection): - """API representation of a collection of EthernetPort objects.""" - - ethernet_ports = [EthernetPort] - "A list containing EthernetPort objects" - - def __init__(self, **kwargs): - self._type = 'ethernet_ports' - - @classmethod - def convert_with_links(cls, rpc_ports, limit, url=None, - expand=False, **kwargs): - collection = EthernetPortCollection() - collection.ethernet_ports = [EthernetPort.convert_with_links(p, expand) - for p in rpc_ports] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME 
= 'EthernetPortController' - - -class EthernetPortController(rest.RestController): - """REST controller for EthernetPorts.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_node=False): - self._from_hosts = from_hosts - self._from_node = from_node - - def _get_ports_collection(self, uuid, node_uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - if self._from_node and not uuid: - raise exception.InvalidParameterValue(_( - "node id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.EthernetPort.get_by_uuid( - pecan.request.context, - marker) - - if self._from_hosts: - ports = objects.EthernetPort.get_by_host( - pecan.request.context, - uuid, limit, - marker=marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - elif self._from_node: - ports = objects.EthernetPort.get_by_numa_node( - pecan.request.context, - uuid, limit, - marker=marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - if uuid: - ports = objects.EthernetPort.get_by_host( - pecan.request.context, - uuid, limit, - marker=marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - ports = objects.EthernetPort.list( - pecan.request.context, - limit, marker=marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return EthernetPortCollection.convert_with_links( - ports, limit, url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(EthernetPortCollection, types.uuid, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, node_uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of ports.""" - - return self._get_ports_collection(uuid, - node_uuid, - marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(EthernetPortCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of ports with detail.""" - - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "ethernet_ports": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['ethernet_ports', 'detail']) - return self._get_ports_collection(uuid, marker, limit, sort_key, - sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(EthernetPort, types.uuid) - def get_one(self, port_uuid): - """Retrieve information about the given port.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_port = objects.EthernetPort.get_by_uuid( - pecan.request.context, port_uuid) - return EthernetPort.convert_with_links(rpc_port) diff --git a/inventory/inventory/inventory/api/controllers/v1/host.py b/inventory/inventory/inventory/api/controllers/v1/host.py deleted file mode 100644 index 611a30ce..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/host.py +++ /dev/null @@ -1,3577 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import ast -import cgi -from controllerconfig import HOST_XML_ATTRIBUTES -import copy -from fm_api import constants as fm_constants -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import cpu as cpu_api -# TODO(LK) from inventory.api.controllers.v1 import disk -# TODO(LK) from inventory.api.controllers.v1 import partition -from inventory.api.controllers.v1 import ethernet_port -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import lldp_agent -from inventory.api.controllers.v1 import lldp_neighbour -from inventory.api.controllers.v1 import memory -from inventory.api.controllers.v1 import node as node_api -from inventory.api.controllers.v1 import pci_device -from inventory.api.controllers.v1 import port -from inventory.api.controllers.v1.query import Query -from inventory.api.controllers.v1 import sensor as sensor_api -from inventory.api.controllers.v1 import sensorgroup -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import ceph -from inventory.common import constants -from inventory.common import exception -from inventory.common import health -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common import mtce_api -from inventory.common import patch_api -from inventory.common import sm_api -from inventory.common.storage_backend_conf import StorageBackendConfig -# TODO(sc) to be removed StorageBackendConfig -from inventory.common import utils as cutils -from inventory.common import vim_api -from inventory import objects -import json -import jsonpatch -from oslo_log import log -from oslo_utils import uuidutils -import pecan -from pecan import expose -from pecan import rest -import psutil -import re -import six -from six import text_type as unicode -import tsconfig.tsconfig as tsc -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan -from xml.dom import minidom as dom -import xml.etree.ElementTree as ET -import xml.etree.ElementTree as et - -LOG = log.getLogger(__name__) -ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER = "-1003" - - -class Host(base.APIBase): - """API representation of a host. - - This class enforces type checking and value constraints, and - converts between the internal object model and - the API representation of a host. - """ - - id = int - - uuid = wtypes.text - - hostname = wtypes.text - "Represent the hostname of the host" - - invprovision = wtypes.text - "Represent the current provision state of the host" - - administrative = wtypes.text - "Represent the administrative state of the host" - - operational = wtypes.text - "Represent the operational state of the host" - - availability = wtypes.text - "Represent the availability status of the host" - - mgmt_mac = wtypes.text - "Represent the boot mgmt MAC address of the host." - - mgmt_ip = wtypes.text - "Represent the boot mgmt IP address of the host." 
- - infra_ip = wtypes.text - "Represent the infrastructure IP address of the host." - - bm_ip = wtypes.text - "Represent the board management IP address of the host." - - bm_type = wtypes.text - "Represent the board management type of the host." - - bm_username = wtypes.text - "Represent the board management username of the host." - - bm_password = wtypes.text - "Represent the board management password of the host." - - personality = wtypes.text - "Represent the personality of the host" - - subfunctions = wtypes.text - "Represent the subfunctions of the host" - - subfunction_oper = wtypes.text - "Represent the subfunction operational state of the host" - - subfunction_avail = wtypes.text - "Represent the subfunction availability status of the host" - - serialid = wtypes.text - "Represent the serial id of the host" - - action = wtypes.text - 'Represent the action on the host' - - host_action = wtypes.text - 'Represent the current action task in progress' - - vim_progress_status = wtypes.text - 'Represent the vim progress status' - - task = wtypes.text - "Represent the mtce task state" - - mtce_info = wtypes.text - "Represent the mtce info" - - uptime = int - "Represent the uptime, in seconds, of the host." - - location = {wtypes.text: utils.ValidTypes(wtypes.text, six.integer_types)} - "Represent the location of the host" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "Represent the capabilities of the host" - - system_uuid = types.uuid - "The UUID of the system this host belongs to" - - boot_device = wtypes.text - "Represent the boot device of the host" - - rootfs_device = wtypes.text - "Represent the rootfs device of the host" - - install_output = wtypes.text - "Represent the install_output of the host" - - console = wtypes.text - "Represent the console of the host" - - tboot = wtypes.text - "Represent the tboot of the host" - - ttys_dcd = wtypes.text - "Enable or disable serial console carrier detect" - - install_state = wtypes.text - "Represent the install state" - - install_state_info = wtypes.text - "Represent install state extra information if there is any" - - iscsi_initiator_name = wtypes.text - "The iscsi initiator name (only used for compute hosts)" - - links = [link.Link] - "A list containing a self link and associated host links" - - ports = [link.Link] - "Links to the collection of Ports on this host" - - ethernet_ports = [link.Link] - "Links to the collection of EthernetPorts on this host" - - nodes = [link.Link] - "Links to the collection of nodes on this host" - - cpus = [link.Link] - "Links to the collection of cpus on this host" - - memorys = [link.Link] - "Links to the collection of memorys on this host" - - # idisks = [link.Link] - # "Links to the collection of idisks on this ihost" - - sensors = [link.Link] - "Links to the collection of sensors on this host" - - sensorgroups = [link.Link] - "Links to the collection of sensorgroups on this host" - - pci_devices = [link.Link] - "Links to the collection of pci_devices on this host" - - lldp_agents = [link.Link] - "Links to the collection of LldpAgents on this ihost" - - lldp_neighbours = [link.Link] - "Links to the collection of LldpNeighbours on this ihost" - - def __init__(self, **kwargs): - self.fields = objects.Host.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_ihost, expand=True): - minimum_fields = [ - 'id', 'uuid', 'hostname', - 'personality', 'subfunctions', - 'subfunction_oper',
'subfunction_avail', - 'administrative', 'operational', 'availability', - 'invprovision', 'task', 'mtce_info', 'action', 'uptime', - 'host_action', 'mgmt_mac', 'mgmt_ip', 'infra_ip', 'location', - 'bm_ip', 'bm_type', 'bm_username', - 'system_uuid', 'capabilities', 'serialid', - 'created_at', 'updated_at', 'boot_device', - 'rootfs_device', 'install_output', 'console', - 'tboot', 'ttys_dcd', - 'install_state', 'install_state_info', - 'iscsi_initiator_name'] - - fields = minimum_fields if not expand else None - uhost = Host.from_rpc_object(rpc_ihost, fields) - uhost.links = [link.Link.make_link('self', pecan.request.host_url, - 'hosts', uhost.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', uhost.uuid, - bookmark=True) - ] - if expand: - uhost.ports = [link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/ports"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/ports", - bookmark=True) - ] - uhost.ethernet_ports = [ - link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/ethernet_ports"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/ethernet_ports", - bookmark=True) - ] - uhost.nodes = [link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/nodes"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/nodes", - bookmark=True) - ] - uhost.cpus = [link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/cpus"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/cpus", - bookmark=True) - ] - - uhost.memorys = [link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/memorys"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/memorys", - bookmark=True) - ] - - uhost.disks = [link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/disks"), - link.Link.make_link( - 'bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/disks", - bookmark=True) - ] - - uhost.sensors = [link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/sensors"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/sensors", - bookmark=True) - ] - - uhost.sensorgroups = [ - link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/sensorgroups"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/sensorgroups", - bookmark=True) - ] - - uhost.pci_devices = [ - link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/pci_devices"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/pci_devices", - bookmark=True) - ] - - uhost.lldp_agents = [ - link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/lldp_agents"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/lldp_agents", - bookmark=True) - ] - - uhost.lldp_neighbours = [ - link.Link.make_link('self', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/lldp_neighbors"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'hosts', - uhost.uuid + "/lldp_neighbors", - bookmark=True) - ] - - return uhost - - -class HostCollection(collection.Collection): - """API representation of a collection of hosts.""" - - hosts = [Host] - 
"A list containing hosts objects" - - def __init__(self, **kwargs): - self._type = 'hosts' - - @classmethod - def convert_with_links(cls, ihosts, limit, url=None, - expand=False, **kwargs): - collection = HostCollection() - collection.hosts = [ - Host.convert_with_links(n, expand) for n in ihosts] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class HostUpdate(object): - """Host update helper class. - """ - - CONTINUE = "continue" - EXIT_RETURN_HOST = "exit_return_host" - EXIT_UPDATE_PREVAL = "exit_update_preval" - FAILED = "failed" - PASSED = "passed" - - ACTIONS_TO_TASK_DISPLAY_CHOICES = ( - (None, ""), - ("", ""), - (k_host.ACTION_UNLOCK, _("Unlocking")), - (k_host.ACTION_FORCE_UNLOCK, _("Force Unlocking")), - (k_host.ACTION_LOCK, _("Locking")), - (k_host.ACTION_FORCE_LOCK, _("Force Locking")), - (k_host.ACTION_RESET, _("Resetting")), - (k_host.ACTION_REBOOT, _("Rebooting")), - (k_host.ACTION_REINSTALL, _("Reinstalling")), - (k_host.ACTION_POWERON, _("Powering-on")), - (k_host.ACTION_POWEROFF, _("Powering-off")), - (k_host.ACTION_SWACT, _("Swacting")), - (k_host.ACTION_FORCE_SWACT, _("Force-Swacting")), - ) - - def __init__(self, host_orig, host_patch, delta): - self.ihost_orig = dict(host_orig) - self.ihost_patch = dict(host_patch) - self._delta = list(delta) - self._ihost_val_prenotify = {} - self._ihost_val = {} - - self._configure_required = False - self._notify_vim = False - self._notify_mtce = False - self._notify_availability = None - self._notify_vim_add_host = False - self._notify_action_lock = False - self._notify_action_lock_force = False - self._skip_notify_mtce = False - self._bm_type_changed_to_none = False - self._nextstep = self.CONTINUE - - self._action = None - self.displayid = host_patch.get('hostname') - if not self.displayid: - self.displayid = host_patch.get('uuid') - - LOG.debug("host_orig=%s, host_patch=%s, delta=%s" % - (self.ihost_orig, self.ihost_patch, self.delta)) - - @property - def action(self): - return self._action - - @action.setter - def action(self, val): - self._action = val - - @property - def delta(self): - return self._delta - - @property - def nextstep(self): - return self._nextstep - - @nextstep.setter - def nextstep(self, val): - self._nextstep = val - - @property - def configure_required(self): - return self._configure_required - - @configure_required.setter - def configure_required(self, val): - self._configure_required = val - - @property - def bm_type_changed_to_none(self): - return self._bm_type_changed_to_none - - @bm_type_changed_to_none.setter - def bm_type_changed_to_none(self, val): - self._bm_type_changed_to_none = val - - @property - def notify_vim_add_host(self): - return self._notify_vim_add_host - - @notify_vim_add_host.setter - def notify_vim_add_host(self, val): - self._notify_vim_add_host = val - - @property - def skip_notify_mtce(self): - return self._skip_notify_mtce - - @skip_notify_mtce.setter - def skip_notify_mtce(self, val): - self._skip_notify_mtce = val - - @property - def notify_action_lock(self): - return self._notify_action_lock - - @notify_action_lock.setter - def notify_action_lock(self, val): - self._notify_action_lock = val - - @property - def notify_action_lock_force(self): - return self._notify_action_lock_force - - @notify_action_lock_force.setter - def notify_action_lock_force(self, val): - self._notify_action_lock_force = val - - @property - def ihost_val_prenotify(self): - return self._ihost_val_prenotify - - def ihost_val_prenotify_update(self, val): - 
self._ihost_val_prenotify.update(val) - - @property - def ihost_val(self): - return self._ihost_val - - def ihost_val_update(self, val): - self._ihost_val.update(val) - - @property - def notify_vim(self): - return self._notify_vim - - @notify_vim.setter - def notify_vim(self, val): - self._notify_vim = val - - @property - def notify_mtce(self): - return self._notify_mtce - - @notify_mtce.setter - def notify_mtce(self, val): - self._notify_mtce = val - - @property - def notify_availability(self): - return self._notify_availability - - @notify_availability.setter - def notify_availability(self, val): - self._notify_availability = val - - def get_task_from_action(self, action): - """Lookup the task value in the action to task dictionary.""" - - display_choices = self.ACTIONS_TO_TASK_DISPLAY_CHOICES - - display_value = [display for (value, display) in display_choices - if value and value.lower() == (action or '').lower()] - - if display_value: - return display_value[0] - return None - - -LOCK_NAME = 'HostController' -LOCK_NAME_SYS = 'HostControllerSys' - - -class HostController(rest.RestController): - """REST controller for hosts.""" - - ports = port.PortController( - from_hosts=True) - "Expose ports as a sub-element of hosts" - - ethernet_ports = ethernet_port.EthernetPortController( - from_hosts=True) - "Expose ethernet_ports as a sub-element of hosts" - - nodes = node_api.NodeController(from_hosts=True) - "Expose nodes as a sub-element of hosts" - - cpus = cpu_api.CPUController(from_hosts=True) - "Expose cpus as a sub-element of hosts" - - memorys = memory.MemoryController(from_hosts=True) - "Expose memorys as a sub-element of hosts" - - # TODO(LK) idisks = disk.DiskController(from_hosts=True) - # "Expose idisks as a sub-element of hosts" - - sensors = sensor_api.SensorController(from_hosts=True) - "Expose sensors as a sub-element of hosts" - - sensorgroups = sensorgroup.SensorGroupController(from_hosts=True) - "Expose sensorgroups as a sub-element of hosts" - - pci_devices = pci_device.PCIDeviceController(from_hosts=True) - "Expose pci_devices as a sub-element of hosts" - - lldp_agents = lldp_agent.LLDPAgentController( - from_hosts=True) - "Expose lldp_agents as a sub-element of hosts" - - lldp_neighbours = lldp_neighbour.LLDPNeighbourController( - from_hosts=True) - "Expose lldp_neighbours as a sub-element of hosts" - - _custom_actions = { - 'detail': ['GET'], - 'bulk_add': ['POST'], - 'bulk_export': ['GET'], - 'install_progress': ['POST'], - } - - def __init__(self, from_system=False): - self._from_system = from_system - self._mtc_address = k_host.LOCALHOST_HOSTNAME - self._mtc_port = 2112 - self._ceph = ceph.CephApiOperator() - self._api_token = None - - def _ihosts_get(self, isystem_id, marker, limit, personality, - sort_key, sort_dir, q=None): - if self._from_system and not isystem_id: - raise exception.InvalidParameterValue(_( - "System id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - filters = {} - if q is not None: - for i in q: - if i.op == 'eq': - filters[i.field] = i.value - - marker_obj = None - if marker: - marker_obj = objects.Host.get_by_uuid(pecan.request.context, - marker) - - if isystem_id: - ihosts = pecan.request.dbapi.host_get_by_isystem( - isystem_id, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - if personality: - ihosts = objects.Host.list( - pecan.request.context, - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - filters={'personality': personality}) - else: - 
ihosts = objects.Host.list( - pecan.request.context, - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters) - - for h in ihosts: - self._update_controller_personality(h) - - return ihosts - - @staticmethod - def _get_controller_address(hostname): - networktype = constants.NETWORK_TYPE_MGMT - name = '%s-%s' % (hostname, networktype) - address = pecan.request.systemconfig.address_get_by_name(name) - LOG.info("systemconfig _get_controller_address=%s" % address) - return address - - @staticmethod - def _get_storage_address(hostname): - networktype = constants.NETWORK_TYPE_MGMT - name = '%s-%s' % (hostname, networktype) - return pecan.request.systemconfig.address_get_by_name(name) - - @staticmethod - def _update_subfunctions(ihost): - subfunctions = ihost.get('subfunctions') or "" - personality = ihost.get('personality') or "" - # handle race condition with subfunctions being updated late. - if not subfunctions: - LOG.info("update_subfunctions: subfunctions not set. " - "personality=%s" % personality) - if personality == k_host.CONTROLLER: - subfunctions = ','.join(tsc.subfunctions) - else: - subfunctions = personality - ihost['subfunctions'] = subfunctions - - subfunctions_set = set(subfunctions.split(',')) - if personality not in subfunctions_set: - # Automatically add it - subfunctions_list = list(subfunctions_set) - subfunctions_list.insert(0, personality) - subfunctions = ','.join(subfunctions_list) - LOG.info("%s personality=%s update subfunctions=%s" % - (ihost.get('hostname'), personality, subfunctions)) - LOG.debug("update_subfunctions: personality=%s subfunctions=%s" % - (personality, subfunctions)) - return subfunctions - - @staticmethod - def _update_controller_personality(host): - if host['personality'] == k_host.CONTROLLER: - if utils.is_host_active_controller(host): - activity = 'Controller-Active' - else: - activity = 'Controller-Standby' - host['capabilities'].update({'Personality': activity}) - - @wsme_pecan.wsexpose(HostCollection, [Query], unicode, unicode, int, - unicode, unicode, unicode) - def get_all(self, q=[], isystem_id=None, marker=None, limit=None, - personality=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of hosts.""" - ihosts = self._ihosts_get( - isystem_id, marker, limit, personality, sort_key, sort_dir, q=q) - return HostCollection.convert_with_links(ihosts, limit, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(unicode, unicode, body=unicode) - def install_progress(self, uuid, install_state, - install_state_info=None): - """Update the install status for the given host.""" - LOG.debug("Update host uuid %s with install_state=%s " - "and install_state_info=%s" % - (uuid, install_state, install_state_info)) - if install_state == constants.INSTALL_STATE_INSTALLED: - # After an install a node will reboot right away. Change the state - # to reflect this. 
- install_state = constants.INSTALL_STATE_BOOTING - - host = objects.Host.get_by_uuid(pecan.request.context, uuid) - pecan.request.dbapi.host_update(host['uuid'], - {'install_state': install_state, - 'install_state_info': - install_state_info}) - - @wsme_pecan.wsexpose(HostCollection, unicode, unicode, int, unicode, - unicode, unicode) - def detail(self, isystem_id=None, marker=None, limit=None, - personality=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of hosts with detail.""" - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "hosts": - raise exception.HTTPNotFound - - ihosts = self._ihosts_get( - isystem_id, marker, limit, personality, sort_key, sort_dir) - resource_url = '/'.join(['hosts', 'detail']) - return HostCollection.convert_with_links(ihosts, limit, - url=resource_url, - expand=True, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(Host, unicode) - def get_one(self, uuid): - """Retrieve information about the given ihost.""" - if self._from_system: - raise exception.OperationNotPermitted - - rpc_ihost = objects.Host.get_by_uuid(pecan.request.context, uuid) - self._update_controller_personality(rpc_ihost) - - return Host.convert_with_links(rpc_ihost) - - def _add_host_semantic_checks(self, ihost_dict): - chosts = self._get_controllers() - if chosts and ihost_dict.get('personality') is None: - # Prevent adding any new host(s) until there is - # an unlocked-enabled controller to manage them. - for c in chosts: - if (c.administrative == k_host.ADMIN_UNLOCKED and - c.operational == k_host.OPERATIONAL_ENABLED): - break - else: - raise wsme.exc.ClientSideError( - _("Provisioning request for new host '%s' is not permitted" - " while there is no unlocked-enabled controller. Unlock " - "controller-0, wait for it to enable and then retry.") % - ihost_dict.get('mgmt_mac')) - - def _new_host_semantic_checks(self, ihost_dict): - - if self._get_controllers(): - self._add_host_semantic_checks(ihost_dict) - - mgmt_network = pecan.request.systemconfig.network_get_by_type( - constants.NETWORK_TYPE_MGMT) - LOG.info("systemconfig mgmt_network={}".format(mgmt_network)) - - if mgmt_network.dynamic and ihost_dict.get('mgmt_ip'): - # raise wsme.exc.ClientSideError(_( - LOG.info(_( - "Host-add Allowed: Specifying a mgmt_ip when dynamic " - "address allocation is configured")) - elif (not mgmt_network.dynamic and - not ihost_dict.get('mgmt_ip') and - ihost_dict.get('personality') not in - [k_host.STORAGE, k_host.CONTROLLER]): - raise wsme.exc.ClientSideError(_( - "Host-add Rejected: Cannot add a compute host without " - "specifying a mgmt_ip when static address allocation is " - "configured.")) - - # Check whether the system mode is simplex - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Host-add Rejected: Adding a host on a simplex system " - "is not allowed.")) - - personality = ihost_dict['personality'] - if not ihost_dict['hostname']: - if personality not in (k_host.CONTROLLER, k_host.STORAGE): - raise wsme.exc.ClientSideError(_( - "Host-add Rejected. 
Must provide a hostname for a node of " - "personality %s") % personality) - else: - self._validate_hostname(ihost_dict['hostname'], personality) - - HostController._personality_license_check(personality) - - def _do_post(self, ihost_dict): - """Create a new ihost based off a dictionary of attributes """ - - log_start = cutils.timestamped("ihost_post_start") - LOG.info("SYS_I host %s %s add" % (ihost_dict['hostname'], - log_start)) - - power_on = ihost_dict.get('power_on', None) - - ihost_obj = None - - # Semantic checks for adding a new node - if self._from_system: - raise exception.OperationNotPermitted - - self._new_host_semantic_checks(ihost_dict) - current_ihosts = objects.Host.list(pecan.request.context) - - # Check for missing/invalid hostname - # ips/hostnames are automatic for controller & storage nodes - if ihost_dict['personality'] not in (k_host.CONTROLLER, - k_host.STORAGE): - host_names = [h.hostname for h in current_ihosts] - if ihost_dict['hostname'] in host_names: - raise wsme.exc.ClientSideError( - _("Host-add Rejected: Hostname already exists")) - host_ips = [h.mgmt_ip for h in current_ihosts] - if (ihost_dict.get('mgmt_ip') and - ihost_dict['mgmt_ip'] in host_ips): - raise wsme.exc.ClientSideError( - _("Host-add Rejected: Host with mgmt_ip %s already " - "exists") % ihost_dict['mgmt_ip']) - - try: - ihost_obj = objects.Host.get_by_filters_one( - pecan.request.context, - {'mgmt_mac': ihost_dict['mgmt_mac']}) - # A host with this MAC already exists. We will allow it to be - # added if the hostname and personality have not been set. - if ihost_obj['hostname'] or ihost_obj['personality']: - raise wsme.exc.ClientSideError( - _("Host-add Rejected: Host with mgmt_mac {} already " - "exists").format(ihost_dict['mgmt_mac'])) - # Check DNSMASQ for ip/mac already existing - # -> node in use by someone else or has already been booted - elif (not ihost_obj and self._dnsmasq_mac_exists( - ihost_dict['mgmt_mac'])): - raise wsme.exc.ClientSideError( - _("Host-add Rejected: mgmt_mac {} has already been " - "active").format(ihost_dict['mgmt_mac'])) - - # Use the uuid from the existing host - ihost_dict['uuid'] = ihost_obj['uuid'] - except exception.HostNotFound: - ihost_dict['mgmt_mac'] = cutils.validate_and_normalize_mac( - ihost_dict['mgmt_mac']) - # This is a new host - pass - - if not ihost_dict.get('uuid'): - ihost_dict['uuid'] = uuidutils.generate_uuid() - - # BM handling - ihost_orig = copy.deepcopy(ihost_dict) - - subfunctions = self._update_subfunctions(ihost_dict) - ihost_dict['subfunctions'] = subfunctions - - changed_paths = [] - delta = set() - - for key in objects.Host.fields: - # Internal values that aren't being modified - if key in ['id', 'updated_at', 'created_at']: - continue - - # Update only the new fields - if key in ihost_dict and ihost_dict[key] != ihost_orig[key]: - delta.add(key) - ihost_orig[key] = ihost_dict[key] - - bm_list = ['bm_type', 'bm_ip', 'bm_username', 'bm_password'] - for bmi in bm_list: - if bmi in ihost_dict: - delta.add(bmi) - changed_paths.append({'path': '/' + str(bmi), - 'value': ihost_dict[bmi], - 'op': 'replace'}) - - self._bm_semantic_check_and_update(ihost_orig, ihost_dict, - delta, changed_paths, - current_ihosts) - - if not ihost_dict.get('capabilities', {}): - ihost_dict['capabilities'] = {} - - # If this is the first controller being set up, - # configure and return - if ihost_dict['personality'] == k_host.CONTROLLER: - if not self._get_controllers(): - pecan.request.rpcapi.create_controller_filesystems( - pecan.request.context, 
-                    ihost_dict['rootfs_device'])
-                controller_ihost = pecan.request.rpcapi.create_host(
-                    pecan.request.context, ihost_dict)
-                pecan.request.rpcapi.configure_host(
-                    pecan.request.context,
-                    controller_ihost)
-                return Host.convert_with_links(controller_ihost)
-
-        if ihost_dict['personality'] in (
-                k_host.CONTROLLER, k_host.STORAGE):
-            self._controller_storage_node_setup(ihost_dict)
-
-        # Validate that management name and IP do not already exist
-        # If one exists, other value must match in addresses table
-        mgmt_address_name = cutils.format_address_name(
-            ihost_dict['hostname'], constants.NETWORK_TYPE_MGMT)
-        self._validate_address_not_allocated(mgmt_address_name,
-                                             ihost_dict.get('mgmt_ip'))
-
-        if ihost_dict.get('mgmt_ip'):
-            self._validate_ip_in_mgmt_network(ihost_dict['mgmt_ip'])
-        else:
-            del ihost_dict['mgmt_ip']
-
-        # Set host to reinstalling
-        ihost_dict.update({k_host.HOST_ACTION_STATE:
-                           k_host.HAS_REINSTALLING})
-
-        # Creation/Configuration
-        if ihost_obj:
-            # The host exists - do an update.
-            for key in objects.Host.fields:
-                # Internal values that shouldn't be updated
-                if key in ['id', 'uuid', 'updated_at', 'created_at']:
-                    continue
-
-                # Update only the fields that are not empty and have changed
-                if (key in ihost_dict and ihost_dict[key] and
-                        (ihost_obj[key] != ihost_dict[key])):
-                    ihost_obj[key] = ihost_dict[key]
-            ihost_obj = pecan.request.rpcapi.update_host(
-                pecan.request.context, ihost_obj)
-        else:
-            # The host doesn't exist - do an add.
-            LOG.info("create_host=%s" % ihost_dict.get('hostname'))
-            ihost_obj = pecan.request.rpcapi.create_host(
-                pecan.request.context, ihost_dict)
-
-        ihost_obj = objects.Host.get_by_uuid(pecan.request.context,
-                                             ihost_obj.uuid)
-
-        # mgmt_network = pecan.request.systemconfig.network_get_by_type(
-        #     constants.NETWORK_TYPE_MGMT)
-
-        # Configure the new ihost, gets info about its addresses
-        host = pecan.request.rpcapi.configure_host(
-            pecan.request.context,
-            ihost_obj)
-
-        if not host:
-            raise wsme.exc.ClientSideError(
-                _("Host-add Rejected: Host configure {} rejected ").format(
-                    ihost_obj.hostname))
-
-        # Add host to mtc
-        ihost_obj['mgmt_ip'] = host.get('mgmt_ip')
-        new_ihost_mtc = ihost_obj.as_dict()
-        new_ihost_mtc.update({'operation': 'add'})
-        new_ihost_mtc = cutils.removekeys_nonmtce(new_ihost_mtc)
-        # new_ihost_mtc.update(
-        #     {'infra_ip': self._get_infra_ip_by_ihost(ihost_obj['uuid'])})
-
-        mtce_response = mtce_api.host_add(
-            self._api_token,
-            self._mtc_address,
-            self._mtc_port,
-            new_ihost_mtc,
-            constants.MTC_ADD_TIMEOUT_IN_SECS)
-
-        self._handle_mtce_response('host_add', mtce_response)
-
-        # once the host is added to mtc, attempt to power it on if requested
-        if power_on is not None and ihost_obj['bm_type'] is not None:
-            new_ihost_mtc.update({'action': k_host.ACTION_POWERON})
-
-            mtce_response = mtce_api.host_modify(
-                self._api_token,
-                self._mtc_address,
-                self._mtc_port,
-                new_ihost_mtc,
-                constants.MTC_ADD_TIMEOUT_IN_SECS)
-
-            self._handle_mtce_response('power_on', mtce_response)
-
-        # Notify the VIM that the host has been added - must be done after
-        # the host has been added to mtc and saved to the DB.
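-        # A failure here is tolerated: the host add has already been
-        # committed, and the periodic VIM audit is expected to pick the
-        # host up later (see the except handler below).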
- LOG.info("VIM notify add host add %s subfunctions={}").format(( - ihost_obj['hostname'], subfunctions)) - try: - self._vim_host_add(ihost_obj) - except Exception as e: - LOG.warn(_("No response from vim_api {} e={}").format( - ihost_obj['hostname'], e)) - self._api_token = None - pass # VIM audit will pickup - - log_end = cutils.timestamped("ihost_post_end") - LOG.info("SYS_I host %s %s" % (ihost_obj.hostname, log_end)) - - return Host.convert_with_links(ihost_obj) - - @cutils.synchronized(LOCK_NAME) - @expose('json') - def bulk_add(self): - pending_creation = [] - success_str = "" - error_str = "" - - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - return dict( - success="", - error="Bulk add on a simplex system is not allowed." - ) - - # Semantic Check: Prevent bulk add until there is an unlocked - # and enabled controller to manage them. - controller_list = objects.Host.list( - pecan.request.context, - filters={'personality': k_host.CONTROLLER}) - - have_unlocked_enabled_controller = False - for c in controller_list: - if (c['administrative'] == k_host.ADMIN_UNLOCKED and - c['operational'] == k_host.OPERATIONAL_ENABLED): - have_unlocked_enabled_controller = True - break - - if not have_unlocked_enabled_controller: - return dict( - success="", - error="Bulk_add requires enabled controller. " - "Please unlock controller-0, wait for it to enable " - "and then retry." - ) - - LOG.info("Starting ihost bulk_add operation") - assert isinstance(pecan.request.POST['file'], cgi.FieldStorage) - fileitem = pecan.request.POST['file'] - if not fileitem.filename: - return dict(success="", error="Error: No file uploaded") - - try: - contents = fileitem.file.read() - # Generate an array of hosts' attributes to be used in creation - root = ET.fromstring(contents) - except Exception: - return dict( - success="", - error="No hosts have been added, invalid XML document" - ) - - for idx, xmlhost in enumerate(root.findall('host')): - - new_ihost = {} - for attr in HOST_XML_ATTRIBUTES: - elem = xmlhost.find(attr) - if elem is not None: - # If the element is found, set the attribute. - # If the text field is empty, set it to the empty string. - new_ihost[attr] = elem.text or "" - else: - # If the element is not found, set the attribute to None. 
-                    new_ihost[attr] = None
-
-            # This is the expected format of the location field
-            if new_ihost['location'] is not None:
-                new_ihost['location'] = {"locn": new_ihost['location']}
-
-            # Semantic checks
-            try:
-                LOG.debug(new_ihost)
-                self._new_host_semantic_checks(new_ihost)
-            except Exception as ex:
-                culprit = new_ihost.get('hostname') or "with index " + str(idx)
-                return dict(
-                    success="",
-                    error=" No hosts have been added, error parsing host %s: "
-                          "%s" % (culprit, ex)
-                )
-            pending_creation.append(new_ihost)
-
-        # Find local network adapter MACs
-        my_macs = list()
-        for liSnics in psutil.net_if_addrs().values():
-            for snic in liSnics:
-                if snic.family == psutil.AF_LINK:
-                    my_macs.append(snic.address)
-
-        # Perform the actual creations
-        for new_host in pending_creation:
-            try:
-                # Configuring for the setup controller, only uses BMC fields
-                if new_host['mgmt_mac'].lower() in my_macs:
-                    changed_paths = list()
-
-                    bm_list = ['bm_type', 'bm_ip',
-                               'bm_username', 'bm_password']
-                    for bmi in bm_list:
-                        if bmi in new_host:
-                            changed_paths.append({
-                                'path': '/' + str(bmi),
-                                'value': new_host[bmi],
-                                'op': 'replace'
-                            })
-
-                    ihost_obj = [ihost for ihost in
-                                 objects.Host.list(pecan.request.context)
-                                 if ihost['mgmt_mac'] in my_macs]
-                    if len(ihost_obj) != 1:
-                        raise Exception(
-                            "Unexpected: zero or more than one host contains "
-                            "a management mac address from "
-                            "local network adapters")
-                    self._patch(ihost_obj[0]['uuid'],
-                                changed_paths, None)
-                else:
-                    self._do_post(new_host)
-
-                if (new_host['power_on'] is not None and
-                        new_host['bm_type'] is None):
-                    success_str = (
-                        "%s\n %s Warning: Ignoring due to "
-                        "insufficient board management (bm) data." %
-                        (success_str, new_host['hostname']))
-                else:
-                    success_str = "%s\n %s" % (success_str,
-                                               new_host['hostname'])
-            except Exception as ex:
-                LOG.exception(ex)
-                error_str += " " + (new_host.get('hostname') or
-                                    new_host.get('personality')) + \
-                    ": " + str(ex) + "\n"
-
-        return dict(
-            success=success_str,
-            error=error_str
-        )
-
-    @expose('json')
-    def bulk_export(self):
-        def host_personality_name_sort_key(host_obj):
-            if host_obj.personality == k_host.CONTROLLER:
-                rank = 0
-            elif host_obj.personality == k_host.STORAGE:
-                rank = 1
-            elif host_obj.personality == k_host.COMPUTE:
-                rank = 2
-            else:
-                rank = 3
-            return rank, host_obj.hostname
-
-        xml_host_node = et.Element('hosts',
-                                   {'version': cutils.get_sw_version()})
-        mgmt_network = pecan.request.systemconfig.network_get_by_type(
-            constants.NETWORK_TYPE_MGMT)
-
-        host_list = objects.Host.list(pecan.request.context)
-        sorted_hosts = sorted(host_list, key=host_personality_name_sort_key)
-
-        for host in sorted_hosts:
-            _create_node(host, xml_host_node, host.personality,
-                         mgmt_network.dynamic)
-
-        xml_text = dom.parseString(et.tostring(xml_host_node)).toprettyxml()
-        result = {'content': xml_text}
-        return result
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(Host, body=Host)
-    def post(self, host):
-        """Create a new ihost."""
-        ihost_dict = host.as_dict()
-
-        # bm_password is not a part of ihost, so retrieve it from the body
-        body = json.loads(pecan.request.body)
-        if 'bm_password' in body:
-            ihost_dict['bm_password'] = body['bm_password']
-        else:
-            ihost_dict['bm_password'] = ''
-
-        return self._do_post(ihost_dict)
-
-    @wsme_pecan.wsexpose(Host, unicode, body=[unicode])
-    def patch(self, uuid, patch):
-        """Update an existing ihost.
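-
-        Patches that touch only high-frequency fields (uptime, location,
-        serialid, task) are applied directly; requests from the mtce and
-        vim agents are serialized under LOCK_NAME_SYS, and all other
-        callers under LOCK_NAME.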
- """ - utils.validate_patch(patch) - - optimizable = 0 - optimize_list = ['/uptime', '/location', '/serialid', '/task'] - for p in patch: - path = p['path'] - if path in optimize_list: - optimizable += 1 - - if len(patch) == optimizable: - return self._patch(uuid, patch) - elif (pecan.request.user_agent.startswith('mtce') or - pecan.request.user_agent.startswith('vim')): - return self._patch_sys(uuid, patch) - else: - return self._patch_gen(uuid, patch) - - @cutils.synchronized(LOCK_NAME_SYS) - def _patch_sys(self, uuid, patch): - return self._patch(uuid, patch) - - @cutils.synchronized(LOCK_NAME) - def _patch_gen(self, uuid, patch): - return self._patch(uuid, patch) - - @staticmethod - def _validate_capability_is_not_set(old, new): - is_set, _ = new - return not is_set - - @staticmethod - def _validate_capability_is_equal(old, new): - return old == new - - def _validate_capabilities(self, old_caps, new_caps): - """Reject updating read-only host capabilities: - 1. stor_function. This field is set to 'monitor' for hosts that are - running ceph monitor process: - controller-0, controller-1, storage-0. - 2. Personality. This field is "virtual": - not saved in the database but - returned via API and displayed via "system host-show". - - :param old_caps: current host capabilities - :type old_caps: dict - :param new_caps: updated host capabilies (to be set) - :type new_caps: str - :raises: wsme.exc.ClientSideError when attempting to - change read-only capabilities - """ - if type(new_caps) == str: - try: - new_caps = ast.literal_eval(new_caps) - except SyntaxError: - pass - if type(new_caps) != dict: - raise wsme.exc.ClientSideError( - _("Changing capabilities type is not allowed: " - "old_value={}, new_value={}").format( - old_caps, new_caps)) - PROTECTED_CAPABILITIES = [ - ('Personality', - self._validate_capability_is_not_set), - (k_host.HOST_STOR_FUNCTION, - self._validate_capability_is_equal)] - for capability, validate in PROTECTED_CAPABILITIES: - old_is_set, old_value = ( - capability in old_caps, old_caps.get(capability)) - new_is_set, new_value = ( - capability in new_caps, new_caps.get(capability)) - if not validate((old_is_set, old_value), - (new_is_set, new_value)): - if old_is_set: - raise wsme.exc.ClientSideError( - _("Changing capability not allowed: " - "name={}, old_value={}, new_value={}. ").format( - capability, old_value, new_value)) - else: - raise wsme.exc.ClientSideError( - _("Setting capability not allowed: " - "name={}, value={}. 
").format( - capability, new_value)) - - def _patch(self, uuid, patch): - log_start = cutils.timestamped("host_patch_start") - - patch_obj = jsonpatch.JsonPatch(patch) - - ihost_obj = objects.Host.get_by_uuid(pecan.request.context, uuid) - ihost_dict = ihost_obj.as_dict() - - self._add_host_semantic_checks(ihost_dict) - - # Add transient fields that are not stored in the database - ihost_dict['bm_password'] = None - - try: - patched_ihost = jsonpatch.apply_patch(ihost_dict, - patch_obj) - except jsonpatch.JsonPatchException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Patching Error: %s") % e) - - self._validate_capabilities( - ihost_dict['capabilities'], patched_ihost['capabilities']) - - ihost_dict_orig = dict(ihost_obj.as_dict()) - # defaults = objects.Host.get_defaults() - for key in objects.Host.fields: - # Internal values that shouldn't be part of the patch - if key in ['id', 'updated_at', 'created_at', 'infra_ip']: - continue - - # In case of a remove operation, add the missing fields back - # to the document with their default value - if key in ihost_dict and key not in patched_ihost: - # patched_ihost[key] = defaults[key] - patched_ihost[key] = ihost_obj[key] - - # Update only the fields that have changed - if ihost_obj[key] != patched_ihost[key]: - ihost_obj[key] = patched_ihost[key] - - delta = ihost_obj.obj_what_changed() - delta_handle = list(delta) - - uptime_update = False - if 'uptime' in delta_handle: - # There is a log of uptime updates, so just do a debug log - uptime_update = True - LOG.debug("%s %s patch" % (ihost_obj.hostname, - log_start)) - else: - LOG.info("%s %s patch" % (ihost_obj.hostname, - log_start)) - - hostupdate = HostUpdate(ihost_dict_orig, patched_ihost, delta) - if delta_handle: - self._validate_delta(delta_handle) - if delta_handle == ['uptime']: - LOG.debug("%s 1. delta_handle %s" % - (hostupdate.displayid, delta_handle)) - else: - LOG.info("%s 1. delta_handle %s" % - (hostupdate.displayid, delta_handle)) - else: - LOG.info("%s ihost_patch_end. No changes from %s." 
-                     % (hostupdate.displayid, pecan.request.user_agent))
-            return Host.convert_with_links(ihost_obj)
-
-        myaction = patched_ihost.get('action')
-        if self.action_check(myaction, hostupdate):
-            LOG.info("%s post action_check hostupdate "
-                     "action=%s notify_vim=%s notify_mtc=%s "
-                     "skip_notify_mtce=%s" %
-                     (hostupdate.displayid,
-                      hostupdate.action,
-                      hostupdate.notify_vim,
-                      hostupdate.notify_mtce,
-                      hostupdate.skip_notify_mtce))
-
-            if self.stage_action(myaction, hostupdate):
-                LOG.info("%s Action staged: %s" %
-                         (hostupdate.displayid, myaction))
-            else:
-                LOG.info("%s ihost_patch_end stage_action rc %s" %
-                         (hostupdate.displayid, hostupdate.nextstep))
-                if hostupdate.nextstep == hostupdate.EXIT_RETURN_HOST:
-                    return Host.convert_with_links(ihost_obj)
-                elif hostupdate.nextstep == hostupdate.EXIT_UPDATE_PREVAL:
-                    if hostupdate.ihost_val_prenotify:
-                        # update value in db prior to notifications
-                        LOG.info("update ihost_val_prenotify: %s" %
-                                 hostupdate.ihost_val_prenotify)
-                        ihost_obj = pecan.request.dbapi.host_update(
-                            ihost_obj['uuid'], hostupdate.ihost_val_prenotify)
-                    return Host.convert_with_links(ihost_obj)
-
-            if myaction == k_host.ACTION_SUBFUNCTION_CONFIG:
-                self.perform_action_subfunction_config(ihost_obj)
-
-        if myaction in delta_handle:
-            delta_handle.remove(myaction)
-
-        LOG.info("%s post action_stage hostupdate "
-                 "action=%s notify_vim=%s notify_mtc=%s "
-                 "skip_notify_mtce=%s" %
-                 (hostupdate.displayid,
-                  hostupdate.action,
-                  hostupdate.notify_vim,
-                  hostupdate.notify_mtce,
-                  hostupdate.skip_notify_mtce))
-
-        self._optimize_delta_handling(delta_handle)
-
-        if 'administrative' in delta or 'operational' in delta:
-            self.stage_administrative_update(hostupdate)
-
-        if delta_handle:
-            LOG.info("%s 2. delta_handle %s" %
-                     (hostupdate.displayid, delta_handle))
-            self._check_provisioning(hostupdate, patch)
-            if (hostupdate.ihost_orig['administrative'] ==
-                    k_host.ADMIN_UNLOCKED):
-                self.check_updates_while_unlocked(hostupdate, delta)
-
-            current_ihosts = None
-            hostupdate.bm_type_changed_to_none = \
-                self._bm_semantic_check_and_update(hostupdate.ihost_orig,
-                                                   hostupdate.ihost_patch,
-                                                   delta, patch_obj,
-                                                   current_ihosts,
-                                                   hostupdate)
-        LOG.info("%s post delta_handle hostupdate "
-                 "action=%s notify_vim=%s notify_mtc=%s "
-                 "skip_notify_mtce=%s" %
-                 (hostupdate.displayid,
-                  hostupdate.action,
-                  hostupdate.notify_vim,
-                  hostupdate.notify_mtce,
-                  hostupdate.skip_notify_mtce))
-
-        if hostupdate.bm_type_changed_to_none:
-            hostupdate.ihost_val_update({'bm_ip': None,
-                                         'bm_username': None,
-                                         'bm_password': None})
-
-        if hostupdate.ihost_val_prenotify:
-            # update value in db prior to notifications
-            LOG.info("update ihost_val_prenotify: %s" %
-                     hostupdate.ihost_val_prenotify)
-            pecan.request.dbapi.host_update(ihost_obj['uuid'],
-                                            hostupdate.ihost_val_prenotify)
-
-        if hostupdate.ihost_val:
-            # apply the staged updates in preparation for update
-            LOG.info("%s apply ihost_val %s" %
-                     (hostupdate.displayid, hostupdate.ihost_val))
-            for k, v in hostupdate.ihost_val.iteritems():
-                ihost_obj[k] = v
-            LOG.debug("AFTER Apply ihost_val %s to iHost %s" %
-                      (hostupdate.ihost_val, ihost_obj.as_dict()))
-
-        if 'personality' in delta:
-            self._update_subfunctions(ihost_obj)
-
-        if hostupdate.notify_vim:
-            action = hostupdate.action
-            LOG.info("Notify VIM host action %s action=%s" % (
-                ihost_obj['hostname'], action))
-            try:
-                vim_api.vim_host_action(
-                    pecan.request.context,
-                    ihost_obj['uuid'],
-                    ihost_obj['hostname'],
-                    action,
-                    constants.VIM_DEFAULT_TIMEOUT_IN_SECS)
-            except Exception as e:
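-                # A force-lock proceeds even when the VIM does not
-                # respond; any other action is rejected below.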
LOG.warn(_("No response vim_api {} on action={} e={}").format( - ihost_obj['hostname'], action, e)) - self._api_token = None - if action == k_host.ACTION_FORCE_LOCK: - pass - else: - # reject continuation if VIM rejects action - raise wsme.exc.ClientSideError(_( - "VIM API Error or Timeout on action = %s " - "Please retry and if problem persists then " - "contact your system administrator.") % action) - - if hostupdate.configure_required: - LOG.info("%s Perform configure_host." % hostupdate.displayid) - if not ((ihost_obj['hostname']) and (ihost_obj['personality'])): - raise wsme.exc.ClientSideError( - _("Please provision 'hostname' and 'personality'.")) - - ihost_ret = pecan.request.rpcapi.configure_host( - pecan.request.context, ihost_obj) - - pecan.request.dbapi.host_update( - ihost_obj['uuid'], - {'capabilities': ihost_obj['capabilities']}) - - # Notify maintenance about updated mgmt_ip - ihost_obj['mgmt_ip'] = ihost_ret.get('mgmt_ip') - - hostupdate.notify_mtce = True - - pecan.request.dbapi.host_update( - ihost_obj['uuid'], - {'capabilities': ihost_obj['capabilities']}) - - if (k_host.TASK_REINSTALLING == ihost_obj.task and - k_host.CONFIG_STATUS_REINSTALL == ihost_obj.config_status): - # Clear reinstall flag when reinstall starts - ihost_obj.config_status = None - - mtce_response = {'status': None} - nonmtc_change_count = 0 - if hostupdate.notify_mtce and not hostupdate.skip_notify_mtce: - nonmtc_change_count = self.check_notify_mtce(myaction, hostupdate) - if nonmtc_change_count > 0: - LOG.info("%s Action %s perform notify_mtce" % - (hostupdate.displayid, myaction)) - new_ihost_mtc = ihost_obj.as_dict() - new_ihost_mtc = cutils.removekeys_nonmtce(new_ihost_mtc) - - if hostupdate.ihost_orig['invprovision'] == \ - k_host.PROVISIONED: - new_ihost_mtc.update({'operation': 'modify'}) - else: - new_ihost_mtc.update({'operation': 'add'}) - new_ihost_mtc.update({"invprovision": - ihost_obj['invprovision']}) - - if hostupdate.notify_action_lock: - new_ihost_mtc['action'] = k_host.ACTION_LOCK - elif hostupdate.notify_action_lock_force: - new_ihost_mtc['action'] = k_host.ACTION_FORCE_LOCK - elif myaction == k_host.ACTION_FORCE_UNLOCK: - new_ihost_mtc['action'] = k_host.ACTION_UNLOCK - - new_ihost_mtc.update({ - 'infra_ip': self._get_infra_ip_by_ihost(ihost_obj['uuid']) - }) - - if new_ihost_mtc['operation'] == 'add': - mtce_response = mtce_api.host_add( - self._api_token, self._mtc_address, self._mtc_port, - new_ihost_mtc, - constants.MTC_DEFAULT_TIMEOUT_IN_SECS) - elif new_ihost_mtc['operation'] == 'modify': - mtce_response = mtce_api.host_modify( - self._api_token, self._mtc_address, self._mtc_port, - new_ihost_mtc, - constants.MTC_DEFAULT_TIMEOUT_IN_SECS, - 3) - else: - LOG.warn("Unsupported Operation: %s" % new_ihost_mtc) - mtce_response = None - - if mtce_response is None: - mtce_response = {'status': 'fail', - 'reason': 'no response', - 'action': 'retry'} - - ihost_obj['action'] = k_host.ACTION_NONE - hostupdate.ihost_val_update({'action': k_host.ACTION_NONE}) - - if ((mtce_response['status'] == 'pass') or - (nonmtc_change_count == 0) or hostupdate.skip_notify_mtce): - - ihost_obj.save() - - if hostupdate.ihost_patch['operational'] == \ - k_host.OPERATIONAL_ENABLED: - self._update_add_ceph_state() - - if hostupdate.notify_availability: - if (hostupdate.notify_availability == - k_host.VIM_SERVICES_DISABLED): - imsg_dict = {'availability': - k_host.AVAILABILITY_OFFLINE} - else: - imsg_dict = {'availability': - k_host.VIM_SERVICES_ENABLED} - if (hostupdate.notify_availability != - 
-                            k_host.VIM_SERVICES_ENABLED):
-                        LOG.error(
-                            _("Unexpected notify_availability={}").format(
-                                hostupdate.notify_availability))
-
-                LOG.info(_("{} notify_availability={}").format(
-                    hostupdate.displayid,
-                    hostupdate.notify_availability))
-
-                pecan.request.rpcapi.platform_update_by_host(
-                    pecan.request.context, ihost_obj['uuid'], imsg_dict)
-
-            if hostupdate.bm_type_changed_to_none:
-                ibm_msg_dict = {}
-                pecan.request.rpcapi.bm_deprovision_by_host(
-                    pecan.request.context,
-                    ihost_obj['uuid'],
-                    ibm_msg_dict)
-
-        elif mtce_response['status'] is None:
-            raise wsme.exc.ClientSideError(
-                _("Timeout waiting for maintenance response. "
-                  "Please retry and if problem persists then "
-                  "contact your system administrator."))
-        else:
-            if hostupdate.configure_required:
-                # rollback to unconfigure host as mtce has failed the request
-                invprovision_state = hostupdate.ihost_orig.get(
-                    'invprovision') or ""
-                if invprovision_state != k_host.PROVISIONED:
-                    LOG.warn("unconfigure ihost %s provision=%s" %
-                             (ihost_obj.uuid, invprovision_state))
-                    pecan.request.rpcapi.unconfigure_host(
-                        pecan.request.context,
-                        ihost_obj)
-
-            raise wsme.exc.ClientSideError(
-                _("Operation Rejected: {}.{}.").format(
-                    mtce_response['reason'],
-                    mtce_response['action']))
-
-        if hostupdate.notify_vim_add_host:
-            # Notify the VIM that the host has been added - must be done after
-            # the host has been added to mtc and saved to the DB.
-            LOG.info("inventory notify add host %s subfunctions=%s" %
-                     (ihost_obj['hostname'], ihost_obj['subfunctions']))
-            try:
-                self._vim_host_add(ihost_obj)
-            except Exception as e:
-                LOG.warn(_("No response from vim_api {} e={}").format(
-                    ihost_obj['hostname'], e))
-                self._api_token = None
-                pass  # VIM audit will pick up
-
-        # check if ttys_dcd is updated and notify the agent via conductor
-        # if necessary
-        if 'ttys_dcd' in hostupdate.delta:
-            self._handle_ttys_dcd_change(hostupdate.ihost_orig,
-                                         hostupdate.ihost_patch['ttys_dcd'])
-
-        log_end = cutils.timestamped("host_patch_end")
-        if uptime_update:
-            LOG.debug("host %s %s patch" % (ihost_obj.hostname,
-                                            log_end))
-        else:
-            LOG.info("host %s %s patch" % (ihost_obj.hostname,
-                                           log_end))
-
-        if ('administrative' in hostupdate.delta and
-                hostupdate.ihost_patch['administrative'] ==
-                k_host.ADMIN_LOCKED):
-            LOG.info("Update host memory for (%s)" % ihost_obj['hostname'])
-            pecan.request.rpcapi.update_host_memory(pecan.request.context,
-                                                    ihost_obj['uuid'])
-        return Host.convert_with_links(ihost_obj)
-
-    def _vim_host_add(self, ihost):
-        LOG.info("inventory notify vim add host %s personality=%s" % (
-            ihost['hostname'], ihost['personality']))
-
-        subfunctions = self._update_subfunctions(ihost)
-        try:
-            vim_api.vim_host_add(
-                pecan.request.context,
-                ihost['uuid'],
-                ihost['hostname'],
-                subfunctions,
-                ihost['administrative'],
-                ihost['operational'],
-                ihost['availability'],
-                ihost['subfunction_oper'],
-                ihost['subfunction_avail'])
-        except Exception as e:
-            LOG.warn(_("No response from vim_api {} e={}").format(
-                ihost['hostname'], e))
-            self._api_token = None
-            pass  # VIM audit will pick up
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(None, unicode, status_code=204)
-    def delete(self, host_id):
-        """Delete a host.
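-
-        The deletion sequence: ceph monitor checks for storage hosts,
-        VIM host-delete, mtce host-delete, unconfigure via conductor,
-        barbican secret cleanup, patching drop-host, then the database
-        record is destroyed.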
- """ - - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Deleting a host on a simplex system is not allowed.")) - - ihost = objects.Host.get_by_uuid(pecan.request.context, - host_id) - - if ihost.administrative == k_host.ADMIN_UNLOCKED: - if not ihost.hostname: - host = ihost.uuid - else: - host = ihost.hostname - - raise exception.HostLocked( - action=k_host.ACTION_DELETE, host=host) - - personality = ihost.personality - # allow delete of unprovisioned locked disabled & offline storage hosts - skip_ceph_checks = ( - (not ihost.invprovision or - ihost.invprovision == k_host.UNPROVISIONED) and - ihost.administrative == k_host.ADMIN_LOCKED and - ihost.operational == k_host.OPERATIONAL_DISABLED and - ihost.availability == k_host.AVAILABILITY_OFFLINE) - - if (personality is not None and - personality.find(k_host.STORAGE_HOSTNAME) != -1 and - not skip_ceph_checks): - # perform self.sc_op.check_delete; send to systemconfig - # to check monitors - LOG.info("TODO storage check with systemconfig for quoroum, " - "delete storage pools and tiers") - hosts = objects.Host.list(pecan.request.context) - num_monitors, required_monitors = \ - self._ceph.get_monitors_status(hosts) - if num_monitors < required_monitors: - raise wsme.exc.ClientSideError( - _("Only %d storage " - "monitor available. At least {} unlocked and " - "enabled hosts with monitors are required. Please " - "ensure hosts with monitors are unlocked and " - "enabled - candidates: {}, {}, {}").format( - (num_monitors, constants.MIN_STOR_MONITORS, - k_host.CONTROLLER_0_HOSTNAME, - k_host.CONTROLLER_1_HOSTNAME, - k_host.STORAGE_0_HOSTNAME))) - # send to systemconfig to delete storage pools and tiers - - LOG.warn("REST API delete host=%s user_agent=%s" % - (ihost['uuid'], pecan.request.user_agent)) - if not pecan.request.user_agent.startswith('vim'): - try: - vim_api.vim_host_delete( - pecan.request.context, - ihost.uuid, - ihost.hostname) - except Exception: - LOG.warn(_("No response from vim_api {} ").format( - ihost['uuid'])) - raise wsme.exc.ClientSideError( - _("System rejected delete request. 
" - "Please retry and if problem persists then " - "contact your system administrator.")) - - if (ihost.hostname and ihost.personality and - ihost.invprovision and - ihost.invprovision == k_host.PROVISIONED and - (k_host.COMPUTE in ihost.subfunctions)): - # wait for VIM signal - return - - idict = {'operation': k_host.ACTION_DELETE, - 'uuid': ihost.uuid, - 'invprovision': ihost.invprovision} - - mtce_response = mtce_api.host_delete( - self._api_token, self._mtc_address, self._mtc_port, - idict, constants.MTC_DELETE_TIMEOUT_IN_SECS) - - # Check mtce response prior to attempting delete - if mtce_response.get('status') != 'pass': - self._vim_host_add(ihost) - self._handle_mtce_response(k_host.ACTION_DELETE, - mtce_response) - - pecan.request.rpcapi.unconfigure_host(pecan.request.context, - ihost) - - # Delete the stor entries associated with this host - # Notify sysinv of host-delete - LOG.info("notify systemconfig of host-delete which will" - "also do stors, lvgs, pvs, ceph crush remove") - - # tell conductor to delete the barbican secret associated - # with this host (if present) - pecan.request.rpcapi.delete_barbican_secret(pecan.request.context, - ihost.uuid) - - # Notify patching to drop the host - if ihost.hostname is not None: - try: - system = objects.System.get_one(pecan.request.context) - patch_api.patch_drop_host( - pecan.request.context, - hostname=ihost.hostname, - region_name=system.region_name) - except Exception as e: - LOG.warn(_("No response from drop-host patch api {}" - "e={}").format(ihost.hostname, e)) - pass - - pecan.request.dbapi.host_destroy(host_id) - - @staticmethod - def _handle_mtce_response(action, mtce_response): - LOG.info("mtce action %s response: %s" % - (action, mtce_response)) - if mtce_response is None: - mtce_response = {'status': 'fail', - 'reason': 'no response', - 'action': 'retry'} - - if mtce_response.get('reason') != 'no response': - raise wsme.exc.ClientSideError(_( - "Mtce rejected %s request." - "Please retry and if problem persists then contact your " - "system administrator.") % action) - else: - raise wsme.exc.ClientSideError(_( - "Timeout waiting for system response to %s. Please wait for a " - "few moments. If the host is not deleted,please retry. If " - "problem persists then contact your system administrator.") % - action) - - @staticmethod - def _get_infra_ip_by_ihost(ihost_uuid): - try: - # Get the list of interfaces for this ihost - iinterfaces = pecan.request.dbapi.iinterface_get_by_ihost( - ihost_uuid) - # Make a list of only the infra interfaces - infra_interfaces = [ - i for i in iinterfaces - if i['networktype'] == constants.NETWORK_TYPE_INFRA] - # Get the UUID of the infra interface (there is only one) - infra_interface_uuid = infra_interfaces[0]['uuid'] - # Return the first address for this interface (there is only one) - return pecan.request.dbapi.addresses_get_by_interface( - infra_interface_uuid)[0]['address'] - except Exception as ex: - LOG.debug("Could not find infra ip for host %s: %s" % ( - ihost_uuid, ex)) - return None - - @staticmethod - def _validate_ip_in_mgmt_network(ip): - network = pecan.request.systemconfig.network_get_by_type( - constants.NETWORK_TYPE_MGMT) - utils.validate_address_within_network(ip, network) - - @staticmethod - def _validate_address_not_allocated(name, ip_address): - """Validate that address isn't allocated - - :param name: Address name to check isn't allocated. - :param ip_address: IP address to check isn't allocated. 
- """ - # When a host is added by systemconfig, this would already - # have been checked - LOG.info("TODO(sc) _validate_address_not_allocated name={} " - "ip_address={}".format(name, ip_address)) - - @staticmethod - def _dnsmasq_mac_exists(mac_addr): - """Check the dnsmasq.hosts file for an existing mac. - - :param mac_addr: mac address to check for. - """ - - dnsmasq_hosts_file = tsc.CONFIG_PATH + 'dnsmasq.hosts' - with open(dnsmasq_hosts_file, 'r') as f_in: - for line in f_in: - if mac_addr in line: - return True - return False - - @staticmethod - def _get_controllers(): - return objects.Host.list( - pecan.request.context, - filters={'personality': k_host.CONTROLLER}) - - @staticmethod - def _validate_delta(delta): - restricted_updates = ['uuid', 'id', 'created_at', 'updated_at', - 'cstatus', - 'mgmt_mac', 'mgmt_ip', 'infra_ip', - 'invprovision', 'recordtype', - 'host_action', - 'action_state'] - - if not pecan.request.user_agent.startswith('mtce'): - # Allow mtc to modify these through inventory-api. - mtce_only_updates = ['administrative', - 'availability', - 'operational', - 'subfunction_oper', - 'subfunction_avail', - 'reserved', - 'mtce_info', - 'task', - 'uptime'] - restricted_updates.extend(mtce_only_updates) - - if not pecan.request.user_agent.startswith('vim'): - vim_only_updates = ['vim_progress_status'] - restricted_updates.extend(vim_only_updates) - - intersection = set.intersection(set(delta), set(restricted_updates)) - if intersection: - raise wsme.exc.ClientSideError( - _("Change {} contains restricted {}.").format( - delta, intersection)) - else: - LOG.debug("PASS deltaset=%s restricted_updates %s" % - (delta, intersection)) - - @staticmethod - def _valid_storage_hostname(hostname): - return bool(re.match('^%s-[0-9]+$' % k_host.STORAGE_HOSTNAME, - hostname)) - - def _validate_hostname(self, hostname, personality): - - if personality and personality == k_host.COMPUTE: - # Check for invalid hostnames - err_tl = 'Name restricted to at most 255 characters.' - err_ic = 'Name may only contain letters, ' \ - 'numbers, underscores, periods and hyphens.' 
-    def _validate_hostname(self, hostname, personality):
-
-        if personality and personality == k_host.COMPUTE:
-            # Check for invalid hostnames
-            err_tl = 'Name restricted to at most 255 characters.'
-            err_ic = 'Name may only contain letters, ' \
-                     'numbers, underscores, periods and hyphens.'
-            myexpression = re.compile("^[\w\.\-]+$")
-            if not myexpression.match(hostname):
-                raise wsme.exc.ClientSideError(_("Error: {}").format(err_ic))
-            if len(hostname) > 255:
-                raise wsme.exc.ClientSideError(_("Error: {}").format(err_tl))
-            non_compute_hosts = ([k_host.CONTROLLER_0_HOSTNAME,
-                                  k_host.CONTROLLER_1_HOSTNAME])
-            if (hostname and (hostname in non_compute_hosts) or
-                    hostname.startswith(k_host.STORAGE_HOSTNAME)):
-                raise wsme.exc.ClientSideError(
-                    _("{} Reject attempt to configure "
-                      "invalid hostname for personality {}.").format(
-                        hostname, personality))
-        else:
-            if personality and personality == k_host.CONTROLLER:
-                valid_hostnames = [k_host.CONTROLLER_0_HOSTNAME,
-                                   k_host.CONTROLLER_1_HOSTNAME]
-                if hostname not in valid_hostnames:
-                    raise wsme.exc.ClientSideError(
-                        _("Host with personality={} can only have a hostname "
-                          "from {}").format(personality, valid_hostnames))
-            elif personality and personality == k_host.STORAGE:
-                if not self._valid_storage_hostname(hostname):
-                    raise wsme.exc.ClientSideError(
-                        _("Host with personality={} can only have a hostname "
-                          "starting with {}-(number)").format(
-                            personality, k_host.STORAGE_HOSTNAME))
-            else:
-                raise wsme.exc.ClientSideError(
-                    _("{}: Reject attempt to configure with "
-                      "invalid personality={} ").format(
-                        hostname, personality))
-
-    def _check_compute(self, patched_ihost, hostupdate=None):
-        # Check for valid compute node setup
-        hostname = patched_ihost.get('hostname') or ""
-
-        if not hostname:
-            raise wsme.exc.ClientSideError(
-                _("Host {} of personality {}, must be provisioned "
-                  "with a hostname.").format(
-                    patched_ihost.get('uuid'),
-                    patched_ihost.get('personality')))
-
-        non_compute_hosts = ([k_host.CONTROLLER_0_HOSTNAME,
-                              k_host.CONTROLLER_1_HOSTNAME])
-        if (hostname in non_compute_hosts or
-                self._valid_storage_hostname(hostname)):
-            raise wsme.exc.ClientSideError(
-                _("Hostname {} is not allowed for personality 'compute'. "
-                  "Please check hostname and personality.").format(hostname))
-
-    def _controller_storage_node_setup(self, patched_ihost, hostupdate=None):
-        # Initially set the subfunction of the host to its personality
-
-        if hostupdate:
-            patched_ihost = hostupdate.ihost_patch
-
-        patched_ihost['subfunctions'] = patched_ihost['personality']
-
-        if patched_ihost['personality'] == k_host.CONTROLLER:
-            controller_0_exists = False
-            controller_1_exists = False
-            current_ihosts = objects.Host.list(
-                pecan.request.context,
-                filters={'personality': k_host.CONTROLLER})
-
-            for h in current_ihosts:
-                if h['hostname'] == k_host.CONTROLLER_0_HOSTNAME:
-                    controller_0_exists = True
-                elif h['hostname'] == k_host.CONTROLLER_1_HOSTNAME:
-                    controller_1_exists = True
-            if controller_0_exists and controller_1_exists:
-                raise wsme.exc.ClientSideError(
-                    _("Two controller nodes have already been configured. "
-                      "This host can not be configured as a controller."))
-
-            # Look up the IP address to use for this controller and set
-            # the hostname.
-            if controller_0_exists:
-                hostname = k_host.CONTROLLER_1_HOSTNAME
-                mgmt_ip = self._get_controller_address(hostname)
-                if hostupdate:
-                    hostupdate.ihost_val_update({'hostname': hostname,
-                                                 'mgmt_ip': mgmt_ip})
-                else:
-                    patched_ihost['hostname'] = hostname
-                    patched_ihost['mgmt_ip'] = mgmt_ip
-            elif controller_1_exists:
-                hostname = k_host.CONTROLLER_0_HOSTNAME
-                mgmt_ip = self._get_controller_address(hostname)
-                if hostupdate:
-                    hostupdate.ihost_val_update({'hostname': hostname,
-                                                 'mgmt_ip': mgmt_ip})
-                else:
-                    patched_ihost['hostname'] = hostname
-                    patched_ihost['mgmt_ip'] = mgmt_ip
-            else:
-                raise wsme.exc.ClientSideError(
-                    _("Attempting to provision a controller when none "
-                      "exists. This is impossible."))
-
-            # Subfunctions can be set directly via the config file
-            subfunctions = ','.join(tsc.subfunctions)
-            if hostupdate:
-                hostupdate.ihost_val_update({'subfunctions': subfunctions})
-            else:
-                patched_ihost['subfunctions'] = subfunctions
-
-        elif patched_ihost['personality'] == k_host.STORAGE:
-            # Storage nodes are only allowed if we are configured to use
-            # ceph for the cinder backend.
-            if not StorageBackendConfig.has_backend_configured(
-                pecan.request.dbapi,
-                constants.CINDER_BACKEND_CEPH
-            ):
-                raise wsme.exc.ClientSideError(
-                    _("Storage nodes can only be configured if storage "
-                      "cluster is configured for the cinder backend."))
-
-            current_storage_ihosts = objects.Host.list(
-                pecan.request.context,
-                filters={'personality': k_host.STORAGE})
-
-            current_storage = []
-            for h in current_storage_ihosts:
-                if self._valid_storage_hostname(h['hostname']):
-                    current_storage.append(h['hostname'])
-
-            max_storage_hostnames = ["storage-%s" % x for x in
-                                     range(len(current_storage_ihosts) + 1)]
-
-            # Look up the IP address to use for this storage hostname
-            for h in reversed(max_storage_hostnames):
-                if h not in current_storage:
-                    hostname = h
-                    mgmt_ip = self._get_storage_address(hostname)
-                    LOG.info("Found new hostname=%s mgmt_ip=%s "
-                             "current_storage=%s" %
-                             (hostname, mgmt_ip, current_storage))
-                    break
-
-            if patched_ihost['hostname']:
-                if patched_ihost['hostname'] != hostname:
-                    raise wsme.exc.ClientSideError(
-                        _("Storage name {} not allowed. Expected {}. "
-                          "Storage nodes can be one of: "
-                          "storage-#.").format(
-                            patched_ihost['hostname'], hostname))
-
-            if hostupdate:
-                hostupdate.ihost_val_update({'hostname': hostname,
-                                             'mgmt_ip': mgmt_ip})
-            else:
-                patched_ihost['hostname'] = hostname
-                patched_ihost['mgmt_ip'] = mgmt_ip
-
-    @staticmethod
-    def _optimize_delta_handling(delta_handle):
-        """Optimize specific patch operations.
-        Updates delta_handle to identify remaining patch semantics to check.
- """ - optimizable = ['location', 'serialid'] - if pecan.request.user_agent.startswith('mtce'): - mtc_optimizable = ['operational', 'availability', 'task', 'uptime', - 'subfunction_oper', 'subfunction_avail'] - optimizable.extend(mtc_optimizable) - - for k in optimizable: - if k in delta_handle: - delta_handle.remove(k) - - @staticmethod - def _semantic_mtc_check_action(hostupdate, action): - """ - Perform semantic checks with patch action vs current state - - returns: notify_mtc_check_action - """ - notify_mtc_check_action = True - ihost = hostupdate.ihost_orig - patched_ihost = hostupdate.ihost_patch - - if action in [k_host.VIM_SERVICES_DISABLED, - k_host.VIM_SERVICES_DISABLE_FAILED, - k_host.VIM_SERVICES_DISABLE_EXTEND, - k_host.VIM_SERVICES_ENABLED, - k_host.VIM_SERVICES_DELETE_FAILED]: - # These are not mtce actions - return notify_mtc_check_action - - LOG.info("%s _semantic_mtc_check_action %s" % - (hostupdate.displayid, action)) - - # Semantic Check: Auto-Provision: Reset, Reboot or Power-On case - if ((cutils.host_has_function(ihost, k_host.COMPUTE)) and - (ihost['administrative'] == k_host.ADMIN_LOCKED) and - ((patched_ihost['action'] == k_host.ACTION_RESET) or - (patched_ihost['action'] == k_host.ACTION_REBOOT) or - (patched_ihost['action'] == k_host.ACTION_POWERON) or - (patched_ihost['action'] == k_host.ACTION_POWEROFF))): - notify_mtc_check_action = True - - return notify_mtc_check_action - - @staticmethod - def _bm_semantic_check_and_update(ohost, phost, delta, patch_obj, - current_ihosts=None, hostupdate=None): - """Parameters: - ohost: object original host - phost: mutable dictionary patch host - delta: default keys changed - patch_obj: all changed paths - returns bm_type_changed_to_none - """ - - # NOTE: since the bm_mac is still in the DB; - # this is just to disallow user to modify it. - if 'bm_mac' in delta: - raise wsme.exc.ClientSideError( - _("Patching Error: can't replace non-existent object " - "'bm_mac' ")) - - bm_type_changed_to_none = False - - bm_set = {'bm_type', - 'bm_ip', - 'bm_username', - 'bm_password'} - - password_exists = any(p['path'] == '/bm_password' for p in patch_obj) - if not (delta.intersection(bm_set) or password_exists): - return bm_type_changed_to_none - - if hostupdate: - hostupdate.notify_mtce = True - - patch_bm_password = None - for p in patch_obj: - if p['path'] == '/bm_password': - patch_bm_password = p['value'] - - password_exists = password_exists and patch_bm_password is not None - - bm_type_orig = ohost.get('bm_type') or "" - bm_type_patch = phost.get('bm_type') or "" - if bm_type_patch.lower() == 'none': - bm_type_patch = '' - if (not bm_type_patch) and (bm_type_orig != bm_type_patch): - LOG.info("bm_type None from %s to %s." 
-        """
-
-        # NOTE: since the bm_mac is still in the DB, this check just
-        # disallows the user from modifying it.
-        if 'bm_mac' in delta:
-            raise wsme.exc.ClientSideError(
-                _("Patching Error: can't replace non-existent object "
-                  "'bm_mac' "))
-
-        bm_type_changed_to_none = False
-
-        bm_set = {'bm_type',
-                  'bm_ip',
-                  'bm_username',
-                  'bm_password'}
-
-        password_exists = any(p['path'] == '/bm_password' for p in patch_obj)
-        if not (delta.intersection(bm_set) or password_exists):
-            return bm_type_changed_to_none
-
-        if hostupdate:
-            hostupdate.notify_mtce = True
-
-        patch_bm_password = None
-        for p in patch_obj:
-            if p['path'] == '/bm_password':
-                patch_bm_password = p['value']
-
-        password_exists = password_exists and patch_bm_password is not None
-
-        bm_type_orig = ohost.get('bm_type') or ""
-        bm_type_patch = phost.get('bm_type') or ""
-        if bm_type_patch.lower() == 'none':
-            bm_type_patch = ''
-        if (not bm_type_patch) and (bm_type_orig != bm_type_patch):
-            LOG.info("bm_type None from %s to %s." %
-                     (ohost['bm_type'], phost['bm_type']))
-
-            bm_type_changed_to_none = True
-
-        if 'bm_ip' in delta:
-            obm_ip = ohost['bm_ip'] or ""
-            nbm_ip = phost['bm_ip'] or ""
-            LOG.info("bm_ip in delta=%s obm_ip=%s nbm_ip=%s" %
-                     (delta, obm_ip, nbm_ip))
-            if obm_ip != nbm_ip:
-                if (pecan.request.user_agent.startswith('mtce') and
-                        not bm_type_changed_to_none):
-                    raise wsme.exc.ClientSideError(
-                        _("Rejected: {} Board Management "
-                          "controller IP Address is not "
-                          "user-modifiable.").format(phost['hostname']))
-
-        if phost['bm_ip'] or phost['bm_type'] or phost['bm_username']:
-            if (not phost['bm_type'] or
-                    (phost['bm_type'] and phost['bm_type'].lower() ==
-                     k_host.BM_TYPE_NONE)) and not bm_type_changed_to_none:
-                raise wsme.exc.ClientSideError(
-                    _("{}: Rejected: Board Management controller Type "
-                      "is not provisioned. Provisionable values: "
-                      "'bmc'.").format(phost['hostname']))
-            elif not phost['bm_username']:
-                raise wsme.exc.ClientSideError(
-                    _("{}: Rejected: Board Management controller username "
-                      "is not configured.").format(phost['hostname']))
-
-        # Semantic Check: Validate BM type against supported list
-        # ilo, quanta are kept for backwards compatibility only
-        valid_bm_type_list = [None, 'None', k_host.BM_TYPE_NONE,
-                              k_host.BM_TYPE_GENERIC,
-                              'ilo', 'ilo3', 'ilo4', 'quanta']
-
-        if not phost['bm_type']:
-            phost['bm_type'] = None
-
-        if not (phost['bm_type'] in valid_bm_type_list):
-            raise wsme.exc.ClientSideError(
-                _("{}: Rejected: '{}' is not a supported board management "
-                  "type. Must be one of {}").format(
-                    phost['hostname'], phost['bm_type'], valid_bm_type_list))
-
-        bm_type_str = phost['bm_type']
-        if (phost['bm_type'] and
-                bm_type_str.lower() != k_host.BM_TYPE_NONE):
-            LOG.info("Updating bm_type from %s to %s" %
-                     (phost['bm_type'], k_host.BM_TYPE_GENERIC))
-            phost['bm_type'] = k_host.BM_TYPE_GENERIC
-            if hostupdate:
-                hostupdate.ihost_val_update(
-                    {'bm_type': k_host.BM_TYPE_GENERIC})
-        else:
-            phost['bm_type'] = None
-            if hostupdate:
-                hostupdate.ihost_val_update({'bm_type': None})
-
-        if (phost['bm_type'] and phost['bm_ip'] and
-                (ohost['bm_ip'] != phost['bm_ip'])):
-            if not cutils.is_valid_ip(phost['bm_ip']):
-                raise wsme.exc.ClientSideError(
-                    _("{}: Rejected: Board Management controller IP Address "
-                      "is not valid.").format(phost['hostname']))
-
-        if current_ihosts and ('bm_ip' in phost):
-            bm_ips = [h['bm_ip'] for h in current_ihosts]
-
-            if phost['bm_ip'] and (phost['bm_ip'] in bm_ips):
-                raise wsme.exc.ClientSideError(
-                    _("Host-add Rejected: bm_ip %s already exists") %
-                    phost['bm_ip'])
-
-        # Update barbican with updated board management credentials if supplied
-        if (ohost['bm_username'] and phost['bm_username'] and
-                (ohost['bm_username'] != phost['bm_username'])):
-            if not password_exists:
-                raise wsme.exc.ClientSideError(
-                    _("{} Rejected: username change attempt from {} to {} "
-                      "without corresponding password.").format(
-                        phost['hostname'],
-                        ohost['bm_username'],
-                        phost['bm_username']))
-
-        if password_exists and patch_bm_password:
-            pecan.request.rpcapi.create_barbican_secret(pecan.request.context,
-                                                        phost['uuid'],
-                                                        patch_bm_password)
-
-        LOG.info("%s bm semantic checks for user_agent %s passed" %
-                 (phost['hostname'], pecan.request.user_agent))
-
-        return bm_type_changed_to_none
-
-    @staticmethod
-    def _semantic_check_nova_local_storage(ihost_uuid, personality):
-        """
-        Perform semantic checking for nova local storage
-        :param ihost_uuid: uuid of host with compute functionality
-        :param personality: personality of host with compute functionality
""" - - LOG.info("TODO _semantic_check_nova_local_storage nova local obsol") - # TODO(sc) configure_check (unlock_compute) - return - - @staticmethod - def _handle_ttys_dcd_change(ihost, ttys_dcd): - """ - Handle serial line carrier detection enable or disable request. - :param ihost: unpatched ihost dictionary - :param ttys_dcd: attribute supplied in patch - """ - LOG.info("%s _handle_ttys_dcd_change from %s to %s" % - (ihost['hostname'], ihost['ttys_dcd'], ttys_dcd)) - - # check if the flag is changed - if ttys_dcd is not None: - if ihost['ttys_dcd'] is None or ihost['ttys_dcd'] != ttys_dcd: - if ((ihost['administrative'] == k_host.ADMIN_LOCKED and - ihost['availability'] == k_host.AVAILABILITY_ONLINE) or - (ihost['administrative'] == k_host.ADMIN_UNLOCKED and - ihost['operational'] == k_host.OPERATIONAL_ENABLED)): - LOG.info("Notify conductor ttys_dcd change: (%s) (%s)" % - (ihost['uuid'], ttys_dcd)) - pecan.request.rpcapi.configure_ttys_dcd( - pecan.request.context, ihost['uuid'], ttys_dcd) - - def action_check(self, action, hostupdate): - """Performs semantic checks related to action""" - - if not action or (action.lower() == k_host.ACTION_NONE): - rc = False - return rc - - valid_actions = [k_host.ACTION_UNLOCK, - k_host.ACTION_FORCE_UNLOCK, - k_host.ACTION_LOCK, - k_host.ACTION_FORCE_LOCK, - k_host.ACTION_SWACT, - k_host.ACTION_FORCE_SWACT, - k_host.ACTION_RESET, - k_host.ACTION_REBOOT, - k_host.ACTION_REINSTALL, - k_host.ACTION_POWERON, - k_host.ACTION_POWEROFF, - k_host.VIM_SERVICES_ENABLED, - k_host.VIM_SERVICES_DISABLED, - k_host.VIM_SERVICES_DISABLE_FAILED, - k_host.VIM_SERVICES_DISABLE_EXTEND, - k_host.VIM_SERVICES_DELETE_FAILED, - k_host.ACTION_SUBFUNCTION_CONFIG] - - if action not in valid_actions: - raise wsme.exc.ClientSideError( - _("'%s' is not a supported maintenance action") % action) - - force_unlock = False - if action == k_host.ACTION_FORCE_UNLOCK: - # set force_unlock for semantic check and update action - # for compatability with vim and mtce - action = k_host.ACTION_UNLOCK - force_unlock = True - hostupdate.action = action - rc = True - - if action == k_host.ACTION_UNLOCK: - # Set host_action in DB as early as possible as we need - # it as a synchronization point for things like lvg/pv - # deletion which is not allowed when ihost is unlokced - # or in the process of unlocking. 
-    def action_check(self, action, hostupdate):
-        """Performs semantic checks related to action"""
-
-        if not action or (action.lower() == k_host.ACTION_NONE):
-            rc = False
-            return rc
-
-        valid_actions = [k_host.ACTION_UNLOCK,
-                         k_host.ACTION_FORCE_UNLOCK,
-                         k_host.ACTION_LOCK,
-                         k_host.ACTION_FORCE_LOCK,
-                         k_host.ACTION_SWACT,
-                         k_host.ACTION_FORCE_SWACT,
-                         k_host.ACTION_RESET,
-                         k_host.ACTION_REBOOT,
-                         k_host.ACTION_REINSTALL,
-                         k_host.ACTION_POWERON,
-                         k_host.ACTION_POWEROFF,
-                         k_host.VIM_SERVICES_ENABLED,
-                         k_host.VIM_SERVICES_DISABLED,
-                         k_host.VIM_SERVICES_DISABLE_FAILED,
-                         k_host.VIM_SERVICES_DISABLE_EXTEND,
-                         k_host.VIM_SERVICES_DELETE_FAILED,
-                         k_host.ACTION_SUBFUNCTION_CONFIG]
-
-        if action not in valid_actions:
-            raise wsme.exc.ClientSideError(
-                _("'%s' is not a supported maintenance action") % action)
-
-        force_unlock = False
-        if action == k_host.ACTION_FORCE_UNLOCK:
-            # set force_unlock for semantic check and update action
-            # for compatibility with vim and mtce
-            action = k_host.ACTION_UNLOCK
-            force_unlock = True
-        hostupdate.action = action
-        rc = True
-
-        if action == k_host.ACTION_UNLOCK:
-            # Set host_action in DB as early as possible as we need
-            # it as a synchronization point for things like lvg/pv
-            # deletion which is not allowed when ihost is unlocked
-            # or in the process of unlocking.
-            rc = self.update_host_action(action, hostupdate)
-            if rc:
-                pecan.request.dbapi.host_update(hostupdate.ihost_orig['uuid'],
-                                                hostupdate.ihost_val_prenotify)
-                try:
-                    self.check_unlock(hostupdate, force_unlock)
-                except Exception as e:
-                    LOG.info("host unlock check didn't pass, "
-                             "so set the host_action back to None "
-                             "and re-raise the exception")
-                    self.update_host_action(None, hostupdate)
-                    pecan.request.dbapi.host_update(
-                        hostupdate.ihost_orig['uuid'],
-                        hostupdate.ihost_val_prenotify)
-                    raise e
-        elif action == k_host.ACTION_LOCK:
-            if self.check_lock(hostupdate):
-                rc = self.update_host_action(action, hostupdate)
-        elif action == k_host.ACTION_FORCE_LOCK:
-            if self.check_force_lock(hostupdate):
-                rc = self.update_host_action(action, hostupdate)
-        elif action == k_host.ACTION_SWACT:
-            self.check_swact(hostupdate)
-        elif action == k_host.ACTION_FORCE_SWACT:
-            self.check_force_swact(hostupdate)
-        elif action == k_host.ACTION_REBOOT:
-            self.check_reboot(hostupdate)
-        elif action == k_host.ACTION_RESET:
-            self.check_reset(hostupdate)
-        elif action == k_host.ACTION_REINSTALL:
-            self.check_reinstall(hostupdate)
-        elif action == k_host.ACTION_POWERON:
-            self.check_poweron(hostupdate)
-        elif action == k_host.ACTION_POWEROFF:
-            self.check_poweroff(hostupdate)
-        elif action == k_host.VIM_SERVICES_ENABLED:
-            self.update_vim_progress_status(action, hostupdate)
-        elif action == k_host.VIM_SERVICES_DISABLED:
-            self.update_vim_progress_status(action, hostupdate)
-        elif action == k_host.VIM_SERVICES_DISABLE_FAILED:
-            self.update_vim_progress_status(action, hostupdate)
-        elif action == k_host.VIM_SERVICES_DISABLE_EXTEND:
-            self.update_vim_progress_status(action, hostupdate)
-        elif action == k_host.VIM_SERVICES_DELETE_FAILED:
-            self.update_vim_progress_status(action, hostupdate)
-        elif action == k_host.ACTION_SUBFUNCTION_CONFIG:
-            self._check_subfunction_config(hostupdate)
-            self._semantic_check_nova_local_storage(
-                hostupdate.ihost_patch['uuid'],
-                hostupdate.ihost_patch['personality'])
-        else:
-            raise wsme.exc.ClientSideError(
-                _("action_check unrecognized action: {}").format(action))
-
-        if action in k_host.ACTIONS_MTCE:
-            if self._semantic_mtc_check_action(hostupdate, action):
-                hostupdate.notify_mtce = True
-                task_val = hostupdate.get_task_from_action(action)
-                if task_val:
-                    hostupdate.ihost_val_update({'task': task_val})
-
-        elif 'administrative' in hostupdate.delta:
-            # administrative state changed, update task, host_action in case
-            hostupdate.ihost_val_update({'task': "",
-                                         'host_action': ""})
-
-        LOG.info("%s action=%s ihost_val_prenotify: %s ihost_val: %s" %
-                 (hostupdate.displayid,
-                  hostupdate.action,
-                  hostupdate.ihost_val_prenotify,
-                  hostupdate.ihost_val))
-
-        if hostupdate.ihost_val_prenotify:
-            LOG.info("%s host.update.ihost_val_prenotify %s" %
-                     (hostupdate.displayid, hostupdate.ihost_val_prenotify))
-
-        if self.check_notify_vim(action):
-            hostupdate.notify_vim = True
-
-        if self.check_notify_mtce(action, hostupdate) > 0:
-            hostupdate.notify_mtce = True
-
-        LOG.info("%s action_check action=%s, notify_vim=%s "
-                 "notify_mtce=%s rc=%s" %
-                 (hostupdate.displayid,
-                  action,
-                  hostupdate.notify_vim,
-                  hostupdate.notify_mtce,
-                  rc))
-
-        return rc
-
-    @staticmethod
-    def check_notify_vim(action):
-        if action in k_host.ACTIONS_VIM:
-            return True
-        else:
-            return False
-
-    @staticmethod
-    def check_notify_mtce(action, hostupdate):
-        """Determine whether mtce should be notified of this patch request
-        returns: Integer (nonmtc_change_count)
-        """
-
-        nonmtc_change_count = 0
-        if action in k_host.ACTIONS_VIM:
-            return nonmtc_change_count
-        elif action in k_host.ACTIONS_CONFIG:
-            return nonmtc_change_count
-        elif action == k_host.VIM_SERVICES_ENABLED:
-            return nonmtc_change_count
-
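-        # Fields owned by mtce (and a few cosmetic ones) do not require a
-        # maintenance notification; anything outside this list counts as
-        # a change mtce must be told about.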
-        mtc_ignore_list = ['administrative', 'availability', 'operational',
-                           'task', 'uptime', 'capabilities',
-                           'host_action',
-                           'subfunction_oper', 'subfunction_avail',
-                           'vim_progress_status',
-                           'location', 'serialid', 'invprovision']
-
-        if pecan.request.user_agent.startswith('mtce'):
-            mtc_ignore_list.append('bm_ip')
-
-        nonmtc_change_count = len(set(hostupdate.delta) - set(mtc_ignore_list))
-
-        return nonmtc_change_count
-
-    @staticmethod
-    def stage_administrative_update(hostupdate):
-        # Always configure when the host is unlocked - this will set the
-        # hostname and allow the node to boot and configure itself.
-        # NOTE: This is being hit the second time through this function on
-        # the unlock. The first time through, the "action" is set to unlock
-        # on the patched_iHost, but the "administrative" is still locked.
-        # Once maintenance processes the unlock, they do another patch and
-        # set the "administrative" to unlocked.
-        if ('administrative' in hostupdate.delta and
-                hostupdate.ihost_patch['administrative'] ==
-                k_host.ADMIN_UNLOCKED):
-            if hostupdate.ihost_orig['invprovision'] == \
-                    k_host.UNPROVISIONED or \
-                    hostupdate.ihost_orig['invprovision'] is None:
-                LOG.info("stage_administrative_update: provisioning")
-                hostupdate.ihost_val_update({'invprovision':
-                                             k_host.PROVISIONING})
-
-        if ('operational' in hostupdate.delta and
-                hostupdate.ihost_patch['operational'] ==
-                k_host.OPERATIONAL_ENABLED):
-            if hostupdate.ihost_orig['invprovision'] == k_host.PROVISIONING:
-                # first time unlocked successfully
-                LOG.info("stage_administrative_update: provisioned")
-                hostupdate.ihost_val_update(
-                    {'invprovision': k_host.PROVISIONED})
-
-    @staticmethod
-    def _update_add_ceph_state():
-        # notify systemconfig of the new ceph state
-        LOG.info("TODO(SC) _update_add_ceph_state")
-
-    @staticmethod
-    def update_host_action(action, hostupdate):
-        if action is None:
-            preval = {'host_action': ''}
-        elif action == k_host.ACTION_FORCE_LOCK:
-            preval = {'host_action': k_host.ACTION_FORCE_LOCK}
-        elif action == k_host.ACTION_LOCK:
-            preval = {'host_action': k_host.ACTION_LOCK}
-        elif (action == k_host.ACTION_UNLOCK or
-                action == k_host.ACTION_FORCE_UNLOCK):
-            preval = {'host_action': k_host.ACTION_UNLOCK}
-        else:
-            LOG.error("update_host_action unsupported action: %s" % action)
-            return False
-        hostupdate.ihost_val_prenotify.update(preval)
-        hostupdate.ihost_val.update(preval)
-
-        task_val = hostupdate.get_task_from_action(action)
-        if task_val:
-            hostupdate.ihost_val_update({'task': task_val})
-        return True
-
-    @staticmethod
-    def update_vim_progress_status(action, hostupdate):
-        LOG.info("%s Pending update_vim_progress_status %s" %
-                 (hostupdate.displayid, action))
-        return True
-
-    def _check_provisioning(self, hostupdate, patch):
-        # Once the host has been provisioned lock down additional fields
-
-        ihost = hostupdate.ihost_patch
-        delta = hostupdate.delta
-
-        provision_state = [k_host.PROVISIONED, k_host.PROVISIONING]
-        if hostupdate.ihost_orig['invprovision'] in provision_state:
-            state_rel_path = ['hostname', 'personality', 'subfunctions']
-            if any(p in state_rel_path for p in delta):
-                raise wsme.exc.ClientSideError(
-                    _("The following fields can not be modified because "
-                      "this host {} has been configured: "
-                      "hostname, personality, subfunctions").format(
-                        hostupdate.ihost_orig['hostname']))
-
-        # Check whether any configurable installation parameters are updated
-        install_parms = ['boot_device', 'rootfs_device', 'install_output',
-                         'console', 'tboot']
-        if any(p in install_parms for p in delta):
-            # Disallow changes if the node is not locked
-            if ihost['administrative'] != k_host.ADMIN_LOCKED:
-                raise wsme.exc.ClientSideError(
-                    _("Host must be locked before updating "
-                      "installation parameters."))
-
-            # An update to PXE boot information is required
-            hostupdate.configure_required = True
-
-        if 'personality' in delta:
-            LOG.info("iHost['personality']=%s" %
-                     hostupdate.ihost_orig['personality'])
-
-            if hostupdate.ihost_orig['personality']:
-                raise wsme.exc.ClientSideError(
-                    _("Can not change personality after it has been set. "
-                      "Host {} must be deleted and re-added in order to change"
-                      " the personality.").format(
-                        hostupdate.ihost_orig['hostname']))
-
-            if (hostupdate.ihost_patch['personality'] in
-                    (k_host.CONTROLLER, k_host.STORAGE)):
-                self._controller_storage_node_setup(hostupdate.ihost_patch,
-                                                    hostupdate)
-                # check the subfunctions are updated properly
-                LOG.info("hostupdate.ihost_patch.subfunctions %s" %
-                         hostupdate.ihost_patch['subfunctions'])
-            elif hostupdate.ihost_patch['personality'] == k_host.COMPUTE:
-                self._check_compute(hostupdate.ihost_patch, hostupdate)
-            else:
-                LOG.error("Unexpected personality: %s" %
-                          hostupdate.ihost_patch['personality'])
-
-            # Always configure when the personality has been set - this will
-            # set up the PXE boot information so the software can be installed
-            hostupdate.configure_required = True
-
-            # Notify VIM when the personality is set.
-            hostupdate.notify_vim_add_host = True
-
-        if k_host.SUBFUNCTIONS in delta:
-            if hostupdate.ihost_orig[k_host.SUBFUNCTIONS]:
-                raise wsme.exc.ClientSideError(
-                    _("Can not change subfunctions after it has been set. "
-                      "Host {} must be deleted and re-added in order to "
-                      "change the subfunctions.").format(
-                        hostupdate.ihost_orig['hostname']))
-
-            if hostupdate.ihost_patch['personality'] == k_host.COMPUTE:
-                valid_subfunctions = (k_host.COMPUTE,
-                                      k_host.LOWLATENCY)
-            elif hostupdate.ihost_patch['personality'] == k_host.CONTROLLER:
-                valid_subfunctions = (k_host.CONTROLLER,
-                                      k_host.COMPUTE,
-                                      k_host.LOWLATENCY)
-            elif hostupdate.ihost_patch['personality'] == k_host.STORAGE:
-                # Comparison is expecting a list
-                valid_subfunctions = (k_host.STORAGE, k_host.STORAGE)
-
-            subfunctions_set = \
-                set(hostupdate.ihost_patch[k_host.SUBFUNCTIONS].split(','))
-
-            if not subfunctions_set.issubset(valid_subfunctions):
-                raise wsme.exc.ClientSideError(
-                    ("%s subfunctions %s contains unsupported values. "
-                     "Allowable: %s."
-                     % (hostupdate.displayid,
-                        subfunctions_set,
-                        valid_subfunctions)))
-
-            if hostupdate.ihost_patch['personality'] == k_host.COMPUTE:
-                if k_host.COMPUTE not in subfunctions_set:
-                    # Automatically add it
-                    subfunctions_list = list(subfunctions_set)
-                    subfunctions_list.insert(0, k_host.COMPUTE)
-                    subfunctions = ','.join(subfunctions_list)
-
-                    LOG.info("%s update subfunctions=%s" %
-                             (hostupdate.displayid, subfunctions))
-                    hostupdate.ihost_val_prenotify.update(
-                        {'subfunctions': subfunctions})
-                    hostupdate.ihost_val.update({'subfunctions': subfunctions})
-
-        # The hostname for a controller or storage node cannot be modified
-
-        # Disallow hostname changes
-        if 'hostname' in delta:
-            if hostupdate.ihost_orig['hostname']:
-                if (hostupdate.ihost_patch['hostname'] !=
-                        hostupdate.ihost_orig['hostname']):
-                    raise wsme.exc.ClientSideError(
-                        _("The hostname field can not be modified because "
-                          "the hostname {} has already been configured. "
-                          "If changing hostname is required, please delete "
-                          "this host, then readd.").format(
-                            hostupdate.ihost_orig['hostname']))
-
-        for attribute in patch:
-            # check for duplicate attributes
-            for attribute2 in patch:
-                if attribute['path'] == attribute2['path']:
-                    if attribute['value'] != attribute2['value']:
-                        raise wsme.exc.ClientSideError(
-                            _("Illegal duplicate parameters passed."))
-
-        if 'personality' in delta or 'hostname' in delta:
-            personality = hostupdate.ihost_patch.get('personality') or ""
-            hostname = hostupdate.ihost_patch.get('hostname') or ""
-            if personality and hostname:
-                self._validate_hostname(hostname, personality)
-
-        if 'personality' in delta:
-            HostController._personality_license_check(
-                hostupdate.ihost_patch['personality'])
-
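-    # On an All-in-one Duplex system the number of compute hosts is capped
-    # at AIO_DUPLEX_MAX_COMPUTES; on an AIO build, only controller
-    # personalities are compatible with the installed software.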
") % personality - - raise wsme.exc.ClientSideError(msg) - - @staticmethod - def check_reset(hostupdate): - """Check semantics on host-reset.""" - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError( - _("Can not 'Reset' a simplex system")) - - if hostupdate.ihost_orig['administrative'] == k_host.ADMIN_UNLOCKED: - raise wsme.exc.ClientSideError( - _("Can not 'Reset' an 'unlocked' host {}; " - "Please 'Lock' first").format(hostupdate.displayid)) - - return True - - @staticmethod - def check_poweron(hostupdate): - # Semantic Check: State Dependency: Power-On case - if (hostupdate.ihost_orig['administrative'] == - k_host.ADMIN_UNLOCKED): - raise wsme.exc.ClientSideError( - _("Can not 'Power-On' an already Powered-on " - "and 'unlocked' host {}").format(hostupdate.displayid)) - - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError( - _("Can not 'Power-On' an already Powered-on " - "simplex system")) - - @staticmethod - def check_poweroff(hostupdate): - # Semantic Check: State Dependency: Power-Off case - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError( - _("Can not 'Power-Off' a simplex system via " - "system commands")) - - if (hostupdate.ihost_orig['administrative'] == - k_host.ADMIN_UNLOCKED): - raise wsme.exc.ClientSideError( - _("Can not 'Power-Off' an 'unlocked' host {}; " - "Please 'Lock' first").format(hostupdate.displayid)) - - @staticmethod - def check_reinstall(hostupdate): - """Semantic Check: State Dependency: Reinstall case""" - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Reinstalling a simplex system is not allowed.")) - - ihost = hostupdate.ihost_orig - if ihost['administrative'] == k_host.ADMIN_UNLOCKED: - raise wsme.exc.ClientSideError( - _("Can not 'Reinstall' an 'unlocked' host {}; " - "Please 'Lock' first").format(hostupdate.displayid)) - elif ((ihost['administrative'] == k_host.ADMIN_LOCKED) and - (ihost['availability'] != "online")): - raise wsme.exc.ClientSideError( - _("Can not 'Reinstall' {} while it is 'offline'. 
" - "Please wait for this host's availability state " - "to be 'online' and then re-issue the reinstall " - "command.").format(hostupdate.displayid)) - - def check_unlock(self, hostupdate, force_unlock=False): - """Check semantics on host-unlock.""" - if (hostupdate.action != k_host.ACTION_UNLOCK and - hostupdate.action != k_host.ACTION_FORCE_UNLOCK): - LOG.error("check_unlock unexpected action: %s" % hostupdate.action) - return False - - # Semantic Check: Don't unlock if installation failed - if (hostupdate.ihost_orig['install_state'] == - constants.INSTALL_STATE_FAILED): - raise wsme.exc.ClientSideError( - _("Cannot unlock host {} due to installation failure").format( - hostupdate.displayid)) - - # Semantic Check: Avoid Unlock of Unlocked Host - if hostupdate.ihost_orig['administrative'] == k_host.ADMIN_UNLOCKED: - raise wsme.exc.ClientSideError( - _("Avoiding 'unlock' action on already " - "'unlocked' host {}").format( - hostupdate.ihost_orig['hostname'])) - - # Semantic Check: Action Dependency: Power-Off / Unlock case - if (hostupdate.ihost_orig['availability'] == - k_host.ACTION_POWEROFF): - raise wsme.exc.ClientSideError( - _("Can not 'Unlock a Powered-Off' host {}; Power-on, " - "wait for 'online' status and then 'unlock'").format( - hostupdate.displayid)) - - # Semantic Check: Action Dependency: Online / Unlock case - if (not force_unlock and hostupdate.ihost_orig['availability'] != - k_host.AVAILABILITY_ONLINE): - raise wsme.exc.ClientSideError( - _("Host {} is not online. " - "Wait for 'online' availability status and " - "then 'unlock'").format(hostupdate.displayid)) - - # To unlock, we need the following additional fields - if not (hostupdate.ihost_patch['mgmt_mac'] and - hostupdate.ihost_patch['mgmt_ip'] and - hostupdate.ihost_patch['hostname'] and - hostupdate.ihost_patch['personality'] and - hostupdate.ihost_patch['subfunctions']): - raise wsme.exc.ClientSideError( - _("Can not unlock an unprovisioned host {}. " - "Please perform 'Edit Host' to provision host.").format( - hostupdate.displayid)) - - # To unlock, ensure reinstall has completed - action_state = hostupdate.ihost_orig[k_host.HOST_ACTION_STATE] - if (action_state and - action_state == k_host.HAS_REINSTALLING): - if not force_unlock: - raise wsme.exc.ClientSideError( - _("Can not unlock host {} undergoing reinstall. " - "Please ensure host has completed reinstall " - "prior to unlock.").format(hostupdate.displayid)) - else: - LOG.warn("Allowing force-unlock of host %s " - "undergoing reinstall." % hostupdate.displayid) - - personality = hostupdate.ihost_patch.get('personality') - if personality == k_host.CONTROLLER: - self.check_unlock_controller(hostupdate, force_unlock) - if cutils.host_has_function(hostupdate.ihost_patch, k_host.COMPUTE): - self.check_unlock_compute(hostupdate) - elif personality == k_host.STORAGE: - self.check_unlock_storage(hostupdate) - - # self.check_unlock_interfaces(hostupdate) - # self.unlock_update_mgmt_infra_interface(hostupdate.ihost_patch) - # TODO(storage) self.check_unlock_partitions(hostupdate) - self.check_unlock_patching(hostupdate, force_unlock) - - hostupdate.configure_required = True - hostupdate.notify_vim = True - - return True - - def check_unlock_patching(self, hostupdate, force_unlock): - """Check whether the host is patch current. 
- """ - - if force_unlock: - return - - try: - system = objects.System.get_one(pecan.request.context) - response = patch_api.patch_query_hosts( - pecan.request.context, - system.region_name) - phosts = response['data'] - except Exception as e: - LOG.warn(_("No response from patch api {} e={}").format( - hostupdate.displayid, e)) - self._api_token = None - return - - for phost in phosts: - if phost.get('hostname') == hostupdate.ihost_patch.get('hostname'): - if not phost.get('patch_current'): - raise wsme.exc.ClientSideError( - _("host-unlock rejected: Not patch current. " - "'sw-patch host-install {}' is required.").format( - hostupdate.displayid)) - - def check_lock(self, hostupdate): - """Check semantics on host-lock.""" - LOG.info("%s ihost check_lock" % hostupdate.displayid) - if hostupdate.action != k_host.ACTION_LOCK: - LOG.error("%s check_lock unexpected action: %s" % - (hostupdate.displayid, hostupdate.action)) - return False - - # Semantic Check: Avoid Lock of Locked Host - if hostupdate.ihost_orig['administrative'] == k_host.ADMIN_LOCKED: - raise wsme.exc.ClientSideError( - _("Avoiding {} action on already " - "'locked' host {}").format( - hostupdate.ihost_patch['action'], - hostupdate.ihost_orig['hostname'])) - - # personality specific lock checks - personality = hostupdate.ihost_patch.get('personality') - if personality == k_host.CONTROLLER: - self.check_lock_controller(hostupdate) - elif personality == k_host.STORAGE: - self.check_lock_storage(hostupdate) - - subfunctions_set = \ - set(hostupdate.ihost_patch[k_host.SUBFUNCTIONS].split(',')) - if k_host.COMPUTE in subfunctions_set: - self.check_lock_compute(hostupdate) - - hostupdate.notify_vim = True - hostupdate.notify_mtce = True - - return True - - def check_force_lock(self, hostupdate): - # personality specific lock checks - personality = hostupdate.ihost_patch.get('personality') - if personality == k_host.CONTROLLER: - self.check_lock_controller(hostupdate, force=True) - - elif personality == k_host.STORAGE: - self.check_lock_storage(hostupdate, force=True) - return True - - @staticmethod - def check_lock_controller(hostupdate, force=False): - """Pre lock semantic checks for controller""" - - LOG.info("%s ihost check_lock_controller" % hostupdate.displayid) - - if utils.get_system_mode() != constants.SYSTEM_MODE_SIMPLEX: - if utils.is_host_active_controller(hostupdate.ihost_orig): - raise wsme.exc.ClientSideError( - _("%s : Rejected: Can not lock an active controller") % - hostupdate.ihost_orig['hostname']) - - if StorageBackendConfig.has_backend_configured( - pecan.request.dbapi, - constants.CINDER_BACKEND_CEPH): - try: - st_nodes = objects.Host.list( - pecan.request.context, - filters={'personality': k_host.STORAGE}) - - except exception.HostNotFound: - # If we don't have any storage nodes we don't need to - # check for quorum. We'll allow the node to be locked. - return - # TODO(oponcea) remove once SM supports in-service config reload - # Allow locking controllers when all storage nodes are locked.
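The quorum gate in the loop that follows relies on Python's `for`/`else`: the `else` arm runs only when the loop finishes without a `break`, i.e. when no storage node is still unlocked, and in that case the controller lock is allowed without a monitor-quorum check. A minimal standalone sketch of that control flow (the node dicts are hypothetical stand-ins for the inventory Host objects):

```python
# for/else quorum gate: skip the quorum check only when every
# storage node is already administratively locked.
nodes = [{"hostname": "storage-0", "administrative": "locked"},
         {"hostname": "storage-1", "administrative": "locked"}]

for node in nodes:
    if node["administrative"] == "unlocked":
        break  # at least one monitor is still up; keep checking quorum
else:
    # no break occurred: all storage nodes are locked, allow the lock
    print("all storage nodes locked; quorum check skipped")
```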
- for node in st_nodes: - if node['administrative'] == k_host.ADMIN_UNLOCKED: - break - else: - return - - if not force: - # sm-lock-pre-check - node_name = hostupdate.displayid - response = sm_api.lock_pre_check(pecan.request.context, node_name) - if response: - error_code = response.get('error_code') - if ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER == error_code: - impact_svc_list = response.get('impact_service_list') - svc_list = ','.join(impact_svc_list) - if len(impact_svc_list) > 1: - msg = _("Services {svc_list} are only running on " - "{host}, locking {host} will result " - "in a service outage. If lock {host} is required, " - "please use \"force lock\" command.").format( - svc_list=svc_list, host=node_name) - else: - msg = _("Service {svc_list} is only running on " - "{host}, locking {host} will result " - "in a service outage. If lock {host} is required, " - "please use \"force lock\" command.").format( - svc_list=svc_list, host=node_name) - - raise wsme.exc.ClientSideError(msg) - elif "0" != error_code: - raise wsme.exc.ClientSideError( - _("{}").format(response['error_details'])) - - @staticmethod - def _host_configure_check(host_uuid): - # check with systemconfig host//state/configure_check - if pecan.request.systemconfig.host_configure_check(host_uuid): - LOG.info("Configuration check {} passed".format(host_uuid)) - else: - LOG.info("Configuration check {} failed".format(host_uuid)) - raise wsme.exc.ClientSideError("host_configure_check Failed") - - def check_unlock_controller(self, hostupdate, force_unlock=False): - """Pre unlock semantic checks for controller""" - LOG.info("{} host check_unlock_controller".format( - hostupdate.displayid)) - self._host_configure_check(hostupdate.ihost_orig['uuid']) - - def check_unlock_compute(self, hostupdate): - """Check semantics on host-unlock of a compute.""" - LOG.info("%s ihost check_unlock_compute" % hostupdate.displayid) - ihost = hostupdate.ihost_orig - if ihost['invprovision'] is None: - raise wsme.exc.ClientSideError( - _("Can not unlock an unconfigured host {}. Please " - "configure host and wait for Availability State " - "'online' prior to unlock.").format(hostupdate.displayid)) - self._host_configure_check(ihost['uuid']) - - def check_unlock_storage(self, hostupdate): - """Storage unlock semantic checks""" - self._host_configure_check(hostupdate.ihost_orig['uuid']) - - @staticmethod - def check_updates_while_unlocked(hostupdate, delta): - """Check semantics on host-update of an unlocked host.""" - - ihost = hostupdate.ihost_patch - if ihost['administrative'] == k_host.ADMIN_UNLOCKED: - deltaset = set(delta) - - restricted_updates = () - if not pecan.request.user_agent.startswith('mtce'): - # Allow mtc to modify the state through the REST API. - # Eventually mtc should switch to using the - # conductor API to modify hosts because this check will also - # allow users to modify these states (which is bad). - restricted_updates = ('administrative', - 'availability', - 'operational', - 'subfunction_oper', - 'subfunction_avail', - 'task', 'uptime') - - if deltaset.issubset(restricted_updates): - raise wsme.exc.ClientSideError( - ("Change set %s contains a subset of restricted %s."
% - (deltaset, restricted_updates))) - else: - LOG.debug("PASS deltaset=%s restricted_updates=%s" % - (deltaset, restricted_updates)) - - if 'administrative' in delta: - # Transition to unlocked - if ihost['host_action']: - LOG.info("Host: %s Admin state change to: %s " - "Clearing host_action=%s" % - (ihost['uuid'], - ihost['administrative'], - ihost['host_action'])) - hostupdate.ihost_val_update({'host_action': ""}) - - @staticmethod - def check_force_swact(hostupdate): - """Pre swact semantic checks for controller""" - # Allow force-swact to continue - return True - - @staticmethod - def check_reboot(hostupdate): - """Pre reboot semantic checks""" - # Semantic Check: State Dependency: Reboot case - if hostupdate.ihost_orig['administrative'] == k_host.ADMIN_UNLOCKED: - raise wsme.exc.ClientSideError( - _("Can not 'Reboot' an 'unlocked' host {}; " - "Please 'Lock' first").format(hostupdate.displayid)) - - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Rebooting a simplex system is not allowed.")) - return True - - def check_swact(self, hostupdate): - """Pre swact semantic checks for controller""" - - if hostupdate.ihost_orig['personality'] != k_host.CONTROLLER: - raise wsme.exc.ClientSideError( - _("Swact action not allowed for " - "non-controller host {}.").format( - hostupdate.ihost_orig['hostname'])) - - if hostupdate.ihost_orig['administrative'] == k_host.ADMIN_LOCKED: - raise wsme.exc.ClientSideError( - _("Controller is Locked; No services to Swact")) - - if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(_( - "Swact action not allowed for a simplex system.")) - - # check target controller - ihost_ctrs = objects.Host.list( - pecan.request.context, - filters={'personality': k_host.CONTROLLER}) - - for ihost_ctr in ihost_ctrs: - if ihost_ctr.hostname != hostupdate.ihost_orig['hostname']: - if (ihost_ctr.operational != - k_host.OPERATIONAL_ENABLED): - raise wsme.exc.ClientSideError( - _("{} is not enabled and has operational " - "state {}. " - "Standby controller must be operationally " - "enabled.").format( - ihost_ctr.hostname, ihost_ctr.operational)) - - if (ihost_ctr.availability == - k_host.AVAILABILITY_DEGRADED): - health_helper = health.Health( - pecan.request.context, - pecan.request.dbapi) - degrade_alarms = health_helper.get_alarms_degrade( - pecan.request.context, - alarm_ignore_list=[ - fm_constants.FM_ALARM_ID_HA_SERVICE_GROUP_STATE, - fm_constants.FM_ALARM_ID_HA_SERVICE_GROUP_REDUNDANCY, # noqa - fm_constants.FM_ALARM_ID_HA_NODE_LICENSE, - fm_constants.FM_ALARM_ID_HA_COMMUNICATION_FAILURE - ], - entity_instance_id_filter=ihost_ctr.hostname) - if degrade_alarms: - raise wsme.exc.ClientSideError( - _("%s has degraded availability status. Standby " - "controller must be in available status.") % - ihost_ctr.hostname) - - if k_host.COMPUTE in ihost_ctr.subfunctions: - if (ihost_ctr.subfunction_oper != - k_host.OPERATIONAL_ENABLED): - raise wsme.exc.ClientSideError( - _("{} subfunction is not enabled and has " - "operational state {}. "
- "Standby controller subfunctions {} " - "must all be operationally enabled.").format( - ihost_ctr.hostname, - ihost_ctr.subfunction_oper, - ihost_ctr.subfunctions)) - - LOG.info("TODO sc_op to check swact (storage_backend, tpm_config" - ", DRBD resizing)") - - # Check: Valid Swact action: Pre-Swact Check - response = sm_api.swact_pre_check( - pecan.request.context, - hostupdate.ihost_orig['hostname']) - if response and "0" != response['error_code']: - raise wsme.exc.ClientSideError( - _("{}").format(response['error_details'])) - - def check_lock_storage(self, hostupdate, force=False): - """Pre lock semantic checks for storage""" - LOG.info("%s ihost check_lock_storage" % hostupdate.displayid) - - if (hostupdate.ihost_orig['administrative'] == - k_host.ADMIN_UNLOCKED and - hostupdate.ihost_orig['operational'] == - k_host.OPERATIONAL_ENABLED): - num_monitors, required_monitors, quorum_names = \ - self._ceph.get_monitors_status(pecan.request.dbapi) - - if (hostupdate.ihost_orig['hostname'] in quorum_names and - num_monitors - 1 < required_monitors): - raise wsme.exc.ClientSideError(_( - "Only {} storage monitor available. " - "At least {} unlocked and enabled hosts with monitors " - "are required. Please ensure hosts with monitors are " - "unlocked and enabled - candidates: {}, {}, {}").format( - num_monitors, constants.MIN_STOR_MONITORS, - k_host.CONTROLLER_0_HOSTNAME, - k_host.CONTROLLER_1_HOSTNAME, - k_host.STORAGE_0_HOSTNAME)) - - # send request to systemconfig to check disable storage - LOG.info("TODO sc_op to perform disable storage checks") - - @staticmethod - def check_lock_compute(hostupdate, force=False): - """Pre lock semantic checks for compute""" - - LOG.info("%s host check_lock_compute" % hostupdate.displayid) - if force: - return - - system = objects.System.get_one(pecan.request.context) - if system.system_mode == constants.SYSTEM_MODE_SIMPLEX: - return - - # send request to systemconfig to check disable storage - LOG.info("TODO sc_op to perform disable storage checks") - - def stage_action(self, action, hostupdate): - """Stage the action to be performed.
- """ - LOG.info("%s stage_action %s" % (hostupdate.displayid, action)) - rc = True - if not action or ( - action and action.lower() == k_host.ACTION_NONE): - LOG.error("Unrecognized action perform: %s" % action) - return False - - if (action == k_host.ACTION_UNLOCK or - action == k_host.ACTION_FORCE_UNLOCK): - self._handle_unlock_action(hostupdate) - elif action == k_host.ACTION_LOCK: - self._handle_lock_action(hostupdate) - elif action == k_host.ACTION_FORCE_LOCK: - self._handle_force_lock_action(hostupdate) - elif action == k_host.ACTION_SWACT: - self._stage_swact(hostupdate) - elif action == k_host.ACTION_FORCE_SWACT: - self._stage_force_swact(hostupdate) - elif action == k_host.ACTION_REBOOT: - self._stage_reboot(hostupdate) - elif action == k_host.ACTION_RESET: - self._stage_reset(hostupdate) - elif action == k_host.ACTION_REINSTALL: - self._stage_reinstall(hostupdate) - elif action == k_host.ACTION_POWERON: - self._stage_poweron(hostupdate) - elif action == k_host.ACTION_POWEROFF: - self._stage_poweroff(hostupdate) - elif action == k_host.VIM_SERVICES_ENABLED: - self._handle_vim_services_enabled(hostupdate) - elif action == k_host.VIM_SERVICES_DISABLED: - if not self._handle_vim_services_disabled(hostupdate): - LOG.warn(_("{} exit _handle_vim_services_disabled").format( - hostupdate.ihost_patch['hostname'])) - hostupdate.nextstep = hostupdate.EXIT_RETURN_HOST - rc = False - elif action == k_host.VIM_SERVICES_DISABLE_FAILED: - if not self._handle_vim_services_disable_failed(hostupdate): - LOG.warn( - _("{} Exit _handle_vim_services_disable failed").format( - hostupdate.ihost_patch['hostname'])) - hostupdate.nextstep = hostupdate.EXIT_RETURN_HOST - rc = False - elif action == k_host.VIM_SERVICES_DISABLE_EXTEND: - self._handle_vim_services_disable_extend(hostupdate) - hostupdate.nextstep = hostupdate.EXIT_UPDATE_PREVAL - rc = False - elif action == k_host.VIM_SERVICES_DELETE_FAILED: - self._handle_vim_services_delete_failed(hostupdate) - hostupdate.nextstep = hostupdate.EXIT_UPDATE_PREVAL - rc = False - elif action == k_host.ACTION_SUBFUNCTION_CONFIG: - # Not a mtc action; disable mtc checks and config - self._stage_subfunction_config(hostupdate) - else: - LOG.error("%s Unrecognized action perform: %s" % - (hostupdate.displayid, action)) - rc = False - - if hostupdate.nextstep == hostupdate.EXIT_RETURN_HOST: - LOG.info("%s stage_action aborting request %s %s" % - (hostupdate.displayid, - hostupdate.action, - hostupdate.delta)) - - return rc - - @staticmethod - def _check_subfunction_config(hostupdate): - """Check subfunction config.""" - LOG.info("%s _check_subfunction_config" % hostupdate.displayid) - patched_ihost = hostupdate.ihost_patch - - if patched_ihost['action'] == "subfunction_config": - if (not patched_ihost['subfunctions'] or - patched_ihost['personality'] == - patched_ihost['subfunctions']): - raise wsme.exc.ClientSideError( - _("This host is not configured with a subfunction.")) - - return True - - @staticmethod - def _stage_subfunction_config(hostupdate): - """Stage subfunction config.""" - LOG.info("%s _stage_subfunction_config" % hostupdate.displayid) - - hostupdate.notify_mtce = False - hostupdate.skip_notify_mtce = True - - @staticmethod - def perform_action_subfunction_config(ihost_obj): - """Perform subfunction config via RPC to conductor.""" - LOG.info("%s perform_action_subfunction_config" % - ihost_obj['hostname']) - pecan.request.rpcapi.configure_host(pecan.request.context, - ihost_obj, - do_compute_apply=True) - - @staticmethod - def _stage_reboot(hostupdate): 
- """Stage reboot action.""" - LOG.info("%s stage_reboot" % hostupdate.displayid) - hostupdate.notify_mtce = True - - def _stage_reinstall(self, hostupdate): - """Stage reinstall action.""" - LOG.info("%s stage_reinstall" % hostupdate.displayid) - - # Remove manifests to enable standard install without manifests - # and enable storage allocation change - pecan.request.rpcapi.remove_host_config( - pecan.request.context, - hostupdate.ihost_orig['uuid']) - - hostupdate.notify_mtce = True - if hostupdate.ihost_orig['personality'] == k_host.STORAGE: - istors = pecan.request.dbapi.istor_get_by_ihost( - hostupdate.ihost_orig['uuid']) - for stor in istors: - istor_obj = objects.storage.get_by_uuid( - pecan.request.context, stor.uuid) - self._ceph.remove_osd_key(istor_obj['osdid']) - - hostupdate.ihost_val_update({k_host.HOST_ACTION_STATE: - k_host.HAS_REINSTALLING}) - - @staticmethod - def _stage_poweron(hostupdate): - """Stage poweron action.""" - LOG.info("%s stage_poweron" % hostupdate.displayid) - hostupdate.notify_mtce = True - - @staticmethod - def _stage_poweroff(hostupdate): - """Stage poweroff action.""" - LOG.info("%s stage_poweroff" % hostupdate.displayid) - hostupdate.notify_mtce = True - - @staticmethod - def _stage_swact(hostupdate): - """Stage swact action.""" - LOG.info("%s stage_swact" % hostupdate.displayid) - hostupdate.notify_mtce = True - - @staticmethod - def _stage_force_swact(hostupdate): - """Stage force-swact action.""" - LOG.info("%s stage_force_swact" % hostupdate.displayid) - hostupdate.notify_mtce = True - - @staticmethod - def _handle_vim_services_enabled(hostupdate): - """Handle VIM services-enabled signal.""" - vim_progress_status = hostupdate.ihost_orig.get( - 'vim_progress_status') or "" - LOG.info("%s received services-enabled task=%s vim_progress_status=%s" - % (hostupdate.displayid, - hostupdate.ihost_orig['task'], - vim_progress_status)) - - if (not vim_progress_status or - not vim_progress_status.startswith( - k_host.VIM_SERVICES_ENABLED)): - hostupdate.notify_availability = k_host.VIM_SERVICES_ENABLED - if (not vim_progress_status or - vim_progress_status == k_host.VIM_SERVICES_DISABLED): - # otherwise allow the audit to clear the error message - hostupdate.ihost_val_update({'vim_progress_status': - k_host.VIM_SERVICES_ENABLED}) - - hostupdate.skip_notify_mtce = True - - @staticmethod - def _handle_vim_services_disabled(hostupdate): - """Handle VIM services-disabled signal.""" - - LOG.info("%s _handle_vim_services_disabled'" % hostupdate.displayid) - ihost = hostupdate.ihost_orig - - hostupdate.ihost_val_update( - {'vim_progress_status': k_host.VIM_SERVICES_DISABLED}) - - ihost_task_string = ihost['host_action'] or "" - if ((ihost_task_string.startswith(k_host.ACTION_LOCK) or - ihost_task_string.startswith(k_host.ACTION_FORCE_LOCK)) and - ihost['administrative'] != k_host.ADMIN_LOCKED): - # passed - skip reset for force-lock - # iHost['host_action'] = k_host.ACTION_LOCK - hostupdate.notify_availability = k_host.VIM_SERVICES_DISABLED - hostupdate.notify_action_lock = True - hostupdate.notify_mtce = True - else: - # return False rather than failing request. 
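For context, `_handle_vim_services_disabled` above only proceeds while a lock or force-lock is in flight on a host that is not yet administratively locked; any other state falls through to the warning below and returns False. A compact sketch of that gate, assuming the usual StarlingX constant values ('lock', 'force-lock', 'locked'):

```python
# Gate used by the services-disabled handler: notify maintenance only
# when a lock/force-lock is in progress and the host is not locked yet.
ACTION_LOCK, ACTION_FORCE_LOCK, ADMIN_LOCKED = "lock", "force-lock", "locked"

def should_notify(host_action, administrative):
    locking = (host_action or "").startswith((ACTION_LOCK, ACTION_FORCE_LOCK))
    return locking and administrative != ADMIN_LOCKED

assert should_notify("force-lock", "unlocked") is True
assert should_notify("", "unlocked") is False      # no lock in flight
assert should_notify("lock", "locked") is False    # already locked
```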
- LOG.warn(_("{} Admin action task not Locking or Force Locking " - "upon receipt of 'services-disabled'.").format( - hostupdate.displayid)) - hostupdate.skip_notify_mtce = True - return False - - return True - - @staticmethod - def _handle_vim_services_disable_extend(hostupdate): - """Handle VIM services-disable-extend signal.""" - host_action = hostupdate.ihost_orig['host_action'] or "" - result_reason = hostupdate.ihost_patch.get('vim_progress_status') or "" - LOG.info("%s handle_vim_services_disable_extend " - "host_action=%s reason=%s" % - (hostupdate.displayid, host_action, result_reason)) - - hostupdate.skip_notify_mtce = True - if host_action.startswith(k_host.ACTION_LOCK): - val = {'task': k_host.LOCKING + '-', - 'host_action': k_host.ACTION_LOCK} - hostupdate.ihost_val_prenotify_update(val) - else: - LOG.warn("%s Skip vim services disable extend ihost action=%s" % - (hostupdate.displayid, host_action)) - return False - - LOG.info("services-disable-extend reason=%s" % result_reason) - return True - - @staticmethod - def _handle_vim_services_disable_failed(hostupdate): - """Handle VIM services-disable-failed signal.""" - ihost_task_string = hostupdate.ihost_orig['host_action'] or "" - LOG.info("%s handle_vim_services_disable_failed host_action=%s" % - (hostupdate.displayid, ihost_task_string)) - - result_reason = hostupdate.ihost_patch.get('vim_progress_status') or "" - - if ihost_task_string.startswith(k_host.ACTION_LOCK): - hostupdate.skip_notify_mtce = True - val = {'host_action': '', - 'task': '', - 'vim_progress_status': result_reason} - hostupdate.ihost_val_prenotify_update(val) - hostupdate.ihost_val.update(val) - hostupdate.skip_notify_mtce = True - elif ihost_task_string.startswith(k_host.ACTION_FORCE_LOCK): - # allow mtce to reset the host - hostupdate.notify_mtce = True - hostupdate.notify_action_lock_force = True - else: - hostupdate.skip_notify_mtce = True - LOG.warn("%s Skipping vim services disable notification task=%s" % - (hostupdate.displayid, ihost_task_string)) - return False - - if result_reason: - LOG.info("services-disable-failed reason=%s" % result_reason) - hostupdate.ihost_val_update({'vim_progress_status': - result_reason}) - else: - hostupdate.ihost_val_update({'vim_progress_status': - k_host.VIM_SERVICES_DISABLE_FAILED}) - - return True - - @staticmethod - def _handle_vim_services_delete_failed(hostupdate): - """Handle VIM services-delete-failed signal.""" - - ihost_admin = hostupdate.ihost_orig['administrative'] or "" - result_reason = hostupdate.ihost_patch.get('vim_progress_status') or "" - LOG.info("%s handle_vim_services_delete_failed admin=%s reason=%s" % - (hostupdate.displayid, ihost_admin, result_reason)) - - hostupdate.skip_notify_mtce = True - if ihost_admin.startswith(k_host.ADMIN_LOCKED): - val = {'host_action': '', - 'task': '', - 'vim_progress_status': result_reason} - hostupdate.ihost_val_prenotify_update(val) - # hostupdate.ihost_val.update(val) - else: - LOG.warn("%s Skip vim services delete failed notify admin=%s" % - (hostupdate.displayid, ihost_admin)) - return False - - if result_reason: - hostupdate.ihost_val_prenotify_update({'vim_progress_status': - result_reason}) - else: - hostupdate.ihost_val_prenotify_update( - {'vim_progress_status': k_host.VIM_SERVICES_DELETE_FAILED}) - - LOG.info("services-disable-failed reason=%s" % result_reason) - return True - - @staticmethod - def _stage_reset(hostupdate): - """Handle host-reset action.""" - LOG.info("%s _stage_reset" % hostupdate.displayid) - hostupdate.notify_mtce = True - - def 
_handle_unlock_action(self, hostupdate): - """Handle host-unlock action.""" - LOG.info("%s _handle_unlock_action" % hostupdate.displayid) - if hostupdate.ihost_patch.get('personality') == k_host.STORAGE: - self._handle_unlock_storage_host(hostupdate) - hostupdate.notify_vim_action = False - hostupdate.notify_mtce = True - val = {'host_action': k_host.ACTION_UNLOCK} - hostupdate.ihost_val_prenotify_update(val) - hostupdate.ihost_val.update(val) - - def _handle_unlock_storage_host(self, hostupdate): - self._ceph.update_crushmap(hostupdate) - - @staticmethod - def _handle_lock_action(hostupdate): - """Handle host-lock action.""" - LOG.info("%s _handle_lock_action" % hostupdate.displayid) - - hostupdate.notify_vim_action = True - hostupdate.skip_notify_mtce = True - val = {'host_action': k_host.ACTION_LOCK} - hostupdate.ihost_val_prenotify_update(val) - hostupdate.ihost_val.update(val) - - @staticmethod - def _handle_force_lock_action(hostupdate): - """Handle host-force-lock action.""" - LOG.info("%s _handle_force_lock_action" % hostupdate.displayid) - - hostupdate.notify_vim_action = True - hostupdate.skip_notify_mtce = True - val = {'host_action': k_host.ACTION_FORCE_LOCK} - hostupdate.ihost_val_prenotify_update(val) - hostupdate.ihost_val.update(val) - - -def _create_node(host, xml_node, personality, is_dynamic_ip): - host_node = et.SubElement(xml_node, 'host') - et.SubElement(host_node, 'personality').text = personality - if personality == k_host.COMPUTE: - et.SubElement(host_node, 'hostname').text = host.hostname - et.SubElement(host_node, 'subfunctions').text = host.subfunctions - - et.SubElement(host_node, 'mgmt_mac').text = host.mgmt_mac - if not is_dynamic_ip: - et.SubElement(host_node, 'mgmt_ip').text = host.mgmt_ip - if host.location is not None and 'locn' in host.location: - et.SubElement(host_node, 'location').text = host.location['locn'] - - pw_on_instruction = _('Uncomment the statement below to power on the host ' - 'automatically through board management.') - host_node.append(et.Comment(pw_on_instruction)) - host_node.append(et.Comment('')) - et.SubElement(host_node, 'bm_type').text = host.bm_type - et.SubElement(host_node, 'bm_username').text = host.bm_username - et.SubElement(host_node, 'bm_password').text = '' - - et.SubElement(host_node, 'boot_device').text = host.boot_device - et.SubElement(host_node, 'rootfs_device').text = host.rootfs_device - et.SubElement(host_node, 'install_output').text = host.install_output - et.SubElement(host_node, 'console').text = host.console - et.SubElement(host_node, 'tboot').text = host.tboot diff --git a/inventory/inventory/inventory/api/controllers/v1/link.py b/inventory/inventory/inventory/api/controllers/v1/link.py deleted file mode 100644 index b4038476..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/link.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
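For reference, the `build_url` helper in the link.py module removed below composes versioned resource URLs, with bookmark links dropping the `/v1` segment. It can be exercised standalone by substituting a fixed base URL for `pecan.request.public_url`:

```python
# Standalone rendition of build_url (deleted below); base_url is
# hardcoded here purely for illustration.
def build_url(resource, resource_args, bookmark=False,
              base_url="http://localhost:18002"):
    template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
    # query-style args attach directly; path-style args get a '/'
    template += '%(args)s' if resource_args.startswith('?') else '/%(args)s'
    return template % {'url': base_url, 'res': resource, 'args': resource_args}

print(build_url('lldp_agents', 'abc-123'))
# http://localhost:18002/v1/lldp_agents/abc-123
print(build_url('lldp_agents', '?limit=2', bookmark=True))
# http://localhost:18002/lldp_agents?limit=2
```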
- -import pecan -from wsme import types as wtypes - -from inventory.api.controllers.v1 import base - - -def build_url(resource, resource_args, bookmark=False, base_url=None): - if base_url is None: - base_url = pecan.request.public_url - - template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' - # FIXME(lucasagomes): I'm getting a 404 when doing a GET on - # a nested resource that the URL ends with a '/'. - # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs - template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' - return template % {'url': base_url, 'res': resource, 'args': resource_args} - - -class Link(base.APIBase): - """A link representation.""" - - href = wtypes.text - """The url of a link.""" - - rel = wtypes.text - """The name of a link.""" - - type = wtypes.text - """Indicates the type of document/link.""" - - @staticmethod - def make_link(rel_name, url, resource, resource_args, - bookmark=False, type=wtypes.Unset): - href = build_url(resource, resource_args, - bookmark=bookmark, base_url=url) - return Link(href=href, rel=rel_name, type=type) - - @classmethod - def sample(cls): - sample = cls(href="http://localhost:18002" - "eeaca217-e7d8-47b4-bb41-3f99f20ead81", - rel="bookmark") - return sample diff --git a/inventory/inventory/inventory/api/controllers/v1/lldp_agent.py b/inventory/inventory/inventory/api/controllers/v1/lldp_agent.py deleted file mode 100644 index ea3e81ec..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/lldp_agent.py +++ /dev/null @@ -1,366 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2016 Wind River Systems, Inc. -# - - -import jsonpatch - -import pecan -from pecan import rest - -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import lldp_tlv -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import k_lldp -from inventory.common import utils as cutils -from inventory import objects - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class LLDPAgentPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class LLDPAgent(base.APIBase): - """API representation of an LLDP Agent - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - LLDP agent. 
- """ - - uuid = types.uuid - "Unique UUID for this port" - - status = wtypes.text - "Represent the status of the lldp agent" - - host_id = int - "Represent the host_id the lldp agent belongs to" - - port_id = int - "Represent the port_id the lldp agent belongs to" - - host_uuid = types.uuid - "Represent the UUID of the host the lldp agent belongs to" - - port_uuid = types.uuid - "Represent the UUID of the port the lldp agent belongs to" - - port_name = wtypes.text - "Represent the name of the port the lldp neighbour belongs to" - - port_namedisplay = wtypes.text - "Represent the display name of the port. Unique per host" - - links = [link.Link] - "Represent a list containing a self link and associated lldp agent links" - - tlvs = [link.Link] - "Links to the collection of LldpNeighbours on this ihost" - - chassis_id = wtypes.text - "Represent the status of the lldp agent" - - port_identifier = wtypes.text - "Represent the LLDP port id of the lldp agent" - - port_description = wtypes.text - "Represent the port description of the lldp agent" - - system_description = wtypes.text - "Represent the status of the lldp agent" - - system_name = wtypes.text - "Represent the status of the lldp agent" - - system_capabilities = wtypes.text - "Represent the status of the lldp agent" - - management_address = wtypes.text - "Represent the status of the lldp agent" - - ttl = wtypes.text - "Represent the time-to-live of the lldp agent" - - dot1_lag = wtypes.text - "Represent the 802.1 link aggregation status of the lldp agent" - - dot1_vlan_names = wtypes.text - "Represent the 802.1 vlan names of the lldp agent" - - dot3_mac_status = wtypes.text - "Represent the 802.3 MAC/PHY status of the lldp agent" - - dot3_max_frame = wtypes.text - "Represent the 802.3 maximum frame size of the lldp agent" - - def __init__(self, **kwargs): - self.fields = objects.LLDPAgent.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_lldp_agent, expand=True): - lldp_agent = LLDPAgent(**rpc_lldp_agent.as_dict()) - if not expand: - lldp_agent.unset_fields_except([ - 'uuid', 'host_id', 'port_id', 'status', 'host_uuid', - 'port_uuid', 'port_name', 'port_namedisplay', - 'created_at', 'updated_at', - k_lldp.LLDP_TLV_TYPE_CHASSIS_ID, - k_lldp.LLDP_TLV_TYPE_PORT_ID, - k_lldp.LLDP_TLV_TYPE_TTL, - k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME, - k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC, - k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP, - k_lldp.LLDP_TLV_TYPE_MGMT_ADDR, - k_lldp.LLDP_TLV_TYPE_PORT_DESC, - k_lldp.LLDP_TLV_TYPE_DOT1_LAG, - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES, - k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS, - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME]) - - # never expose the id attribute - lldp_agent.host_id = wtypes.Unset - lldp_agent.port_id = wtypes.Unset - - lldp_agent.links = [ - link.Link.make_link('self', pecan.request.host_url, - 'lldp_agents', lldp_agent.uuid), - link.Link.make_link('bookmark', pecan.request.host_url, - 'lldp_agents', lldp_agent.uuid, - bookmark=True)] - - if expand: - lldp_agent.tlvs = [ - link.Link.make_link('self', - pecan.request.host_url, - 'lldp_agents', - lldp_agent.uuid + "/tlvs"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'lldp_agents', - lldp_agent.uuid + "/tlvs", - bookmark=True)] - - return lldp_agent - - -class LLDPAgentCollection(collection.Collection): - """API representation of a collection of LldpAgent objects.""" - - lldp_agents = [LLDPAgent] - "A list containing LldpAgent objects" - - def __init__(self, **kwargs): - self._type = 'lldp_agents' 
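The collection classes below page results with a `limit` plus an opaque `marker` (the uuid of the last item of the previous page), as seen in `_get_lldp_agents_collection`, and `collection.get_next` then emits the link for the following page. A simplified in-memory stand-in for that contract (not the real dbapi semantics):

```python
# Marker/limit paging: fetch items after `marker`, and report a next
# marker only when the page is full (more results may remain).
def get_page(items, limit, marker=None):
    start = 0
    if marker is not None:
        start = next(i for i, it in enumerate(items)
                     if it["uuid"] == marker) + 1
    page = items[start:start + limit]
    next_marker = page[-1]["uuid"] if len(page) == limit else None
    return page, next_marker

agents = [{"uuid": "u%d" % i} for i in range(5)]
page, marker = get_page(agents, 2)          # u0, u1 -> marker "u1"
page, marker = get_page(agents, 2, marker)  # u2, u3 -> marker "u3"
page, marker = get_page(agents, 2, marker)  # u4     -> marker None
```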
- - @classmethod - def convert_with_links(cls, rpc_lldp_agents, limit, url=None, - expand=False, **kwargs): - collection = LLDPAgentCollection() - collection.lldp_agents = [LLDPAgent.convert_with_links(a, expand) - for a in rpc_lldp_agents] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'LLDPAgentController' - - -class LLDPAgentController(rest.RestController): - """REST controller for LldpAgents.""" - - tlvs = lldp_tlv.LLDPTLVController( - from_lldp_agents=True) - "Expose tlvs as a sub-element of LldpAgents" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_ports=False): - self._from_hosts = from_hosts - self._from_ports = from_ports - - def _get_lldp_agents_collection(self, uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_("Host id not specified.")) - - if self._from_ports and not uuid: - raise exception.InvalidParameterValue(_("Port id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.LLDPAgent.get_by_uuid(pecan.request.context, - marker) - - if self._from_hosts: - agents = objects.LLDPAgent.get_by_host( - pecan.request.context, - uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) - - elif self._from_ports: - agents = [] - agent = objects.LLDPAgent.get_by_port(pecan.request.context, uuid) - agents.append(agent) - else: - agents = objects.LLDPAgent.list( - pecan.request.context, - limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) - - return LLDPAgentCollection.convert_with_links(agents, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(LLDPAgentCollection, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of lldp agents.""" - return self._get_lldp_agents_collection(uuid, marker, limit, sort_key, - sort_dir) - - @wsme_pecan.wsexpose(LLDPAgentCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of lldp_agents with detail.""" - - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "lldp_agents": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['lldp_agents', 'detail']) - return self._get_lldp_agents_collection(uuid, marker, limit, sort_key, - sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(LLDPAgent, types.uuid) - def get_one(self, port_uuid): - """Retrieve information about the given lldp agent.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_lldp_agent = objects.LLDPAgent.get_by_uuid( - pecan.request.context, port_uuid) - return LLDPAgent.convert_with_links(rpc_lldp_agent) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(LLDPAgent, body=LLDPAgent) - def post(self, agent): - """Create a new lldp agent.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - try: - host_uuid = agent.host_uuid - port_uuid = agent.port_uuid - new_agent = objects.LLDPAgent.create( - pecan.request.context, - port_uuid, - host_uuid, - agent.as_dict()) - except exception.InventoryException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Invalid data")) - return 
agent.convert_with_links(new_agent) - - @cutils.synchronized(LOCK_NAME) - @wsme.validate(types.uuid, [LLDPAgentPatchType]) - @wsme_pecan.wsexpose(LLDPAgent, types.uuid, - body=[LLDPAgentPatchType]) - def patch(self, uuid, patch): - """Update an existing lldp agent.""" - if self._from_hosts: - raise exception.OperationNotPermitted - if self._from_ports: - raise exception.OperationNotPermitted - - rpc_agent = objects.LLDPAgent.get_by_uuid( - pecan.request.context, uuid) - - # replace ihost_uuid and port_uuid with corresponding - patch_obj = jsonpatch.JsonPatch(patch) - for p in patch_obj: - if p['path'] == '/host_uuid': - p['path'] = '/host_id' - host = objects.Host.get_by_uuid(pecan.request.context, - p['value']) - p['value'] = host.id - - if p['path'] == '/port_uuid': - p['path'] = '/port_id' - try: - port = objects.Port.get_by_uuid( - pecan.request.context, p['value']) - p['value'] = port.id - except exception.InventoryException as e: - LOG.exception(e) - p['value'] = None - - try: - agent = LLDPAgent(**jsonpatch.apply_patch(rpc_agent.as_dict(), - patch_obj)) - - except utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.LLDPAgent.fields: - if rpc_agent[field] != getattr(agent, field): - rpc_agent[field] = getattr(agent, field) - - rpc_agent.save() - return LLDPAgent.convert_with_links(rpc_agent) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, uuid): - """Delete an lldp agent.""" - if self._from_hosts: - raise exception.OperationNotPermitted - if self._from_ports: - raise exception.OperationNotPermitted - - pecan.request.dbapi.lldp_agent_destroy(uuid) diff --git a/inventory/inventory/inventory/api/controllers/v1/lldp_neighbour.py b/inventory/inventory/inventory/api/controllers/v1/lldp_neighbour.py deleted file mode 100644 index 68c11414..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/lldp_neighbour.py +++ /dev/null @@ -1,390 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2016 Wind River Systems, Inc. 
-# - - -import jsonpatch - -import pecan -from pecan import rest - -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import lldp_tlv -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import k_lldp -from inventory.common import utils as cutils -from inventory import objects -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class LLDPNeighbourPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class LLDPNeighbour(base.APIBase): - """API representation of an LLDP Neighbour - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - LLDP neighbour. - """ - - uuid = types.uuid - "Unique UUID for this port" - - msap = wtypes.text - "Represent the MAC service access point of the lldp neighbour" - - host_id = int - "Represent the host_id the lldp neighbour belongs to" - - port_id = int - "Represent the port_id the lldp neighbour belongs to" - - host_uuid = types.uuid - "Represent the UUID of the host the lldp neighbour belongs to" - - port_uuid = types.uuid - "Represent the UUID of the port the lldp neighbour belongs to" - - port_name = wtypes.text - "Represent the name of the port the lldp neighbour belongs to" - - port_namedisplay = wtypes.text - "Represent the display name of the port. Unique per host" - - links = [link.Link] - "Represent a list containing a self link and associated lldp neighbour" - "links" - - tlvs = [link.Link] - "Links to the collection of LldpNeighbours on this ihost" - - chassis_id = wtypes.text - "Represent the status of the lldp neighbour" - - system_description = wtypes.text - "Represent the status of the lldp neighbour" - - system_name = wtypes.text - "Represent the status of the lldp neighbour" - - system_capabilities = wtypes.text - "Represent the status of the lldp neighbour" - - management_address = wtypes.text - "Represent the status of the lldp neighbour" - - port_identifier = wtypes.text - "Represent the port identifier of the lldp neighbour" - - port_description = wtypes.text - "Represent the port description of the lldp neighbour" - - dot1_lag = wtypes.text - "Represent the 802.1 link aggregation status of the lldp neighbour" - - dot1_port_vid = wtypes.text - "Represent the 802.1 port vlan id of the lldp neighbour" - - dot1_vid_digest = wtypes.text - "Represent the 802.1 vlan id digest of the lldp neighbour" - - dot1_management_vid = wtypes.text - "Represent the 802.1 management vlan id of the lldp neighbour" - - dot1_vlan_names = wtypes.text - "Represent the 802.1 vlan names of the lldp neighbour" - - dot1_proto_vids = wtypes.text - "Represent the 802.1 protocol vlan ids of the lldp neighbour" - - dot1_proto_ids = wtypes.text - "Represent the 802.1 protocol ids of the lldp neighbour" - - dot3_mac_status = wtypes.text - "Represent the 802.3 MAC/PHY status of the lldp neighbour" - - dot3_max_frame = wtypes.text - "Represent the 802.3 maximum frame size of the lldp neighbour" - - dot3_power_mdi = wtypes.text - "Represent the 802.3 power mdi status of the lldp neighbour" - - ttl = wtypes.text - "Represent the neighbour time-to-live" - - def __init__(self, **kwargs): - 
self.fields = objects.LLDPNeighbour.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_lldp_neighbour, expand=True): - lldp_neighbour = LLDPNeighbour(**rpc_lldp_neighbour.as_dict()) - - if not expand: - lldp_neighbour.unset_fields_except([ - 'uuid', 'host_id', 'port_id', 'msap', 'host_uuid', 'port_uuid', - 'port_name', 'port_namedisplay', 'created_at', 'updated_at', - k_lldp.LLDP_TLV_TYPE_CHASSIS_ID, - k_lldp.LLDP_TLV_TYPE_PORT_ID, - k_lldp.LLDP_TLV_TYPE_TTL, - k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME, - k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC, - k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP, - k_lldp.LLDP_TLV_TYPE_MGMT_ADDR, - k_lldp.LLDP_TLV_TYPE_PORT_DESC, - k_lldp.LLDP_TLV_TYPE_DOT1_LAG, - k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID, - k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST, - k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID, - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS, - k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS, - k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES, - k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST, - k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS, - k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME, - k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI]) - - # never expose the id attribute - lldp_neighbour.host_id = wtypes.Unset - lldp_neighbour.port_id = wtypes.Unset - - lldp_neighbour.links = [ - link.Link.make_link('self', pecan.request.host_url, - 'lldp_neighbours', lldp_neighbour.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'lldp_neighbours', lldp_neighbour.uuid, - bookmark=True)] - - if expand: - lldp_neighbour.tlvs = [ - link.Link.make_link('self', - pecan.request.host_url, - 'lldp_neighbours', - lldp_neighbour.uuid + "/tlvs"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'lldp_neighbours', - lldp_neighbour.uuid + "/tlvs", - bookmark=True)] - - return lldp_neighbour - - -class LLDPNeighbourCollection(collection.Collection): - """API representation of a collection of LldpNeighbour objects.""" - - lldp_neighbours = [LLDPNeighbour] - "A list containing LldpNeighbour objects" - - def __init__(self, **kwargs): - self._type = 'lldp_neighbours' - - @classmethod - def convert_with_links(cls, rpc_lldp_neighbours, limit, url=None, - expand=False, **kwargs): - collection = LLDPNeighbourCollection() - - collection.lldp_neighbours = [LLDPNeighbour.convert_with_links(a, - expand) - for a in rpc_lldp_neighbours] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'LLDPNeighbourController' - - -class LLDPNeighbourController(rest.RestController): - """REST controller for LldpNeighbours.""" - - tlvs = lldp_tlv.LLDPTLVController( - from_lldp_neighbours=True) - "Expose tlvs as a sub-element of LldpNeighbours" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_ports=False): - self._from_hosts = from_hosts - self._from_ports = from_ports - - def _get_lldp_neighbours_collection(self, uuid, marker, limit, sort_key, - sort_dir, expand=False, - resource_url=None): - - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_("Host id not specified.")) - - if self._from_ports and not uuid: - raise exception.InvalidParameterValue(_("Port id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.LLDPNeighbour.get_by_uuid( - pecan.request.context, marker) - - if self._from_hosts: - neighbours = pecan.request.dbapi.lldp_neighbour_get_by_host( - uuid, limit, marker_obj, sort_key=sort_key, 
sort_dir=sort_dir) - - elif self._from_ports: - neighbours = pecan.request.dbapi.lldp_neighbour_get_by_port( - uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) - else: - neighbours = pecan.request.dbapi.lldp_neighbour_get_list( - limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) - - return LLDPNeighbourCollection.convert_with_links(neighbours, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(LLDPNeighbourCollection, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of lldp neighbours.""" - - return self._get_lldp_neighbours_collection(uuid, marker, limit, - sort_key, sort_dir) - - @wsme_pecan.wsexpose(LLDPNeighbourCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of lldp_neighbours with detail.""" - - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "lldp_neighbours": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['lldp_neighbours', 'detail']) - return self._get_lldp_neighbours_collection(uuid, marker, limit, - sort_key, sort_dir, expand, - resource_url) - - @wsme_pecan.wsexpose(LLDPNeighbour, types.uuid) - def get_one(self, port_uuid): - """Retrieve information about the given lldp neighbour.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_lldp_neighbour = objects.LLDPNeighbour.get_by_uuid( - pecan.request.context, port_uuid) - return LLDPNeighbour.convert_with_links(rpc_lldp_neighbour) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(LLDPNeighbour, body=LLDPNeighbour) - def post(self, neighbour): - """Create a new lldp neighbour.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - try: - host_uuid = neighbour.host_uuid - port_uuid = neighbour.port_uuid - new_neighbour = pecan.request.dbapi.lldp_neighbour_create( - port_uuid, host_uuid, neighbour.as_dict()) - except exception.InventoryException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Invalid data")) - return neighbour.convert_with_links(new_neighbour) - - @cutils.synchronized(LOCK_NAME) - @wsme.validate(types.uuid, [LLDPNeighbourPatchType]) - @wsme_pecan.wsexpose(LLDPNeighbour, types.uuid, - body=[LLDPNeighbourPatchType]) - def patch(self, uuid, patch): - """Update an existing lldp neighbour.""" - if self._from_hosts: - raise exception.OperationNotPermitted - if self._from_ports: - raise exception.OperationNotPermitted - - rpc_neighbour = objects.LLDPNeighbour.get_by_uuid( - pecan.request.context, uuid) - - # replace host_uuid and port_uuid with corresponding - patch_obj = jsonpatch.JsonPatch(patch) - for p in patch_obj: - if p['path'] == '/host_uuid': - p['path'] = '/host_id' - host = objects.Host.get_by_uuid(pecan.request.context, - p['value']) - p['value'] = host.id - - if p['path'] == '/port_uuid': - p['path'] = '/port_id' - try: - port = objects.Port.get_by_uuid( - pecan.request.context, p['value']) - p['value'] = port.id - except exception.InventoryException as e: - LOG.exception(e) - p['value'] = None - - try: - neighbour = LLDPNeighbour( - **jsonpatch.apply_patch(rpc_neighbour.as_dict(), patch_obj)) - - except utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.LLDPNeighbour.fields: - if 
rpc_neighbour[field] != getattr(neighbour, field): - rpc_neighbour[field] = getattr(neighbour, field) - - rpc_neighbour.save() - return LLDPNeighbour.convert_with_links(rpc_neighbour) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, uuid): - """Delete an lldp neighbour.""" - if self._from_hosts: - raise exception.OperationNotPermitted - if self._from_ports: - raise exception.OperationNotPermitted - - pecan.request.dbapi.lldp_neighbour_destroy(uuid) diff --git a/inventory/inventory/inventory/api/controllers/v1/lldp_tlv.py b/inventory/inventory/inventory/api/controllers/v1/lldp_tlv.py deleted file mode 100644 index eb7bd034..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/lldp_tlv.py +++ /dev/null @@ -1,297 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import jsonpatch - -import pecan -from pecan import rest - -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import utils as cutils -from inventory import objects - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class LLDPTLVPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class LLDPTLV(base.APIBase): - """API representation of an LldpTlv - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - LLDP tlv. 
- """ - - type = wtypes.text - "Represent the type of the lldp tlv" - - value = wtypes.text - "Represent the value of the lldp tlv" - - agent_id = int - "Represent the agent_id the lldp tlv belongs to" - - neighbour_id = int - "Represent the neighbour the lldp tlv belongs to" - - agent_uuid = types.uuid - "Represent the UUID of the agent the lldp tlv belongs to" - - neighbour_uuid = types.uuid - "Represent the UUID of the neighbour the lldp tlv belongs to" - - links = [link.Link] - "Represent a list containing a self link and associated lldp tlv links" - - def __init__(self, **kwargs): - self.fields = objects.LLDPTLV.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_lldp_tlv, expand=True): - lldp_tlv = LLDPTLV(**rpc_lldp_tlv.as_dict()) - if not expand: - lldp_tlv.unset_fields_except(['type', 'value']) - - # never expose the id attribute - lldp_tlv.agent_id = wtypes.Unset - lldp_tlv.neighbour_id = wtypes.Unset - - lldp_tlv.links = [link.Link.make_link('self', pecan.request.host_url, - 'lldp_tlvs', lldp_tlv.type), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'lldp_tlvs', lldp_tlv.type, - bookmark=True)] - return lldp_tlv - - -class LLDPTLVCollection(collection.Collection): - """API representation of a collection of LldpTlv objects.""" - - lldp_tlvs = [LLDPTLV] - "A list containing LldpTlv objects" - - def __init__(self, **kwargs): - self._type = 'lldp_tlvs' - - @classmethod - def convert_with_links(cls, rpc_lldp_tlvs, limit, url=None, - expand=False, **kwargs): - collection = LLDPTLVCollection() - collection.lldp_tlvs = [LLDPTLV.convert_with_links(a, expand) - for a in rpc_lldp_tlvs] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'LLDPTLVController' - - -class LLDPTLVController(rest.RestController): - """REST controller for LldpTlvs.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_lldp_agents=False, from_lldp_neighbours=False): - self._from_lldp_agents = from_lldp_agents - self._from_lldp_neighbours = from_lldp_neighbours - - def _get_lldp_tlvs_collection(self, uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_lldp_agents and not uuid: - raise exception.InvalidParameterValue( - _("LLDP agent id not specified.")) - - if self._from_lldp_neighbours and not uuid: - raise exception.InvalidParameterValue( - _("LLDP neighbour id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.LLDPTLV.get_by_id(pecan.request.context, - marker) - - if self._from_lldp_agents: - tlvs = objects.LLDPTLV.get_by_agent(pecan.request.context, - uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - elif self._from_lldp_neighbours: - tlvs = objects.LLDPTLV.get_by_neighbour( - pecan.request.context, - uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) - else: - tlvs = objects.LLDPTLV.list( - pecan.request.context, - limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) - - return LLDPTLVCollection.convert_with_links(tlvs, - limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(LLDPTLVCollection, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of lldp tlvs.""" - return 
self._get_lldp_tlvs_collection(uuid, marker, limit, sort_key,
-                                              sort_dir)
-
-    @wsme_pecan.wsexpose(LLDPTLVCollection, types.uuid, types.uuid, int,
-                         wtypes.text, wtypes.text)
-    def detail(self, uuid=None, marker=None, limit=None,
-               sort_key='id', sort_dir='asc'):
-        """Retrieve a list of lldp_tlvs with detail."""
-
-        parent = pecan.request.path.split('/')[:-1][-1]
-        if parent != "lldp_tlvs":
-            raise exception.HTTPNotFound
-
-        expand = True
-        resource_url = '/'.join(['lldp_tlvs', 'detail'])
-        return self._get_lldp_tlvs_collection(uuid, marker, limit, sort_key,
-                                              sort_dir, expand, resource_url)
-
-    @wsme_pecan.wsexpose(LLDPTLV, int)
-    def get_one(self, id):
-        """Retrieve information about the given lldp tlv."""
-        # This controller has no _from_hosts attribute; guard on the lldp
-        # flags set in __init__, as the other handlers in this class do.
-        if self._from_lldp_agents or self._from_lldp_neighbours:
-            raise exception.OperationNotPermitted
-
-        rpc_lldp_tlv = objects.LLDPTLV.get_by_id(
-            pecan.request.context, id)
-        return LLDPTLV.convert_with_links(rpc_lldp_tlv)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(LLDPTLV, body=LLDPTLV)
-    def post(self, tlv):
-        """Create a new lldp tlv."""
-        if self._from_lldp_agents:
-            raise exception.OperationNotPermitted
-
-        if self._from_lldp_neighbours:
-            raise exception.OperationNotPermitted
-
-        try:
-            agent_uuid = tlv.agent_uuid
-            neighbour_uuid = tlv.neighbour_uuid
-            new_tlv = pecan.request.dbapi.lldp_tlv_create(tlv.as_dict(),
-                                                          agent_uuid,
-                                                          neighbour_uuid)
-        except exception.InventoryException as e:
-            LOG.exception(e)
-            raise wsme.exc.ClientSideError(_("Invalid data"))
-        return LLDPTLV.convert_with_links(new_tlv)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme.validate(types.uuid, [LLDPTLVPatchType])
-    @wsme_pecan.wsexpose(LLDPTLV, int,
-                         body=[LLDPTLVPatchType])
-    def patch(self, id, patch):
-        """Update an existing lldp tlv."""
-        if self._from_lldp_agents:
-            raise exception.OperationNotPermitted
-        if self._from_lldp_neighbours:
-            raise exception.OperationNotPermitted
-
-        rpc_tlv = objects.LLDPTLV.get_by_id(
-            pecan.request.context, id)
-
-        # replace agent_uuid and neighbour_uuid with the corresponding
-        # internal ids
-        patch_obj = jsonpatch.JsonPatch(patch)
-        for p in patch_obj:
-            if p['path'] == '/agent_uuid':
-                p['path'] = '/agent_id'
-                agent = objects.LLDPAgent.get_by_uuid(pecan.request.context,
-                                                      p['value'])
-                p['value'] = agent.id
-
-            if p['path'] == '/neighbour_uuid':
-                p['path'] = '/neighbour_id'
-                try:
-                    neighbour = objects.LLDPNeighbour.get_by_uuid(
-                        pecan.request.context, p['value'])
-                    p['value'] = neighbour.id
-                except exception.InventoryException as e:
-                    LOG.exception(e)
-                    p['value'] = None
-
-        try:
-            tlv = LLDPTLV(
-                **jsonpatch.apply_patch(rpc_tlv.as_dict(), patch_obj))
-
-        except utils.JSONPATCH_EXCEPTIONS as e:
-            raise exception.PatchError(patch=patch, reason=e)
-
-        # Update only the fields that have changed
-        for field in objects.LLDPTLV.fields:
-            if rpc_tlv[field] != getattr(tlv, field):
-                rpc_tlv[field] = getattr(tlv, field)
-
-        rpc_tlv.save()
-        return LLDPTLV.convert_with_links(rpc_tlv)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(None, int, status_code=204)
-    def delete(self, id):
-        """Delete an lldp tlv."""
-        if self._from_lldp_agents:
-            raise exception.OperationNotPermitted
-        if self._from_lldp_neighbours:
-            raise exception.OperationNotPermitted
-
-        tlv = objects.LLDPTLV.get_by_id(pecan.request.context, id)
-        tlv.destroy()
-        # pecan.request.dbapi.lldp_tlv_destroy(id)
diff --git a/inventory/inventory/inventory/api/controllers/v1/memory.py b/inventory/inventory/inventory/api/controllers/v1/memory.py
deleted file mode 100644
index 20ddb4b8..00000000
---
a/inventory/inventory/inventory/api/controllers/v1/memory.py +++ /dev/null @@ -1,729 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import jsonpatch -import six - -import pecan -from pecan import rest - -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import utils as cutils -from inventory import objects -from oslo_log import log - - -LOG = log.getLogger(__name__) - - -class MemoryPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class Memory(base.APIBase): - """API representation of host memory. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a memory. 
- """ - - _minimum_platform_reserved_mib = None - - def _get_minimum_platform_reserved_mib(self): - return self._minimum_platform_reserved_mib - - def _set_minimum_platform_reserved_mib(self, value): - if self._minimum_platform_reserved_mib is None: - try: - ihost = objects.Host.get_by_uuid(pecan.request.context, value) - self._minimum_platform_reserved_mib = \ - cutils.get_minimum_platform_reserved_memory(ihost, - self.numa_node) - except exception.HostNotFound as e: - # Change error code because 404 (NotFound) is inappropriate - # response for a POST request to create - e.code = 400 # BadRequest - raise e - elif value == wtypes.Unset: - self._minimum_platform_reserved_mib = wtypes.Unset - - uuid = types.uuid - "Unique UUID for this memory" - - memtotal_mib = int - "Represent the imemory total in MiB" - - memavail_mib = int - "Represent the imemory available in MiB" - - platform_reserved_mib = int - "Represent the imemory platform reserved in MiB" - - hugepages_configured = wtypes.text - "Represent whether huge pages are configured" - - vswitch_hugepages_size_mib = int - "Represent the imemory vswitch huge pages size in MiB" - - vswitch_hugepages_reqd = int - "Represent the imemory vswitch required number of hugepages" - - vswitch_hugepages_nr = int - "Represent the imemory vswitch number of hugepages" - - vswitch_hugepages_avail = int - "Represent the imemory vswitch number of hugepages available" - - vm_hugepages_nr_2M_pending = int - "Represent the imemory vm number of hugepages pending (2M pages)" - - vm_hugepages_nr_2M = int - "Represent the imemory vm number of hugepages (2M pages)" - - vm_hugepages_avail_2M = int - "Represent the imemory vm number of hugepages available (2M pages)" - - vm_hugepages_nr_1G_pending = int - "Represent the imemory vm number of hugepages pending (1G pages)" - - vm_hugepages_nr_1G = int - "Represent the imemory vm number of hugepages (1G pages)" - - vm_hugepages_nr_4K = int - "Represent the imemory vm number of hugepages (4K pages)" - - vm_hugepages_use_1G = wtypes.text - "1G hugepage is supported 'True' or not 'False' " - - vm_hugepages_avail_1G = int - "Represent the imemory vm number of hugepages available (1G pages)" - - vm_hugepages_possible_2M = int - "Represent the total possible number of vm hugepages available (2M pages)" - - vm_hugepages_possible_1G = int - "Represent the total possible number of vm hugepages available (1G pages)" - - minimum_platform_reserved_mib = wsme.wsproperty( - int, - _get_minimum_platform_reserved_mib, - _set_minimum_platform_reserved_mib, - mandatory=True) - "Represent the default platform reserved memory in MiB. API only attribute" - - numa_node = int - "The numa node or zone the imemory. 
API only attribute" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "This memory's meta data" - - host_id = int - "The ihostid that this imemory belongs to" - - node_id = int - "The nodeId that this imemory belongs to" - - ihost_uuid = types.uuid - "The UUID of the ihost this memory belongs to" - - node_uuid = types.uuid - "The UUID of the node this memory belongs to" - - links = [link.Link] - "A list containing a self link and associated memory links" - - def __init__(self, **kwargs): - self.fields = objects.Memory.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - # API only attributes - self.fields.append('minimum_platform_reserved_mib') - setattr(self, 'minimum_platform_reserved_mib', - kwargs.get('host_id', None)) - - @classmethod - def convert_with_links(cls, rpc_mem, expand=True): - # fields = ['uuid', 'address'] if not expand else None - # memory = imemory.from_rpc_object(rpc_mem, fields) - - memory = Memory(**rpc_mem.as_dict()) - if not expand: - memory.unset_fields_except( - ['uuid', 'memtotal_mib', 'memavail_mib', - 'platform_reserved_mib', 'hugepages_configured', - 'vswitch_hugepages_size_mib', 'vswitch_hugepages_nr', - 'vswitch_hugepages_reqd', - 'vswitch_hugepages_avail', - 'vm_hugepages_nr_2M', - 'vm_hugepages_nr_1G', 'vm_hugepages_use_1G', - 'vm_hugepages_nr_2M_pending', - 'vm_hugepages_avail_2M', - 'vm_hugepages_nr_1G_pending', - 'vm_hugepages_avail_1G', - 'vm_hugepages_nr_4K', - 'vm_hugepages_possible_2M', 'vm_hugepages_possible_1G', - 'numa_node', 'ihost_uuid', 'node_uuid', - 'host_id', 'node_id', - 'capabilities', - 'created_at', 'updated_at', - 'minimum_platform_reserved_mib']) - - # never expose the id attribute - memory.host_id = wtypes.Unset - memory.node_id = wtypes.Unset - - memory.links = [link.Link.make_link('self', pecan.request.host_url, - 'memorys', memory.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'memorys', memory.uuid, - bookmark=True) - ] - return memory - - -class MemoryCollection(collection.Collection): - """API representation of a collection of memorys.""" - - memorys = [Memory] - "A list containing memory objects" - - def __init__(self, **kwargs): - self._type = 'memorys' - - @classmethod - def convert_with_links(cls, memorys, limit, url=None, - expand=False, **kwargs): - collection = MemoryCollection() - collection.memorys = [ - Memory.convert_with_links(n, expand) for n in memorys] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'MemoryController' - - -class MemoryController(rest.RestController): - """REST controller for memorys.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_node=False): - self._from_hosts = from_hosts - self._from_node = from_node - - def _get_memorys_collection(self, i_uuid, node_uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_hosts and not i_uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - if self._from_node and not i_uuid: - raise exception.InvalidParameterValue(_( - "Node id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Memory.get_by_uuid(pecan.request.context, - marker) - - if self._from_hosts: - # memorys = pecan.request.dbapi.imemory_get_by_ihost( - memorys = objects.Memory.get_by_host( - pecan.request.context, - i_uuid, limit, - 
marker_obj,
-                sort_key=sort_key,
-                sort_dir=sort_dir)
-
-        elif self._from_node:
-            # memorys = pecan.request.dbapi.imemory_get_by_node(
-            memorys = objects.Memory.get_by_node(
-                pecan.request.context,
-                i_uuid, limit,
-                marker_obj,
-                sort_key=sort_key,
-                sort_dir=sort_dir)
-        else:
-            if i_uuid and not node_uuid:
-                # memorys = pecan.request.dbapi.imemory_get_by_ihost(
-                memorys = objects.Memory.get_by_host(
-                    pecan.request.context,
-                    i_uuid, limit,
-                    marker_obj,
-                    sort_key=sort_key,
-                    sort_dir=sort_dir)
-            elif i_uuid and node_uuid:  # Need ihost_uuid ?
-                # memorys = pecan.request.dbapi.imemory_get_by_ihost_node(
-                memorys = objects.Memory.get_by_host_node(
-                    pecan.request.context,
-                    i_uuid,
-                    node_uuid,
-                    limit,
-                    marker_obj,
-                    sort_key=sort_key,
-                    sort_dir=sort_dir)
-            elif node_uuid:
-                # memorys = pecan.request.dbapi.imemory_get_by_ihost_node(
-                memorys = objects.Memory.get_by_node(
-                    pecan.request.context,
-                    node_uuid,
-                    limit,
-                    marker_obj,
-                    sort_key=sort_key,
-                    sort_dir=sort_dir)
-            else:
-                # memorys = pecan.request.dbapi.imemory_get_list(
-                memorys = objects.Memory.list(
-                    pecan.request.context,
-                    limit,
-                    marker_obj,
-                    sort_key=sort_key,
-                    sort_dir=sort_dir)
-
-        return MemoryCollection.convert_with_links(memorys, limit,
-                                                   url=resource_url,
-                                                   expand=expand,
-                                                   sort_key=sort_key,
-                                                   sort_dir=sort_dir)
-
-    @wsme_pecan.wsexpose(MemoryCollection, types.uuid, types.uuid,
-                         types.uuid, int, wtypes.text, wtypes.text)
-    def get_all(self, ihost_uuid=None, node_uuid=None,
-                marker=None, limit=None, sort_key='id', sort_dir='asc'):
-        """Retrieve a list of memorys."""
-
-        return self._get_memorys_collection(
-            ihost_uuid, node_uuid, marker, limit, sort_key, sort_dir)
-
-    @wsme_pecan.wsexpose(MemoryCollection, types.uuid, types.uuid, int,
-                         wtypes.text, wtypes.text)
-    def detail(self, ihost_uuid=None, marker=None, limit=None,
-               sort_key='id', sort_dir='asc'):
-        """Retrieve a list of memorys with detail."""
-        # NOTE(lucasagomes): /detail should only work against collections
-        parent = pecan.request.path.split('/')[:-1][-1]
-        if parent != "memorys":
-            raise exception.HTTPNotFound
-
-        expand = True
-        resource_url = '/'.join(['memorys', 'detail'])
-        # Pass None for node_uuid so the remaining arguments line up with
-        # the _get_memorys_collection() signature.
-        return self._get_memorys_collection(ihost_uuid, None, marker, limit,
-                                            sort_key, sort_dir,
-                                            expand, resource_url)
-
-    @wsme_pecan.wsexpose(Memory, types.uuid)
-    def get_one(self, memory_uuid):
-        """Retrieve information about the given memory."""
-        if self._from_hosts:
-            raise exception.OperationNotPermitted
-
-        rpc_mem = objects.Memory.get_by_uuid(pecan.request.context,
-                                             memory_uuid)
-        return Memory.convert_with_links(rpc_mem)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(Memory, body=Memory)
-    def post(self, memory):
-        """Create a new memory."""
-        if self._from_hosts:
-            raise exception.OperationNotPermitted
-
-        try:
-            ihost_uuid = memory.ihost_uuid
-            new_memory = pecan.request.dbapi.imemory_create(ihost_uuid,
-                                                            memory.as_dict())
-
-        except exception.InventoryException as e:
-            LOG.exception(e)
-            raise wsme.exc.ClientSideError(_("Invalid data"))
-        return Memory.convert_with_links(new_memory)
-
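The patch handler that follows consumes what looks like an RFC 6902-style JSON patch list (MemoryPatchType extends types.JsonPatchType). As an illustrative sketch only, with field paths taken from the Memory attributes above and invented values, a client resize request body might look like:

# Hypothetical request body for PATCH /v1/memorys/<memory_uuid>;
# the paths mirror the Memory fields above, the values are made up.
patch_body = [
    {"op": "replace", "path": "/platform_reserved_mib", "value": 4000},
    {"op": "replace", "path": "/vm_hugepages_nr_2M_pending", "value": 1024},
]
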
-    @cutils.synchronized(LOCK_NAME)
-    @wsme.validate(types.uuid, [MemoryPatchType])
-    @wsme_pecan.wsexpose(Memory, types.uuid,
-                         body=[MemoryPatchType])
-    def patch(self, memory_uuid, patch):
-        """Update an existing memory."""
-        if self._from_hosts:
-            raise exception.OperationNotPermitted
-
-        rpc_mem = objects.Memory.get_by_uuid(
-            pecan.request.context, memory_uuid)
-
-        if 'host_id' in rpc_mem:
-            ihostId = rpc_mem['host_id']
-        else:
-            ihostId = rpc_mem['ihost_uuid']
-
-        # Note: despite the name, ihost_get() returns the host object.
-        host_id = pecan.request.dbapi.ihost_get(ihostId)
-
-        vm_hugepages_nr_2M_pending = None
-        vm_hugepages_nr_1G_pending = None
-        platform_reserved_mib = None
-        for p in patch:
-            if p['path'] == '/platform_reserved_mib':
-                platform_reserved_mib = p['value']
-            if p['path'] == '/vm_hugepages_nr_2M_pending':
-                vm_hugepages_nr_2M_pending = p['value']
-
-            if p['path'] == '/vm_hugepages_nr_1G_pending':
-                vm_hugepages_nr_1G_pending = p['value']
-
-        # The host must be locked
-        if host_id:
-            _check_host(host_id)
-        else:
-            raise wsme.exc.ClientSideError(_(
-                "Hostname or uuid must be defined"))
-
-        try:
-            # Semantics checks and update hugepage memory accounting
-            patch = _check_huge_values(
-                rpc_mem, patch,
-                vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)
-        except wsme.exc.ClientSideError as e:
-            node = pecan.request.dbapi.node_get(node_id=rpc_mem.node_id)
-            numa_node = node.numa_node
-            msg = _('Processor {0}:').format(numa_node) + e.message
-            raise wsme.exc.ClientSideError(msg)
-
-        # Semantics checks for platform memory
-        _check_memory(rpc_mem, host_id, platform_reserved_mib,
-                      vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)
-
-        # only allow patching allocated_function and capabilities
-        # replace ihost_uuid and node_uuid with the corresponding internal ids
-        patch_obj = jsonpatch.JsonPatch(patch)
-
-        for p in patch_obj:
-            if p['path'] == '/ihost_uuid':
-                p['path'] = '/host_id'
-                ihost = objects.Host.get_by_uuid(pecan.request.context,
-                                                 p['value'])
-                p['value'] = ihost.id
-
-            if p['path'] == '/node_uuid':
-                p['path'] = '/node_id'
-                try:
-                    node = objects.Node.get_by_uuid(
-                        pecan.request.context, p['value'])
-                    p['value'] = node.id
-                except exception.InventoryException:
-                    p['value'] = None
-
-        try:
-            memory = Memory(**jsonpatch.apply_patch(rpc_mem.as_dict(),
-                                                    patch_obj))
-
-        except utils.JSONPATCH_EXCEPTIONS as e:
-            raise exception.PatchError(patch=patch, reason=e)
-
-        # Update only the fields that have changed
-        for field in objects.Memory.fields:
-            if rpc_mem[field] != getattr(memory, field):
-                rpc_mem[field] = getattr(memory, field)
-
-        rpc_mem.save()
-        return Memory.convert_with_links(rpc_mem)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
-    def delete(self, memory_uuid):
-        """Delete a memory."""
-        if self._from_hosts:
-            raise exception.OperationNotPermitted
-
-        pecan.request.dbapi.imemory_destroy(memory_uuid)
-
-##############
-# UTILS
-##############
-
-
-def _update(mem_uuid, mem_values):
-
-    rpc_mem = objects.Memory.get_by_uuid(pecan.request.context, mem_uuid)
-    if 'host_id' in rpc_mem:
-        ihostId = rpc_mem['host_id']
-    else:
-        ihostId = rpc_mem['ihost_uuid']
-
-    host_id = pecan.request.dbapi.ihost_get(ihostId)
-
-    # Default to None so the checks below never see an unbound name when a
-    # key is absent from mem_values.
-    platform_reserved_mib = mem_values.get('platform_reserved_mib')
-    vm_hugepages_nr_2M_pending = mem_values.get('vm_hugepages_nr_2M_pending')
-    vm_hugepages_nr_1G_pending = mem_values.get('vm_hugepages_nr_1G_pending')
-
-    # The host must be locked
-    if host_id:
-        _check_host(host_id)
-    else:
-        raise wsme.exc.ClientSideError(_(
-            "Hostname or uuid must be defined"))
-
-    # Semantics checks and update hugepage memory accounting
-    mem_values = _check_huge_values(
-        rpc_mem, mem_values,
-        vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)
-
-    # Semantics checks for platform memory
-    _check_memory(rpc_mem, host_id, platform_reserved_mib,
-                  vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending)
-
-    # update memory values
-    pecan.request.dbapi.imemory_update(mem_uuid, mem_values)
-
-
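A usage sketch for the _update() helper above; the UUID and values are placeholders, not real identifiers. The helper validates the host lock state and hugepage accounting before persisting:

# Illustrative only: request 512 pending 2M hugepages on one memory record.
_update('some-memory-uuid', {'vm_hugepages_nr_2M_pending': 512})
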
-def _check_host(ihost):
-    if utils.is_aio_simplex_host_unlocked(ihost):
-        raise wsme.exc.ClientSideError(_("Host must be locked."))
-    elif ihost['administrative'] != 'locked':
-        unlocked = False
-        current_ihosts = pecan.request.dbapi.ihost_get_list()
-        for h in current_ihosts:
-            if (h['administrative'] != 'locked' and
-                    h['hostname'] != ihost['hostname']):
-                unlocked = True
-        if unlocked:
-            raise wsme.exc.ClientSideError(_("Host must be locked."))
-
-
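The semantic check below totals committed hugepage memory before accepting a new platform reservation. A standalone sketch of the same arithmetic, with made-up page counts; the 2 MiB and 1000 MiB multipliers mirror the accounting in _check_memory() below:

def example_mem_alloc(nr_2m, nr_1g, vs_hp_size_mib, vs_hp_nr):
    """Illustrative mirror of the accounting in _check_memory()."""
    alloc = nr_2m * 2                    # each 2M page contributes 2 MiB
    alloc += nr_1g * 1000                # 1G pages are counted as 1000 MiB here
    alloc += vs_hp_size_mib * vs_hp_nr   # vswitch hugepages
    return alloc

# e.g. 512 x 2M pages + 4 x 1G pages + 2 x 1024 MiB vswitch pages
# -> 1024 + 4000 + 2048 = 7072 MiB committed
assert example_mem_alloc(512, 4, 1024, 2) == 7072
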
" - "total memory={} MiB, allocated={} MiB.").format( - (platform_reserved_mib, avail, - delta, delta / 2, delta / 1024, - node_memtotal_mib, mem_alloc))) - raise wsme.exc.ClientSideError(msg) - else: - msg = (_("Platform reserved memory {} MiB, {} MiB available, " - "total memory={} MiB, allocated={} MiB.").format( - platform_reserved_mib, avail, - node_memtotal_mib, mem_alloc)) - LOG.info(msg) - - -def _check_huge_values(rpc_mem, patch, vm_hugepages_nr_2M=None, - vm_hugepages_nr_1G=None): - - if rpc_mem['vm_hugepages_use_1G'] == 'False' and vm_hugepages_nr_1G: - # cannot provision 1G huge pages if the processor does not support them - raise wsme.exc.ClientSideError(_( - "Processor does not support 1G huge pages.")) - - # Check for invalid characters - if vm_hugepages_nr_2M: - try: - val = int(vm_hugepages_nr_2M) - except ValueError: - raise wsme.exc.ClientSideError(_( - "VM huge pages 2M must be a number")) - if int(vm_hugepages_nr_2M) < 0: - raise wsme.exc.ClientSideError(_( - "VM huge pages 2M must be greater than or equal to zero")) - - if vm_hugepages_nr_1G: - try: - val = int(vm_hugepages_nr_1G) - except ValueError: - raise wsme.exc.ClientSideError(_( - "VM huge pages 1G must be a number")) - if val < 0: - raise wsme.exc.ClientSideError(_( - "VM huge pages 1G must be greater than or equal to zero")) - - # Check to make sure that the huge pages aren't over committed - if rpc_mem['vm_hugepages_possible_2M'] is None and vm_hugepages_nr_2M: - raise wsme.exc.ClientSideError(_( - "No available space for 2M huge page allocation")) - - if rpc_mem['vm_hugepages_possible_1G'] is None and vm_hugepages_nr_1G: - raise wsme.exc.ClientSideError(_( - "No available space for 1G huge page allocation")) - - # Update the number of available huge pages - num_2M_for_1G = 512 - - # None == unchanged - if vm_hugepages_nr_1G is not None: - new_1G_pages = int(vm_hugepages_nr_1G) - elif rpc_mem['vm_hugepages_nr_1G_pending']: - new_1G_pages = int(rpc_mem['vm_hugepages_nr_1G_pending']) - elif rpc_mem['vm_hugepages_nr_1G']: - new_1G_pages = int(rpc_mem['vm_hugepages_nr_1G']) - else: - new_1G_pages = 0 - - # None == unchanged - if vm_hugepages_nr_2M is not None: - new_2M_pages = int(vm_hugepages_nr_2M) - elif rpc_mem['vm_hugepages_nr_2M_pending']: - new_2M_pages = int(rpc_mem['vm_hugepages_nr_2M_pending']) - elif rpc_mem['vm_hugepages_nr_2M']: - new_2M_pages = int(rpc_mem['vm_hugepages_nr_2M']) - else: - new_2M_pages = 0 - - LOG.debug('new 2M pages: %s, 1G pages: %s' % (new_2M_pages, new_1G_pages)) - vm_possible_2M = 0 - vm_possible_1G = 0 - if rpc_mem['vm_hugepages_possible_2M']: - vm_possible_2M = int(rpc_mem['vm_hugepages_possible_2M']) - - if rpc_mem['vm_hugepages_possible_1G']: - vm_possible_1G = int(rpc_mem['vm_hugepages_possible_1G']) - - LOG.debug("max possible 2M pages: %s, max possible 1G pages: %s" % - (vm_possible_2M, vm_possible_1G)) - - if vm_possible_2M < new_2M_pages: - msg = _("No available space for 2M huge page allocation, " - "max 2M pages: %d") % vm_possible_2M - raise wsme.exc.ClientSideError(msg) - - if vm_possible_1G < new_1G_pages: - msg = _("No available space for 1G huge page allocation, " - "max 1G pages: %d") % vm_possible_1G - raise wsme.exc.ClientSideError(msg) - - # always use vm_possible_2M to compare, - if vm_possible_2M < (new_2M_pages + new_1G_pages * num_2M_for_1G): - max_1G = int((vm_possible_2M - new_2M_pages) / num_2M_for_1G) - max_2M = vm_possible_2M - new_1G_pages * num_2M_for_1G - if new_2M_pages > 0 and new_1G_pages > 0: - msg = _("No available space for new settings." 
- "Max 1G pages is {} when 2M is {}, or " - "Max 2M pages is %s when 1G is {}.").format( - max_1G, new_2M_pages, max_2M, new_1G_pages) - elif new_1G_pages > 0: - msg = _("No available space for 1G huge page allocation, " - "max 1G pages: %d") % vm_possible_1G - else: - msg = _("No available space for 2M huge page allocation, " - "max 2M pages: %d") % vm_possible_2M - - raise wsme.exc.ClientSideError(msg) - - return patch diff --git a/inventory/inventory/inventory/api/controllers/v1/node.py b/inventory/inventory/inventory/api/controllers/v1/node.py deleted file mode 100644 index cdca9d62..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/node.py +++ /dev/null @@ -1,261 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# - - -import six - -import pecan -from pecan import rest - -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import cpu -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import memory -from inventory.api.controllers.v1 import port -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory import objects - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class NodePatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return ['/address', '/host_uuid'] - - -class Node(base.APIBase): - """API representation of a host node. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of - an node. 
- """ - - uuid = types.uuid - "Unique UUID for this node" - - numa_node = int - "numa node zone for this node" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "This node's meta data" - - host_id = int - "The hostid that this node belongs to" - - host_uuid = types.uuid - "The UUID of the host this node belongs to" - - links = [link.Link] - "A list containing a self link and associated node links" - - icpus = [link.Link] - "Links to the collection of cpus on this node" - - imemorys = [link.Link] - "Links to the collection of memorys on this node" - - ports = [link.Link] - "Links to the collection of ports on this node" - - def __init__(self, **kwargs): - self.fields = objects.Node.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_node, expand=True): - minimum_fields = ['uuid', 'numa_node', 'capabilities', - 'host_uuid', 'host_id', - 'created_at'] if not expand else None - fields = minimum_fields if not expand else None - - node = Node.from_rpc_object(rpc_node, fields) - - # never expose the host_id attribute - node.host_id = wtypes.Unset - - node.links = [link.Link.make_link('self', pecan.request.host_url, - 'nodes', node.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'nodes', node.uuid, - bookmark=True) - ] - if expand: - node.icpus = [link.Link.make_link('self', - pecan.request.host_url, - 'nodes', - node.uuid + "/cpus"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'nodes', - node.uuid + "/cpus", - bookmark=True) - ] - - node.imemorys = [link.Link.make_link('self', - pecan.request.host_url, - 'nodes', - node.uuid + "/memorys"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'nodes', - node.uuid + "/memorys", - bookmark=True) - ] - - node.ports = [link.Link.make_link('self', - pecan.request.host_url, - 'nodes', - node.uuid + "/ports"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'nodes', - node.uuid + "/ports", - bookmark=True) - ] - - return node - - -class NodeCollection(collection.Collection): - """API representation of a collection of nodes.""" - - nodes = [Node] - "A list containing node objects" - - def __init__(self, **kwargs): - self._type = 'nodes' - - @classmethod - def convert_with_links(cls, rpc_nodes, limit, url=None, - expand=False, **kwargs): - collection = NodeCollection() - collection.nodes = [Node.convert_with_links(p, expand) - for p in rpc_nodes] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'NodeController' - - -class NodeController(rest.RestController): - """REST controller for nodes.""" - - icpus = cpu.CPUController(from_node=True) - "Expose cpus as a sub-element of nodes" - - imemorys = memory.MemoryController(from_node=True) - "Expose memorys as a sub-element of nodes" - - ports = port.PortController(from_node=True) - "Expose ports as a sub-element of nodes" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False): - self._from_hosts = from_hosts - - def _get_nodes_collection(self, host_uuid, marker, limit, sort_key, - sort_dir, expand=False, resource_url=None): - if self._from_hosts and not host_uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Node.get_by_uuid(pecan.request.context, - marker) - - if host_uuid: - nodes = 
objects.Node.get_by_host(pecan.request.context, - host_uuid, - limit, - marker=marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - nodes = objects.Node.list(pecan.request.context, - limit, - marker=marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return NodeCollection.convert_with_links(nodes, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(NodeCollection, - types.uuid, types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, host_uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of nodes.""" - - return self._get_nodes_collection(host_uuid, marker, limit, - sort_key, sort_dir) - - @wsme_pecan.wsexpose(NodeCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, host_uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of nodes with detail.""" - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "nodes": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['nodes', 'detail']) - return self._get_nodes_collection(host_uuid, - marker, limit, - sort_key, sort_dir, - expand, resource_url) - - @wsme_pecan.wsexpose(Node, types.uuid) - def get_one(self, node_uuid): - """Retrieve information about the given node.""" - - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_node = objects.Node.get_by_uuid(pecan.request.context, node_uuid) - return Node.convert_with_links(rpc_node) diff --git a/inventory/inventory/inventory/api/controllers/v1/pci_device.py b/inventory/inventory/inventory/api/controllers/v1/pci_device.py deleted file mode 100644 index 4c182778..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/pci_device.py +++ /dev/null @@ -1,313 +0,0 @@ -# Copyright (c) 2015-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import jsonpatch -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common import utils as cutils -from inventory import objects -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class PCIDevicePatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class PCIDevice(base.APIBase): - """API representation of an PCI device - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - Pci Device . - """ - - uuid = types.uuid - "Unique UUID for this device" - - type = wtypes.text - "Represent the type of device" - - name = wtypes.text - "Represent the name of the device. 
Unique per host" - - pciaddr = wtypes.text - "Represent the pci address of the device" - - pclass_id = wtypes.text - "Represent the numerical pci class of the device" - - pvendor_id = wtypes.text - "Represent the numerical pci vendor of the device" - - pdevice_id = wtypes.text - "Represent the numerical pci device of the device" - - pclass = wtypes.text - "Represent the pci class description of the device" - - pvendor = wtypes.text - "Represent the pci vendor description of the device" - - pdevice = wtypes.text - "Represent the pci device description of the device" - - psvendor = wtypes.text - "Represent the pci svendor of the device" - - psdevice = wtypes.text - "Represent the pci sdevice of the device" - - numa_node = int - "Represent the numa node or zone sdevice of the device" - - sriov_totalvfs = int - "The total number of available SR-IOV VFs" - - sriov_numvfs = int - "The number of configured SR-IOV VFs" - - sriov_vfs_pci_address = wtypes.text - "The PCI Addresses of the VFs" - - driver = wtypes.text - "The kernel driver for this device" - - extra_info = wtypes.text - "Extra information for this device" - - host_id = int - "Represent the host_id the device belongs to" - - host_uuid = types.uuid - "Represent the UUID of the host the device belongs to" - - enabled = types.boolean - "Represent the enabled status of the device" - - links = [link.Link] - "Represent a list containing a self link and associated device links" - - def __init__(self, **kwargs): - self.fields = objects.PCIDevice.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_device, expand=True): - device = PCIDevice(**rpc_device.as_dict()) - if not expand: - device.unset_fields_except(['uuid', 'host_id', - 'name', 'pciaddr', 'pclass_id', - 'pvendor_id', 'pdevice_id', 'pclass', - 'pvendor', 'pdevice', 'psvendor', - 'psdevice', 'numa_node', - 'sriov_totalvfs', 'sriov_numvfs', - 'sriov_vfs_pci_address', 'driver', - 'host_uuid', 'enabled', - 'created_at', 'updated_at']) - - # do not expose the id attribute - device.host_id = wtypes.Unset - device.node_id = wtypes.Unset - - device.links = [link.Link.make_link('self', pecan.request.host_url, - 'pci_devices', device.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'pci_devices', device.uuid, - bookmark=True) - ] - return device - - -class PCIDeviceCollection(collection.Collection): - """API representation of a collection of PciDevice objects.""" - - pci_devices = [PCIDevice] - "A list containing PciDevice objects" - - def __init__(self, **kwargs): - self._type = 'pci_devices' - - @classmethod - def convert_with_links(cls, rpc_devices, limit, url=None, - expand=False, **kwargs): - collection = PCIDeviceCollection() - collection.pci_devices = [PCIDevice.convert_with_links(d, expand) - for d in rpc_devices] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'PCIDeviceController' - - -class PCIDeviceController(rest.RestController): - """REST controller for PciDevices.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False): - self._from_hosts = from_hosts - - def _get_pci_devices_collection(self, uuid, marker, limit, sort_key, - sort_dir, expand=False, resource_url=None): - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - marker_obj = None - if marker: - 
marker_obj = objects.PCIDevice.get_by_uuid( - pecan.request.context, - marker) - if self._from_hosts: - # devices = pecan.request.dbapi.pci_device_get_by_host( - devices = objects.PCIDevice.get_by_host( - pecan.request.context, - uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - if uuid: - # devices = pecan.request.dbapi.pci_device_get_by_host( - devices = objects.PCIDevice.get_by_host( - pecan.request.context, - uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - # devices = pecan.request.dbapi.pci_device_get_list( - devices = objects.PCIDevice.list( - pecan.request.context, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return PCIDeviceCollection.convert_with_links(devices, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(PCIDeviceCollection, types.uuid, types.uuid, - int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of devices.""" - return self._get_pci_devices_collection( - uuid, marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(PCIDeviceCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of devices with detail.""" - - # NOTE: /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "pci_devices": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['pci_devices', 'detail']) - return self._get_pci_devices_collection(uuid, marker, limit, sort_key, - sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(PCIDevice, types.uuid) - def get_one(self, device_uuid): - """Retrieve information about the given device.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_device = objects.PCIDevice.get_by_uuid( - pecan.request.context, device_uuid) - return PCIDevice.convert_with_links(rpc_device) - - @cutils.synchronized(LOCK_NAME) - @wsme.validate(types.uuid, [PCIDevicePatchType]) - @wsme_pecan.wsexpose(PCIDevice, types.uuid, - body=[PCIDevicePatchType]) - def patch(self, device_uuid, patch): - """Update an existing device.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_device = objects.PCIDevice.get_by_uuid( - pecan.request.context, device_uuid) - - # replace host_uuid and with corresponding - patch_obj = jsonpatch.JsonPatch(patch) - for p in patch_obj: - if p['path'] == '/host_uuid': - p['path'] = '/host_id' - host = objects.Host.get_by_uuid(pecan.request.context, - p['value']) - p['value'] = host.id - - try: - device = PCIDevice(**jsonpatch.apply_patch(rpc_device.as_dict(), - patch_obj)) - - except utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Semantic checks - host = objects.Host.get_by_uuid(pecan.request.context, - device.host_id) - _check_host(host) - - # Update fields that have changed - for field in objects.PCIDevice.fields: - if rpc_device[field] != getattr(device, field): - _check_field(field) - rpc_device[field] = getattr(device, field) - - rpc_device.save() - return PCIDevice.convert_with_links(rpc_device) - - -def _check_host(host): - if utils.is_aio_simplex_host_unlocked(host): - raise wsme.exc.ClientSideError(_('Host must be locked.')) - elif host.administrative != k_host.ADMIN_LOCKED and not \ - utils.is_host_simplex_controller(host): - raise 
wsme.exc.ClientSideError(_('Host must be locked.')) - if k_host.COMPUTE not in host.subfunctions: - raise wsme.exc.ClientSideError( - _('Can only modify compute node cores.')) - - -def _check_field(field): - if field not in ["enabled", "name"]: - raise wsme.exc.ClientSideError( - _('Modifying %s attribute restricted') % field) diff --git a/inventory/inventory/inventory/api/controllers/v1/port.py b/inventory/inventory/inventory/api/controllers/v1/port.py deleted file mode 100644 index 643f4f05..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/port.py +++ /dev/null @@ -1,334 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# - - -import six - -import pecan -from pecan import rest - -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import lldp_agent -from inventory.api.controllers.v1 import lldp_neighbour -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory import objects - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class PortPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class Port(base.APIBase): - """API representation of a host port - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - port. - """ - uuid = types.uuid - "Unique UUID for this port" - - type = wtypes.text - "Represent the type of port" - - name = wtypes.text - "Represent the name of the port. Unique per host" - - namedisplay = wtypes.text - "Represent the display name of the port. 
Unique per host" - - pciaddr = wtypes.text - "Represent the pci address of the port" - - dev_id = int - "The unique identifier of PCI device" - - pclass = wtypes.text - "Represent the pci class of the port" - - pvendor = wtypes.text - "Represent the pci vendor of the port" - - pdevice = wtypes.text - "Represent the pci device of the port" - - psvendor = wtypes.text - "Represent the pci svendor of the port" - - psdevice = wtypes.text - "Represent the pci sdevice of the port" - - numa_node = int - "Represent the numa node or zone sdevice of the port" - - sriov_totalvfs = int - "The total number of available SR-IOV VFs" - - sriov_numvfs = int - "The number of configured SR-IOV VFs" - - sriov_vfs_pci_address = wtypes.text - "The PCI Addresses of the VFs" - - driver = wtypes.text - "The kernel driver for this device" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "Represent meta data of the port" - - host_id = int - "Represent the host_id the port belongs to" - - interface_id = int - "Represent the interface_id the port belongs to" - - dpdksupport = bool - "Represent whether or not the port supports DPDK acceleration" - - host_uuid = types.uuid - "Represent the UUID of the host the port belongs to" - - interface_uuid = types.uuid - "Represent the UUID of the interface the port belongs to" - - node_uuid = types.uuid - "Represent the UUID of the node the port belongs to" - - links = [link.Link] - "Represent a list containing a self link and associated port links" - - lldp_agents = [link.Link] - "Links to the collection of LldpAgents on this port" - - lldp_neighbours = [link.Link] - "Links to the collection of LldpNeighbours on this port" - - def __init__(self, **kwargs): - self.fields = objects.Port.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_port, expand=True): - port = Port(**rpc_port.as_dict()) - if not expand: - port.unset_fields_except(['uuid', 'host_id', 'node_id', - 'interface_id', 'type', 'name', - 'namedisplay', 'pciaddr', 'dev_id', - 'pclass', 'pvendor', 'pdevice', - 'psvendor', 'psdevice', 'numa_node', - 'sriov_totalvfs', 'sriov_numvfs', - 'sriov_vfs_pci_address', 'driver', - 'capabilities', - 'host_uuid', 'interface_uuid', - 'node_uuid', 'dpdksupport', - 'created_at', 'updated_at']) - - # never expose the id attribute - port.host_id = wtypes.Unset - port.interface_id = wtypes.Unset - port.node_id = wtypes.Unset - - port.links = [link.Link.make_link('self', pecan.request.host_url, - 'ports', port.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'ports', port.uuid, - bookmark=True) - ] - - port.lldp_agents = [link.Link.make_link('self', - pecan.request.host_url, - 'ports', - port.uuid + "/lldp_agents"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'ports', - port.uuid + "/lldp_agents", - bookmark=True) - ] - - port.lldp_neighbours = [ - link.Link.make_link('self', - pecan.request.host_url, - 'ports', - port.uuid + "/lldp_neighbors"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'ports', - port.uuid + "/lldp_neighbors", - bookmark=True) - ] - - return port - - -class PortCollection(collection.Collection): - """API representation of a collection of Port objects.""" - - ports = [Port] - "A list containing Port objects" - - def __init__(self, **kwargs): - self._type = 'ports' - - @classmethod - def convert_with_links(cls, rpc_ports, limit, url=None, - expand=False, **kwargs): - collection = PortCollection() - 
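# Wrap each RPC port object, then let the base Collection compute the
# paginated 'next' link from the limit, url, and sort keyword arguments
# (see collection.Collection.get_next, used by every controller in this diff).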
collection.ports = [Port.convert_with_links(p, expand) - for p in rpc_ports] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class PortController(rest.RestController): - """REST controller for Ports.""" - - lldp_agents = lldp_agent.LLDPAgentController( - from_ports=True) - "Expose lldp_agents as a sub-element of ports" - - lldp_neighbours = lldp_neighbour.LLDPNeighbourController( - from_ports=True) - "Expose lldp_neighbours as a sub-element of ports" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_iinterface=False, - from_node=False): - self._from_hosts = from_hosts - self._from_iinterface = from_iinterface - self._from_node = from_node - - def _get_ports_collection(self, uuid, interface_uuid, node_uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - if self._from_iinterface and not uuid: - raise exception.InvalidParameterValue(_( - "Interface id not specified.")) - - if self._from_node and not uuid: - raise exception.InvalidParameterValue(_( - "node id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Port.get_by_uuid( - pecan.request.context, - marker) - - if self._from_hosts: - ports = objects.Port.get_by_host( - pecan.request.context, - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - elif self._from_node: - ports = objects.Port.get_by_numa_node( - pecan.request.context, - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - if uuid and not interface_uuid: - ports = objects.Port.get_by_host( - pecan.request.context, - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - ports = objects.Port.list( - pecan.request.context, - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return PortCollection.convert_with_links(ports, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(PortCollection, types.uuid, types.uuid, - types.uuid, types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, interface_uuid=None, node_uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of ports.""" - - return self._get_ports_collection(uuid, - interface_uuid, - node_uuid, - marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(PortCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of ports with detail.""" - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "ports": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['ports', 'detail']) - return self._get_ports_collection(uuid, marker, limit, sort_key, - sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(Port, types.uuid) - def get_one(self, port_uuid): - """Retrieve information about the given port.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_port = objects.Port.get_by_uuid( - pecan.request.context, port_uuid) - return Port.convert_with_links(rpc_port) diff --git a/inventory/inventory/inventory/api/controllers/v1/query.py 
b/inventory/inventory/inventory/api/controllers/v1/query.py deleted file mode 100644 index 976bfb9d..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/query.py +++ /dev/null @@ -1,168 +0,0 @@ -# coding: utf-8 -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright © 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import ast -import functools -import inspect -from inventory.common.i18n import _ -from oslo_log import log -from oslo_utils import strutils -from oslo_utils import timeutils -import six -import wsme -from wsme import types as wtypes - -LOG = log.getLogger(__name__) - -operation_kind = wtypes.Enum(str, 'lt', 'le', 'eq', 'ne', 'ge', 'gt') - - -class _Base(wtypes.Base): - - @classmethod - def from_db_model(cls, m): - return cls(**(m.as_dict())) - - @classmethod - def from_db_and_links(cls, m, links): - return cls(links=links, **(m.as_dict())) - - def as_dict(self, db_model): - valid_keys = inspect.getargspec(db_model.__init__)[0] - if 'self' in valid_keys: - valid_keys.remove('self') - return self.as_dict_from_keys(valid_keys) - - def as_dict_from_keys(self, keys): - return dict((k, getattr(self, k)) - for k in keys - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - -class Query(_Base): - """Query filter. - """ - - # The data types supported by the query. - _supported_types = ['integer', 'float', 'string', 'boolean'] - - # Functions to convert the data field to the correct type. - _type_converters = {'integer': int, - 'float': float, - 'boolean': functools.partial( - strutils.bool_from_string, strict=True), - 'string': six.text_type, - 'datetime': timeutils.parse_isotime} - - _op = None # provide a default - - def get_op(self): - return self._op or 'eq' - - def set_op(self, value): - self._op = value - - field = wtypes.text - "The name of the field to test" - - # op = wsme.wsattr(operation_kind, default='eq') - # this ^ doesn't seem to work. - op = wsme.wsproperty(operation_kind, get_op, set_op) - "The comparison operator. Defaults to 'eq'." - - value = wtypes.text - "The value to compare against the stored data" - - type = wtypes.text - "The data type of value to compare against the stored data" - - def __repr__(self): - # for logging calls - return '' % (self.field, - self.op, - self.value, - self.type) - - @classmethod - def sample(cls): - return cls(field='resource_id', - op='eq', - value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - type='string' - ) - - def as_dict(self): - return self.as_dict_from_keys(['field', 'op', 'type', 'value']) - - def _get_value_as_type(self, forced_type=None): - """Convert metadata value to the specified data type. - - This method is called during metadata query to help convert the - querying metadata to the data type specified by user. 
If there is no - data type given, the metadata will be parsed by ast.literal_eval to - try to do a smart converting. - - NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised - from wsmeext/sphinxext.py. It's OK to call it outside the Query class. - Because the "public" side of that class is actually the outside of the - API, and the "private" side is the API implementation. The method is - only used in the API implementation, so it's OK. - - :returns: metadata value converted with the specified data type. - """ - type = forced_type or self.type - try: - converted_value = self.value - if not type: - try: - converted_value = ast.literal_eval(self.value) - except (ValueError, SyntaxError): - msg = _('Failed to convert the metadata value %s' - ' automatically') % (self.value) - LOG.debug(msg) - else: - if type not in self._supported_types: - # Types must be explicitly declared so the - # correct type converter may be used. Subclasses - # of Query may define _supported_types and - # _type_converters to define their own types. - raise TypeError() - converted_value = self._type_converters[type](self.value) - except ValueError: - msg = _('Failed to convert the value %(value)s' - ' to the expected data type %(type)s.') % \ - {'value': self.value, 'type': type} - raise wsme.exc.ClientSideError(msg) - except TypeError: - msg = _('The data type %(type)s is not supported. The supported' - ' data type list is: %(supported)s') % \ - {'type': type, 'supported': self._supported_types} - raise wsme.exc.ClientSideError(msg) - except Exception: - msg = _('Unexpected exception converting %(value)s to' - ' the expected data type %(type)s.') % \ - {'value': self.value, 'type': type} - raise wsme.exc.ClientSideError(msg) - return converted_value diff --git a/inventory/inventory/inventory/api/controllers/v1/sensor.py b/inventory/inventory/inventory/api/controllers/v1/sensor.py deleted file mode 100644 index 05a5b47e..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/sensor.py +++ /dev/null @@ -1,586 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. 
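[Reviewer note] For context on the removed Query filter above: its _get_value_as_type helper converts a raw query string to a declared type, falling back to ast.literal_eval when no type is given. A minimal standalone sketch of that conversion logic — the function name is illustrative, not part of the removed module, and str stands in for the original six.text_type:

    import ast
    import functools

    from oslo_utils import strutils
    from oslo_utils import timeutils

    # Mirrors the removed Query._type_converters table.
    _CONVERTERS = {
        'integer': int,
        'float': float,
        'boolean': functools.partial(strutils.bool_from_string, strict=True),
        'string': str,
        'datetime': timeutils.parse_isotime,
    }

    def convert_query_value(value, forced_type=None):
        """Convert a query value to forced_type, or guess when none is given."""
        if not forced_type:
            try:
                # Smart conversion: '1' -> 1, 'True' -> True; plain text is
                # left alone when literal_eval cannot parse it.
                return ast.literal_eval(value)
            except (ValueError, SyntaxError):
                return value
        if forced_type not in _CONVERTERS:
            raise TypeError('unsupported query type: %s' % forced_type)
        return _CONVERTERS[forced_type](value)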
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import copy -import jsonpatch -import pecan -from pecan import rest -import six -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import constants -from inventory.common import exception -from inventory.common import hwmon_api -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common import utils as cutils -from inventory import objects -from oslo_log import log - - -LOG = log.getLogger(__name__) - - -class SensorPatchType(types.JsonPatchType): - @staticmethod - def mandatory_attrs(): - return [] - - -class Sensor(base.APIBase): - """API representation of an Sensor - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - sensor. - """ - - uuid = types.uuid - "Unique UUID for this sensor" - - sensorname = wtypes.text - "Represent the name of the sensor. Unique with path per host" - - path = wtypes.text - "Represent the path of the sensor. Unique with sensorname per host" - - sensortype = wtypes.text - "Represent the type of sensor. e.g. Temperature, WatchDog" - - datatype = wtypes.text - "Represent the entity monitored. e.g. discrete, analog" - - status = wtypes.text - "Represent current sensor status: ok, minor, major, critical, disabled" - - state = wtypes.text - "Represent the current state of the sensor" - - state_requested = wtypes.text - "Represent the requested state of the sensor" - - audit_interval = int - "Represent the audit_interval of the sensor." - - algorithm = wtypes.text - "Represent the algorithm of the sensor." - - actions_minor = wtypes.text - "Represent the minor configured actions of the sensor. CSV." - - actions_major = wtypes.text - "Represent the major configured actions of the sensor. CSV." - - actions_critical = wtypes.text - "Represent the critical configured actions of the sensor. CSV." - - suppress = wtypes.text - "Represent supress sensor if True, otherwise not suppress sensor" - - value = wtypes.text - "Represent current value of the discrete sensor" - - unit_base = wtypes.text - "Represent the unit base of the analog sensor e.g. revolutions" - - unit_modifier = wtypes.text - "Represent the unit modifier of the analog sensor e.g. 10**2" - - unit_rate = wtypes.text - "Represent the unit rate of the sensor e.g. 
/minute" - - t_minor_lower = wtypes.text - "Represent the minor lower threshold of the analog sensor" - - t_minor_upper = wtypes.text - "Represent the minor upper threshold of the analog sensor" - - t_major_lower = wtypes.text - "Represent the major lower threshold of the analog sensor" - - t_major_upper = wtypes.text - "Represent the major upper threshold of the analog sensor" - - t_critical_lower = wtypes.text - "Represent the critical lower threshold of the analog sensor" - - t_critical_upper = wtypes.text - "Represent the critical upper threshold of the analog sensor" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "Represent meta data of the sensor" - - host_id = int - "Represent the host_id the sensor belongs to" - - sensorgroup_id = int - "Represent the sensorgroup_id the sensor belongs to" - - host_uuid = types.uuid - "Represent the UUID of the host the sensor belongs to" - - sensorgroup_uuid = types.uuid - "Represent the UUID of the sensorgroup the sensor belongs to" - - links = [link.Link] - "Represent a list containing a self link and associated sensor links" - - def __init__(self, **kwargs): - self.fields = objects.Sensor.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_sensor, expand=True): - - sensor = Sensor(**rpc_sensor.as_dict()) - - sensor_fields_common = ['uuid', 'host_id', 'sensorgroup_id', - 'sensortype', 'datatype', - 'sensorname', 'path', - - 'status', - 'state', 'state_requested', - 'sensor_action_requested', - 'actions_minor', - 'actions_major', - 'actions_critical', - - 'suppress', - 'audit_interval', - 'algorithm', - 'capabilities', - 'host_uuid', 'sensorgroup_uuid', - 'created_at', 'updated_at', ] - - sensor_fields_analog = ['unit_base', - 'unit_modifier', - 'unit_rate', - - 't_minor_lower', - 't_minor_upper', - 't_major_lower', - 't_major_upper', - 't_critical_lower', - 't_critical_upper', ] - - if rpc_sensor.datatype == 'discrete': - sensor_fields = sensor_fields_common - elif rpc_sensor.datatype == 'analog': - sensor_fields = sensor_fields_common + sensor_fields_analog - else: - LOG.error(_("Invalid datatype={}").format(rpc_sensor.datatype)) - - if not expand: - sensor.unset_fields_except(sensor_fields) - - # never expose the id attribute - sensor.host_id = wtypes.Unset - sensor.sensorgroup_id = wtypes.Unset - - sensor.links = [link.Link.make_link('self', pecan.request.host_url, - 'sensors', sensor.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'sensors', sensor.uuid, - bookmark=True) - ] - return sensor - - -class SensorCollection(collection.Collection): - """API representation of a collection of Sensor objects.""" - - sensors = [Sensor] - "A list containing Sensor objects" - - def __init__(self, **kwargs): - self._type = 'sensors' - - @classmethod - def convert_with_links(cls, rpc_sensors, limit, url=None, - expand=False, **kwargs): - collection = SensorCollection() - collection.sensors = [Sensor.convert_with_links(p, expand) - for p in rpc_sensors] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'SensorController' - - -class SensorController(rest.RestController): - """REST controller for Sensors.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self, from_hosts=False, from_sensorgroup=False): - self._from_hosts = from_hosts - self._from_sensorgroup = from_sensorgroup - self._api_token = None - self._hwmon_address = k_host.LOCALHOST_HOSTNAME - 
self._hwmon_port = constants.HWMON_PORT - - def _get_sensors_collection(self, uuid, sensorgroup_uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - if self._from_sensorgroup and not uuid: - raise exception.InvalidParameterValue(_( - "SensorGroup id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Sensor.get_by_uuid( - pecan.request.context, - marker) - - if self._from_hosts: - sensors = pecan.request.dbapi.sensor_get_by_host( - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - LOG.debug("dbapi.sensor_get_by_host=%s" % sensors) - elif self._from_sensorgroup: - sensors = pecan.request.dbapi.sensor_get_by_sensorgroup( - uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - LOG.debug("dbapi.sensor_get_by_sensorgroup=%s" % sensors) - else: - if uuid and not sensorgroup_uuid: - sensors = pecan.request.dbapi.sensor_get_by_host( - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - LOG.debug("dbapi.sensor_get_by_host=%s" % sensors) - elif uuid and sensorgroup_uuid: # Need ihost_uuid ? - sensors = pecan.request.dbapi.sensor_get_by_host_sensorgroup( - uuid, - sensorgroup_uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - LOG.debug("dbapi.sensor_get_by_host_sensorgroup=%s" % - sensors) - - elif sensorgroup_uuid: # Need ihost_uuid ? - sensors = pecan.request.dbapi.sensor_get_by_host_sensorgroup( - uuid, # None - sensorgroup_uuid, - limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - else: - sensors = pecan.request.dbapi.sensor_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return SensorCollection.convert_with_links(sensors, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(SensorCollection, types.uuid, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, sensorgroup_uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of sensors.""" - - return self._get_sensors_collection(uuid, sensorgroup_uuid, - marker, limit, - sort_key, sort_dir) - - @wsme_pecan.wsexpose(SensorCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of sensors with detail.""" - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "sensors": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['sensors', 'detail']) - return self._get_sensors_collection(uuid, marker, limit, sort_key, - sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(Sensor, types.uuid) - def get_one(self, sensor_uuid): - """Retrieve information about the given sensor.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_sensor = objects.Sensor.get_by_uuid( - pecan.request.context, sensor_uuid) - - if rpc_sensor.datatype == 'discrete': - rpc_sensor = objects.SensorDiscrete.get_by_uuid( - pecan.request.context, sensor_uuid) - elif rpc_sensor.datatype == 'analog': - rpc_sensor = objects.SensorAnalog.get_by_uuid( - pecan.request.context, sensor_uuid) - else: - LOG.error(_("Invalid 
datatype={}").format(rpc_sensor.datatype)) - - return Sensor.convert_with_links(rpc_sensor) - - @staticmethod - def _new_sensor_semantic_checks(sensor): - datatype = sensor.as_dict().get('datatype') or "" - sensortype = sensor.as_dict().get('sensortype') or "" - if not (datatype and sensortype): - raise wsme.exc.ClientSideError(_("sensor-add Cannot " - "add a sensor " - "without a valid datatype " - "and sensortype.")) - - if datatype not in constants.SENSOR_DATATYPE_VALID_LIST: - raise wsme.exc.ClientSideError( - _("sensor datatype must be one of %s.") % - constants.SENSOR_DATATYPE_VALID_LIST) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(Sensor, body=Sensor) - def post(self, sensor): - """Create a new sensor.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - self._new_sensor_semantic_checks(sensor) - try: - ihost = pecan.request.dbapi.host_get(sensor.host_uuid) - - if hasattr(sensor, 'datatype'): - if sensor.datatype == 'discrete': - new_sensor = pecan.request.dbapi.sensor_discrete_create( - ihost.id, sensor.as_dict()) - elif sensor.datatype == 'analog': - new_sensor = pecan.request.dbapi.sensor_analog_create( - ihost.id, sensor.as_dict()) - else: - raise wsme.exc.ClientSideError( - _("Invalid datatype. {}").format(sensor.datatype)) - else: - raise wsme.exc.ClientSideError(_("Unspecified datatype.")) - - except exception.InventoryException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Invalid data")) - return sensor.convert_with_links(new_sensor) - - @cutils.synchronized(LOCK_NAME) - @wsme.validate(types.uuid, [SensorPatchType]) - @wsme_pecan.wsexpose(Sensor, types.uuid, - body=[SensorPatchType]) - def patch(self, sensor_uuid, patch): - """Update an existing sensor.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rpc_sensor = objects.Sensor.get_by_uuid(pecan.request.context, - sensor_uuid) - if rpc_sensor.datatype == 'discrete': - rpc_sensor = objects.SensorDiscrete.get_by_uuid( - pecan.request.context, sensor_uuid) - elif rpc_sensor.datatype == 'analog': - rpc_sensor = objects.SensorAnalog.get_by_uuid( - pecan.request.context, sensor_uuid) - else: - raise wsme.exc.ClientSideError(_("Invalid datatype={}").format( - rpc_sensor.datatype)) - - rpc_sensor_orig = copy.deepcopy(rpc_sensor) - - # replace ihost_uuid and sensorgroup_uuid with corresponding - utils.validate_patch(patch) - patch_obj = jsonpatch.JsonPatch(patch) - my_host_uuid = None - for p in patch_obj: - if p['path'] == '/host_uuid': - p['path'] = '/host_id' - host = objects.Host.get_by_uuid(pecan.request.context, - p['value']) - p['value'] = host.id - my_host_uuid = host.uuid - - if p['path'] == '/sensorgroup_uuid': - p['path'] = '/sensorgroup_id' - try: - sensorgroup = objects.sensorgroup.get_by_uuid( - pecan.request.context, p['value']) - p['value'] = sensorgroup.id - LOG.info("sensorgroup_uuid=%s id=%s" % (p['value'], - sensorgroup.id)) - except exception.InventoryException: - p['value'] = None - - try: - sensor = Sensor(**jsonpatch.apply_patch(rpc_sensor.as_dict(), - patch_obj)) - - except utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - if rpc_sensor.datatype == 'discrete': - fields = objects.SensorDiscrete.fields - else: - fields = objects.SensorAnalog.fields - - for field in fields: - if rpc_sensor[field] != getattr(sensor, field): - rpc_sensor[field] = getattr(sensor, field) - - delta = rpc_sensor.obj_what_changed() - sensor_suppress_attrs = ['suppress'] - force_action 
= False - if any(x in delta for x in sensor_suppress_attrs): - valid_suppress = ['True', 'False', 'true', 'false', 'force_action'] - if rpc_sensor.suppress.lower() not in valid_suppress: - raise wsme.exc.ClientSideError(_("Invalid suppress value, " - "select 'True' or 'False'")) - elif rpc_sensor.suppress.lower() == 'force_action': - LOG.info("suppress=%s" % rpc_sensor.suppress.lower()) - rpc_sensor.suppress = rpc_sensor_orig.suppress - force_action = True - - self._semantic_modifiable_fields(patch_obj, force_action) - - if not pecan.request.user_agent.startswith('hwmon'): - hwmon_sensor = cutils.removekeys_nonhwmon( - rpc_sensor.as_dict()) - - if not my_host_uuid: - host = objects.Host.get_by_uuid(pecan.request.context, - rpc_sensor.host_id) - my_host_uuid = host.uuid - LOG.warn("Missing host_uuid updated=%s" % my_host_uuid) - - hwmon_sensor.update({'host_uuid': my_host_uuid}) - - hwmon_response = hwmon_api.sensor_modify( - self._api_token, self._hwmon_address, self._hwmon_port, - hwmon_sensor, - constants.HWMON_DEFAULT_TIMEOUT_IN_SECS) - - if not hwmon_response: - hwmon_response = {'status': 'fail', - 'reason': 'no response', - 'action': 'retry'} - - if hwmon_response['status'] != 'pass': - msg = _("HWMON has returned with a status of {}, reason: {}, " - "recommended action: {}").format( - hwmon_response.get('status'), - hwmon_response.get('reason'), - hwmon_response.get('action')) - - if force_action: - LOG.error(msg) - else: - raise wsme.exc.ClientSideError(msg) - - rpc_sensor.save() - - return Sensor.convert_with_links(rpc_sensor) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, sensor_uuid): - """Delete a sensor.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - pecan.request.dbapi.sensor_destroy(sensor_uuid) - - @staticmethod - def _semantic_modifiable_fields(patch_obj, force_action=False): - # Prevent auto populated fields from being updated - state_rel_path = ['/uuid', '/id', '/host_id', '/datatype', - '/sensortype'] - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError(_("The following fields can not be " - "modified: %s ") % state_rel_path) - - state_rel_path = ['/actions_critical', - '/actions_major', - '/actions_minor'] - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError( - _("The following fields can only be modified at the " - "sensorgroup level: %s") % state_rel_path) - - if not (pecan.request.user_agent.startswith('hwmon') or force_action): - state_rel_path = ['/sensorname', - '/path', - '/status', - '/state', - '/possible_states', - '/algorithm', - '/actions_critical_choices', - '/actions_major_choices', - '/actions_minor_choices', - '/unit_base', - '/unit_modifier', - '/unit_rate', - '/t_minor_lower', - '/t_minor_upper', - '/t_major_lower', - '/t_major_upper', - '/t_critical_lower', - '/t_critical_upper', - ] - - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError( - _("The following fields are not remote-modifiable: %s") % - state_rel_path) diff --git a/inventory/inventory/inventory/api/controllers/v1/sensorgroup.py b/inventory/inventory/inventory/api/controllers/v1/sensorgroup.py deleted file mode 100644 index 71e0e9e9..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/sensorgroup.py +++ /dev/null @@ -1,751 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 UnitedStack Inc. -# All Rights Reserved. 
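[Reviewer note] The removed sensor patch() above treats 'force_action' as a special suppress value: it is never stored, it restores the original suppress setting, and it downgrades a failed hwmon response from a client error to a logged error. A hedged sketch of just that decision, with an illustrative function name:

    def resolve_suppress(requested, original):
        """Return (suppress_value, force_action) as the removed patch() did.

        'force_action' keeps the original suppress value and signals that a
        failing hwmon response should only be logged, not raised.
        """
        valid = ('true', 'false', 'force_action')
        value = (requested or '').lower()
        if value not in valid:
            raise ValueError("Invalid suppress value, select 'True' or 'False'")
        if value == 'force_action':
            return original, True
        return requested, False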
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import copy -import jsonpatch -import pecan -from pecan import rest -import six -import uuid -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import sensor as sensor_api -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils -from inventory.common import constants -from inventory.common import exception -from inventory.common import hwmon_api -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common import utils as cutils -from inventory import objects -from oslo_log import log -from oslo_utils import uuidutils -from six import text_type as unicode - -LOG = log.getLogger(__name__) - - -class SensorGroupPatchType(types.JsonPatchType): - @staticmethod - def mandatory_attrs(): - return ['/host_uuid', 'uuid'] - - -class SensorGroup(base.APIBase): - """API representation of an Sensor Group - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - sensorgroup. - """ - - uuid = types.uuid - "Unique UUID for this sensorgroup" - - sensorgroupname = wtypes.text - "Represent the name of the sensorgroup. Unique with path per host" - - path = wtypes.text - "Represent the path of the sensor. Unique with sensorname per host" - - sensortype = wtypes.text - "Represent the sensortype . e.g. Temperature, WatchDog" - - datatype = wtypes.text - "Represent the datatype e.g. discrete or analog," - - state = wtypes.text - "Represent the state of the sensorgroup" - - possible_states = wtypes.text - "Represent the possible states of the sensorgroup" - - algorithm = wtypes.text - "Represent the algorithm of the sensorgroup." - - audit_interval_group = int - "Represent the audit interval of the sensorgroup." - - actions_critical_choices = wtypes.text - "Represent the configurable critical severity actions of the sensorgroup." - - actions_major_choices = wtypes.text - "Represent the configurable major severity actions of the sensorgroup." - - actions_minor_choices = wtypes.text - "Represent the configurable minor severity actions of the sensorgroup." - - actions_minor_group = wtypes.text - "Represent the minor configured actions of the sensorgroup. CSV." - - actions_major_group = wtypes.text - "Represent the major configured actions of the sensorgroup. CSV." - - actions_critical_group = wtypes.text - "Represent the critical configured actions of the sensorgroup. CSV." - - unit_base_group = wtypes.text - "Represent the unit base of the analog sensorgroup e.g. revolutions" - - unit_modifier_group = wtypes.text - "Represent the unit modifier of the analog sensorgroup e.g. 
10**2" - - unit_rate_group = wtypes.text - "Represent the unit rate of the sensorgroup e.g. /minute" - - t_minor_lower_group = wtypes.text - "Represent the minor lower threshold of the analog sensorgroup" - - t_minor_upper_group = wtypes.text - "Represent the minor upper threshold of the analog sensorgroup" - - t_major_lower_group = wtypes.text - "Represent the major lower threshold of the analog sensorgroup" - - t_major_upper_group = wtypes.text - "Represent the major upper threshold of the analog sensorgroup" - - t_critical_lower_group = wtypes.text - "Represent the critical lower threshold of the analog sensorgroup" - - t_critical_upper_group = wtypes.text - "Represent the critical upper threshold of the analog sensorgroup" - - capabilities = {wtypes.text: utils.ValidTypes(wtypes.text, - six.integer_types)} - "Represent meta data of the sensorgroup" - - suppress = wtypes.text - "Represent supress sensor if True, otherwise not suppress sensor" - - sensors = wtypes.text - "Represent the sensors of the sensorgroup" - - host_id = int - "Represent the host_id the sensorgroup belongs to" - - host_uuid = types.uuid - "Represent the UUID of the host the sensorgroup belongs to" - - links = [link.Link] - "Represent a list containing a self link and associated sensorgroup links" - - sensors = [link.Link] - "Links to the collection of sensors on this sensorgroup" - - def __init__(self, **kwargs): - self.fields = objects.SensorGroup.fields.keys() - for k in self.fields: - setattr(self, k, kwargs.get(k)) - - # 'sensors' is not part of objects.SenorGroups.fields (it's an - # API-only attribute) - self.fields.append('sensors') - setattr(self, 'sensors', kwargs.get('sensors', None)) - - @classmethod - def convert_with_links(cls, rsensorgroup, expand=True): - - sensorgroup = SensorGroup(**rsensorgroup.as_dict()) - - sensorgroup_fields_common = ['uuid', 'host_id', - 'host_uuid', - 'sensortype', 'datatype', - 'sensorgroupname', - 'path', - - 'state', - 'possible_states', - 'audit_interval_group', - 'algorithm', - 'actions_critical_choices', - 'actions_major_choices', - 'actions_minor_choices', - 'actions_minor_group', - 'actions_major_group', - 'actions_critical_group', - 'sensors', - - 'suppress', - 'capabilities', - 'created_at', 'updated_at', ] - - sensorgroup_fields_analog = ['unit_base_group', - 'unit_modifier_group', - 'unit_rate_group', - - 't_minor_lower_group', - 't_minor_upper_group', - 't_major_lower_group', - 't_major_upper_group', - 't_critical_lower_group', - 't_critical_upper_group', ] - - if rsensorgroup.datatype == 'discrete': - sensorgroup_fields = sensorgroup_fields_common - elif rsensorgroup.datatype == 'analog': - sensorgroup_fields = \ - sensorgroup_fields_common + sensorgroup_fields_analog - else: - LOG.error(_("Invalid datatype={}").format(rsensorgroup.datatype)) - - if not expand: - sensorgroup.unset_fields_except(sensorgroup_fields) - - if sensorgroup.host_id and not sensorgroup.host_uuid: - host = objects.Host.get_by_uuid(pecan.request.context, - sensorgroup.host_id) - sensorgroup.host_uuid = host.uuid - - # never expose the id attribute - sensorgroup.host_id = wtypes.Unset - sensorgroup.id = wtypes.Unset - - sensorgroup.links = [ - link.Link.make_link('self', pecan.request.host_url, - 'sensorgroups', - sensorgroup.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'sensorgroups', - sensorgroup.uuid, - bookmark=True)] - - sensorgroup.sensors = [ - link.Link.make_link('self', - pecan.request.host_url, - 'sensorgroups', - sensorgroup.uuid + "/sensors"), - 
link.Link.make_link('bookmark', - pecan.request.host_url, - 'sensorgroups', - sensorgroup.uuid + "/sensors", - bookmark=True)] - - return sensorgroup - - -class SensorGroupCollection(collection.Collection): - """API representation of a collection of SensorGroup objects.""" - - sensorgroups = [SensorGroup] - "A list containing SensorGroup objects" - - def __init__(self, **kwargs): - self._type = 'sensorgroups' - - @classmethod - def convert_with_links(cls, rsensorgroups, limit, url=None, - expand=False, **kwargs): - collection = SensorGroupCollection() - collection.sensorgroups = [SensorGroup.convert_with_links(p, expand) - for p in rsensorgroups] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'SensorGroupController' - - -class SensorGroupController(rest.RestController): - """REST controller for SensorGroups.""" - - sensors = sensor_api.SensorController(from_sensorgroup=True) - "Expose sensors as a sub-element of sensorgroups" - - _custom_actions = { - 'detail': ['GET'], - 'relearn': ['POST'], - } - - def __init__(self, from_hosts=False): - self._from_hosts = from_hosts - self._api_token = None - self._hwmon_address = k_host.LOCALHOST_HOSTNAME - self._hwmon_port = constants.HWMON_PORT - - def _get_sensorgroups_collection(self, uuid, - marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - if self._from_hosts and not uuid: - raise exception.InvalidParameterValue(_( - "Host id not specified.")) - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.SensorGroup.get_by_uuid( - pecan.request.context, - marker) - - if self._from_hosts: - sensorgroups = pecan.request.dbapi.sensorgroup_get_by_host( - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - if uuid: - sensorgroups = pecan.request.dbapi.sensorgroup_get_by_host( - uuid, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - else: - sensorgroups = pecan.request.dbapi.sensorgroup_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return SensorGroupCollection.convert_with_links(sensorgroups, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(SensorGroupCollection, types.uuid, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, uuid=None, - marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of sensorgroups.""" - - return self._get_sensorgroups_collection(uuid, - marker, limit, - sort_key, sort_dir) - - @wsme_pecan.wsexpose(SensorGroupCollection, types.uuid, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, uuid=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of sensorgroups with detail.""" - - # NOTE(lucasagomes): /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "sensorgroups": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['sensorgroups', 'detail']) - return self._get_sensorgroups_collection(uuid, marker, limit, - sort_key, sort_dir, - expand, resource_url) - - @wsme_pecan.wsexpose(SensorGroup, types.uuid) - def get_one(self, sensorgroup_uuid): - """Retrieve information about the given sensorgroup.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rsensorgroup = objects.SensorGroup.get_by_uuid( - pecan.request.context, sensorgroup_uuid) - - if 
rsensorgroup.datatype == 'discrete': - rsensorgroup = objects.SensorGroupDiscrete.get_by_uuid( - pecan.request.context, sensorgroup_uuid) - elif rsensorgroup.datatype == 'analog': - rsensorgroup = objects.SensorGroupAnalog.get_by_uuid( - pecan.request.context, sensorgroup_uuid) - else: - LOG.error(_("Invalid datatype={}").format(rsensorgroup.datatype)) - - return SensorGroup.convert_with_links(rsensorgroup) - - @staticmethod - def _new_sensorgroup_semantic_checks(sensorgroup): - datatype = sensorgroup.as_dict().get('datatype') or "" - sensortype = sensorgroup.as_dict().get('sensortype') or "" - if not (datatype and sensortype): - raise wsme.exc.ClientSideError(_("sensorgroup-add: Cannot " - "add a sensorgroup " - "without a valid datatype " - "and sensortype.")) - - if datatype not in constants.SENSOR_DATATYPE_VALID_LIST: - raise wsme.exc.ClientSideError( - _("sensorgroup datatype must be one of %s.") % - constants.SENSOR_DATATYPE_VALID_LIST) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(SensorGroup, body=SensorGroup) - def post(self, sensorgroup): - """Create a new sensorgroup.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - self._new_sensorgroup_semantic_checks(sensorgroup) - try: - sensorgroup_dict = sensorgroup.as_dict() - new_sensorgroup = _create(sensorgroup_dict) - - except exception.InventoryException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Invalid data")) - return sensorgroup.convert_with_links(new_sensorgroup) - - def _get_host_uuid(self, body): - host_uuid = body.get('host_uuid') or "" - try: - host = pecan.request.dbapi.host_get(host_uuid) - except exception.NotFound: - raise wsme.exc.ClientSideError("_get_host_uuid lookup failed") - return host.uuid - - @wsme_pecan.wsexpose('json', body=unicode) - def relearn(self, body): - """Handle Sensor Model Relearn Request.""" - host_uuid = self._get_host_uuid(body) - # LOG.info("Host UUID: %s - BM_TYPE: %s" % (host_uuid, bm_type )) - - # hwmon_sensorgroup = {'ihost_uuid': host_uuid} - request_body = {'host_uuid': host_uuid} - hwmon_response = hwmon_api.sensorgroup_relearn( - self._api_token, self._hwmon_address, self._hwmon_port, - request_body, - constants.HWMON_DEFAULT_TIMEOUT_IN_SECS) - - if not hwmon_response: - hwmon_response = {'status': 'fail', - 'reason': 'no response', - 'action': 'retry'} - - elif hwmon_response['status'] != 'pass': - msg = _("HWMON has returned with " - "a status of {}, reason: {}, " - "recommended action: {}").format( - hwmon_response.get('status'), - hwmon_response.get('reason'), - hwmon_response.get('action')) - - raise wsme.exc.ClientSideError(msg) - - @cutils.synchronized(LOCK_NAME) - @wsme.validate(types.uuid, [SensorGroupPatchType]) - @wsme_pecan.wsexpose(SensorGroup, types.uuid, - body=[SensorGroupPatchType]) - def patch(self, sensorgroup_uuid, patch): - """Update an existing sensorgroup.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - rsensorgroup = objects.SensorGroup.get_by_uuid( - pecan.request.context, sensorgroup_uuid) - - if rsensorgroup.datatype == 'discrete': - rsensorgroup = objects.SensorGroupDiscrete.get_by_uuid( - pecan.request.context, sensorgroup_uuid) - elif rsensorgroup.datatype == 'analog': - rsensorgroup = objects.SensorGroupAnalog.get_by_uuid( - pecan.request.context, sensorgroup_uuid) - else: - raise wsme.exc.ClientSideError(_("Invalid datatype={}").format( - rsensorgroup.datatype)) - - rsensorgroup_orig = copy.deepcopy(rsensorgroup) - - host = pecan.request.dbapi.host_get( - 
rsensorgroup['host_id']).as_dict() - - utils.validate_patch(patch) - patch_obj = jsonpatch.JsonPatch(patch) - my_host_uuid = None - for p in patch_obj: - # For Profile replace host_uuid with corresponding id - if p['path'] == '/host_uuid': - p['path'] = '/host_id' - host = objects.Host.get_by_uuid(pecan.request.context, - p['value']) - p['value'] = host.id - my_host_uuid = host.uuid - - # update sensors if set - sensors = None - for s in patch: - if '/sensors' in s['path']: - sensors = s['value'] - patch.remove(s) - break - - if sensors: - _update_sensors("modify", rsensorgroup, host, sensors) - - try: - sensorgroup = SensorGroup(**jsonpatch.apply_patch( - rsensorgroup.as_dict(), - patch_obj)) - - except utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - if rsensorgroup.datatype == 'discrete': - fields = objects.SensorGroupDiscrete.fields - else: - fields = objects.SensorGroupAnalog.fields - - for field in fields: - if rsensorgroup[field] != getattr(sensorgroup, field): - rsensorgroup[field] = getattr(sensorgroup, field) - - delta = rsensorgroup.obj_what_changed() - - sensorgroup_suppress_attrs = ['suppress'] - force_action = False - if any(x in delta for x in sensorgroup_suppress_attrs): - valid_suppress = ['True', 'False', 'true', 'false', 'force_action'] - if rsensorgroup.suppress.lower() not in valid_suppress: - raise wsme.exc.ClientSideError(_("Invalid suppress value, " - "select 'True' or 'False'")) - elif rsensorgroup.suppress.lower() == 'force_action': - LOG.info("suppress=%s" % rsensorgroup.suppress.lower()) - rsensorgroup.suppress = rsensorgroup_orig.suppress - force_action = True - - self._semantic_modifiable_fields(patch_obj, force_action) - - if not pecan.request.user_agent.startswith('hwmon'): - hwmon_sensorgroup = cutils.removekeys_nonhwmon( - rsensorgroup.as_dict()) - - if not my_host_uuid: - host = objects.Host.get_by_uuid(pecan.request.context, - rsensorgroup.host_id) - my_host_uuid = host.uuid - - hwmon_sensorgroup.update({'host_uuid': my_host_uuid}) - - hwmon_response = hwmon_api.sensorgroup_modify( - self._api_token, self._hwmon_address, self._hwmon_port, - hwmon_sensorgroup, - constants.HWMON_DEFAULT_TIMEOUT_IN_SECS) - - if not hwmon_response: - hwmon_response = {'status': 'fail', - 'reason': 'no response', - 'action': 'retry'} - - if hwmon_response['status'] != 'pass': - msg = _("HWMON has returned with a status of {}, reason: {}, " - "recommended action: {}").format( - hwmon_response.get('status'), - hwmon_response.get('reason'), - hwmon_response.get('action')) - - if force_action: - LOG.error(msg) - else: - raise wsme.exc.ClientSideError(msg) - - sensorgroup_prop_attrs = ['audit_interval_group', - 'actions_minor_group', - 'actions_major_group', - 'actions_critical_group', - 'suppress'] - - if any(x in delta for x in sensorgroup_prop_attrs): - # propagate to Sensors within this SensorGroup - sensor_val = {'audit_interval': rsensorgroup.audit_interval_group, - 'actions_minor': rsensorgroup.actions_minor_group, - 'actions_major': rsensorgroup.actions_major_group, - 'actions_critical': - rsensorgroup.actions_critical_group} - if 'suppress' in delta: - sensor_val.update({'suppress': rsensorgroup.suppress}) - pecan.request.dbapi.sensorgroup_propagate( - rsensorgroup.uuid, sensor_val) - - rsensorgroup.save() - - return SensorGroup.convert_with_links(rsensorgroup) - - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, 
sensorgroup_uuid): - """Delete a sensorgroup.""" - if self._from_hosts: - raise exception.OperationNotPermitted - - pecan.request.dbapi.sensorgroup_destroy(sensorgroup_uuid) - - @staticmethod - def _semantic_modifiable_fields(patch_obj, force_action=False): - # Prevent auto populated fields from being updated - state_rel_path = ['/uuid', '/id', '/host_id', '/datatype', - '/sensortype'] - - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError(_("The following fields can not be " - "modified: %s ") % state_rel_path) - - if not (pecan.request.user_agent.startswith('hwmon') or force_action): - state_rel_path = ['/sensorgroupname', '/path', - '/state', '/possible_states', - '/actions_critical_choices', - '/actions_major_choices', - '/actions_minor_choices', - '/unit_base_group', - '/unit_modifier_group', - '/unit_rate_group', - '/t_minor_lower_group', - '/t_minor_upper_group', - '/t_major_lower_group', - '/t_major_upper_group', - '/t_critical_lower_group', - '/t_critical_upper_group', - ] - - if any(p['path'] in state_rel_path for p in patch_obj): - raise wsme.exc.ClientSideError( - _("The following fields are not remote-modifiable: %s") % - state_rel_path) - - -def _create(sensorgroup, from_profile=False): - """Create a sensorgroup through a non-HTTP request e.g. via profile.py - while still passing through sensorgroup semantic checks. - Hence, not declared inside a class. - Param: - sensorgroup - dictionary of sensorgroup values - from_profile - Boolean whether from profile - """ - - if 'host_id' in sensorgroup and sensorgroup['host_id']: - ihostid = sensorgroup['host_id'] - else: - ihostid = sensorgroup['host_uuid'] - - ihost = pecan.request.dbapi.host_get(ihostid) - if uuidutils.is_uuid_like(ihostid): - host_id = ihost['id'] - else: - host_id = ihostid - sensorgroup.update({'host_id': host_id}) - LOG.info("sensorgroup post sensorgroups ihostid: %s" % host_id) - sensorgroup['host_uuid'] = ihost['uuid'] - - # Assign UUID if not already done. - if not sensorgroup.get('uuid'): - sensorgroup['uuid'] = str(uuid.uuid4()) - - # Get sensors - sensors = None - if 'sensors' in sensorgroup: - sensors = sensorgroup['sensors'] - - # Set defaults - before checks to allow for optional attributes - # if not from_profile: - # sensorgroup = _set_defaults(sensorgroup) - - # Semantic checks - # sensorgroup = _check("add", - # sensorgroup, - # sensors=sensors, - # ifaces=uses_if, - # from_profile=from_profile) - - if sensorgroup.get('datatype'): - if sensorgroup['datatype'] == 'discrete': - new_sensorgroup = pecan.request.dbapi.sensorgroup_discrete_create( - ihost.id, sensorgroup) - elif sensorgroup['datatype'] == 'analog': - new_sensorgroup = pecan.request.dbapi.sensorgroup_analog_create( - ihost.id, sensorgroup) - else: - raise wsme.exc.ClientSideError(_("Invalid datatype. 
%s") % - sensorgroup.datatype) - else: - raise wsme.exc.ClientSideError(_("Unspecified datatype.")) - - # Update sensors - if sensors: - try: - _update_sensors("modify", - new_sensorgroup.as_dict(), - ihost, - sensors) - except Exception as e: - pecan.request.dbapi.sensorgroup_destroy( - new_sensorgroup.as_dict()['uuid']) - raise e - - # Update sensors - # return new_sensorgroup - return SensorGroup.convert_with_links(new_sensorgroup) - - -def _update_sensors(op, sensorgroup, ihost, sensors): - sensors = sensors.split(',') - - this_sensorgroup_datatype = None - this_sensorgroup_sensortype = None - if op == "add": - this_sensorgroup_id = 0 - else: - this_sensorgroup_id = sensorgroup['id'] - this_sensorgroup_datatype = sensorgroup['datatype'] - this_sensorgroup_sensortype = sensorgroup['sensortype'] - - if sensors: - # Update Sensors' sensorgroup_uuid attribute - sensors_list = pecan.request.dbapi.sensor_get_all( - host_id=ihost['id']) - for p in sensors_list: - # if new sensor associated - if (p.uuid in sensors or p.sensorname in sensors) \ - and not p.sensorgroup_id: - values = {'sensorgroup_id': sensorgroup['id']} - # else if old sensor disassociated - elif ((p.uuid not in sensors and p.sensorname not in sensors) and - p.sensorgroup_id and - p.sensorgroup_id == this_sensorgroup_id): - values = {'sensorgroup_id': None} - else: - continue - - if p.datatype != this_sensorgroup_datatype: - msg = _("Invalid datatype: host {} sensor {}: Expected: {} " - "Received: {}.").format( - (ihost['hostname'], p.sensorname, - this_sensorgroup_datatype, p.datatype)) - raise wsme.exc.ClientSideError(msg) - - if p.sensortype != this_sensorgroup_sensortype: - msg = _("Invalid sensortype: host {} sensor {}: Expected: {} " - "Received: {}.").format( - ihost['hostname'], p.sensorname, - this_sensorgroup_sensortype, p.sensortype) - raise wsme.exc.ClientSideError(msg) - - try: - pecan.request.dbapi.sensor_update(p.uuid, values) - except exception.HTTPNotFound: - msg = _("Sensor update of sensorgroup_uuid failed: host {} " - "sensor {}").format(ihost['hostname'], p.sensorname) - raise wsme.exc.ClientSideError(msg) diff --git a/inventory/inventory/inventory/api/controllers/v1/state.py b/inventory/inventory/inventory/api/controllers/v1/state.py deleted file mode 100644 index b4df06da..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/state.py +++ /dev/null @@ -1,38 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. 
-# - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import link -from wsme import types as wtypes - - -class State(base.APIBase): - - current = wtypes.text - "The current state" - - target = wtypes.text - "The user modified desired state" - - available = [wtypes.text] - "A list of available states it is able to transition to" - - links = [link.Link] - "A list containing a self link and associated state links" diff --git a/inventory/inventory/inventory/api/controllers/v1/sysinv.py b/inventory/inventory/inventory/api/controllers/v1/sysinv.py deleted file mode 100644 index 95b38c92..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/sysinv.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from cgtsclient.v1 import client as cgts_client -from inventory.api import config -from keystoneauth1 import loading as ks_loading -from oslo_config import cfg -from oslo_log import log - - -CONF = cfg.CONF - -LOG = log.getLogger(__name__) - -_SESSION = None - - -def cgtsclient(context, version=1, endpoint=None): - """Constructs a cgts client object for making API requests. - - :param context: The FM request context for auth. - :param version: API endpoint version. - :param endpoint: Optional If the endpoint is not available, it will be - retrieved from session - """ - global _SESSION - - if not _SESSION: - _SESSION = ks_loading.load_session_from_conf_options( - CONF, config.sysinv_group.name) - - auth_token = context.auth_token - if endpoint is None: - auth = context.get_auth_plugin() - service_type, service_name, interface = \ - CONF.sysinv.catalog_info.split(':') - service_parameters = {'service_type': service_type, - 'service_name': service_name, - 'interface': interface, - 'region_name': CONF.sysinv.os_region_name} - endpoint = _SESSION.get_endpoint(auth, **service_parameters) - - return cgts_client.Client(version=version, - endpoint=endpoint, - token=auth_token) diff --git a/inventory/inventory/inventory/api/controllers/v1/system.py b/inventory/inventory/inventory/api/controllers/v1/system.py deleted file mode 100644 index a98734f5..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/system.py +++ /dev/null @@ -1,266 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from oslo_log import log -import pecan -from pecan import rest -import six -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from inventory.api.controllers.v1 import base -from inventory.api.controllers.v1 import collection -from inventory.api.controllers.v1 import host -from inventory.api.controllers.v1 import link -from inventory.api.controllers.v1 import types -from inventory.api.controllers.v1 import utils as api_utils -from inventory.common import constants -from inventory.common import exception -from inventory.common import k_host -from inventory import objects - -LOG = log.getLogger(__name__) - -VALID_VSWITCH_TYPES = [constants.VSWITCH_TYPE_OVS_DPDK] - - -class System(base.APIBase): - """API representation of a system. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of - a system. 
- """ - - uuid = types.uuid - "The UUID of the system" - - name = wtypes.text - "The name of the system" - - system_type = wtypes.text - "The type of the system" - - system_mode = wtypes.text - "The mode of the system" - - description = wtypes.text - "The name of the system" - - contact = wtypes.text - "The contact of the system" - - location = wtypes.text - "The location of the system" - - services = int - "The services of the system" - - software_version = wtypes.text - "A textual description of the entity" - - timezone = wtypes.text - "The timezone of the system" - - links = [link.Link] - "A list containing a self link and associated system links" - - hosts = [link.Link] - "Links to the collection of hosts contained in this system" - - capabilities = {wtypes.text: api_utils.ValidTypes(wtypes.text, bool, - six.integer_types)} - "System defined capabilities" - - region_name = wtypes.text - "The region name of the system" - - distributed_cloud_role = wtypes.text - "The distributed cloud role of the system" - - service_project_name = wtypes.text - "The service project name of the system" - - security_feature = wtypes.text - "Kernel arguments associated with enabled spectre/meltdown fix features" - - def __init__(self, **kwargs): - self.fields = objects.System.fields.keys() - - for k in self.fields: - # Translate any special internal representation of data to its - # customer facing form - if k == 'security_feature': - # look up which customer-facing-security-feature-string goes - # with the kernel arguments tracked in sysinv - kernel_args = kwargs.get(k) - translated_string = kernel_args - - for user_string, args_string in \ - constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS.iteritems(): # noqa - if args_string == kernel_args: - translated_string = user_string - break - setattr(self, k, translated_string) - else: - # No translation required - setattr(self, k, kwargs.get(k)) - - @classmethod - def convert_with_links(cls, rpc_system, expand=True): - minimum_fields = ['id', 'uuid', 'name', 'system_type', 'system_mode', - 'description', 'capabilities', - 'contact', 'location', 'software_version', - 'created_at', 'updated_at', 'timezone', - 'region_name', 'service_project_name', - 'distributed_cloud_role', 'security_feature'] - - fields = minimum_fields if not expand else None - - iSystem = System.from_rpc_object(rpc_system, fields) - - iSystem.links = [link.Link.make_link('self', pecan.request.host_url, - 'systems', iSystem.uuid), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'systems', iSystem.uuid, - bookmark=True) - ] - - if expand: - iSystem.hosts = [ - link.Link.make_link('self', - pecan.request.host_url, - 'systems', - iSystem.uuid + "/hosts"), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'systems', - iSystem.uuid + "/hosts", - bookmark=True)] - - return iSystem - - -class SystemCollection(collection.Collection): - """API representation of a collection of systems.""" - - systems = [System] - "A list containing system objects" - - def __init__(self, **kwargs): - self._type = 'systems' - - @classmethod - def convert_with_links(cls, systems, limit, url=None, - expand=False, **kwargs): - collection = SystemCollection() - collection.systems = [System.convert_with_links(ch, expand) - for ch in systems] - - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -LOCK_NAME = 'SystemController' - - -class SystemController(rest.RestController): - """REST controller for system.""" - - hosts = 
host.HostController(from_system=True) - "Expose hosts as a sub-element of system" - - _custom_actions = { - 'detail': ['GET'], - } - - def __init__(self): - self._bm_region = None - - def _bm_region_get(self): - # only supported region type is BM_EXTERNAL - if not self._bm_region: - self._bm_region = k_host.BM_EXTERNAL - return self._bm_region - - def _get_system_collection(self, marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - marker_obj = None - if marker: - marker_obj = objects.System.get_by_uuid(pecan.request.context, - marker) - system = pecan.request.dbapi.system_get_list(limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - for i in system: - i.capabilities['bm_region'] = self._bm_region_get() - - return SystemCollection.convert_with_links(system, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(SystemCollection, types.uuid, - int, wtypes.text, wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of systems. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - return self._get_system_collection(marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(SystemCollection, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of system with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - # /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "system": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['system', 'detail']) - return self._get_system_collection(marker, limit, sort_key, sort_dir, - expand, resource_url) - - @wsme_pecan.wsexpose(System, types.uuid) - def get_one(self, system_uuid): - """Retrieve information about the given system. - - :param system_uuid: UUID of a system. - """ - rpc_system = objects.System.get_by_uuid(pecan.request.context, - system_uuid) - - rpc_system.capabilities['bm_region'] = self._bm_region_get() - return System.convert_with_links(rpc_system) - - @wsme_pecan.wsexpose(System, body=System) - def post(self, system): - """Create a new system.""" - raise exception.OperationNotPermitted - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, system_uuid): - """Delete a system. - - :param system_uuid: UUID of a system. - """ - raise exception.OperationNotPermitted diff --git a/inventory/inventory/inventory/api/controllers/v1/types.py b/inventory/inventory/inventory/api/controllers/v1/types.py deleted file mode 100644 index 2056c32f..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/types.py +++ /dev/null @@ -1,215 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding: utf-8 -# -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
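[Reviewer note] All of the removed collection getters (ports, sensors, sensorgroups, systems) share the same marker/limit pagination shape, built on validate_limit and validate_sort_dir from the removed utils module. A condensed sketch of the pattern, independent of pecan and the inventory objects layer — helper names and the limit_max default are illustrative:

    def paginate(query_fn, marker_lookup, marker=None, limit=None,
                 limit_max=1000, sort_key='id', sort_dir='asc'):
        """Generic marker/limit pagination as used by the removed controllers."""
        if limit and limit < 0:
            raise ValueError('Limit must be positive')
        limit = min(limit_max, limit) if limit else limit_max
        if sort_dir not in ('asc', 'desc'):
            raise ValueError('Invalid sort direction: %s' % sort_dir)
        # The marker is the uuid of the last item on the previous page.
        marker_obj = marker_lookup(marker) if marker else None
        return query_fn(limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)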
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -from oslo_utils import strutils -import six - -import wsme -from wsme import types as wtypes - -from inventory.api.controllers.v1 import utils as apiutils -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import utils - - -class MACAddressType(wtypes.UserType): - """A simple MAC address type.""" - - basetype = wtypes.text - name = 'macaddress' - - @staticmethod - def validate(value): - return utils.validate_and_normalize_mac(value) - - @staticmethod - def frombasetype(value): - return MACAddressType.validate(value) - - -class UUIDType(wtypes.UserType): - """A simple UUID type.""" - - basetype = wtypes.text - name = 'uuid' - # FIXME(lucasagomes): When used with wsexpose decorator WSME will try - # to get the name of the type by accessing it's __name__ attribute. - # Remove this __name__ attribute once it's fixed in WSME. - # https://bugs.launchpad.net/wsme/+bug/1265590 - __name__ = name - - @staticmethod - def validate(value): - if not utils.is_uuid_like(value): - raise exception.InvalidUUID(uuid=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UUIDType.validate(value) - - -class BooleanType(wtypes.UserType): - """A simple boolean type.""" - - basetype = wtypes.text - name = 'boolean' - - @staticmethod - def validate(value): - try: - return strutils.bool_from_string(value, strict=True) - except ValueError as e: - # raise Invalid to return 400 (BadRequest) in the API - raise exception.Invalid(six.text_type(e)) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return BooleanType.validate(value) - - -class IPAddressType(wtypes.UserType): - """A generic IP address type that supports both IPv4 and IPv6.""" - - basetype = wtypes.text - name = 'ipaddress' - - @staticmethod - def validate(value): - if not utils.is_valid_ip(value): - raise exception.InvalidIPAddress(address=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return IPAddressType.validate(value) - - -macaddress = MACAddressType() -uuid = UUIDType() -boolean = BooleanType() -ipaddress = IPAddressType() - - -class ApiDictType(wtypes.UserType): - name = 'apidict' - __name__ = name - - basetype = {wtypes.text: - apiutils.ValidTypes(wtypes.text, six.integer_types)} - - -apidict = ApiDictType() - - -class JsonPatchType(wtypes.Base): - """A complex type that represents a single json-patch operation.""" - - path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'), - mandatory=True) - op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), - mandatory=True) - value = apiutils.ValidTypes(wtypes.text, six.integer_types, float) - - @staticmethod - def internal_attrs(): - """Returns a list of internal attributes. - - Internal attributes can't be added, replaced or removed. This - method may be overwritten by derived class. 
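[Reviewer note] The removed types module wraps each primitive (MAC, UUID, boolean, IP address) in a wsme UserType whose validate() rejects bad input before it reaches a controller. A minimal sketch of the pattern for one type, using oslo_utils.uuidutils as a stand-in for the removed inventory.common.utils helper and a plain ValueError in place of the project's exception class:

    from oslo_utils import uuidutils
    from wsme import types as wtypes

    class UUIDType(wtypes.UserType):
        """Validate-and-passthrough user type, as in the removed module."""
        basetype = wtypes.text
        name = 'uuid'

        @staticmethod
        def validate(value):
            if not uuidutils.is_uuid_like(value):
                raise ValueError('invalid UUID: %s' % value)
            return value

        @staticmethod
        def frombasetype(value):
            return None if value is None else UUIDType.validate(value)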
- - """ - return ['/created_at', '/id', '/links', '/updated_at', '/uuid'] - - @staticmethod - def mandatory_attrs(): - """Retruns a list of mandatory attributes. - - Mandatory attributes can't be removed from the document. This - method should be overwritten by derived class. - - """ - return [] - - @staticmethod - def validate(patch): - if patch.path in patch.internal_attrs(): - msg = _("'%s' is an internal attribute and can not be updated") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.path in patch.mandatory_attrs() and patch.op == 'remove': - msg = _("'%s' is a mandatory attribute and can not be removed") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.op == 'add': - if patch.path.count('/') == 1: - msg = _('Adding a new attribute (%s) to the root of ' - ' the resource is not allowed') - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.op != 'remove': - if not patch.value: - msg = _("Edit and Add operation of the field requires " - "non-empty value.") - raise wsme.exc.ClientSideError(msg) - - ret = {'path': patch.path, 'op': patch.op} - if patch.value: - ret['value'] = patch.value - return ret - - -class MultiType(wtypes.UserType): - """A complex type that represents one or more types. - - Used for validating that a value is an instance of one of the types. - - :param *types: Variable-length list of types. - - """ - def __init__(self, types): - self.types = types - - def validate(self, value): - for t in self.types: - if t is wsme.types.text and isinstance(value, wsme.types.bytes): - value = value.decode() - if isinstance(t, list): - if isinstance(value, list): - for v in value: - if not isinstance(v, t[0]): - break - else: - return value - elif isinstance(value, t): - return value - else: - raise ValueError( - _("Wrong type. Expected '%(type)s', got '%(value)s'") - % {'type': self.types, 'value': type(value)}) diff --git a/inventory/inventory/inventory/api/controllers/v1/utils.py b/inventory/inventory/inventory/api/controllers/v1/utils.py deleted file mode 100755 index c5aceb5c..00000000 --- a/inventory/inventory/inventory/api/controllers/v1/utils.py +++ /dev/null @@ -1,560 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import contextlib -import jsonpatch -import netaddr -import os -import pecan -import re -import socket -import sys -import traceback -import tsconfig.tsconfig as tsc -import wsme - -from inventory.api.controllers.v1.sysinv import cgtsclient -from inventory.common import constants -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common.utils import memoized -from inventory import objects -from oslo_config import cfg -from oslo_log import log - -CONF = cfg.CONF -LOG = log.getLogger(__name__) -KEY_VALUE_SEP = '=' -JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, - jsonpatch.JsonPointerException, - KeyError) - - -def ip_version_to_string(ip_version): - return str(constants.IP_FAMILIES[ip_version]) - - -def validate_limit(limit): - if limit and limit < 0: - raise wsme.exc.ClientSideError(_("Limit must be positive")) - - return min(CONF.api.limit_max, limit) or CONF.api.limit_max - - -def validate_sort_dir(sort_dir): - if sort_dir not in ['asc', 'desc']: - raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. 
" - "Acceptable values are " - "'asc' or 'desc'") % sort_dir) - return sort_dir - - -def validate_patch(patch): - """Performs a basic validation on patch.""" - - if not isinstance(patch, list): - patch = [patch] - - for p in patch: - path_pattern = re.compile("^/[a-zA-Z0-9-_]+(/[a-zA-Z0-9-_]+)*$") - - if not isinstance(p, dict) or \ - any(key for key in ["path", "op"] if key not in p): - raise wsme.exc.ClientSideError( - _("Invalid patch format: %s") % str(p)) - - path = p["path"] - op = p["op"] - - if op not in ["add", "replace", "remove"]: - raise wsme.exc.ClientSideError( - _("Operation not supported: %s") % op) - - if not path_pattern.match(path): - raise wsme.exc.ClientSideError(_("Invalid path: %s") % path) - - if op == "add": - if path.count('/') == 1: - raise wsme.exc.ClientSideError( - _("Adding an additional attribute (%s) to the " - "resource is not allowed") % path) - - -def validate_mtu(mtu): - """Check if MTU is valid""" - if mtu < 576 or mtu > 9216: - raise wsme.exc.ClientSideError(_( - "MTU must be between 576 and 9216 bytes.")) - - -def validate_address_within_address_pool(ip, pool): - """Determine whether an IP address is within the specified IP address pool. - :param ip netaddr.IPAddress object - :param pool objects.AddressPool object - """ - ipset = netaddr.IPSet() - for start, end in pool.ranges: - ipset.update(netaddr.IPRange(start, end)) - - if netaddr.IPAddress(ip) not in ipset: - raise wsme.exc.ClientSideError(_( - "IP address %s is not within address pool ranges") % str(ip)) - - -def validate_address_within_network(ip, network): - """Determine whether an IP address is within the specified IP network. - :param ip netaddr.IPAddress object - :param network objects.Network object - """ - LOG.info("TODO(sc) validate_address_within_address_pool " - "ip=%s, network=%s" % (ip, network)) - - -class ValidTypes(wsme.types.UserType): - """User type for validate that value has one of a few types.""" - - def __init__(self, *types): - self.types = types - - def validate(self, value): - for t in self.types: - if t is wsme.types.text and isinstance(value, wsme.types.bytes): - value = value.decode() - if isinstance(value, t): - return value - else: - raise ValueError("Wrong type. Expected '%s', got '%s'" % ( - self.types, type(value))) - - -def is_valid_hostname(hostname): - """Determine whether an address is valid as per RFC 1123. - """ - - # Maximum length of 255 - rc = True - length = len(hostname) - if length > 255: - raise wsme.exc.ClientSideError(_( - "Hostname {} is too long. Length {} is greater than 255." - "Please configure valid hostname.").format(hostname, length)) - - # Allow a single dot on the right hand side - if hostname[-1] == ".": - hostname = hostname[:-1] - # Create a regex to ensure: - # - hostname does not begin or end with a dash - # - each segment is 1 to 63 characters long - # - valid characters are A-Z (any case) and 0-9 - valid_re = re.compile("(?!-)[A-Z\d-]{1,63}(?' 
+ - '\n'.join(app_iter) + - ''))] - except et.ElementTree.ParseError as err: - LOG.error('Error parsing HTTP response: %s', err) - body = ['%s' % state['status_code'] + - ''] - state['headers'].append(('Content-Type', 'application/xml')) - else: - if six.PY3: - app_iter = [i.decode('utf-8') for i in app_iter] - body = [json.dumps({'error_message': '\n'.join(app_iter)})] - if six.PY3: - body = [item.encode('utf-8') for item in body] - state['headers'].append(('Content-Type', 'application/json')) - state['headers'].append(('Content-Length', str(len(body[0])))) - else: - body = app_iter - return body diff --git a/inventory/inventory/inventory/cmd/__init__.py b/inventory/inventory/inventory/cmd/__init__.py deleted file mode 100644 index b1de0395..00000000 --- a/inventory/inventory/inventory/cmd/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os - -os.environ['EVENTLET_NO_GREENDNS'] = 'yes' # noqa E402 - -import eventlet - -eventlet.monkey_patch(os=False) - -import oslo_i18n as i18n # noqa I202 - -i18n.install('inventory') diff --git a/inventory/inventory/inventory/cmd/agent.py b/inventory/inventory/inventory/cmd/agent.py deleted file mode 100644 index c2c544a7..00000000 --- a/inventory/inventory/inventory/cmd/agent.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. 
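The cmd/__init__.py module removed above depends entirely on import-order side effects: eventlet must monkey-patch the standard library before any other module caches references to the unpatched socket or threading primitives, and EVENTLET_NO_GREENDNS must be set before eventlet itself is first imported. A minimal standalone sketch of that ordering (illustrative only, not the removed code itself):

import os

# Must be set before eventlet is first imported, or green DNS is enabled.
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'

import eventlet  # noqa: E402

# Patch socket/thread/time so blocking calls cooperate with the hub;
# os=False leaves process-level calls such as fork()/waitpid() unpatched.
eventlet.monkey_patch(os=False)

import socket  # noqa: E402

# After patching, socket.socket typically resolves to eventlet's green class.
print(socket.socket)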
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -The Inventory Agent Service -""" - -import sys - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service - -from inventory.common import rpc_service -from inventory.common import service as inventory_service - - -CONF = cfg.CONF - -LOG = log.getLogger(__name__) - - -def main(): - # Parse config file and command line options, then start logging - inventory_service.prepare_service(sys.argv) - - # connection is based upon host and MANAGER_TOPIC - mgr = rpc_service.RPCService(CONF.host, - 'inventory.agent.manager', - 'AgentManager') - launcher = service.launch(CONF, mgr) - launcher.wait() - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/inventory/inventory/inventory/cmd/api.py b/inventory/inventory/inventory/cmd/api.py deleted file mode 100644 index a601cce7..00000000 --- a/inventory/inventory/inventory/cmd/api.py +++ /dev/null @@ -1,86 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import sys - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import systemd -from oslo_service import wsgi - -import logging as std_logging - -from inventory.api import app -from inventory.api import config -from inventory.common.i18n import _ - -api_opts = [ - cfg.StrOpt('bind_host', - default="0.0.0.0", - help=_('IP address for inventory api to listen')), - cfg.IntOpt('bind_port', - default=6380, - help=_('listen port for inventory api')), - cfg.StrOpt('bind_host_pxe', - default="0.0.0.0", - help=_('IP address for inventory api pxe to listen')), - cfg.IntOpt('api_workers', default=2, - help=_("number of api workers")), - cfg.IntOpt('limit_max', - default=1000, - help='the maximum number of items returned in a single ' - 'response from a collection resource') -] - - -CONF = cfg.CONF - - -LOG = logging.getLogger(__name__) -eventlet.monkey_patch(os=False) - - -def main(): - - config.init(sys.argv[1:]) - config.setup_logging() - - application = app.load_paste_app() - - CONF.register_opts(api_opts, 'api') - - host = CONF.api.bind_host - port = CONF.api.bind_port - workers = CONF.api.api_workers - - if workers < 1: - LOG.warning("Wrong worker number, worker = %(workers)s", workers) - workers = 1 - - LOG.info("Serving on http://%(host)s:%(port)s with %(workers)s", - {'host': host, 'port': port, 'workers': workers}) - systemd.notify_once() - service = wsgi.Server(CONF, CONF.prog, application, host, port) - - app.serve(service, CONF, workers) - - pxe_host = CONF.api.bind_host_pxe - if pxe_host: - pxe_service = wsgi.Server(CONF, CONF.prog, application, pxe_host, port) - app.serve_pxe(pxe_service, CONF, 1) - - LOG.debug("Configuration:") - CONF.log_opt_values(LOG, std_logging.DEBUG) - - app.wait() - if pxe_host: - app.wait_pxe() - - -if __name__ == '__main__': - main() diff --git a/inventory/inventory/inventory/cmd/conductor.py b/inventory/inventory/inventory/cmd/conductor.py deleted file mode 100644 index 69b05669..00000000 --- a/inventory/inventory/inventory/cmd/conductor.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -""" -The Inventory Conductor Service -""" - -import sys - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service - -from inventory.common import rpc_service -from inventory.common import service as inventory_service - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -def main(): - # Parse config file and command line options, then start logging - inventory_service.prepare_service(sys.argv) - - mgr = rpc_service.RPCService(CONF.host, - 'inventory.conductor.manager', - 'ConductorManager') - - launcher = service.launch(CONF, mgr) - launcher.wait() - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/inventory/inventory/inventory/cmd/dbsync.py b/inventory/inventory/inventory/cmd/dbsync.py deleted file mode 100644 index 4e1c7e9c..00000000 --- a/inventory/inventory/inventory/cmd/dbsync.py +++ /dev/null @@ -1,19 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from oslo_config import cfg -import sys - -from inventory.db import migration - -CONF = cfg.CONF - - -def main(): - cfg.CONF(sys.argv[1:], - project='inventory') - migration.db_sync() diff --git a/inventory/inventory/inventory/cmd/dnsmasq_lease_update.py b/inventory/inventory/inventory/cmd/dnsmasq_lease_update.py deleted file mode 100755 index bad1f606..00000000 --- a/inventory/inventory/inventory/cmd/dnsmasq_lease_update.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. 
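Both service entry points deleted above (agent.py and conductor.py) share one launch idiom: prepare configuration and logging, build a service object around an RPC manager, hand it to the oslo_service launcher, and block on wait(). A reduced sketch of that idiom with a stand-in service class (DemoService is hypothetical; the removed code wraps rpc_service.RPCService instead):

import sys

from oslo_config import cfg
from oslo_service import service


class DemoService(service.Service):
    """Stand-in for RPCService; start()/stop() run under the launcher."""

    def start(self):
        super(DemoService, self).start()
        # A real manager would start an RPC server on its topic here.


def main():
    cfg.CONF(sys.argv[1:], project='demo')
    launcher = service.launch(cfg.CONF, DemoService())
    launcher.wait()


if __name__ == '__main__':
    sys.exit(main())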
-# - - -""" -Handle lease database updates from dnsmasq DHCP server -This file was based on dhcpbridge.py from nova -""" - -from __future__ import print_function - -import os -import sys - -from inventory.common import context -from inventory.common.i18n import _ -from inventory.common import service as inventory_service -from inventory.conductor import rpcapi as conductor_rpcapi - -from oslo_config import cfg -from oslo_log import log - -CONF = cfg.CONF - - -def add_lease(mac, ip_address): - """Called when a new lease is created.""" - - ctxt = context.get_admin_context() - rpcapi = \ - conductor_rpcapi.ConductorAPI(topic=conductor_rpcapi.MANAGER_TOPIC) - - cid = None - cid = os.getenv('DNSMASQ_CLIENT_ID') - - tags = None - tags = os.getenv('DNSMASQ_TAGS') - - if tags is not None: - # TODO(sc): Maybe this shouldn't be synchronous - if this hangs, - # we could cause dnsmasq to get stuck... - rpcapi.handle_dhcp_lease(ctxt, tags, mac, ip_address, cid) - - -def old_lease(mac, ip_address): - """Called when an old lease is recognized.""" - - # This happens when a node is rebooted, but it can also happen if the - # node was deleted and then rebooted, so we need to re-add in that case. - - ctxt = context.get_admin_context() - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - - cid = None - cid = os.getenv('DNSMASQ_CLIENT_ID') - - tags = None - tags = os.getenv('DNSMASQ_TAGS') - - if tags is not None: - # TODO(sc): Maybe this shouldn't be synchronous - if this hangs, - # we could cause dnsmasq to get stuck... - rpcapi.handle_dhcp_lease(ctxt, tags, mac, ip_address, cid) - - -def del_lease(mac, ip_address): - """Called when a lease expires.""" - # We will only delete the ihost when it is requested by the user. - pass - - -def add_action_parsers(subparsers): - # NOTE(cfb): dnsmasq always passes mac, and ip. hostname - # is passed if known. We don't care about - # hostname, but argparse will complain if we - # do not accept it. - for action in ['add', 'del', 'old']: - parser = subparsers.add_parser(action) - parser.add_argument('mac') - parser.add_argument('ip') - parser.add_argument('hostname', nargs='?', default='') - parser.set_defaults(func=globals()[action + '_lease']) - - -CONF.register_cli_opt( - cfg.SubCommandOpt('action', - title='Action options', - help='Available dnsmasq_lease_update options', - handler=add_action_parsers)) - - -def main(): - # Parse config file and command line options, then start logging - # The mac is to be truncated to 17 characters, which is the proper - # length of a mac address, in order to handle IPv6 where a DUID - # is provided instead of a mac address. The truncated DUID is - # then equivalent to the mac address. 
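    # Illustration of the truncation described above (values hypothetical):
    # a colon-separated MAC is exactly 17 characters, so slicing the last 17
    # characters is a no-op for a real MAC and reduces an IPv6 DUID to its
    # trailing MAC-sized portion:
    #   '00:01:00:01:52:54:00:12:34:56'[-17:]  ->  '52:54:00:12:34:56'
    #   '08:00:27:aa:bb:cc'[-17:]              ->  '08:00:27:aa:bb:cc'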
- inventory_service.prepare_service(sys.argv) - - LOG = log.getLogger(__name__) - - if CONF.action.name in ['add', 'del', 'old']: - msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % - {"action": CONF.action.name, - "mac": CONF.action.mac[-17:], - "ip": CONF.action.ip}) - LOG.info(msg) - CONF.action.func(CONF.action.mac[-17:], CONF.action.ip) - else: - LOG.error(_("Unknown action: %(action)") % {"action": - CONF.action.name}) diff --git a/inventory/inventory/inventory/common/__init__.py b/inventory/inventory/inventory/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/common/base.py b/inventory/inventory/inventory/common/base.py deleted file mode 100644 index 429d2167..00000000 --- a/inventory/inventory/inventory/common/base.py +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from oslo_log import log -LOG = log.getLogger(__name__) - - -class APIResourceWrapper(object): - """Simple wrapper for api objects. - - Define _attrs on the child class and pass in the - api object as the only argument to the constructor - """ - _attrs = [] - _apiresource = None # Make sure _apiresource is there even in __init__. - - def __init__(self, apiresource): - self._apiresource = apiresource - - def __getattribute__(self, attr): - try: - return object.__getattribute__(self, attr) - except AttributeError: - if attr not in self._attrs: - raise - # __getattr__ won't find properties - return getattr(self._apiresource, attr) - - def __repr__(self): - return "<%s: %s>" % (self.__class__.__name__, - dict((attr, getattr(self, attr)) - for attr in self._attrs - if hasattr(self, attr))) - - def as_dict(self): - obj = {} - for key in self._attrs: - obj[key] = getattr(self._apiresource, key, None) - return obj diff --git a/inventory/inventory/inventory/common/ceph.py b/inventory/inventory/inventory/common/ceph.py deleted file mode 100644 index 5eba597e..00000000 --- a/inventory/inventory/inventory/common/ceph.py +++ /dev/null @@ -1,211 +0,0 @@ - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Copyright (c) 2016, 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# All Rights Reserved. -# - -""" Inventory Ceph Utilities and helper functions.""" - -from __future__ import absolute_import - -from cephclient import wrapper as ceph -from inventory.common import constants -from inventory.common import k_host -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class CephApiOperator(object): - """Class to encapsulate Ceph operations for Inventory API - Methods on object-based storage devices (OSDs). - """ - - def __init__(self): - self._ceph_api = ceph.CephWrapper( - endpoint='https://localhost:5001/') - - def ceph_status_ok(self, timeout=10): - """ - returns rc bool. 
True if ceph ok, False otherwise - :param timeout: ceph api timeout - """ - rc = True - - try: - response, body = self._ceph_api.status(body='json', - timeout=timeout) - ceph_status = body['output']['health']['overall_status'] - if ceph_status != constants.CEPH_HEALTH_OK: - LOG.warn("ceph status=%s " % ceph_status) - rc = False - except Exception as e: - rc = False - LOG.warn("ceph status exception: %s " % e) - - return rc - - def _osd_quorum_names(self, timeout=10): - quorum_names = [] - try: - response, body = self._ceph_api.quorum_status(body='json', - timeout=timeout) - quorum_names = body['output']['quorum_names'] - except Exception as ex: - LOG.exception(ex) - return quorum_names - - return quorum_names - - def remove_osd_key(self, osdid): - osdid_str = "osd." + str(osdid) - # Remove the OSD authentication key - response, body = self._ceph_api.auth_del( - osdid_str, body='json') - if not response.ok: - LOG.error("Auth delete failed for OSD %s: %s", - osdid_str, response.reason) - - def osd_host_lookup(self, osd_id): - response, body = self._ceph_api.osd_crush_tree(body='json') - for i in range(0, len(body)): - # there are 2 chassis lists - cache-tier and root-tier - # that can be seen in the output of 'ceph osd crush tree': - # [{"id": -2,"name": "cache-tier", "type": "root", - # "type_id": 10, "items": [...]}, - # {"id": -1,"name": "storage-tier","type": "root", - # "type_id": 10, "items": [...]}] - chassis_list = body['output'][i]['items'] - for chassis in chassis_list: - # extract storage list/per chassis - storage_list = chassis['items'] - for storage in storage_list: - # extract osd list/per storage - storage_osd_list = storage['items'] - for osd in storage_osd_list: - if osd['id'] == osd_id: - # return storage name where osd is located - return storage['name'] - return None - - def check_osds_down_up(self, hostname, upgrade): - # check if osds from a storage are down/up - response, body = self._ceph_api.osd_tree(body='json') - osd_tree = body['output']['nodes'] - size = len(osd_tree) - for i in range(1, size): - if osd_tree[i]['type'] != "host": - continue - children_list = osd_tree[i]['children'] - children_num = len(children_list) - # when we do a storage upgrade, storage node must be locked - # and all the osds of that storage node must be down - if (osd_tree[i]['name'] == hostname): - for j in range(1, children_num + 1): - if (osd_tree[i + j]['type'] == - constants.STOR_FUNCTION_OSD and - osd_tree[i + j]['status'] == "up"): - # at least one osd is not down - return False - # all osds are up - return True - - def host_crush_remove(self, hostname): - # remove host from crushmap when system host-delete is executed - response, body = self._ceph_api.osd_crush_remove( - hostname, body='json') - - def host_osd_status(self, hostname): - # should prevent locking of a host if HEALTH_BLOCK - host_health = None - try: - response, body = self._ceph_api.pg_dump_stuck(body='json') - pg_detail = len(body['output']) - except Exception as e: - LOG.exception(e) - return host_health - - # osd_list is a list where I add - # each osd from pg_detail whose hostname - # is not equal with hostnamge given as parameter - osd_list = [] - for x in range(pg_detail): - # extract the osd and return the storage node - osd = body['output'][x]['acting'] - # osd is a list with osd where a stuck/degraded PG - # was replicated. 
If osd is empty, it means - # PG is not replicated to any osd - if not osd: - continue - osd_id = int(osd[0]) - if osd_id in osd_list: - continue - # potential future optimization to cache all the - # osd to host lookups for the single call to host_osd_status(). - host_name = self.osd_host_lookup(osd_id) - if (host_name is not None and - host_name == hostname): - # mark the selected storage node with HEALTH_BLOCK - # we can't lock any storage node marked with HEALTH_BLOCK - return constants.CEPH_HEALTH_BLOCK - osd_list.append(osd_id) - return constants.CEPH_HEALTH_OK - - def get_monitors_status(self, ihosts): - # first check that the monitors are available in inventory - num_active_monitors = 0 - num_inv_monitors = 0 - required_monitors = constants.MIN_STOR_MONITORS - quorum_names = [] - inventory_monitor_names = [] - for ihost in ihosts: - if ihost['personality'] == k_host.COMPUTE: - continue - capabilities = ihost['capabilities'] - if 'stor_function' in capabilities: - host_action = ihost['host_action'] or "" - locking = (host_action.startswith(k_host.ACTION_LOCK) or - host_action.startswith(k_host.ACTION_FORCE_LOCK)) - if (capabilities['stor_function'] == - constants.STOR_FUNCTION_MONITOR and - ihost['administrative'] == k_host.ADMIN_UNLOCKED and - ihost['operational'] == k_host.OPERATIONAL_ENABLED and - not locking): - num_inv_monitors += 1 - inventory_monitor_names.append(ihost['hostname']) - - LOG.info("Active ceph monitors in inventory = %s" % - str(inventory_monitor_names)) - - # check that the cluster is actually operational. - # if we can get the monitor quorum from ceph, then - # the cluster is truly operational - if num_inv_monitors >= required_monitors: - try: - quorum_names = self._osd_quorum_names() - except Exception: - # if the cluster is not responding to requests - # set quorum_names to an empty list, indicating a problem - quorum_names = [] - LOG.error("Ceph cluster not responding to requests.") - - LOG.info("Active ceph monitors in ceph cluster = %s" % - str(quorum_names)) - - # There may be cases where a host is in an unlocked-available state, - # but the monitor is down due to crashes or manual removal. - # For such cases, we determine the list of active ceph monitors to be - # the intersection of the inventory reported unlocked-available monitor - # hosts and the monitors reported in the quorum via the ceph API. - active_monitors = list(set(inventory_monitor_names) & - set(quorum_names)) - LOG.info("Active ceph monitors = %s" % str(active_monitors)) - - num_active_monitors = len(active_monitors) - - return num_active_monitors, required_monitors, active_monitors diff --git a/inventory/inventory/inventory/common/config.py b/inventory/inventory/inventory/common/config.py deleted file mode 100644 index b3086df8..00000000 --- a/inventory/inventory/inventory/common/config.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2016 Ericsson AB -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
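The get_monitors_status() logic deleted above reduces to a set intersection: a monitor counts as active only if inventory reports its host as an unlocked-enabled monitor and the Ceph quorum also lists it. A self-contained sketch of that core step (host names and counts are made up for illustration):

# Hosts inventory considers eligible monitors (unlocked/enabled, not locking).
inventory_monitor_names = ['controller-0', 'controller-1', 'storage-0']

# Monitors the ceph cluster itself reports in quorum.
quorum_names = ['controller-0', 'storage-0']

# Only monitors present in both views are treated as active.
active_monitors = list(set(inventory_monitor_names) & set(quorum_names))

required_monitors = 2  # mirrors constants.MIN_STOR_MONITORS in the removed code
print(len(active_monitors), required_monitors, sorted(active_monitors))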
- -""" -File to store configurations -""" -from inventory.common import rpc -from inventory import version -from oslo_config import cfg - -global_opts = [ - cfg.BoolOpt('use_default_quota_class', - default=True, - help='Enables or disables use of default quota class ' - 'with default quota.'), - cfg.IntOpt('report_interval', - default=60, - help='Seconds between running periodic reporting tasks.'), -] - -# Pecan_opts -pecan_opts = [ - cfg.StrOpt( - 'root', - default='inventory.api.controllers.root.RootController', - help='Pecan root controller' - ), - cfg.ListOpt( - 'modules', - default=["inventory.api"], - help='A list of modules where pecan will search for applications.' - ), - cfg.BoolOpt( - 'debug', - default=False, - help='Enables the ability to display tracebacks in the browser and' - 'interactively debug during development.' - ), - cfg.BoolOpt( - 'auth_enable', - default=True, - help='Enables user authentication in pecan.' - ) -] - - -# OpenStack credentials used for Endpoint Cache -cache_opts = [ - cfg.StrOpt('auth_uri', - help='Keystone authorization url'), - cfg.StrOpt('identity_uri', - help='Keystone service url'), - cfg.StrOpt('admin_username', - help='Username of admin account, needed when' - ' auto_refresh_endpoint set to True'), - cfg.StrOpt('admin_password', - help='Password of admin account, needed when' - ' auto_refresh_endpoint set to True'), - cfg.StrOpt('admin_tenant', - help='Tenant name of admin account, needed when' - ' auto_refresh_endpoint set to True'), - cfg.StrOpt('admin_user_domain_name', - default='Default', - help='User domain name of admin account, needed when' - ' auto_refresh_endpoint set to True'), - cfg.StrOpt('admin_project_domain_name', - default='Default', - help='Project domain name of admin account, needed when' - ' auto_refresh_endpoint set to True') -] - -scheduler_opts = [ - cfg.BoolOpt('periodic_enable', - default=True, - help='boolean value for enable/disenable periodic tasks'), - cfg.IntOpt('periodic_interval', - default=600, - help='periodic time interval for automatic quota sync job' - ' and resource sync audit') -] - -common_opts = [ - cfg.IntOpt('workers', default=1, - help='number of workers'), -] - -scheduler_opt_group = cfg.OptGroup('scheduler', - title='Scheduler options for periodic job') - -# The group stores the pecan configurations. -pecan_group = cfg.OptGroup(name='pecan', - title='Pecan options') - -cache_opt_group = cfg.OptGroup(name='cache', - title='OpenStack Credentials') - - -def list_opts(): - yield cache_opt_group.name, cache_opts - yield scheduler_opt_group.name, scheduler_opts - yield pecan_group.name, pecan_opts - yield None, global_opts - yield None, common_opts - - -def register_options(): - for group, opts in list_opts(): - cfg.CONF.register_opts(opts, group=group) - - -def parse_args(argv, default_config_files=None): - rpc.set_defaults(control_exchange='inventory') - cfg.CONF(argv[1:], - project='inventory', - version=version.version_info.release_string(), - default_config_files=default_config_files) - rpc.init(cfg.CONF) diff --git a/inventory/inventory/inventory/common/constants.py b/inventory/inventory/inventory/common/constants.py deleted file mode 100644 index a36d358e..00000000 --- a/inventory/inventory/inventory/common/constants.py +++ /dev/null @@ -1,596 +0,0 @@ -# -# Copyright (c) 2013-2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.common import k_host -import os -import tsconfig.tsconfig as tsc - -INVENTORY_RUNNING_IN_LAB = '/etc/inventory/.running_in_lab' -INVENTORY_CONFIG_PATH = \ - os.path.join(tsc.PLATFORM_PATH, "inventory", tsc.SW_VERSION) - -VIM_DEFAULT_TIMEOUT_IN_SECS = 5 -VIM_DELETE_TIMEOUT_IN_SECS = 10 -MTC_ADD_TIMEOUT_IN_SECS = 6 -MTC_DELETE_TIMEOUT_IN_SECS = 10 -MTC_DEFAULT_TIMEOUT_IN_SECS = 6 -HWMON_DEFAULT_TIMEOUT_IN_SECS = 6 -PATCH_DEFAULT_TIMEOUT_IN_SECS = 6 - -DB_SUPPRESS_STATUS = 1 -DB_MGMT_AFFECTING = 2 -DB_DEGRADE_AFFECTING = 3 - -# CPU functions -PLATFORM_FUNCTION = "Platform" -VSWITCH_FUNCTION = "Vswitch" -SHARED_FUNCTION = "Shared" -VM_FUNCTION = "VMs" -NO_FUNCTION = "None" - -# Hugepage sizes in MiB -MIB_2M = 2 -MIB_1G = 1024 -Ki = 1024 -NUM_4K_PER_MiB = 256 - -# Dynamic IO Resident Set Size(RSS) in MiB per socket -DISK_IO_RESIDENT_SET_SIZE_MIB = 2000 -DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX = 500 - -# Memory reserved for platform core in MiB per host -PLATFORM_CORE_MEMORY_RESERVED_MIB = 2000 -PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX = 1100 - -# For combined node, memory reserved for controller in MiB -COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB = 10500 -COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_VBOX = 6000 -COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_XEOND = 7000 - -# Max number of physical cores in a xeon-d cpu -NUMBER_CORES_XEOND = 8 - -# Max number of computes that can be added to an AIO duplex system -AIO_DUPLEX_MAX_COMPUTES = 4 - -# Network overhead for DHCP or vrouter, assume 100 networks * 40 MB each -NETWORK_METADATA_OVERHEAD_MIB = 4000 -NETWORK_METADATA_OVERHEAD_MIB_VBOX = 0 - -# Sensors -SENSOR_DATATYPE_VALID_LIST = ['discrete', 'analog'] -HWMON_PORT = 2212 - -# Supported compute node vswitch types -VSWITCH_TYPE_OVS_DPDK = "ovs-dpdk" - -# Partition default sizes -DEFAULT_IMAGE_STOR_SIZE = 10 -DEFAULT_DOCKER_STOR_SIZE = 1 -DEFAULT_DOCKER_DISTRIBUTION_STOR_SIZE = 1 -DEFAULT_DATABASE_STOR_SIZE = 20 -DEFAULT_IMG_CONVERSION_STOR_SIZE = 20 -DEFAULT_SMALL_IMAGE_STOR_SIZE = 10 -DEFAULT_SMALL_DATABASE_STOR_SIZE = 10 -DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = 10 -DEFAULT_SMALL_BACKUP_STOR_SIZE = 30 -DEFAULT_VIRTUAL_IMAGE_STOR_SIZE = 8 -DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = 5 -DEFAULT_VIRTUAL_IMG_CONVERSION_STOR_SIZE = 8 -DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = 5 -DEFAULT_EXTENSION_STOR_SIZE = 1 -DEFAULT_PATCH_VAULT_STOR_SIZE = 8 -DEFAULT_ETCD_STORE_SIZE = 1 -DEFAULT_GNOCCHI_STOR_SIZE = 5 - -# Openstack Interface names -OS_INTERFACE_PUBLIC = 'public' -OS_INTERFACE_INTERNAL = 'internal' -OS_INTERFACE_ADMIN = 'admin' - -# Default region one name -REGION_ONE_NAME = 'RegionOne' -# DC Region Must match VIRTUAL_MASTER_CLOUD in dcorch -SYSTEM_CONTROLLER_REGION = 'SystemController' - -# Valid major numbers for disks: -# https://www.kernel.org/doc/Documentation/admin-guide/devices.txt -# -# 3 block First MFM, RLL and IDE hard disk/CD-ROM interface -# 8 block SCSI disk devices (0-15) -# 65 block SCSI disk devices (16-31) -# 66 block SCSI disk devices (32-47) -# 67 block SCSI disk devices (48-63) -# 68 block SCSI disk devices (64-79) -# 69 block SCSI disk devices (80-95) -# 70 block SCSI disk devices (96-111) -# 71 block SCSI disk devices (112-127) -# 128 block SCSI disk devices (128-143) -# 129 block SCSI disk devices (144-159) -# 130 block SCSI disk devices (160-175) -# 131 block SCSI disk devices (176-191) -# 132 block SCSI disk devices (192-207) -# 133 block SCSI disk devices 
(208-223) -# 134 block SCSI disk devices (224-239) -# 135 block SCSI disk devices (240-255) -# 240-254 block LOCAL/EXPERIMENTAL USE (253 == /dev/vdX) -# 259 block Block Extended Major (NVMe - /dev/nvmeXn1) -VALID_MAJOR_LIST = ['3', '8', '65', '66', '67', '68', '69', '70', '71', - '128', '129', '130', '131', '132', '133', '134', - '135', '253', '259'] -VENDOR_ID_LIO = 'LIO-ORG' - -# Storage backends supported -SB_TYPE_FILE = 'file' -SB_TYPE_LVM = 'lvm' -SB_TYPE_CEPH = 'ceph' -SB_TYPE_CEPH_EXTERNAL = 'ceph-external' -SB_TYPE_EXTERNAL = 'external' - -SB_SUPPORTED = [SB_TYPE_FILE, - SB_TYPE_LVM, - SB_TYPE_CEPH, - SB_TYPE_CEPH_EXTERNAL, - SB_TYPE_EXTERNAL] - -# Storage backend default names -SB_DEFAULT_NAME_SUFFIX = "-store" -SB_DEFAULT_NAMES = { - SB_TYPE_FILE: SB_TYPE_FILE + SB_DEFAULT_NAME_SUFFIX, - SB_TYPE_LVM: SB_TYPE_LVM + SB_DEFAULT_NAME_SUFFIX, - SB_TYPE_CEPH: SB_TYPE_CEPH + SB_DEFAULT_NAME_SUFFIX, - SB_TYPE_CEPH_EXTERNAL: SB_TYPE_CEPH_EXTERNAL + SB_DEFAULT_NAME_SUFFIX, - SB_TYPE_EXTERNAL: 'shared_services' -} - -# Storage backends services -SB_SVC_CINDER = 'cinder' -SB_SVC_GLANCE = 'glance' -SB_SVC_NOVA = 'nova' -SB_SVC_SWIFT = 'swift' - -SB_FILE_SVCS_SUPPORTED = [SB_SVC_GLANCE] -SB_LVM_SVCS_SUPPORTED = [SB_SVC_CINDER] -SB_CEPH_SVCS_SUPPORTED = [ - SB_SVC_GLANCE, SB_SVC_CINDER, - SB_SVC_SWIFT, SB_SVC_NOVA] # supported primary tier svc -SB_CEPH_EXTERNAL_SVCS_SUPPORTED = [SB_SVC_CINDER, SB_SVC_GLANCE, SB_SVC_NOVA] -SB_EXTERNAL_SVCS_SUPPORTED = [SB_SVC_CINDER, SB_SVC_GLANCE] - -# Storage backend: Service specific backend nomenclature -CINDER_BACKEND_CEPH = SB_TYPE_CEPH -CINDER_BACKEND_CEPH_EXTERNAL = SB_TYPE_CEPH_EXTERNAL -CINDER_BACKEND_LVM = SB_TYPE_LVM -GLANCE_BACKEND_FILE = SB_TYPE_FILE -GLANCE_BACKEND_RBD = 'rbd' -GLANCE_BACKEND_HTTP = 'http' -GLANCE_BACKEND_GLANCE = 'glance' - -# Storage Tiers: types (aligns with polymorphic backends) -SB_TIER_TYPE_CEPH = SB_TYPE_CEPH -SB_TIER_SUPPORTED = [SB_TIER_TYPE_CEPH] -SB_TIER_DEFAULT_NAMES = { - SB_TIER_TYPE_CEPH: 'storage' # maps to crushmap 'storage-tier' root -} -SB_TIER_CEPH_SECONDARY_SVCS = [SB_SVC_CINDER] # supported secondary tier svcs - -SB_TIER_STATUS_DEFINED = 'defined' -SB_TIER_STATUS_IN_USE = 'in-use' - -# File name reserved for internal ceph cluster. 
-SB_TYPE_CEPH_CONF_FILENAME = "ceph.conf" - -# Glance images path when it is file backended -GLANCE_IMAGE_PATH = tsc.PLATFORM_PATH + "/" + SB_SVC_GLANCE + "/images" - -# Path for Ceph (internal and external) config files -CEPH_CONF_PATH = "/etc/ceph/" - -# Requested storage backend API operations -SB_API_OP_CREATE = "create" -SB_API_OP_MODIFY = "modify" -SB_API_OP_DELETE = "delete" - -# Storage backend state -SB_STATE_CONFIGURED = 'configured' -SB_STATE_CONFIGURING = 'configuring' -SB_STATE_CONFIG_ERR = 'configuration-failed' - -# Storage backend tasks -SB_TASK_NONE = None -SB_TASK_APPLY_MANIFESTS = 'applying-manifests' -SB_TASK_APPLY_CONFIG_FILE = 'applying-config-file' -SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller' -SB_TASK_PROVISION_STORAGE = 'provision-storage' -SB_TASK_PROVISION_SERVICES = 'provision-services' -SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute' -SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv' -SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway' -SB_TASK_RESTORE = 'restore' - -# Storage backend ceph-mon-lv size -SB_CEPH_MON_GIB = 20 -SB_CEPH_MON_GIB_MIN = 20 -SB_CEPH_MON_GIB_MAX = 40 - -SB_CONFIGURATION_TIMEOUT = 1200 - -# Storage: Minimum number of monitors -MIN_STOR_MONITORS = 2 - -# Suffix used in LVM volume name to indicate that the -# volume is actually a thin pool. (And thin volumes will -# be created in the thin pool.) -LVM_POOL_SUFFIX = '-pool' - -# File system names -FILESYSTEM_NAME_BACKUP = 'backup' -FILESYSTEM_NAME_PLATFORM = 'platform' -FILESYSTEM_NAME_CINDER = 'cinder' -FILESYSTEM_NAME_DATABASE = 'database' -FILESYSTEM_NAME_IMG_CONVERSIONS = 'img-conversions' -FILESYSTEM_NAME_SCRATCH = 'scratch' -FILESYSTEM_NAME_DOCKER = 'docker' -FILESYSTEM_NAME_DOCKER_DISTRIBUTION = 'docker-distribution' -FILESYSTEM_NAME_EXTENSION = 'extension' -FILESYSTEM_NAME_ETCD = 'etcd' -FILESYSTEM_NAME_PATCH_VAULT = 'patch-vault' -FILESYSTEM_NAME_GNOCCHI = 'gnocchi' - -FILESYSTEM_LV_DICT = { - FILESYSTEM_NAME_BACKUP: 'backup-lv', - FILESYSTEM_NAME_SCRATCH: 'scratch-lv', - FILESYSTEM_NAME_DOCKER: 'docker-lv', - FILESYSTEM_NAME_DOCKER_DISTRIBUTION: 'dockerdistribution-lv', - FILESYSTEM_NAME_IMG_CONVERSIONS: 'img-conversions-lv', - FILESYSTEM_NAME_DATABASE: 'pgsql-lv', - FILESYSTEM_NAME_EXTENSION: 'extension-lv', - FILESYSTEM_NAME_ETCD: 'etcd-lv', - FILESYSTEM_NAME_PATCH_VAULT: 'patch-vault-lv', - FILESYSTEM_NAME_GNOCCHI: 'gnocchi-lv', - FILESYSTEM_NAME_PLATFORM: 'platform-lv' -} - -SUPPORTED_LOGICAL_VOLUME_LIST = FILESYSTEM_LV_DICT.values() - -SUPPORTED_FILEYSTEM_LIST = [ - FILESYSTEM_NAME_BACKUP, - FILESYSTEM_NAME_PLATFORM, - FILESYSTEM_NAME_CINDER, - FILESYSTEM_NAME_DATABASE, - FILESYSTEM_NAME_EXTENSION, - FILESYSTEM_NAME_IMG_CONVERSIONS, - FILESYSTEM_NAME_SCRATCH, - FILESYSTEM_NAME_DOCKER, - FILESYSTEM_NAME_DOCKER_DISTRIBUTION, - FILESYSTEM_NAME_PATCH_VAULT, - FILESYSTEM_NAME_ETCD, - FILESYSTEM_NAME_GNOCCHI -] - -SUPPORTED_REPLICATED_FILEYSTEM_LIST = [ - FILESYSTEM_NAME_PLATFORM, - FILESYSTEM_NAME_DATABASE, - FILESYSTEM_NAME_EXTENSION, - FILESYSTEM_NAME_PATCH_VAULT, - FILESYSTEM_NAME_ETCD, - FILESYSTEM_NAME_DOCKER_DISTRIBUTION, -] - -# Storage: Volume Group Types -LVG_NOVA_LOCAL = 'nova-local' -LVG_CGTS_VG = 'cgts-vg' -LVG_CINDER_VOLUMES = 'cinder-volumes' -LVG_ALLOWED_VGS = [LVG_NOVA_LOCAL, LVG_CGTS_VG, LVG_CINDER_VOLUMES] - -# Cinder LVM Parameters -CINDER_LVM_MINIMUM_DEVICE_SIZE_GIB = 5 # GiB -CINDER_LVM_DRBD_RESOURCE = 'drbd-cinder' -CINDER_LVM_DRBD_WAIT_PEER_RETRY = 5 -CINDER_LVM_DRBD_WAIT_PEER_SLEEP = 2 -CINDER_LVM_POOL_LV = LVG_CINDER_VOLUMES + LVM_POOL_SUFFIX 
-CINDER_LVM_POOL_META_LV = CINDER_LVM_POOL_LV + "_tmeta" -CINDER_RESIZE_FAILURE = "cinder-resize-failure" -CINDER_DRBD_DEVICE = '/dev/drbd4' - -CINDER_LVM_TYPE_THIN = 'thin' -CINDER_LVM_TYPE_THICK = 'thick' - -# Storage: Volume Group Parameter Types -LVG_CINDER_PARAM_LVM_TYPE = 'lvm_type' - -# Storage: Volume Group Parameter: Cinder: LVM provisioing -LVG_CINDER_LVM_TYPE_THIN = 'thin' -LVG_CINDER_LVM_TYPE_THICK = 'thick' - -# Controller audit requests (force updates from agents) -DISK_AUDIT_REQUEST = "audit_disk" -LVG_AUDIT_REQUEST = "audit_lvg" -PV_AUDIT_REQUEST = "audit_pv" -PARTITION_AUDIT_REQUEST = "audit_partition" -CONTROLLER_AUDIT_REQUESTS = [DISK_AUDIT_REQUEST, - LVG_AUDIT_REQUEST, - PV_AUDIT_REQUEST, - PARTITION_AUDIT_REQUEST] - -# IP families -IPV4_FAMILY = 4 -IPV6_FAMILY = 6 -IP_FAMILIES = {IPV4_FAMILY: "IPv4", - IPV6_FAMILY: "IPv6"} - -# Interface definitions -NETWORK_TYPE_NONE = 'none' -NETWORK_TYPE_INFRA = 'infra' -NETWORK_TYPE_MGMT = 'mgmt' -NETWORK_TYPE_OAM = 'oam' -NETWORK_TYPE_BM = 'bm' -NETWORK_TYPE_MULTICAST = 'multicast' -NETWORK_TYPE_DATA = 'data' -NETWORK_TYPE_SYSTEM_CONTROLLER = 'system-controller' - -NETWORK_TYPE_PCI_PASSTHROUGH = 'pci-passthrough' -NETWORK_TYPE_PCI_SRIOV = 'pci-sriov' -NETWORK_TYPE_PXEBOOT = 'pxeboot' - -PLATFORM_NETWORK_TYPES = [NETWORK_TYPE_PXEBOOT, - NETWORK_TYPE_MGMT, - NETWORK_TYPE_INFRA, - NETWORK_TYPE_OAM] - -PCI_NETWORK_TYPES = [NETWORK_TYPE_PCI_PASSTHROUGH, - NETWORK_TYPE_PCI_SRIOV] - -INTERFACE_TYPE_ETHERNET = 'ethernet' -INTERFACE_TYPE_VLAN = 'vlan' -INTERFACE_TYPE_AE = 'ae' -INTERFACE_TYPE_VIRTUAL = 'virtual' - -INTERFACE_CLASS_NONE = 'none' -INTERFACE_CLASS_PLATFORM = 'platform' -INTERFACE_CLASS_DATA = 'data' -INTERFACE_CLASS_PCI_PASSTHROUGH = 'pci-passthrough' -INTERFACE_CLASS_PCI_SRIOV = 'pci-sriov' - -SM_MULTICAST_MGMT_IP_NAME = "sm-mgmt-ip" -MTCE_MULTICAST_MGMT_IP_NAME = "mtce-mgmt-ip" -PATCH_CONTROLLER_MULTICAST_MGMT_IP_NAME = "patch-controller-mgmt-ip" -PATCH_AGENT_MULTICAST_MGMT_IP_NAME = "patch-agent-mgmt-ip" -SYSTEM_CONTROLLER_GATEWAY_IP_NAME = "system-controller-gateway-ip" - -ADDRESS_FORMAT_ARGS = (k_host.CONTROLLER_HOSTNAME, - NETWORK_TYPE_MGMT) -MGMT_CINDER_IP_NAME = "%s-cinder-%s" % ADDRESS_FORMAT_ARGS - -ETHERNET_NULL_MAC = '00:00:00:00:00:00' - -DEFAULT_MTU = 1500 - -# Stor function types -STOR_FUNCTION_CINDER = 'cinder' -STOR_FUNCTION_OSD = 'osd' -STOR_FUNCTION_MONITOR = 'monitor' -STOR_FUNCTION_JOURNAL = 'journal' - -# Disk types and names. -DEVICE_TYPE_HDD = 'HDD' -DEVICE_TYPE_SSD = 'SSD' -DEVICE_TYPE_NVME = 'NVME' -DEVICE_TYPE_UNDETERMINED = 'Undetermined' -DEVICE_TYPE_NA = 'N/A' -DEVICE_NAME_NVME = 'nvme' - -# Disk model types. -DEVICE_MODEL_UNKNOWN = 'Unknown' - -# Journal operations. -ACTION_CREATE_JOURNAL = "create" -ACTION_UPDATE_JOURNAL = "update" - -# Load constants -MNT_DIR = '/tmp/mnt' - -ACTIVE_LOAD_STATE = 'active' -IMPORTING_LOAD_STATE = 'importing' -IMPORTED_LOAD_STATE = 'imported' -ERROR_LOAD_STATE = 'error' -DELETING_LOAD_STATE = 'deleting' - -DELETE_LOAD_SCRIPT = '/etc/inventory/upgrades/delete_load.sh' - -# Ceph -CEPH_HEALTH_OK = 'HEALTH_OK' -CEPH_HEALTH_BLOCK = 'HEALTH_BLOCK' - -# See http://ceph.com/pgcalc/. 
We set it to more than 100 because pool usage -# varies greatly in Titanium Cloud and we want to avoid running too low on PGs -CEPH_TARGET_PGS_PER_OSD = 200 -CEPH_REPLICATION_FACTOR_DEFAULT = 2 -CEPH_REPLICATION_FACTOR_SUPPORTED = [2, 3] -CEPH_MIN_REPLICATION_FACTOR_SUPPORTED = [1, 2] -CEPH_REPLICATION_MAP_DEFAULT = { - # replication: min_replication - 2: 1, - 3: 2 -} -# ceph osd pool size -CEPH_BACKEND_REPLICATION_CAP = 'replication' -# ceph osd pool min size -CEPH_BACKEND_MIN_REPLICATION_CAP = 'min_replication' -CEPH_BACKEND_CAP_DEFAULT = { - CEPH_BACKEND_REPLICATION_CAP: - str(CEPH_REPLICATION_FACTOR_DEFAULT), - CEPH_BACKEND_MIN_REPLICATION_CAP: - str(CEPH_REPLICATION_MAP_DEFAULT[CEPH_REPLICATION_FACTOR_DEFAULT]) -} - -# Service Parameter -SERVICE_TYPE_IDENTITY = 'identity' -SERVICE_TYPE_KEYSTONE = 'keystone' -SERVICE_TYPE_IMAGE = 'image' -SERVICE_TYPE_VOLUME = 'volume' -SERVICE_TYPE_NETWORK = 'network' -SERVICE_TYPE_HORIZON = "horizon" -SERVICE_TYPE_CEPH = 'ceph' -SERVICE_TYPE_CINDER = 'cinder' -SERVICE_TYPE_MURANO = 'murano' -SERVICE_TYPE_MAGNUM = 'magnum' -SERVICE_TYPE_PLATFORM = 'configuration' -SERVICE_TYPE_NOVA = 'nova' -SERVICE_TYPE_SWIFT = 'swift' -SERVICE_TYPE_IRONIC = 'ironic' -SERVICE_TYPE_PANKO = 'panko' -SERVICE_TYPE_AODH = 'aodh' -SERVICE_TYPE_GLANCE = 'glance' -SERVICE_TYPE_BARBICAN = 'barbican' - -# TIS part number, CPE = combined load, STD = standard load -TIS_STD_BUILD = 'Standard' -TIS_AIO_BUILD = 'All-in-one' - -# sysadmin password aging. -# Setting aging to max defined value qualifies -# as "never" on certain Linux distros including WRL -SYSADMIN_PASSWORD_NO_AGING = 99999 - -# Partition table size in bytes. -PARTITION_TABLE_SIZE = 2097152 - -# States that describe the states of a partition. - -# Partition is ready for being used. -PARTITION_READY_STATUS = 0 -# Partition is used by a PV. -PARTITION_IN_USE_STATUS = 1 -# An in-service request to create the partition has been sent. -PARTITION_CREATE_IN_SVC_STATUS = 2 -# An unlock request to create the partition has been sent. -PARTITION_CREATE_ON_UNLOCK_STATUS = 3 -# A request to delete the partition has been sent. -PARTITION_DELETING_STATUS = 4 -# A request to modify the partition has been sent. -PARTITION_MODIFYING_STATUS = 5 -# The partition has been deleted. -PARTITION_DELETED_STATUS = 6 -# The creation of the partition has encountered a known error. -PARTITION_ERROR_STATUS = 10 -# Partition creation failed due to an internal error, check packstack logs. -PARTITION_ERROR_STATUS_INTERNAL = 11 -# Partition was not created because disk does not have a GPT. 
-PARTITION_ERROR_STATUS_GPT = 12 - -PARTITION_STATUS_MSG = { - PARTITION_IN_USE_STATUS: "In-Use", - PARTITION_CREATE_IN_SVC_STATUS: "Creating", - PARTITION_CREATE_ON_UNLOCK_STATUS: "Creating (on unlock)", - PARTITION_DELETING_STATUS: "Deleting", - PARTITION_MODIFYING_STATUS: "Modifying", - PARTITION_READY_STATUS: "Ready", - PARTITION_DELETED_STATUS: "Deleted", - PARTITION_ERROR_STATUS: "Error", - PARTITION_ERROR_STATUS_INTERNAL: "Error: Internal script error.", - PARTITION_ERROR_STATUS_GPT: "Error:Missing GPT Table."} - -PARTITION_STATUS_OK_TO_DELETE = [ - PARTITION_READY_STATUS, - PARTITION_CREATE_ON_UNLOCK_STATUS, - PARTITION_ERROR_STATUS, - PARTITION_ERROR_STATUS_INTERNAL, - PARTITION_ERROR_STATUS_GPT] - -PARTITION_STATUS_SEND_DELETE_RPC = [ - PARTITION_READY_STATUS, - PARTITION_ERROR_STATUS, - PARTITION_ERROR_STATUS_INTERNAL] - -PARTITION_CMD_CREATE = "create" -PARTITION_CMD_DELETE = "delete" -PARTITION_CMD_MODIFY = "modify" - -# User creatable, system managed, GUID partitions types. -PARTITION_USER_MANAGED_GUID_PREFIX = "ba5eba11-0000-1111-2222-" -USER_PARTITION_PHYSICAL_VOLUME = \ - PARTITION_USER_MANAGED_GUID_PREFIX + "000000000001" -LINUX_LVM_PARTITION = "e6d6d379-f507-44c2-a23c-238f2a3df928" - -# Partition name for those partitions deignated for PV use. -PARTITION_NAME_PV = "LVM Physical Volume" - -# Partition table types. -PARTITION_TABLE_GPT = "gpt" -PARTITION_TABLE_MSDOS = "msdos" - -# Optional services -ALL_OPTIONAL_SERVICES = [SERVICE_TYPE_CINDER, SERVICE_TYPE_MURANO, - SERVICE_TYPE_MAGNUM, SERVICE_TYPE_SWIFT, - SERVICE_TYPE_IRONIC] - -# System mode -SYSTEM_MODE_DUPLEX = "duplex" -SYSTEM_MODE_SIMPLEX = "simplex" -SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct" - -# System Security Profiles -SYSTEM_SECURITY_PROFILE_STANDARD = "standard" -SYSTEM_SECURITY_PROFILE_EXTENDED = "extended" - -# Install states -INSTALL_STATE_PRE_INSTALL = "preinstall" -INSTALL_STATE_INSTALLING = "installing" -INSTALL_STATE_POST_INSTALL = "postinstall" -INSTALL_STATE_FAILED = "failed" -INSTALL_STATE_INSTALLED = "installed" -INSTALL_STATE_BOOTING = "booting" -INSTALL_STATE_COMPLETED = "completed" - -tox_work_dir = os.environ.get("TOX_WORK_DIR") -if tox_work_dir: - INVENTORY_LOCK_PATH = tox_work_dir -else: - INVENTORY_LOCK_PATH = os.path.join(tsc.VOLATILE_PATH, "inventory") - -NETWORK_CONFIG_LOCK_FILE = os.path.join( - tsc.VOLATILE_PATH, "apply_network_config.lock") - -INVENTORY_USERNAME = "inventory" -INVENTORY_GRPNAME = "inventory" - -# License file -LICENSE_FILE = ".license" - -# Cinder lvm config complete file. -NODE_CINDER_LVM_CONFIG_COMPLETE_FILE = \ - os.path.join(tsc.PLATFORM_CONF_PATH, '.node_cinder_lvm_config_complete') -INITIAL_CINDER_LVM_CONFIG_COMPLETE_FILE = \ - os.path.join(tsc.CONFIG_PATH, '.initial_cinder_lvm_config_complete') - -DISK_WIPE_IN_PROGRESS_FLAG = \ - os.path.join(tsc.PLATFORM_CONF_PATH, '.disk_wipe_in_progress') -DISK_WIPE_COMPLETE_TIMEOUT = 5 # wait for a disk to finish wiping. 
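The partition tables above are consumed as plain lookup data: callers map a numeric status to its display string and gate delete requests on the OK-to-delete list. A trimmed, self-contained sketch of that usage (the helper names are hypothetical; the values mirror a subset of the constants above):

PARTITION_READY_STATUS = 0
PARTITION_DELETING_STATUS = 4
PARTITION_STATUS_MSG = {
    PARTITION_READY_STATUS: "Ready",
    PARTITION_DELETING_STATUS: "Deleting",
}
PARTITION_STATUS_OK_TO_DELETE = [PARTITION_READY_STATUS]


def partition_status_display(status):
    # Fall back to a generic label for states without a message entry.
    return PARTITION_STATUS_MSG.get(status, "Unknown (%s)" % status)


def partition_ok_to_delete(status):
    return status in PARTITION_STATUS_OK_TO_DELETE


print(partition_status_display(PARTITION_DELETING_STATUS))  # Deleting
print(partition_ok_to_delete(PARTITION_DELETING_STATUS))    # False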
- -# Clone label set in DB -CLONE_ISO_MAC = 'CLONEISOMAC_' -CLONE_ISO_DISK_SID = 'CLONEISODISKSID_' - -# kernel options for various security feature selections -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1 = 'spectre_meltdown_v1' -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS = 'nopti nospectre_v2' -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL = 'spectre_meltdown_all' -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL_OPTS = '' -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS = { - SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1: - SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS, - SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL: - SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_ALL_OPTS -} - - -SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_DEFAULT_OPTS = \ - SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_V1_OPTS diff --git a/inventory/inventory/inventory/common/context.py b/inventory/inventory/inventory/common/context.py deleted file mode 100644 index 10008f62..00000000 --- a/inventory/inventory/inventory/common/context.py +++ /dev/null @@ -1,153 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from keystoneauth1.access import service_catalog as k_service_catalog -from keystoneauth1 import plugin -from oslo_config import cfg -from oslo_context import context - -from inventory.common import policy - -REQUIRED_SERVICE_TYPES = ('faultmanagement', - 'nfv', - 'patching', - 'platform', - 'smapi', - ) - - -CONF = cfg.CONF - - -class _ContextAuthPlugin(plugin.BaseAuthPlugin): - """A keystoneauth auth plugin that uses the values from the Context. - - Ideally we would use the plugin provided by auth_token middleware however - this plugin isn't serialized yet so we construct one from the serialized - auth data. - """ - - def __init__(self, auth_token, sc): - super(_ContextAuthPlugin, self).__init__() - - self.auth_token = auth_token - self.service_catalog = k_service_catalog.ServiceCatalogV2(sc) - - def get_token(self, *args, **kwargs): - return self.auth_token - - def get_endpoint(self, session, service_type=None, interface=None, - region_name=None, service_name=None, **kwargs): - return self.service_catalog.url_for(service_type=service_type, - service_name=service_name, - interface=interface, - region_name=region_name) - - -class RequestContext(context.RequestContext): - """Extends security contexts from the OpenStack common library.""" - - def __init__(self, auth_token=None, auth_url=None, domain_id=None, - domain_name=None, user_name=None, user_id=None, - user_domain_name=None, user_domain_id=None, - project_name=None, project_id=None, roles=None, - is_admin=None, read_only=False, show_deleted=False, - request_id=None, trust_id=None, auth_token_info=None, - all_tenants=False, password=None, service_catalog=None, - user_auth_plugin=None, - **kwargs): - """Stores several additional request parameters: - - :param domain_id: The ID of the domain. - :param domain_name: The name of the domain. - :param user_domain_id: The ID of the domain to - authenticate a user against. - :param user_domain_name: The name of the domain to - authenticate a user against. 
- :param service_catalog: Specifies the service_catalog - """ - super(RequestContext, self).__init__(auth_token=auth_token, - user=user_name, - tenant=project_name, - is_admin=is_admin, - read_only=read_only, - show_deleted=show_deleted, - request_id=request_id, - roles=roles) - - self.user_name = user_name - self.user_id = user_id - self.project_name = project_name - self.project_id = project_id - self.domain_id = domain_id - self.domain_name = domain_name - self.user_domain_id = user_domain_id - self.user_domain_name = user_domain_name - self.auth_url = auth_url - self.auth_token_info = auth_token_info - self.trust_id = trust_id - self.all_tenants = all_tenants - self.password = password - - if service_catalog: - # Only include required parts of service_catalog - self.service_catalog = [s for s in service_catalog - if s.get('type') in - REQUIRED_SERVICE_TYPES] - else: - # if list is empty or none - self.service_catalog = [] - - self.user_auth_plugin = user_auth_plugin - if is_admin is None: - self.is_admin = policy.check_is_admin(self) - else: - self.is_admin = is_admin - - def to_dict(self): - value = super(RequestContext, self).to_dict() - value.update({'auth_token': self.auth_token, - 'auth_url': self.auth_url, - 'domain_id': self.domain_id, - 'domain_name': self.domain_name, - 'user_domain_id': self.user_domain_id, - 'user_domain_name': self.user_domain_name, - 'user_name': self.user_name, - 'user_id': self.user_id, - 'project_name': self.project_name, - 'project_id': self.project_id, - 'is_admin': self.is_admin, - 'read_only': self.read_only, - 'roles': self.roles, - 'show_deleted': self.show_deleted, - 'request_id': self.request_id, - 'trust_id': self.trust_id, - 'auth_token_info': self.auth_token_info, - 'password': self.password, - 'all_tenants': self.all_tenants, - 'service_catalog': self.service_catalog}) - return value - - @classmethod - def from_dict(cls, values): - return cls(**values) - - def get_auth_plugin(self): - if self.user_auth_plugin: - return self.user_auth_plugin - else: - return _ContextAuthPlugin(self.auth_token, self.service_catalog) - - -def make_context(*args, **kwargs): - return RequestContext(*args, **kwargs) - - -def get_admin_context(show_deleted="no"): - context = make_context(tenant=None, - is_admin=True, - show_deleted=show_deleted) - return context diff --git a/inventory/inventory/inventory/common/exception.py b/inventory/inventory/inventory/common/exception.py deleted file mode 100644 index a7e931d2..00000000 --- a/inventory/inventory/inventory/common/exception.py +++ /dev/null @@ -1,738 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -"""Inventory base exception handling. 
-""" - -import six -import webob.exc - -from inventory.common.i18n import _ -from inventory.conf import CONF -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class ProcessExecutionError(IOError): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - self.exit_code = exit_code - self.stderr = stderr - self.stdout = stdout - self.cmd = cmd - self.description = description - - if description is None: - description = _('Unexpected error while running command.') - if exit_code is None: - exit_code = '-' - message = (_('%(description)s\nCommand: %(cmd)s\n' - 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' - 'Stderr: %(stderr)r') % - {'description': description, 'cmd': cmd, - 'exit_code': exit_code, 'stdout': stdout, - 'stderr': stderr}) - IOError.__init__(self, message) - - -def _cleanse_dict(original): - """Strip all admin_password, new_pass, rescue_pass keys from a dict.""" - return dict((k, v) for k, v in original.iteritems() if "_pass" not in k) - - -class InventoryException(Exception): - """Base Inventory Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - - """ - message = _("An unknown exception occurred.") - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - if not message: - try: - message = self.message % kwargs - - except Exception as e: - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_('Exception in string format operation')) - for name, value in kwargs.iteritems(): - LOG.error("%s: %s" % (name, value)) - - if CONF.fatal_exception_format_errors: - raise e - else: - # at least get the core message out if something happened - message = self.message - - super(InventoryException, self).__init__(message) - - def format_message(self): - if self.__class__.__name__.endswith('_Remote'): - return self.args[0] - else: - return six.text_type(self) - - -class NotAuthorized(InventoryException): - message = _("Not authorized.") - code = 403 - - -class AdminRequired(NotAuthorized): - message = _("User does not have admin privileges") - - -class PolicyNotAuthorized(NotAuthorized): - message = _("Policy doesn't allow %(action)s to be performed.") - - -class OperationNotPermitted(NotAuthorized): - message = _("Operation not permitted.") - - -class Invalid(InventoryException): - message = _("Unacceptable parameters.") - code = 400 - - -class Conflict(InventoryException): - message = _('Conflict.') - code = 409 - - -class InvalidCPUInfo(Invalid): - message = _("Unacceptable CPU info") + ": %(reason)s" - - -class InvalidIpAddressError(Invalid): - message = _("%(address)s is not a valid IP v4/6 address.") - - -class IpAddressOutOfRange(Invalid): - message = _("%(address)s is not in the range: %(low)s to %(high)s") - - -class InfrastructureNetworkNotConfigured(Invalid): - message = _("An infrastructure network has not been configured") - - -class InvalidDiskFormat(Invalid): - message = _("Disk format %(disk_format)s is not acceptable") - - -class InvalidUUID(Invalid): - message = _("Expected a uuid but received %(uuid)s.") - - -class InvalidIPAddress(Invalid): - message = _("Expected an IPv4 or IPv6 address but received %(address)s.") - - -class InvalidIdentity(Invalid): - message = _("Expected an 
uuid or int but received %(identity)s.") - - -class PatchError(Invalid): - message = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s") - - -class InvalidMAC(Invalid): - message = _("Expected a MAC address but received %(mac)s.") - - -class ManagedIPAddress(Invalid): - message = _("The infrastructure IP address for this nodetype is " - "specified by the system configuration and cannot be " - "modified.") - - -class IncorrectPrefix(Invalid): - message = _("A prefix length of %(length)s must be used for " - "addresses on the infrastructure network, as is specified in " - "the system configuration.") - - -class InterfaceNameAlreadyExists(Conflict): - message = _("Interface with name %(name)s already exists.") - - -class InterfaceNetworkTypeNotSet(Conflict): - message = _("The Interface must have a networktype configured to " - "support addresses. (data or infra)") - - -class AddressInUseByRouteGateway(Conflict): - message = _("Address %(address)s is in use by a route to " - "%(network)s/%(prefix)s via %(gateway)s") - - -class DuplicateAddressDetectionNotSupportedOnIpv4(Conflict): - message = _("Duplicate Address Detection (DAD) not supported on " - "IPv4 Addresses") - - -class DuplicateAddressDetectionRequiredOnIpv6(Conflict): - message = _("Duplicate Address Detection (DAD) required on " - "IPv6 Addresses") - - -class RouteAlreadyExists(Conflict): - message = _("Route %(network)s/%(prefix)s via %(gateway)s already " - "exists on this host.") - - -class RouteMaxPathsForSubnet(Conflict): - message = _("Maximum number of paths (%(count)s) already reached for " - "%(network)s/%(prefix)s.") - - -class RouteGatewayNotReachable(Conflict): - message = _("Route gateway %(gateway)s is not reachable by any address " - "on this interface") - - -class RouteGatewayCannotBeLocal(Conflict): - message = _("Route gateway %(gateway)s cannot be another local interface") - - -class RoutesNotSupportedOnInterfaces(Conflict): - message = _("Routes may not be configured against interfaces with network " - "type '%(iftype)s'") - - -class DefaultRouteNotAllowedOnVRSInterface(Conflict): - message = _("Default route not permitted on 'data-vrs' interfaces") - - -class CannotDeterminePrimaryNetworkType(Conflict): - message = _("Cannot determine primary network type of interface " - "%(iface)s from %(types)s") - - -class AlarmAlreadyExists(Conflict): - message = _("An Alarm with UUID %(uuid)s already exists.") - - -class CPUAlreadyExists(Conflict): - message = _("A CPU with cpu ID %(cpu)s already exists.") - - -class MACAlreadyExists(Conflict): - message = _("A Port with MAC address %(mac)s already exists " - "on host %(host)s.") - - -class PCIAddrAlreadyExists(Conflict): - message = _("A Device with PCI address %(pciaddr)s " - "for %(host)s already exists.") - - -class DiskAlreadyExists(Conflict): - message = _("A Disk with UUID %(uuid)s already exists.") - - -class PortAlreadyExists(Conflict): - message = _("A Port with UUID %(uuid)s already exists.") - - -class SystemAlreadyExists(Conflict): - message = _("A System with UUID %(uuid)s already exists.") - - -class SensorAlreadyExists(Conflict): - message = _("A Sensor with UUID %(uuid)s already exists.") - - -class SensorGroupAlreadyExists(Conflict): - message = _("A SensorGroup with UUID %(uuid)s already exists.") - - -class TrapDestAlreadyExists(Conflict): - message = _("A TrapDest with UUID %(uuid)s already exists.") - - -class UserAlreadyExists(Conflict): - message = _("A User with UUID %(uuid)s already exists.") - - -class 
CommunityAlreadyExists(Conflict): - message = _("A Community with UUID %(uuid)s already exists.") - - -class ServiceAlreadyExists(Conflict): - message = _("A Service with UUID %(uuid)s already exists.") - - -class ServiceGroupAlreadyExists(Conflict): - message = _("A ServiceGroup with UUID %(uuid)s already exists.") - - -class NodeAlreadyExists(Conflict): - message = _("A Node with UUID %(uuid)s already exists.") - - -class MemoryAlreadyExists(Conflict): - message = _("A Memory with UUID %(uuid)s already exists.") - - -class LLDPAgentExists(Conflict): - message = _("An LLDP agent with uuid %(uuid)s already exists.") - - -class LLDPNeighbourExists(Conflict): - message = _("An LLDP neighbour with uuid %(uuid)s already exists.") - - -class LLDPTlvExists(Conflict): - message = _("An LLDP TLV with type %(type)s already exists.") - - -class LLDPDriverError(Conflict): - message = _("An LLDP driver error has occurred. method=%(method)s") - - -class SystemConfigDriverError(Conflict): - message = _("A SystemConfig driver error has occurred. method=%(method)s") - - -# Cannot be templated as the error syntax varies. -# msg needs to be constructed when raised. -class InvalidParameterValue(Invalid): - message = _("%(err)s") - - -class ApiError(Exception): - - message = _("An unknown exception occurred.") - - code = webob.exc.HTTPInternalServerError - - def __init__(self, message=None, **kwargs): - - self.kwargs = kwargs - - if 'code' not in self.kwargs and hasattr(self, 'code'): - self.kwargs['code'] = self.code - - if message: - self.message = message - - try: - super(ApiError, self).__init__(self.message % kwargs) - self.message = self.message % kwargs - except Exception: - LOG.exception('Exception in string format operation, ' - 'kwargs: %s', kwargs) - raise - - def __str__(self): - return repr(self.message) - - def __unicode__(self): - return self.message - - def format_message(self): - if self.__class__.__name__.endswith('_Remote'): - return self.args[0] - else: - return six.text_type(self) - - -class NotFound(InventoryException): - message = _("Resource could not be found.") - code = 404 - - -class MultipleResults(InventoryException): - message = _("More than one result found.") - - -class SystemNotFound(NotFound): - message = _("No System %(system)s found.") - - -class CPUNotFound(NotFound): - message = _("No CPU %(cpu)s found.") - - -class NTPNotFound(NotFound): - message = _("No NTP with id %(uuid)s found.") - - -class PTPNotFound(NotFound): - message = _("No PTP with id %(uuid)s found.") - - -class DiskNotFound(NotFound): - message = _("No disk with id %(disk_id)s") - - -class DiskPartitionNotFound(NotFound): - message = _("No disk partition with id %(partition_id)s") - - -class PartitionAlreadyExists(Conflict): - message = _("Disk partition %(device_path)s already exists.") - - -class LvmLvgNotFound(NotFound): - message = _("No LVM Local Volume Group with id %(lvg_id)s") - - -class LvmPvNotFound(NotFound): - message = _("No LVM Physical Volume with id %(pv_id)s") - - -class DriverNotFound(NotFound): - message = _("Failed to load driver %(driver_name)s.") - - -class PCIDeviceNotFound(NotFound): - message = _("Failed to load pci device %(pcidevice_id)s.") - - -class ImageNotFound(NotFound): - message = _("Image %(image_id)s could not be found.") - - -class HostNotFound(NotFound): - message = _("Host %(host)s could not be found.") - - -class HostAlreadyExists(Conflict): - message = _("Host %(uuid)s already exists.") - - -class ClonedInterfaceNotFound(NotFound): - message = _("Cloned Interface 
%(intf)s could not be found.") - - -class StaticAddressNotConfigured(Invalid): - message = _("The IP address for this interface is assigned " - "dynamically as specified during system configuration.") - - -class HostLocked(InventoryException): - message = _("Unable to complete the action %(action)s because " - "Host %(host)s is in administrative state = unlocked.") - - -class HostMustBeLocked(InventoryException): - message = _("Unable to complete the action because " - "Host %(host)s is in administrative state = unlocked.") - - -class ConsoleNotFound(NotFound): - message = _("Console %(console_id)s could not be found.") - - -class FileNotFound(NotFound): - message = _("File %(file_path)s could not be found.") - - -class NoValidHost(NotFound): - message = _("No valid host was found. %(reason)s") - - -class NodeNotFound(NotFound): - message = _("Node %(node)s could not be found.") - - -class MemoryNotFound(NotFound): - message = _("Memory %(memory)s could not be found.") - - -class PortNotFound(NotFound): - message = _("Port %(port)s could not be found.") - - -class SensorNotFound(NotFound): - message = _("Sensor %(sensor)s could not be found.") - - -class ServerNotFound(NotFound): - message = _("Server %(server)s could not be found.") - - -class ServiceNotFound(NotFound): - message = _("Service %(service)s could not be found.") - - -class AlarmNotFound(NotFound): - message = _("Alarm %(alarm)s could not be found.") - - -class EventLogNotFound(NotFound): - message = _("Event Log %(eventLog)s could not be found.") - - -class ExclusiveLockRequired(NotAuthorized): - message = _("An exclusive lock is required, " - "but the current context has a shared lock.") - - -class SSHConnectFailed(InventoryException): - message = _("Failed to establish SSH connection to host %(host)s.") - - -class UnsupportedObjectError(InventoryException): - message = _('Unsupported object type %(objtype)s') - - -class OrphanedObjectError(InventoryException): - message = _('Cannot call %(method)s on orphaned %(objtype)s object') - - -class IncompatibleObjectVersion(InventoryException): - message = _('Version %(objver)s of %(objname)s is not supported') - - -class GlanceConnectionFailed(InventoryException): - message = "Connection to glance host %(host)s:%(port)s failed: %(reason)s" - - -class ImageNotAuthorized(InventoryException): - message = "Not authorized for image %(image_id)s." - - -class LoadNotFound(NotFound): - message = _("Load %(load)s could not be found.") - - -class LldpAgentNotFound(NotFound): - message = _("LLDP agent %(agent)s could not be found") - - -class LldpAgentNotFoundForPort(NotFound): - message = _("LLDP agent for port %(port)s could not be found") - - -class LldpNeighbourNotFound(NotFound): - message = _("LLDP neighbour %(neighbour)s could not be found") - - -class LldpNeighbourNotFoundForMsap(NotFound): - message = _("LLDP neighbour could not be found for msap %(msap)s") - - -class LldpTlvNotFound(NotFound): - message = _("LLDP TLV %(type)s could not be found") - - -class InvalidImageRef(InventoryException): - message = "Invalid image href %(image_href)s." - code = 400 - - -class ServiceUnavailable(InventoryException): - message = "Connection failed" - - -class Forbidden(InventoryException): - message = "Requested OpenStack Images API is forbidden" - - -class BadRequest(InventoryException): - pass - - -class HTTPException(InventoryException): - message = "Requested version of OpenStack Images API is not available." 
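Editorial note on the pattern being removed: all of the classes in this deleted module follow the InventoryException convention shown earlier, a printf-style message template on the class that is interpolated with the keyword arguments supplied at raise time. A minimal self-contained sketch of that mechanism, with illustrative class names that are not part of this tree:

class BaseErrorDemo(Exception):
    # Same idea as InventoryException: subclasses override 'message'
    # and callers supply the template's keys as kwargs.
    message = "An unknown exception occurred."
    code = 500

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if not message:
            message = self.message % kwargs  # printf-style interpolation
        super(BaseErrorDemo, self).__init__(message)


class HostNotFoundDemo(BaseErrorDemo):
    message = "Host %(host)s could not be found."
    code = 404


try:
    raise HostNotFoundDemo(host="controller-0")
except HostNotFoundDemo as e:
    print(e)       # Host controller-0 could not be found.
    print(e.code)  # 404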
- - -class InventorySignalTimeout(InventoryException): - message = "Inventory Timeout." - - -class InvalidEndpoint(InventoryException): - message = "The provided endpoint is invalid" - - -class CommunicationError(InventoryException): - message = "Unable to communicate with the server." - - -class HTTPForbidden(Forbidden): - pass - - -class Unauthorized(InventoryException): - pass - - -class HTTPNotFound(NotFound): - pass - - -class ConfigNotFound(InventoryException): - pass - - -class ConfigInvalid(InventoryException): - message = _("Invalid configuration file. %(error_msg)s") - - -class NotSupported(InventoryException): - message = "Action %(action)s is not supported." - - -class PeerAlreadyExists(Conflict): - message = _("Peer %(uuid)s already exists") - - -class PeerAlreadyContainsThisHost(Conflict): - message = _("Host %(host)s is already present in peer group %(peer_name)s") - - -class PeerNotFound(NotFound): - message = _("Peer %(peer_uuid)s not found") - - -class PeerContainsDuplicates(Conflict): - message = _("Peer with name %(name)s already exists") - - -class StoragePeerGroupUnexpected(InventoryException): - message = _("Host %(host)s cannot be assigned to group %(peer_name)s. " - "group-0 is reserved for storage-0 and storage-1") - - -class StorageBackendNotFoundByName(NotFound): - message = _("StorageBackend %(name)s not found") - - -class PickleableException(Exception): - """ - Pickleable Exception - Used to mark custom exception classes that can be pickled. - """ - pass - - -class OpenStackException(PickleableException): - """ - OpenStack Exception - """ - def __init__(self, message, reason): - """ - Create an OpenStack exception - """ - super(OpenStackException, self).__init__(message, reason) - self._reason = reason  # a message string or another exception - self._message = message - - def __str__(self): - """ - Return a string representing the exception - """ - return "[OpenStack Exception:reason=%s]" % self._reason - - def __repr__(self): - """ - Provide a representation of the exception - """ - return str(self) - - def __reduce__(self): - """ - Return a tuple so that we can properly pickle the exception - """ - return OpenStackException, (self.message, self._reason) - - @property - def message(self): - """ - Returns the message for the exception - """ - return self._message - - @property - def reason(self): - """ - Returns the reason for the exception - """ - return self._reason - - -class OpenStackRestAPIException(PickleableException): - """ - OpenStack Rest-API Exception - """ - def __init__(self, message, http_status_code, reason): - """ - Create an OpenStack Rest-API exception - """ - super(OpenStackRestAPIException, self).__init__(message) - self._http_status_code = http_status_code  # as defined in RFC 2616 - self._reason = reason  # a message string or another exception - - def __str__(self): - """ - Return a string representing the exception - """ - return ("[OpenStack Rest-API Exception: code=%s, reason=%s]" - % (self._http_status_code, self._reason)) - - def __repr__(self): - """ - Provide a representation of the exception - """ - return str(self) - - def __reduce__(self): - """ - Return a tuple so that we can properly pickle the exception - """ - return OpenStackRestAPIException, (self.message, - self._http_status_code, - self._reason) - - @property - def http_status_code(self): - """ - Returns the HTTP status code - """ - return self._http_status_code - - @property - def reason(self): - """ - Returns the reason for the exception - """ - return self._reason diff --git 
a/inventory/inventory/inventory/common/fm.py b/inventory/inventory/inventory/common/fm.py deleted file mode 100644 index 901ad6b5..00000000 --- a/inventory/inventory/inventory/common/fm.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# FM Fault Management Handling - -from fm_api import constants as fm_constants -from fm_api import fm_api -import fmclient as fm_client -from keystoneauth1.access import service_catalog as k_service_catalog -from oslo_config import cfg -from oslo_log import log - -CONF = cfg.CONF - -LOG = log.getLogger(__name__) - - -fm_group = cfg.OptGroup( - 'fm', - title='FM Options', - help="Configuration options for the fault management service") - -fm_opts = [ - cfg.StrOpt('catalog_info', - default='faultmanagement:fm:internalURL', - help="Service catalog Look up info."), - cfg.StrOpt('os_region_name', - default='RegionOne', - help="Region name of this node. It is used for catalog lookup") -] - -CONF.register_group(fm_group) -CONF.register_opts(fm_opts, group=fm_group) - - -class FmCustomerLog(object): - """ - Fault Management Customer Log - """ - - _fm_api = None - - def __init__(self): - self._fm_api = fm_api.FaultAPIs() - - def customer_log(self, log_data): - LOG.info("Generating FM Customer Log %s" % log_data) - fm_event_id = log_data.get('event_id', None) - if fm_event_id is not None: - fm_event_state = fm_constants.FM_ALARM_STATE_MSG - entity_type = log_data.get('entity_type', None) - entity = log_data.get('entity', None) - fm_severity = log_data.get('fm_severity', None) - reason_text = log_data.get('reason_text', None) - fm_event_type = log_data.get('fm_event_type', None) - fm_probable_cause = fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN - fm_uuid = None - fault = fm_api.Fault(fm_event_id, - fm_event_state, - entity_type, - entity, - fm_severity, - reason_text, - fm_event_type, - fm_probable_cause, "", - False, True) - - response = self._fm_api.set_fault(fault) - if response is None: - LOG.error("Failed to generate customer log, fm_uuid=%s." % - fm_uuid) - else: - fm_uuid = response - LOG.info("Generated customer log, fm_uuid=%s." % fm_uuid) - else: - LOG.error("Unknown event id (%s) given." % fm_event_id) - - -def fmclient(context, version=1, endpoint=None): - """Constructs a fm client object for making API requests. - - :param context: The request context for auth. - :param version: API endpoint version. - :param endpoint: Optional If the endpoint is not available, it will be - retrieved from context - """ - auth_token = context.auth_token - if endpoint is None: - sc = k_service_catalog.ServiceCatalogV2(context.service_catalog) - service_type, service_name, interface = \ - CONF.fm.catalog_info.split(':') - service_parameters = {'service_type': service_type, - 'service_name': service_name, - 'interface': interface, - 'region_name': CONF.fm.os_region_name} - endpoint = sc.url_for(**service_parameters) - - return fm_client.Client(version=version, - endpoint=endpoint, - auth_token=auth_token) diff --git a/inventory/inventory/inventory/common/health.py b/inventory/inventory/inventory/common/health.py deleted file mode 100755 index 8ab49e64..00000000 --- a/inventory/inventory/inventory/common/health.py +++ /dev/null @@ -1,289 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
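The fmclient() helper removed just above resolves its endpoint by splitting the [fm] catalog_info option into service_type:service_name:interface and looking the result up in the token's service catalog. That lookup in isolation, as a sketch; the service_catalog argument is whatever context.service_catalog carried, and the defaults simply mirror the removed options:

from keystoneauth1.access import service_catalog as k_service_catalog


def resolve_fm_endpoint(service_catalog,
                        catalog_info='faultmanagement:fm:internalURL',
                        region_name='RegionOne'):
    # catalog_info follows the "<type>:<name>:<interface>" convention
    # used by the removed fm.py.
    service_type, service_name, interface = catalog_info.split(':')
    sc = k_service_catalog.ServiceCatalogV2(service_catalog)
    return sc.url_for(service_type=service_type,
                      service_name=service_name,
                      interface=interface,
                      region_name=region_name)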
-# -# SPDX-License-Identifier: Apache-2.0 -# -import os -import subprocess - -from controllerconfig import backup_restore -from fm_api import fm_api - -from inventory.common import ceph -from inventory.common import constants -from inventory.common.fm import fmclient -from inventory.common.i18n import _ -from inventory.common import k_host -from inventory.common import patch_api -from inventory.common import vim_api -from oslo_log import log - -import cgcs_patch.constants as patch_constants - -LOG = log.getLogger(__name__) - - -class Health(object): - - SUCCESS_MSG = _('OK') - FAIL_MSG = _('Fail') - - def __init__(self, context, dbapi): - self._context = context - self._dbapi = dbapi - self._ceph = ceph.CephApiOperator() - - def _check_hosts_provisioned(self, hosts): - """Checks that each host is provisioned""" - provisioned_hosts = [] - unprovisioned_hosts = 0 - for host in hosts: - if host['invprovision'] != k_host.PROVISIONED or \ - host['hostname'] is None: - unprovisioned_hosts = unprovisioned_hosts + 1 - else: - provisioned_hosts.append(host) - - return unprovisioned_hosts, provisioned_hosts - - def _check_hosts_enabled(self, hosts): - """Checks that each host is enabled and unlocked""" - offline_host_list = [] - for host in hosts: - if host['administrative'] != k_host.ADMIN_UNLOCKED or \ - host['operational'] != k_host.OPERATIONAL_ENABLED: - offline_host_list.append(host.hostname) - - success = not offline_host_list - return success, offline_host_list - - def _check_hosts_config(self, hosts): - """Checks that the applied and target config match for each host""" - config_host_list = [] - for host in hosts: - if (host.config_target and - host.config_applied != host.config_target): - config_host_list.append(host.hostname) - - success = not config_host_list - return success, config_host_list - - def _check_patch_current(self, hosts): - """Checks that each host is patch current""" - system = self._dbapi.isystem_get_one() - response = patch_api.patch_query_hosts(context=self._context, - region_name=system.region_name) - patch_hosts = response['data'] - not_patch_current_hosts = [] - hostnames = [] - for host in hosts: - hostnames.append(host['hostname']) - - for host in patch_hosts: - # There may be instances where the patching db returns - # hosts that have been recently deleted. We will continue if a host - # is in the patching db but not in inventory - try: - hostnames.remove(host['hostname']) - except ValueError: - LOG.info('Host %s found in patching but not in inventory. 
' - 'Continuing' % host['hostname']) - else: - if not host['patch_current']: - not_patch_current_hosts.append(host['hostname']) - - success = not not_patch_current_hosts and not hostnames - return success, not_patch_current_hosts, hostnames - - def _check_alarms(self, context, force=False): - """Checks that no alarms are active""" - db_alarms = fmclient(context).alarm.list(include_suppress=True) - - success = True - allowed = 0 - affecting = 0 - # Only fail if we find alarms past their affecting threshold - for db_alarm in db_alarms: - if isinstance(db_alarm, tuple): - alarm = db_alarm[0] - mgmt_affecting = db_alarm[constants.DB_MGMT_AFFECTING] - else: - alarm = db_alarm - mgmt_affecting = db_alarm.mgmt_affecting - if fm_api.FaultAPIs.alarm_allowed(alarm.severity, mgmt_affecting): - allowed += 1 - if not force: - success = False - else: - affecting += 1 - success = False - - return success, allowed, affecting - - def get_alarms_degrade(self, context, - alarm_ignore_list=[], entity_instance_id_filter=""): - """Return all the alarms that cause the degrade""" - db_alarms = fmclient(context).alarm.list(include_suppress=True) - degrade_alarms = [] - - for db_alarm in db_alarms: - if isinstance(db_alarm, tuple): - alarm = db_alarm[0] - degrade_affecting = db_alarm[constants.DB_DEGRADE_AFFECTING] - else: - alarm = db_alarm - degrade_affecting = db_alarm.degrade_affecting - # Ignore alarms that are part of the ignore list sent as parameter - # and also filter the alarms based on entity instance id. - # If multiple alarms with the same ID exist, we only return the ID - # one time. - if not fm_api.FaultAPIs.alarm_allowed( - alarm.severity, degrade_affecting): - if (entity_instance_id_filter in alarm.entity_instance_id and - alarm.alarm_id not in alarm_ignore_list and - alarm.alarm_id not in degrade_alarms): - degrade_alarms.append(alarm.alarm_id) - return degrade_alarms - - def _check_ceph(self): - """Checks the ceph health status""" - return self._ceph.ceph_status_ok() - - def _check_license(self, version): - """Validates the current license is valid for the specified version""" - check_binary = "/usr/bin/sm-license-check" - license_file = '/etc/platform/.license' - system = self._dbapi.isystem_get_one() - system_type = system.system_type - system_mode = system.system_mode - - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call([check_binary, license_file, version, - system_type, system_mode], - stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - return False - - return True - - def _check_required_patches(self, patch_list): - """Validates that each patch provided is applied on the system""" - system = self._dbapi.isystem_get_one() - response = patch_api.patch_query(context=self._context, - region_name=system.region_name, - timeout=60) - query_patches = response['pd'] - applied_patches = [] - for patch_key in query_patches: - patch = query_patches[patch_key] - patchstate = patch.get('patchstate', None) - if patchstate == patch_constants.APPLIED or \ - patchstate == patch_constants.COMMITTED: - applied_patches.append(patch_key) - - missing_patches = [] - for required_patch in patch_list: - if required_patch not in applied_patches: - missing_patches.append(required_patch) - - success = not missing_patches - return success, missing_patches - - def _check_running_instances(self, host): - """Checks that no instances are running on the host""" - - vim_resp = vim_api.vim_host_get_instances( - self._context, - host['uuid'], - host['hostname']) - running_instances = 
vim_resp['instances'] - - success = running_instances == 0 - return success, running_instances - - def _check_simplex_available_space(self): - """Ensures there is free space for the backup""" - try: - backup_restore.check_size("/opt/backups", True) - except backup_restore.BackupFail: - return False - - return True - - def get_system_health(self, context, force=False): - """Returns the general health of the system""" - # Checks the following: - # All hosts are provisioned - # All hosts are patch current - # All hosts are unlocked/enabled - # All hosts having matching configs - # No management affecting alarms - # For ceph systems: The storage cluster is healthy - - hosts = self._dbapi.ihost_get_list() - output = _('System Health:\n') - health_ok = True - - unprovisioned_hosts, provisioned_hosts = \ - self._check_hosts_provisioned(hosts) - success = unprovisioned_hosts == 0 - output += (_('All hosts are provisioned: [%s]\n') - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)) - if not success: - output += _('%s Unprovisioned hosts\n') % unprovisioned_hosts - # Set the hosts to the provisioned_hosts. This will allow the other - # checks to continue - hosts = provisioned_hosts - - health_ok = health_ok and success - - success, error_hosts = self._check_hosts_enabled(hosts) - output += _('All hosts are unlocked/enabled: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - if not success: - output += _('Locked or disabled hosts: %s\n') \ - % ', '.join(error_hosts) - - health_ok = health_ok and success - - success, error_hosts = self._check_hosts_config(hosts) - output += _('All hosts have current configurations: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - if not success: - output += _('Hosts with out of date configurations: %s\n') \ - % ', '.join(error_hosts) - - health_ok = health_ok and success - - success, error_hosts, missing_hosts = self._check_patch_current(hosts) - output += _('All hosts are patch current: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - if not success: - if error_hosts: - output += _('Hosts not patch current: %s\n') \ - % ', '.join(error_hosts) - if missing_hosts: - output += _('Hosts without patch data: %s\n') \ - % ', '.join(missing_hosts) - - health_ok = health_ok and success - - # if StorageBackendConfig.has_backend( - # self._dbapi, - # constants.CINDER_BACKEND_CEPH): - # success = self._check_ceph() - # output += _('Ceph Storage Healthy: [%s]\n') \ - # % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - # health_ok = health_ok and success - - success, allowed, affecting = self._check_alarms(context, force) - output += _('No alarms: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - if not success: - output += _('[{}] alarms found, [{}] of which are management ' - 'affecting\n').format(allowed + affecting, affecting) - - health_ok = health_ok and success - - return health_ok, output diff --git a/inventory/inventory/inventory/common/hwmon_api.py b/inventory/inventory/inventory/common/hwmon_api.py deleted file mode 100755 index a531063d..00000000 --- a/inventory/inventory/inventory/common/hwmon_api.py +++ /dev/null @@ -1,184 +0,0 @@ -# -# Copyright (c) 2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -import json -from oslo_log import log -from rest_api import rest_api_request -LOG = log.getLogger(__name__) - - -def sensorgroup_add(token, address, port, isensorgroup_hwmon, timeout): - """ - Sends a SensorGroup Add command to maintenance. 
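For orientation: get_system_health() (removed above) builds its report by folding every check into a running (health_ok, output) pair. That accumulation pattern, reduced to its core with stand-in checks:

def system_health_demo(checks):
    # checks: list of (description, zero-argument callable) pairs,
    # mirroring how get_system_health folds results together.
    output = 'System Health:\n'
    health_ok = True
    for description, check in checks:
        success = check()
        output += '%s: [%s]\n' % (description, 'OK' if success else 'Fail')
        health_ok = health_ok and success
    return health_ok, output


ok, report = system_health_demo([
    ('All hosts are provisioned', lambda: True),
    ('No alarms', lambda: False),
])
print(report)   # the failing check reports [Fail], so ok is False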
- """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensorgroups/" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = isensorgroup_hwmon - - LOG.info("sensorgroup_add for %s cmd=%s hdr=%s payload=%s" % - (isensorgroup_hwmon['sensorgroupname'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "POST", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def sensorgroup_modify(token, address, port, isensorgroup_hwmon, timeout): - """ - Sends a SensorGroup Modify command to maintenance. - """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensorgroups/%s" % isensorgroup_hwmon['uuid'] - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = isensorgroup_hwmon - - LOG.info("sensorgroup_modify for %s cmd=%s hdr=%s payload=%s" % - (isensorgroup_hwmon['sensorgroupname'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "PATCH", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - LOG.debug("sensorgroup modify response=%s" % response) - - return response - - -def sensorgroup_delete(token, address, port, isensorgroup_hwmon, timeout): - """ - Sends a SensorGroup Delete command to maintenance. - """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensorgroups/%s" % isensorgroup_hwmon['uuid'] - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = None - - LOG.info("sensorgroup_delete for %s cmd=%s hdr=%s payload=%s" % - (isensorgroup_hwmon['uuid'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "DELETE", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def sensorgroup_relearn(token, address, port, payload, timeout): - """ - Sends a SensorGroup Relearn command to maintenance. - """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensorgroups/relearn" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = payload - - LOG.info("sensorgroup_relearn for %s cmd=%s hdr=%s payload=%s" % - (payload['host_uuid'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "POST", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def sensor_add(token, address, port, isensor_hwmon, timeout): - """ - Sends a Sensor Add command to maintenance. - """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensors/" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = isensor_hwmon - - LOG.info("sensor_add for %s cmd=%s hdr=%s payload=%s" % - (isensor_hwmon['sensorname'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "POST", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def sensor_modify(token, address, port, isensor_hwmon, timeout): - """ - Sends a Sensor Modify command to maintenance. 
- """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensors/%s" % isensor_hwmon['uuid'] - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = isensor_hwmon - - LOG.info("sensor_modify for %s cmd=%s hdr=%s payload=%s" % - (isensor_hwmon['sensorname'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "PATCH", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def sensor_delete(token, address, port, isensor_hwmon, timeout): - """ - Sends a Sensor Delete command to maintenance. - """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/isensors/%s" % isensor_hwmon['uuid'] - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = None - - LOG.info("sensor_delete for %s cmd=%s hdr=%s payload=%s" % - (isensor_hwmon['uuid'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "DELETE", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response diff --git a/inventory/inventory/inventory/common/i18n.py b/inventory/inventory/inventory/common/i18n.py deleted file mode 100644 index 7f813263..00000000 --- a/inventory/inventory/inventory/common/i18n.py +++ /dev/null @@ -1,12 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import oslo_i18n - -_translators = oslo_i18n.TranslatorFactory(domain='inventory') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/inventory/inventory/inventory/common/k_host.py b/inventory/inventory/inventory/common/k_host.py deleted file mode 100644 index fc78e12d..00000000 --- a/inventory/inventory/inventory/common/k_host.py +++ /dev/null @@ -1,110 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# Inventory Host Management Constants - -# Administrative States -ADMIN_UNLOCKED = 'unlocked' -ADMIN_LOCKED = 'locked' - -# Operational States -OPERATIONAL_ENABLED = 'enabled' -OPERATIONAL_DISABLED = 'disabled' - -# Availability Status -AVAILABILITY_AVAILABLE = 'available' -AVAILABILITY_OFFLINE = 'offline' -AVAILABILITY_ONLINE = 'online' -AVAILABILITY_DEGRADED = 'degraded' - -# Host Actions: -ACTION_UNLOCK = 'unlock' -ACTION_FORCE_UNLOCK = 'force-unlock' -ACTION_LOCK = 'lock' -ACTION_FORCE_LOCK = 'force-lock' -ACTION_REBOOT = 'reboot' -ACTION_RESET = 'reset' -ACTION_REINSTALL = 'reinstall' -ACTION_POWERON = 'power-on' -ACTION_POWEROFF = 'power-off' -ACTION_SWACT = 'swact' -ACTION_FORCE_SWACT = 'force-swact' -ACTION_SUBFUNCTION_CONFIG = 'subfunction_config' -ACTION_DELETE = 'delete' -ACTION_NONE = 'none' - - -ACTIONS_VIM = [ACTION_LOCK, - ACTION_FORCE_LOCK] - -# VIM services -VIM_SERVICES_ENABLED = 'services-enabled' -VIM_SERVICES_DISABLED = 'services-disabled' -VIM_SERVICES_DISABLE_EXTEND = 'services-disable-extend' -VIM_SERVICES_DISABLE_FAILED = 'services-disable-failed' -VIM_SERVICES_DELETE_FAILED = 'services-delete-failed' - -ACTIONS_MTCE = [ - ACTION_REBOOT, - ACTION_REINSTALL, - ACTION_RESET, - ACTION_POWERON, - ACTION_POWEROFF, - ACTION_SWACT, - ACTION_UNLOCK, - VIM_SERVICES_DISABLED, - VIM_SERVICES_DISABLE_FAILED, - ACTION_FORCE_SWACT] - -ACTIONS_CONFIG = [ACTION_SUBFUNCTION_CONFIG] - -# Personalities -CONTROLLER = 'controller' -STORAGE = 'storage' -COMPUTE = 'compute' - -PERSONALITIES = [CONTROLLER, STORAGE, COMPUTE] - -# Host names -LOCALHOST_HOSTNAME = 'localhost' - -CONTROLLER_HOSTNAME = 'controller' -CONTROLLER_0_HOSTNAME = '%s-0' % CONTROLLER_HOSTNAME -CONTROLLER_1_HOSTNAME = '%s-1' % CONTROLLER_HOSTNAME - -STORAGE_HOSTNAME = 'storage' -STORAGE_0_HOSTNAME = '%s-0' % STORAGE_HOSTNAME -STORAGE_1_HOSTNAME = '%s-1' % STORAGE_HOSTNAME -STORAGE_2_HOSTNAME = '%s-2' % STORAGE_HOSTNAME -# Other Storage Hostnames are built dynamically. - -# SUBFUNCTION FEATURES -SUBFUNCTIONS = 'subfunctions' -LOWLATENCY = 'lowlatency' - -LOCKING = 'Locking' -FORCE_LOCKING = "Force Locking" - -# invprovision status -PROVISIONED = 'provisioned' -PROVISIONING = 'provisioning' -UNPROVISIONED = 'unprovisioned' - -# Board Management Controller -BM_EXTERNAL = "External" -BM_TYPE_GENERIC = 'bmc' -BM_TYPE_NONE = 'none' - -HOST_STOR_FUNCTION = 'stor_function' - -# ihost config_status field values -CONFIG_STATUS_REINSTALL = "Reinstall required" - -# when reinstall starts, mtc updates the db with task = 'Reinstalling' -TASK_REINSTALLING = "Reinstalling" -HOST_ACTION_STATE = "action_state" -HAS_REINSTALLING = "reinstalling" -HAS_REINSTALLED = "reinstalled" diff --git a/inventory/inventory/inventory/common/k_lldp.py b/inventory/inventory/inventory/common/k_lldp.py deleted file mode 100644 index d1c5e87c..00000000 --- a/inventory/inventory/inventory/common/k_lldp.py +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
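The k_host constants above encode the administrative/operational/availability state model that the health checks earlier in this diff test against. The usual in-service test as a stand-alone sketch; the helper name is illustrative and the constant values are copied from the removed module:

ADMIN_UNLOCKED = 'unlocked'
OPERATIONAL_ENABLED = 'enabled'


def host_in_service(host):
    # host: a dict-like ihost record, as iterated in health.py above
    return (host['administrative'] == ADMIN_UNLOCKED and
            host['operational'] == OPERATIONAL_ENABLED)


print(host_in_service({'administrative': 'unlocked',
                       'operational': 'enabled'}))    # True
print(host_in_service({'administrative': 'locked',
                       'operational': 'disabled'}))   # False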
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# LLDP constants - -LLDP_TLV_TYPE_CHASSIS_ID = 'chassis_id' -LLDP_TLV_TYPE_PORT_ID = 'port_identifier' -LLDP_TLV_TYPE_TTL = 'ttl' -LLDP_TLV_TYPE_SYSTEM_NAME = 'system_name' -LLDP_TLV_TYPE_SYSTEM_DESC = 'system_description' -LLDP_TLV_TYPE_SYSTEM_CAP = 'system_capabilities' -LLDP_TLV_TYPE_MGMT_ADDR = 'management_address' -LLDP_TLV_TYPE_PORT_DESC = 'port_description' -LLDP_TLV_TYPE_DOT1_LAG = 'dot1_lag' -LLDP_TLV_TYPE_DOT1_PORT_VID = 'dot1_port_vid' -LLDP_TLV_TYPE_DOT1_MGMT_VID = 'dot1_management_vid' -LLDP_TLV_TYPE_DOT1_PROTO_VIDS = 'dot1_proto_vids' -LLDP_TLV_TYPE_DOT1_PROTO_IDS = 'dot1_proto_ids' -LLDP_TLV_TYPE_DOT1_VLAN_NAMES = 'dot1_vlan_names' -LLDP_TLV_TYPE_DOT1_VID_DIGEST = 'dot1_vid_digest' -LLDP_TLV_TYPE_DOT3_MAC_STATUS = 'dot3_mac_status' -LLDP_TLV_TYPE_DOT3_MAX_FRAME = 'dot3_max_frame' -LLDP_TLV_TYPE_DOT3_POWER_MDI = 'dot3_power_mdi' -LLDP_TLV_VALID_LIST = [LLDP_TLV_TYPE_CHASSIS_ID, LLDP_TLV_TYPE_PORT_ID, - LLDP_TLV_TYPE_TTL, LLDP_TLV_TYPE_SYSTEM_NAME, - LLDP_TLV_TYPE_SYSTEM_DESC, LLDP_TLV_TYPE_SYSTEM_CAP, - LLDP_TLV_TYPE_MGMT_ADDR, LLDP_TLV_TYPE_PORT_DESC, - LLDP_TLV_TYPE_DOT1_LAG, LLDP_TLV_TYPE_DOT1_PORT_VID, - LLDP_TLV_TYPE_DOT1_VID_DIGEST, - LLDP_TLV_TYPE_DOT1_MGMT_VID, - LLDP_TLV_TYPE_DOT1_PROTO_VIDS, - LLDP_TLV_TYPE_DOT1_PROTO_IDS, - LLDP_TLV_TYPE_DOT1_VLAN_NAMES, - LLDP_TLV_TYPE_DOT3_MAC_STATUS, - LLDP_TLV_TYPE_DOT3_MAX_FRAME, - LLDP_TLV_TYPE_DOT3_POWER_MDI] - -LLDP_AGENT_STATE_REMOVED = 'removed' -LLDP_NEIGHBOUR_STATE_REMOVED = LLDP_AGENT_STATE_REMOVED -# LLDP_FULL_AUDIT_COUNT based on frequency of host_lldp_get_and_report() -LLDP_FULL_AUDIT_COUNT = 6 diff --git a/inventory/inventory/inventory/common/k_pci.py b/inventory/inventory/inventory/common/k_pci.py deleted file mode 100644 index a23aec26..00000000 --- a/inventory/inventory/inventory/common/k_pci.py +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# PCI device constants - -# PCI Alias types and names -NOVA_PCI_ALIAS_GPU_NAME = "gpu" -NOVA_PCI_ALIAS_GPU_CLASS = "030000" -NOVA_PCI_ALIAS_GPU_PF_NAME = "gpu-pf" -NOVA_PCI_ALIAS_GPU_VF_NAME = "gpu-vf" -NOVA_PCI_ALIAS_QAT_CLASS = "0x0b4000" -NOVA_PCI_ALIAS_QAT_DH895XCC_PF_NAME = "qat-dh895xcc-pf" -NOVA_PCI_ALIAS_QAT_C62X_PF_NAME = "qat-c62x-pf" -NOVA_PCI_ALIAS_QAT_PF_VENDOR = "8086" -NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE = "0435" -NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE = "37c8" -NOVA_PCI_ALIAS_QAT_DH895XCC_VF_NAME = "qat-dh895xcc-vf" -NOVA_PCI_ALIAS_QAT_C62X_VF_NAME = "qat-c62x-vf" -NOVA_PCI_ALIAS_QAT_VF_VENDOR = "8086" -NOVA_PCI_ALIAS_QAT_DH895XCC_VF_DEVICE = "0443" -NOVA_PCI_ALIAS_QAT_C62X_VF_DEVICE = "37c9" -NOVA_PCI_ALIAS_USER_NAME = "user" diff --git a/inventory/inventory/inventory/common/keystone.py b/inventory/inventory/inventory/common/keystone.py deleted file mode 100644 index 459d539a..00000000 --- a/inventory/inventory/inventory/common/keystone.py +++ /dev/null @@ -1,121 +0,0 @@ -# coding=utf-8 -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Central place for handling Keystone authorization and service lookup.""" - -from keystoneauth1 import exceptions as kaexception -from keystoneauth1 import loading as kaloading -from keystoneauth1 import service_token -from keystoneauth1 import token_endpoint -from oslo_config import cfg -from oslo_log import log -import six - -from inventory.common import exception - -CONF = cfg.CONF - - -LOG = log.getLogger(__name__) - - -def ks_exceptions(f): - """Wraps keystoneclient functions and centralizes exception handling.""" - @six.wraps(f) - def wrapper(*args, **kwargs): - try: - return f(*args, **kwargs) - except kaexception.EndpointNotFound: - service_type = kwargs.get('service_type', 'inventory') - endpoint_type = kwargs.get('endpoint_type', 'internal') - raise exception.CatalogNotFound( - service_type=service_type, endpoint_type=endpoint_type) - except (kaexception.Unauthorized, kaexception.AuthorizationFailure): - raise exception.KeystoneUnauthorized() - except (kaexception.NoMatchingPlugin, - kaexception.MissingRequiredOptions) as e: - raise exception.ConfigInvalid(six.text_type(e)) - except Exception as e: - LOG.exception('Keystone request failed: %(msg)s', - {'msg': six.text_type(e)}) - raise exception.KeystoneFailure(six.text_type(e)) - return wrapper - - -@ks_exceptions -def get_session(group, **session_kwargs): - """Loads session object from options in a configuration file section. - - The session_kwargs will be passed directly to keystoneauth1 Session - and will override the values loaded from config. - Consult keystoneauth1 docs for available options. - - :param group: name of the config section to load session options from - - """ - return kaloading.load_session_from_conf_options( - CONF, group, **session_kwargs) - - -@ks_exceptions -def get_auth(group, **auth_kwargs): - """Loads auth plugin from options in a configuration file section. - - The auth_kwargs will be passed directly to keystoneauth1 auth plugin - and will override the values loaded from config. - Note that the accepted kwargs will depend on auth plugin type as defined - by [group]auth_type option. - Consult keystoneauth1 docs for available auth plugins and their options. - - :param group: name of the config section to load auth plugin options from - - """ - try: - auth = kaloading.load_auth_from_conf_options(CONF, group, - **auth_kwargs) - except kaexception.MissingRequiredOptions: - LOG.error('Failed to load auth plugin from group %s', group) - raise - return auth - - -@ks_exceptions -def get_adapter(group, **adapter_kwargs): - """Loads adapter from options in a configuration file section. - - The adapter_kwargs will be passed directly to keystoneauth1 Adapter - and will override the values loaded from config. - Consult keystoneauth1 docs for available adapter options. - - :param group: name of the config section to load adapter options from - - """ - return kaloading.load_adapter_from_conf_options(CONF, group, - **adapter_kwargs) - - -def get_service_auth(context, endpoint, service_auth): - """Create auth plugin wrapping both user and service auth. - - When properly configured and using auth_token middleware, - requests with valid service auth will not fail - if the user token is expired. - - Ideally we would use the plugin provided by auth_token middleware - however this plugin isn't serialized yet. 
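keystone.py (above) is a thin wrapper over keystoneauth1's conf-driven loaders. A typical caller registers the standard options for a config group and then chains auth, session, and adapter; a sketch under the assumption that a '[service_user]' section exists in the service's config file (the group name is hypothetical):

from keystoneauth1 import loading as kaloading
from oslo_config import cfg

CONF = cfg.CONF
GROUP = 'service_user'  # hypothetical config section

# Register the standard auth/session/adapter options for the group,
# then build the objects from whatever the config file provides.
kaloading.register_auth_conf_options(CONF, GROUP)
kaloading.register_session_conf_options(CONF, GROUP)
kaloading.register_adapter_conf_options(CONF, GROUP)

auth = kaloading.load_auth_from_conf_options(CONF, GROUP)
session = kaloading.load_session_from_conf_options(CONF, GROUP, auth=auth)
adapter = kaloading.load_adapter_from_conf_options(CONF, GROUP,
                                                   session=session)
# The adapter resolves its endpoint from the catalog and attaches the
# token, so a request is then just:
# response = adapter.get('/v1/hosts')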
- """ - # TODO(pas-ha) use auth plugin from context when it is available - user_auth = token_endpoint.Token(endpoint, context.auth_token) - return service_token.ServiceTokenAuthWrapper(user_auth=user_auth, - service_auth=service_auth) diff --git a/inventory/inventory/inventory/common/mtce_api.py b/inventory/inventory/inventory/common/mtce_api.py deleted file mode 100644 index 9441f20d..00000000 --- a/inventory/inventory/inventory/common/mtce_api.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# Copyright (c) 2015-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -from inventory.common import exception as si_exception -import json -from oslo_log import log -from rest_api import rest_api_request -import time - -LOG = log.getLogger(__name__) - - -def host_add(token, address, port, ihost_mtce, timeout): - """ - Sends a Host Add command to maintenance. - """ - - # api_cmd = "http://localhost:2112" - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/hosts/" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = ihost_mtce - - LOG.info("host_add for %s cmd=%s hdr=%s payload=%s" % - (ihost_mtce['hostname'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "POST", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def host_modify(token, address, port, ihost_mtce, timeout, max_retries=1): - """ - Sends a Host Modify command to maintenance. - """ - - # api_cmd = "http://localhost:2112" - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/hosts/%s" % ihost_mtce['uuid'] - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = dict() - api_cmd_payload = ihost_mtce - - LOG.debug("host_modify for %s cmd=%s hdr=%s payload=%s" % - (ihost_mtce['hostname'], - api_cmd, api_cmd_headers, api_cmd_payload)) - - num_of_try = 0 - response = None - while num_of_try < max_retries and response is None: - try: - num_of_try = num_of_try + 1 - LOG.info("number of calls to rest_api_request=%d (max_retry=%d)" % - (num_of_try, max_retries)) - response = rest_api_request( - token, "PATCH", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - if response is None: - time.sleep(3) - except si_exception.SysInvSignalTimeout as e: - LOG.warn("WARNING rest_api_request Timeout Error e=%s" % (e)) - raise si_exception.SysInvSignalTimeout - except si_exception.InventoryException as e: - LOG.warn("WARNING rest_api_request Unexpected Error e=%s" % (e)) - - return response - - -def host_delete(token, address, port, ihost_mtce, timeout): - """ - Sends a Host Delete command to maintenance. 
- """ - - api_cmd = "http://%s:%s" % (address, port) - api_cmd += "/v1/hosts/%s" % ihost_mtce['uuid'] - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "sysinv/1.0" - - api_cmd_payload = None - - LOG.info("host_delete for %s cmd=%s hdr=%s payload=%s" % - (ihost_mtce['uuid'], api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(token, "DELETE", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response diff --git a/inventory/inventory/inventory/common/patch_api.py b/inventory/inventory/inventory/common/patch_api.py deleted file mode 100644 index b7677225..00000000 --- a/inventory/inventory/inventory/common/patch_api.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventory.common import constants -from keystoneauth1.access import service_catalog as k_service_catalog -from oslo_log import log -from rest_api import rest_api_request - -LOG = log.getLogger(__name__) - - -def _get_endpoint(context, region_name): - # service_type, service_name, interface = \ - # CONF.patching.catalog_info.split(':') - sc = k_service_catalog.ServiceCatalogV2(context.service_catalog) - service_parameters = {'service_type': 'patching', - 'service_name': 'patching', - 'interface': 'internalURL', - 'region_name': region_name} - endpoint = sc.url_for(**service_parameters) - return endpoint - - -def patch_query(context, region_name, - timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS): - """ - Request the list of patches known to the patch service - """ - - api_cmd = _get_endpoint(context, region_name) - api_cmd += "/v1/query/" - - return rest_api_request(context, "GET", api_cmd, timeout=timeout) - - -def patch_query_hosts(context, region_name, - timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS): - """ - Request the patch state for all hosts known to the patch service - """ - - api_cmd = _get_endpoint(context, region_name) - api_cmd += "/v1/query_hosts/" - - return rest_api_request(context, "GET", api_cmd, timeout=timeout) - - -def patch_drop_host(context, hostname, region_name, - timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS): - """ - Notify the patch service to drop the specified host - """ - - api_cmd = _get_endpoint(context, region_name) - api_cmd += "/v1/drop_host/%s" % hostname - - return rest_api_request(context, "POST", api_cmd, timeout=timeout) diff --git a/inventory/inventory/inventory/common/policy.py b/inventory/inventory/inventory/common/policy.py deleted file mode 100644 index e2a67589..00000000 --- a/inventory/inventory/inventory/common/policy.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# -"""Policy Engine For Inventory.""" - -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log -from oslo_policy import policy - - -base_rules = [ - policy.RuleDefault('admin_required', 'role:admin or is_admin:1', - description='Who is considered an admin'), - policy.RuleDefault('admin_api', 'is_admin_required:True', - description='admin API requirement'), - policy.RuleDefault('default', 'rule:admin_api', - description='default rule'), -] - -_ENFORCER = None -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -# we can get a policy enforcer by this init. -# oslo policy support change policy rule dynamically. -# at present, policy.enforce will reload the policy rules when it checks -# the policy files have been touched. -@lockutils.synchronized('policy_enforcer') -def init_enforcer(policy_file=None, rules=None, - default_rule=None, use_conf=True, overwrite=True): - """Init an Enforcer class. - - :param policy_file: Custom policy file to use, if none is - specified, ``conf.policy_file`` will be - used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. If - :meth:`load_rules` with ``force_reload=True``, - :meth:`clear` or :meth:`set_rules` with - ``overwrite=True`` is called this will be overwritten. - :param default_rule: Default rule to use, conf.default_rule will - be used if none is specified. - :param use_conf: Whether to load rules from cache or config file. - :param overwrite: Whether to overwrite existing rules when reload rules - from config file. - """ - global _ENFORCER - if not _ENFORCER: - # http://docs.openstack.org/developer/oslo.policy/usage.html - _ENFORCER = policy.Enforcer(CONF, - policy_file=policy_file, - rules=rules, - default_rule=default_rule, - use_conf=use_conf, - overwrite=overwrite) - _ENFORCER.register_defaults(base_rules) - return _ENFORCER - - -def get_enforcer(): - """Provides access to the single instance of Policy enforcer.""" - - if not _ENFORCER: - init_enforcer() - - return _ENFORCER - - -def check_is_admin(context): - """Whether or not role contains 'admin' role according to policy setting. - - """ - init_enforcer() - - target = {} - credentials = context.to_dict() - - return _ENFORCER.enforce('context_is_admin', target, credentials) diff --git a/inventory/inventory/inventory/common/rest_api.py b/inventory/inventory/inventory/common/rest_api.py deleted file mode 100644 index c33edb17..00000000 --- a/inventory/inventory/inventory/common/rest_api.py +++ /dev/null @@ -1,73 +0,0 @@ -# -# Copyright (c) 2015-2018 Wind River Systems, Inc. 
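policy.py above keeps one lazily-built oslo.policy Enforcer and answers check_is_admin by evaluating 'context_is_admin' against the serialized request context. The same flow with in-memory rules instead of a policy file; the rule string is illustrative:

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF, use_conf=False)
enforcer.set_rules(policy.Rules.from_dict({
    'context_is_admin': 'role:admin',
}))

# Credentials normally come from RequestContext.to_dict(), as in
# check_is_admin above.
creds = {'roles': ['admin'], 'user_name': 'operator'}
print(enforcer.enforce('context_is_admin', {}, creds))   # True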
-# -# SPDX-License-Identifier: Apache-2.0 -# -import json -import signal -import urllib2 - -from inventory.common.exception import OpenStackException -from inventory.common.exception import OpenStackRestAPIException - -from oslo_log import log -LOG = log.getLogger(__name__) - - -def rest_api_request(context, method, api_cmd, api_cmd_headers=None, - api_cmd_payload=None, timeout=10): - """ - Make a rest-api request - Returns: response as a dictionary - """ - - LOG.info("%s cmd:%s hdr:%s payload:%s" % (method, - api_cmd, api_cmd_headers, api_cmd_payload)) - - if hasattr(context, 'auth_token'): - token = context.auth_token - else: - token = None - - response = None - try: - request_info = urllib2.Request(api_cmd) - request_info.get_method = lambda: method - if token: - request_info.add_header("X-Auth-Token", token) - request_info.add_header("Accept", "application/json") - - if api_cmd_headers is not None: - for header_type, header_value in api_cmd_headers.items(): - request_info.add_header(header_type, header_value) - - if api_cmd_payload is not None: - request_info.add_data(api_cmd_payload) - - request = urllib2.urlopen(request_info, timeout=timeout) - response = request.read() - - if response == "": - response = json.loads("{}") - else: - response = json.loads(response) - request.close() - - LOG.info("Response=%s" % response) - - except urllib2.HTTPError as e: - LOG.warn("HTTP Error e.code=%s e=%s" % (e.code, e)) - if hasattr(e, 'msg') and e.msg: - response = json.loads(e.msg) - else: - response = json.loads("{}") - - LOG.info("HTTPError response=%s" % (response)) - raise OpenStackRestAPIException(e.message, e.code, "%s" % e) - except urllib2.URLError as e: - LOG.warn("URLError Error e=%s" % (e)) - raise OpenStackException(e.message, "%s" % e) - - finally: - signal.alarm(0) - return response diff --git a/inventory/inventory/inventory/common/rpc.py b/inventory/inventory/inventory/common/rpc.py deleted file mode 100644 index 5c426bfb..00000000 --- a/inventory/inventory/inventory/common/rpc.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
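Note that rest_api_request above is Python 2 code (urllib2, Request.add_data, e.message). For orientation only, an approximate modern equivalent built on requests; this is a sketch, not something this tree contained, and the OpenStack wrapper exceptions are left out:

import requests


def rest_api_request_py3(token, method, api_cmd, api_cmd_headers=None,
                         api_cmd_payload=None, timeout=10):
    headers = {'Accept': 'application/json'}
    if token:
        headers['X-Auth-Token'] = token
    if api_cmd_headers:
        headers.update(api_cmd_headers)
    resp = requests.request(method, api_cmd, headers=headers,
                            data=api_cmd_payload, timeout=timeout)
    resp.raise_for_status()  # the original maps errors to its own exceptions
    return resp.json() if resp.text else {}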
- -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_messaging.rpc import dispatcher -from osprofiler import profiler - -from inventory.common import context as inventory_context -from inventory.common import exception - - -CONF = cfg.CONF - -TRANSPORT = None -NOTIFICATION_TRANSPORT = None -SENSORS_NOTIFIER = None -VERSIONED_NOTIFIER = None - -ALLOWED_EXMODS = [ - exception.__name__, -] -EXTRA_EXMODS = [] - - -def init(conf): - global TRANSPORT, NOTIFICATION_TRANSPORT - global SENSORS_NOTIFIER, VERSIONED_NOTIFIER - exmods = get_allowed_exmods() - TRANSPORT = messaging.get_rpc_transport(conf, - allowed_remote_exmods=exmods) - - NOTIFICATION_TRANSPORT = messaging.get_notification_transport( - conf, - allowed_remote_exmods=exmods) - - serializer = RequestContextSerializer(messaging.JsonPayloadSerializer()) - SENSORS_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer) - if conf.notification_level is None: - VERSIONED_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer, - driver='noop') - else: - VERSIONED_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer, - topics=['inventory_versioned_' - 'notifications']) - - -def cleanup(): - global TRANSPORT, NOTIFICATION_TRANSPORT - global SENSORS_NOTIFIER, VERSIONED_NOTIFIER - assert TRANSPORT is not None - assert NOTIFICATION_TRANSPORT is not None - assert SENSORS_NOTIFIER is not None - assert VERSIONED_NOTIFIER is not None - TRANSPORT.cleanup() - NOTIFICATION_TRANSPORT.cleanup() - TRANSPORT = NOTIFICATION_TRANSPORT = None - SENSORS_NOTIFIER = VERSIONED_NOTIFIER = None - - -def set_defaults(control_exchange): - messaging.set_transport_defaults(control_exchange) - - -def get_allowed_exmods(): - return ALLOWED_EXMODS + EXTRA_EXMODS - - -class RequestContextSerializer(messaging.Serializer): - - def __init__(self, base): - self._base = base - - def serialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.serialize_entity(context, entity) - - def deserialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.deserialize_entity(context, entity) - - def serialize_context(self, context): - _context = context.to_dict() - prof = profiler.get() - if prof: - trace_info = { - "hmac_key": prof.hmac_key, - "base_id": prof.get_base_id(), - "parent_id": prof.get_id() - } - _context.update({"trace_info": trace_info}) - return _context - - def deserialize_context(self, context): - trace_info = context.pop("trace_info", None) - if trace_info: - profiler.init(**trace_info) - return inventory_context.RequestContext.from_dict(context) - - -def get_transport_url(url_str=None): - return messaging.TransportURL.parse(CONF, url_str) - - -def get_client(target, version_cap=None, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - return messaging.RPCClient(TRANSPORT, - target, - version_cap=version_cap, - serializer=serializer) - - -def get_server(target, endpoints, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - access_policy = dispatcher.DefaultRPCAccessPolicy - return messaging.get_rpc_server(TRANSPORT, - target, - endpoints, - executor='eventlet', - serializer=serializer, - access_policy=access_policy) - - -def get_sensors_notifier(service=None, host=None, publisher_id=None): - assert SENSORS_NOTIFIER is not None - if not publisher_id: - publisher_id = "%s.%s" % (service, host or CONF.host) - 
return SENSORS_NOTIFIER.prepare(publisher_id=publisher_id) - - -def get_versioned_notifier(publisher_id=None): - assert VERSIONED_NOTIFIER is not None - assert publisher_id is not None - return VERSIONED_NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/inventory/inventory/inventory/common/rpc_service.py b/inventory/inventory/inventory/common/rpc_service.py deleted file mode 100644 index 5a89f62f..00000000 --- a/inventory/inventory/inventory/common/rpc_service.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2017-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import signal - -from oslo_log import log -import oslo_messaging as messaging -from oslo_service import service -from oslo_utils import importutils - -from inventory.common import context -from inventory.common import rpc -from inventory.objects import base as objects_base - -LOG = log.getLogger(__name__) - - -class RPCService(service.Service): - - def __init__(self, host, manager_module, manager_class): - super(RPCService, self).__init__() - self.host = host - manager_module = importutils.try_import(manager_module) - manager_class = getattr(manager_module, manager_class) - self.manager = manager_class(host, manager_module.MANAGER_TOPIC) - self.topic = self.manager.topic - self.rpcserver = None - self.deregister = True - - def start(self): - super(RPCService, self).start() - admin_context = context.get_admin_context() - - target = messaging.Target(topic=self.topic, server=self.host) - endpoints = [self.manager] - serializer = objects_base.InventoryObjectSerializer() - self.rpcserver = rpc.get_server(target, endpoints, serializer) - self.rpcserver.start() - - self.handle_signal() - self.manager.init_host(admin_context) - - LOG.info('Created RPC server for service %(service)s on host ' - '%(host)s.', - {'service': self.topic, 'host': self.host}) - - def stop(self): - try: - self.rpcserver.stop() - self.rpcserver.wait() - except Exception as e: - LOG.exception('Service error occurred when stopping the ' - 'RPC server. Error: %s', e) - try: - self.manager.del_host(deregister=self.deregister) - except Exception as e: - LOG.exception('Service error occurred when cleaning up ' - 'the RPC manager. Error: %s', e) - - super(RPCService, self).stop(graceful=True) - LOG.info('Stopped RPC server for service %(service)s on host ' - '%(host)s.', - {'service': self.topic, 'host': self.host}) - - def _handle_signal(self, signo, frame): - LOG.info('Got signal SIGUSR1. Not deregistering on next shutdown ' - 'of service %(service)s on host %(host)s.', - {'service': self.topic, 'host': self.host}) - self.deregister = False - - def handle_signal(self): - """Add a signal handler for SIGUSR1. - - The handler ensures that the manager is not deregistered when it is - shutdown. 
- """ - signal.signal(signal.SIGUSR1, self._handle_signal) diff --git a/inventory/inventory/inventory/common/service.py b/inventory/inventory/inventory/common/service.py deleted file mode 100644 index d484e065..00000000 --- a/inventory/inventory/inventory/common/service.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from inventory.common import config -from inventory.conf import opts -from inventory import objects -from oslo_config import cfg -from oslo_log import log -from oslo_service import service - - -cfg.CONF.register_opts([ - cfg.IntOpt('periodic_interval', - default=60, - help='seconds between running periodic tasks'), -]) - -CONF = cfg.CONF - - -def prepare_service(argv=None): - argv = [] if argv is None else argv - - opts.update_opt_defaults() - log.register_options(CONF) - CONF(argv[1:], project='inventory') - config.parse_args(argv) - log.setup(CONF, 'inventory') - objects.register_all() - - -def process_launcher(): - return service.ProcessLauncher(CONF) diff --git a/inventory/inventory/inventory/common/sm_api.py b/inventory/inventory/inventory/common/sm_api.py deleted file mode 100644 index b51c447e..00000000 --- a/inventory/inventory/inventory/common/sm_api.py +++ /dev/null @@ -1,184 +0,0 @@ -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -from inventory import objects -import json -from keystoneauth1.access import service_catalog as k_service_catalog -from oslo_log import log -from rest_api import rest_api_request - -LOG = log.getLogger(__name__) - - -def _get_region(context): - system = objects.System.get_one(context) - return system.region_name - - -def _get_endpoint(context): - # service_type, service_name, interface = \ - # CONF.smapi.catalog_info.split(':') - region_name = _get_region(context) - sc = k_service_catalog.ServiceCatalogV2(context.service_catalog) - service_parameters = {'service_type': 'smapi', - 'service_name': 'smapi', - 'interface': 'internalURL', - 'region_name': region_name} - endpoint = sc.url_for(**service_parameters) - return endpoint - - -def swact_pre_check(context, hostname, timeout=30): - """ - Sends a Swact Pre-Check command to SM. - """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/servicenode/%s" % hostname - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - api_cmd_payload = dict() - api_cmd_payload['origin'] = "inventory" - api_cmd_payload['action'] = "swact-pre-check" - api_cmd_payload['admin'] = "unknown" - api_cmd_payload['oper'] = "unknown" - api_cmd_payload['avail'] = "" - - response = rest_api_request(context, "PATCH", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def lock_pre_check(context, hostname, timeout=30): - """ - Sends a Lock Pre-Check command to SM. 
- """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/servicenode/%s" % hostname - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - api_cmd_payload = dict() - api_cmd_payload['origin'] = "inventory" - api_cmd_payload['action'] = "lock-pre-check" - api_cmd_payload['admin'] = "unknown" - api_cmd_payload['oper'] = "unknown" - api_cmd_payload['avail'] = "" - - response = rest_api_request(context, "PATCH", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - - return response - - -def service_list(context): - """ - Sends a service list command to SM. - """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/services" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['Accept'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, None) - - return response - - -def service_show(context, hostname): - """ - Sends a service show command to SM. - """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/services/%s" % hostname - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['Accept'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, None) - return response - - -def servicenode_list(context): - """ - Sends a service list command to SM. - """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/nodes" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['Accept'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, None) - - return response - - -def servicenode_show(context, hostname): - """ - Sends a service show command to SM. - """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/nodes/%s" % hostname - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['Accept'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, None) - - return response - - -def sm_servicegroup_list(context): - """ - Sends a service list command to SM. - """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/sm_sda" - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['Accept'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, None) - - # rename the obsolete sm_sda to sm_servicegroups - if isinstance(response, dict): - if 'sm_sda' in response: - response['sm_servicegroup'] = response.pop('sm_sda') - - return response - - -def sm_servicegroup_show(context, hostname): - """ - Sends a service show command to SM. 
- """ - api_cmd = _get_endpoint(context) - api_cmd += "/v1/sm_sda/%s" % hostname - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['Accept'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, None) - - return response diff --git a/inventory/inventory/inventory/common/storage_backend_conf.py b/inventory/inventory/inventory/common/storage_backend_conf.py deleted file mode 100644 index 3619c0a9..00000000 --- a/inventory/inventory/inventory/common/storage_backend_conf.py +++ /dev/null @@ -1,450 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Copyright (c) 2016-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# All Rights Reserved. -# - -""" System Inventory Storage Backend Utilities and helper functions.""" - - -import ast -import pecan -import wsme - -from inventory.common import constants -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common import k_host -from oslo_log import log - -LOG = log.getLogger(__name__) - - -class StorageBackendConfig(object): - - @staticmethod - def get_backend(api, target): - """Get the primary backend. """ - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if (backend.backend == target and - backend.name == constants.SB_DEFAULT_NAMES[target]): - return backend - - @staticmethod - def get_backend_conf(api, target): - """Get the polymorphic primary backend. """ - - if target == constants.SB_TYPE_FILE: - # Only support a single file backend - storage_files = api.storage_file_get_list() - if storage_files: - return storage_files[0] - elif target == constants.SB_TYPE_LVM: - # Only support a single LVM backend - storage_lvms = api.storage_lvm_get_list() - if storage_lvms: - return storage_lvms[0] - elif target == constants.SB_TYPE_CEPH: - # Support multiple ceph backends - storage_cephs = api.storage_ceph_get_list() - primary_backends = filter( - lambda b: b['name'] == constants.SB_DEFAULT_NAMES[ - constants.SB_TYPE_CEPH], - storage_cephs) - if primary_backends: - return primary_backends[0] - elif target == constants.SB_TYPE_EXTERNAL: - # Only support a single external backend - storage_externals = api.storage_external_get_list() - if storage_externals: - return storage_externals[0] - elif target == constants.SB_TYPE_CEPH_EXTERNAL: - # Support multiple ceph external backends - storage_ceph_externals = api.storage_ceph_external_get_list() - if storage_ceph_externals: - return storage_ceph_externals[0] - - return None - - @staticmethod - def get_configured_backend_conf(api, target): - """Return the configured polymorphic primary backend - of a given type. - """ - - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if backend.state == constants.SB_STATE_CONFIGURED and \ - backend.backend == target and \ - backend.name == constants.SB_DEFAULT_NAMES[target]: - return StorageBackendConfig.get_backend_conf(api, target) - return None - - @staticmethod - def get_configured_backend_list(api): - """Get the list of all configured backends. 
""" - - backends = [] - try: - backend_list = api.storage_backend_get_list() - except Exception: - backend_list = [] - - for backend in backend_list: - if backend.state == constants.SB_STATE_CONFIGURED: - backends.append(backend.backend) - return backends - - @staticmethod - def get_configured_backend(api, target): - """Return the configured primary backend of a given type.""" - - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if backend.state == constants.SB_STATE_CONFIGURED and \ - backend.backend == target and \ - backend.name == constants.SB_DEFAULT_NAMES[target]: - return backend - return None - - @staticmethod - def get_configuring_backend(api): - """Get the primary backend that is configuring. """ - - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if (backend.state == constants.SB_STATE_CONFIGURING and - backend.name == - constants.SB_DEFAULT_NAMES[backend.backend]): - # At this point we can have but only max 1 configuring backend - # at any moment - return backend - - # it is normal there isn't one being configured - return None - - @staticmethod - def get_configuring_target_backend(api, target): - """Get the primary backend that is configuring. """ - - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if (backend.state == constants.SB_STATE_CONFIGURING and - backend.backend == target): - # At this point we can have but only max 1 configuring backend - # at any moment - return backend - - # it is normal there isn't one being configured - return None - - @staticmethod - def has_backend_configured(dbapi, target, service=None, - check_only_defaults=True, rpcapi=None): - """Check is a backend is configured. """ - # If cinder is a shared service on another region and - # we want to know if the ceph backend is configured, - # send a rpc to conductor which sends a query to the primary - system = dbapi.system_get_one() - shared_services = system.capabilities.get('shared_services', None) - configured = False - if (shared_services is not None and - constants.SERVICE_TYPE_VOLUME in shared_services and - target == constants.SB_TYPE_CEPH and - rpcapi is not None): - return rpcapi.region_has_ceph_backend( - pecan.request.context) - else: - backend_list = dbapi.storage_backend_get_list() - for backend in backend_list: - if (backend.state == constants.SB_STATE_CONFIGURED and - backend.backend == target): - configured = True - break - - # Supplementary semantics - if configured: - if check_only_defaults and \ - backend.name != constants.SB_DEFAULT_NAMES[target]: - configured = False - if service and service not in backend.services: - configured = False - - return configured - - @staticmethod - def has_backend(api, target): - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if backend.backend == target: - return True - return False - - @staticmethod - def update_backend_states(api, target, state=None, task='N/A'): - """Update primary backend state. 
""" - - values = dict() - if state: - values['state'] = state - if task != 'N/A': - values['task'] = task - backend = StorageBackendConfig.get_backend(api, target) - if backend: - api.storage_backend_update(backend.uuid, values) - else: - raise exception.InvalidStorageBackend(backend=target) - - @staticmethod - def get_ceph_mon_ip_addresses(dbapi): - try: - dbapi.network_get_by_type( - constants.NETWORK_TYPE_INFRA - ) - network_type = constants.NETWORK_TYPE_INFRA - except exception.NetworkTypeNotFound: - network_type = constants.NETWORK_TYPE_MGMT - - targets = { - '%s-%s' % (k_host.CONTROLLER_0_HOSTNAME, - network_type): 'ceph-mon-0-ip', - '%s-%s' % (k_host.CONTROLLER_1_HOSTNAME, - network_type): 'ceph-mon-1-ip', - '%s-%s' % (k_host.STORAGE_0_HOSTNAME, - network_type): 'ceph-mon-2-ip' - } - results = {} - addrs = dbapi.addresses_get_all() - for addr in addrs: - if addr.name in targets: - results[targets[addr.name]] = addr.address - if len(results) != len(targets): - raise exception.IncompleteCephMonNetworkConfig( - targets=targets, results=results) - return results - - @staticmethod - def is_ceph_backend_ready(api): - """ - check if ceph primary backend is ready, i,e, when a ceph backend - is configured after config_controller, it is considered ready when - both controller nodes and 1st pair of storage nodes are reconfigured - with ceph - :param api: - :return: - """ - ceph_backend = None - backend_list = api.storage_backend_get_list() - for backend in backend_list: - if backend.backend == constants.SB_TYPE_CEPH and \ - backend.name == constants.SB_DEFAULT_NAMES[ - constants.SB_TYPE_CEPH]: - ceph_backend = backend - break - if not ceph_backend: - return False - - if ceph_backend.state != constants.SB_STATE_CONFIGURED: - return False - - if ceph_backend.task == constants.SB_TASK_PROVISION_STORAGE: - return False - - # if both controllers are reconfigured and 1st pair storage nodes - # are provisioned, the task will be either reconfig_compute or none - return True - - @staticmethod - def get_ceph_tier_size(dbapi, rpcapi, tier_name): - try: - # Make sure the default ceph backend is configured - if not StorageBackendConfig.has_backend_configured( - dbapi, - constants.SB_TYPE_CEPH - ): - return 0 - - tier_size = \ - rpcapi.get_ceph_tier_size(pecan.request.context, - tier_name) - return int(tier_size) - except Exception as exp: - LOG.exception(exp) - return 0 - - @staticmethod - def get_ceph_pool_replication(api): - """ - return the values of 'replication' and 'min_replication' - capabilities as configured in ceph backend - :param api: - :return: replication, min_replication - """ - # Get ceph backend from db - ceph_backend = StorageBackendConfig.get_backend( - api, - constants.CINDER_BACKEND_CEPH - ) - - # Workaround for upgrade from R4 to R5, where 'capabilities' field - # does not exist in R4 backend entry - if hasattr(ceph_backend, 'capabilities'): - if (constants.CEPH_BACKEND_REPLICATION_CAP in - ceph_backend.capabilities): - pool_size = int(ceph_backend.capabilities[ - constants.CEPH_BACKEND_REPLICATION_CAP]) - - pool_min_size = \ - constants.CEPH_REPLICATION_MAP_DEFAULT[pool_size] - else: - # Should not get here - pool_size = constants.CEPH_REPLICATION_FACTOR_DEFAULT - pool_min_size = \ - constants.CEPH_REPLICATION_MAP_DEFAULT[pool_size] - else: - # upgrade compatibility with R4 - pool_size = constants.CEPH_REPLICATION_FACTOR_DEFAULT - pool_min_size = constants.CEPH_REPLICATION_MAP_DEFAULT[pool_size] - - return pool_size, pool_min_size - - @staticmethod - def get_ceph_backend_task(api): - 
""" - return current ceph backend task - :param: api - :return: - """ - # Get ceph backend from db - ceph_backend = StorageBackendConfig.get_backend( - api, - constants.CINDER_BACKEND_CEPH - ) - - return ceph_backend.task - - @staticmethod - def get_ceph_backend_state(api): - """ - return current ceph backend state - :param: api - :return: - """ - # Get ceph backend from db - ceph_backend = StorageBackendConfig.get_backend( - api, - constants.CINDER_BACKEND_CEPH - ) - - return ceph_backend.state - - @staticmethod - def is_ceph_backend_restore_in_progress(api): - """ - check ceph primary backend has a restore task set - :param api: - :return: - """ - for backend in api.storage_backend_get_list(): - if (backend.backend == constants.SB_TYPE_CEPH and - backend.name == constants.SB_DEFAULT_NAMES[ - constants.SB_TYPE_CEPH]): - return backend.task == constants.SB_TASK_RESTORE - - @staticmethod - def set_img_conversions_defaults(dbapi, controller_fs_api): - """ - initialize img_conversion partitions with default values if not - already done - :param dbapi - :param controller_fs_api - """ - # Img conversions identification - values = {'name': constants.FILESYSTEM_NAME_IMG_CONVERSIONS, - 'logical_volume': constants.FILESYSTEM_LV_DICT[ - constants.FILESYSTEM_NAME_IMG_CONVERSIONS], - 'replicated': False} - - # Abort if is already defined - controller_fs_list = dbapi.controller_fs_get_list() - for fs in controller_fs_list: - if values['name'] == fs.name: - LOG.info("Image conversions already defined, " - "avoiding reseting values") - return - - # Check if there is enough space available - rootfs_max_GiB, cgtsvg_max_free_GiB = \ - controller_fs_api.get_controller_fs_limit() - args = {'avail': cgtsvg_max_free_GiB, - 'min': constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE, - 'lvg': constants.LVG_CGTS_VG} - if cgtsvg_max_free_GiB >= constants.DEFAULT_IMG_CONVERSION_STOR_SIZE: - img_conversions_gib = constants.DEFAULT_IMG_CONVERSION_STOR_SIZE - elif (cgtsvg_max_free_GiB >= - constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE): - img_conversions_gib = \ - constants.DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE - else: - msg = _("Not enough space for image conversion partition. " - "Please ensure that '%(lvg)s' VG has " - "at least %(min)s GiB free space." - "Currently available: %(avail)s GiB.") % args - raise wsme.exc.ClientSideError(msg) - - args['size'] = img_conversions_gib - LOG.info("Available space in '%(lvg)s' is %(avail)s GiB " - "from which img_conversions will use %(size)s GiB." 
-                 % args)
-
-        # Create entry
-        values['size'] = img_conversions_gib
-        dbapi.controller_fs_create(values)
-
-    @staticmethod
-    def get_enabled_services(dbapi, filter_unconfigured=True,
-                             filter_shared=False):
-        """Get the list of enabled services
-        :param dbapi:
-        :param filter_unconfigured: Determine whether to ignore
-                                    unconfigured services
-        :param filter_shared: Determine whether to ignore shared services
-        :returns: list of services
-        """
-        services = []
-        if not filter_shared:
-            system = dbapi.system_get_one()
-            shared_services = system.capabilities.get('shared_services', None)
-            services = [] if shared_services is None \
-                else ast.literal_eval(shared_services)
-
-        backend_list = dbapi.storage_backend_get_list()
-        for backend in backend_list:
-            backend_services = [] if backend.services is None \
-                else backend.services.split(',')
-            for service in backend_services:
-                if (backend.state == constants.SB_STATE_CONFIGURED or
-                        not filter_unconfigured):
-                    if service not in services:
-                        services.append(service)
-        return services
-        # TODO(oponcea): Check for external cinder backend & test multiregion
-
-    @staticmethod
-    def is_service_enabled(dbapi, service, filter_unconfigured=True,
-                           filter_shared=False):
-        """Checks if a service is enabled
-        :param dbapi:
-        :param service: service name, one of constants.SB_SVC_*
-        :param filter_unconfigured: check also unconfigured/failed services
-        :returns: True or False
-        """
-        if service in StorageBackendConfig.get_enabled_services(
-                dbapi, filter_unconfigured, filter_shared):
-            return True
-        else:
-            return False
diff --git a/inventory/inventory/inventory/common/utils.py b/inventory/inventory/inventory/common/utils.py
deleted file mode 100644
index e6890077..00000000
--- a/inventory/inventory/inventory/common/utils.py
+++ /dev/null
@@ -1,1263 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 Justin Santa Barbara
-# Copyright (c) 2012 NTT DOCOMO, INC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Copyright (c) 2013-2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-"""Utilities and helper functions."""
-
-import collections
-import contextlib
-import datetime
-import errno
-import fcntl
-import functools
-import glob
-import hashlib
-import itertools as it
-import netaddr
-import os
-import random
-import re
-import shutil
-import signal
-import six
-import socket
-import tempfile
-import time
-import uuid
-import wsme
-
-from eventlet.green import subprocess
-from eventlet import greenthread
-
-from oslo_concurrency import lockutils
-from oslo_log import log
-
-from inventory.common import constants
-from inventory.common import exception
-from inventory.common.i18n import _
-from inventory.common import k_host
-from inventory.conf import CONF
-from six import text_type as unicode
-from tsconfig.tsconfig import SW_VERSION
-
-LOG = log.getLogger(__name__)
-
-# Used for looking up extensions of text
-# to their 'multiplied' byte amount
-BYTE_MULTIPLIERS = {
-    '': 1,
-    't': 1024 ** 4,
-    'g': 1024 ** 3,
-    'm': 1024 ** 2,
-    'k': 1024,
-}
-
-
-class memoized(object):
-    """Decorator to cache a function's return value.
-
-    Decorator. Caches a function's return value each time it is called.
-    If called later with the same arguments, the cached value is returned
-    (not reevaluated).
-
-    WARNING: This decorator should not be used for class methods since it
-    does not provide weak references and thus would prevent the instance
-    from being garbage collected.
-    """
-    def __init__(self, func):
-        self.func = func
-        self.cache = {}
-
-    def __call__(self, *args):
-        if not isinstance(args, collections.Hashable):
-            # uncacheable. a list, for instance.
-            # better to not cache than blow up.
-            return self.func(*args)
-        if args in self.cache:
-            return self.cache[args]
-        else:
-            value = self.func(*args)
-            self.cache[args] = value
-            return value
-
-    def __repr__(self):
-        '''Return the function's docstring.'''
-        return self.func.__doc__
-
-    def __get__(self, obj, objtype):
-        '''Support instance methods.'''
-        return functools.partial(self.__call__, obj)
-
-
-def _subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-def execute(*cmd, **kwargs):
-    """Helper method to execute a command with optional retry.
-
-    If you add a run_as_root=True command, don't forget to add the
-    corresponding filter to etc/inventory/rootwrap.d !
-
-    :param cmd: Passed to subprocess.Popen.
-    :param process_input: Send to opened process.
-    :param check_exit_code: Single bool, int, or list of allowed exit
-                            codes. Defaults to [0]. Raise
-                            exception.ProcessExecutionError unless
-                            program exits with one of these codes.
-    :param delay_on_retry: True | False. Defaults to True. If set to
-                           True, wait a short amount of time
-                           before retrying.
-    :param attempts: How many times to retry cmd.
-    :param run_as_root: True | False. Defaults to False. If set to True,
-                        the command is run with rootwrap.
-
-    :raises exception.InventoryException: on receiving unknown arguments
-    :raises exception.ProcessExecutionError:
-
-    :returns: a tuple, (stdout, stderr) from the spawned process, or None if
-              the command fails.
- """ - process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', [0]) - ignore_exit_code = False - if isinstance(check_exit_code, bool): - ignore_exit_code = not check_exit_code - check_exit_code = [0] - elif isinstance(check_exit_code, int): - check_exit_code = [check_exit_code] - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) - shell = kwargs.pop('shell', False) - - if len(kwargs): - raise exception.InventoryException( - _('Got unknown keyword args to utils.execute: %r') % kwargs) - - if run_as_root and os.geteuid() != 0: - cmd = (['sudo', 'inventory-rootwrap', CONF.rootwrap_config] + - list(cmd)) - - cmd = map(str, cmd) - - while attempts > 0: - attempts -= 1 - try: - LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) - _PIPE = subprocess.PIPE # pylint: disable=E1101 - - if os.name == 'nt': - preexec_fn = None - close_fds = False - else: - preexec_fn = _subprocess_setup - close_fds = True - - obj = subprocess.Popen(cmd, - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - close_fds=close_fds, - preexec_fn=preexec_fn, - shell=shell) - result = None - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - obj.stdin.close() # pylint: disable=E1101 - _returncode = obj.returncode # pylint: disable=E1101 - LOG.debug(_('Result was %s') % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise exception.ProcessExecutionError( - exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) - return result - except exception.ProcessExecutionError: - if not attempts: - raise - else: - LOG.debug(_('%r failed. Retrying.'), cmd) - if delay_on_retry: - greenthread.sleep(random.randint(20, 200) / 100.0) - finally: - # NOTE(termie): this appears to be necessary to let the subprocess - # call clean something up in between calls, without - # it two execute calls in a row hangs the second one - greenthread.sleep(0) - - -def trycmd(*args, **kwargs): - """A wrapper around execute() to more easily handle warnings and errors. - - Returns an (out, err) tuple of strings containing the output of - the command's stdout and stderr. If 'err' is not empty then the - command can be considered to have failed. - - :discard_warnings True | False. Defaults to False. 
If set to True,
-                        then for succeeding commands, stderr is cleared
-
-    """
-    discard_warnings = kwargs.pop('discard_warnings', False)
-
-    try:
-        out, err = execute(*args, **kwargs)
-        failed = False
-    except exception.ProcessExecutionError as exn:
-        out, err = '', str(exn)
-        failed = True
-
-    if not failed and discard_warnings and err:
-        # Handle commands that output to stderr but otherwise succeed
-        err = ''
-
-    return out, err
-
-
-def is_int_like(val):
-    """Check if a value looks like an int."""
-    try:
-        return str(int(val)) == str(val)
-    except Exception:
-        return False
-
-
-def is_float_like(val):
-    """Check if a value looks like a float."""
-    try:
-        return str(float(val)) == str(val)
-    except Exception:
-        return False
-
-
-def is_valid_boolstr(val):
-    """Check if the provided string is a valid bool string or not."""
-    boolstrs = ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
-    return str(val).lower() in boolstrs
-
-
-def is_valid_mac(address):
-    """Verify the format of a MAC address."""
-    m = "[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
-    if isinstance(address, six.string_types) and re.match(m, address.lower()):
-        return True
-    return False
-
-
-def validate_and_normalize_mac(address):
-    """Validate a MAC address and return the normalized form.
-
-    Checks whether the supplied MAC address is formally correct and
-    normalizes it to all lower case.
-
-    :param address: MAC address to be validated and normalized.
-    :returns: Normalized and validated MAC address.
-    :raises: InvalidMAC If the MAC address is not valid.
-    :raises: ClonedInterfaceNotFound If MAC address is not updated
-             while installing a cloned image.
-
-    """
-    if not is_valid_mac(address):
-        if constants.CLONE_ISO_MAC in address:
-            # get interface name from the label
-            intf_name = address.rsplit('-', 1)[1][1:]
-            raise exception.ClonedInterfaceNotFound(intf=intf_name)
-        else:
-            raise exception.InvalidMAC(mac=address)
-    return address.lower()
-
-
-def is_valid_ipv4(address):
-    """Verify that address represents a valid IPv4 address."""
-    try:
-        return netaddr.valid_ipv4(address)
-    except Exception:
-        return False
-
-
-def is_valid_ipv6(address):
-    try:
-        return netaddr.valid_ipv6(address)
-    except Exception:
-        return False
-
-
-def is_valid_ip(address):
-    if not is_valid_ipv4(address):
-        return is_valid_ipv6(address)
-    return True
-
-
-def is_valid_ipv6_cidr(address):
-    try:
-        str(netaddr.IPNetwork(address, version=6).cidr)
-        return True
-    except Exception:
-        return False
-
-
-def get_shortened_ipv6(address):
-    addr = netaddr.IPAddress(address, version=6)
-    return str(addr.ipv6())
-
-
-def get_shortened_ipv6_cidr(address):
-    net = netaddr.IPNetwork(address, version=6)
-    return str(net.cidr)
-
-
-def is_valid_cidr(address):
-    """Check if the provided ipv4 or ipv6 address is a valid CIDR address."""
-    try:
-        # Validate the correct CIDR Address
-        netaddr.IPNetwork(address)
-    except netaddr.core.AddrFormatError:
-        return False
-    except UnboundLocalError:
-        # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
-        # https://github.com/drkjam/netaddr/issues/2)
-        return False
-
-    # Prior validation only partially verifies the /xx part
-    # Verify it here
-    ip_segment = address.split('/')
-
-    if len(ip_segment) <= 1 or ip_segment[1] == '':
-        return False
-
-    return True
-
-
-def is_valid_hex(num):
-    try:
-        int(num, 16)
-    except ValueError:
-        return False
-    return True
-
-
-def is_valid_pci_device_vendor_id(id):
-    """Check if the provided id is a valid 16 bit hexadecimal."""
-    val = id.replace('0x', '').strip()
-    if not is_valid_hex(id):
-        return False
-    if len(val) > 4:
-        return False
-    return True
-
-
-def is_valid_pci_class_id(id):
-    """Check if the provided id is a valid 24 bit hexadecimal."""
-    val = id.replace('0x', '').strip()
-    if not is_valid_hex(id):
-        return False
-    if len(val) > 6:
-        return False
-    return True
-
-
-def is_system_usable_block_device(pydev_device):
-    """Check if a block device is local and can be used for partitioning
-
-    Example devices:
-     o local block devices: local HDDs, SSDs, RAID arrays
-     o remote devices: iscsi mounted, LIO, EMC
-     o non permanent devices: USB stick
-    :return bool: True if device can be used else False
-    """
-    if pydev_device.get("ID_BUS") == "usb":
-        # Skip USB devices
-        return False
-    if pydev_device.get("DM_VG_NAME") or pydev_device.get("DM_LV_NAME"):
-        # Skip LVM devices
-        return False
-    id_path = pydev_device.get("ID_PATH", "")
-    if "iqn." in id_path or "eui." in id_path:
-        # Skip all iSCSI devices, they are links for volume storage.
-        # As per https://www.ietf.org/rfc/rfc3721.txt, "iqn." or "eui."
-        # have to be present when constructing iSCSI names.
-        return False
-    if pydev_device.get("ID_VENDOR") == constants.VENDOR_ID_LIO:
-        # LIO devices are iSCSI, should be skipped above!
-        LOG.error("Invalid id_path. Device %s (%s) is iSCSI!" %
-                  (id_path, pydev_device.get('DEVNAME')))
-        return False
-    return True
-
-
-def get_ip_version(network):
-    """Returns the IP version of a network (IPv4 or IPv6).
-
-    :raises: AddrFormatError if invalid network.
-    """
-    if netaddr.IPNetwork(network).version == 6:
-        return "IPv6"
-    elif netaddr.IPNetwork(network).version == 4:
-        return "IPv4"
-
-
-def convert_to_list_dict(lst, label):
-    """Convert a value or list into a list of dicts."""
-    if not lst:
-        return None
-    if not isinstance(lst, list):
-        lst = [lst]
-    return [{label: x} for x in lst]
-
-
-def sanitize_hostname(hostname):
-    """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
-    if isinstance(hostname, unicode):
-        hostname = hostname.encode('latin-1', 'ignore')
-
-    hostname = re.sub('[ _]', '-', hostname)
-    hostname = re.sub('[^\w.-]+', '', hostname)
-    hostname = hostname.lower()
-    hostname = hostname.strip('.-')
-
-    return hostname
-
-
-def hash_file(file_like_object):
-    """Generate a hash for the contents of a file."""
-    checksum = hashlib.sha1()
-    for chunk in iter(lambda: file_like_object.read(32768), b''):
-        checksum.update(chunk)
-    return checksum.hexdigest()
-
-
-@contextlib.contextmanager
-def tempdir(**kwargs):
-    tempfile.tempdir = CONF.tempdir
-    tmpdir = tempfile.mkdtemp(**kwargs)
-    try:
-        yield tmpdir
-    finally:
-        try:
-            shutil.rmtree(tmpdir)
-        except OSError as e:
-            LOG.error(_('Could not remove tmpdir: %s'), str(e))
-
-
-def mkfs(fs, path, label=None):
-    """Format a file or block device
-
-    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
-               'btrfs', etc.)
-    :param path: Path to file or block device to format
-    :param label: Volume label to use
-    """
-    if fs == 'swap':
-        args = ['mkswap']
-    else:
-        args = ['mkfs', '-t', fs]
-    # add -F to force no interactive execute on non-block device.
-    if fs in ('ext3', 'ext4'):
-        args.extend(['-F'])
-    if label:
-        if fs in ('msdos', 'vfat'):
-            label_opt = '-n'
-        else:
-            label_opt = '-L'
-        args.extend([label_opt, label])
-    args.append(path)
-    execute(*args)
-
-
-def safe_rstrip(value, chars=None):
-    """Removes trailing characters from a string if that does not make it empty
-
-    :param value: A string value that will be stripped.
-    :param chars: Characters to remove.
-    :return: Stripped value.
- - """ - if not isinstance(value, six.string_types): - LOG.warn(_("Failed to remove trailing character. Returning original " - "object. Supplied object is not a string: %s,") % value) - return value - - return value.rstrip(chars) or value - - -def generate_uuid(): - return str(uuid.uuid4()) - - -def is_uuid_like(val): - """Returns validation of a value as a UUID. - - For our purposes, a UUID is a canonical form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False - - -def removekey(d, key): - r = dict(d) - del r[key] - return r - - -def removekeys_nonmtce(d, keepkeys=None): - if not keepkeys: - keepkeys = [] - - nonmtce_keys = ['created_at', - 'updated_at', - 'host_action', - 'vim_progress_status', - 'task', - 'uptime', - 'location', - 'serialid', - 'config_status', - 'config_applied', - 'config_target', - 'reserved', - 'system_id'] - # 'action_state', - r = dict(d) - - for k in nonmtce_keys: - if r.get(k) and (k not in keepkeys): - del r[k] - return r - - -def removekeys_nonhwmon(d, keepkeys=None): - if not keepkeys: - keepkeys = [] - - nonmtce_keys = ['created_at', - 'updated_at', - ] - r = dict(d) - - for k in nonmtce_keys: - if r.get(k) and (k not in keepkeys): - del r[k] - return r - - -def touch(fname): - with open(fname, 'a'): - os.utime(fname, None) - - -def symlink_force(source, link_name): - """Force creation of a symlink - - :param: source: path to the source - :param: link_name: symbolic link name - """ - try: - os.symlink(source, link_name) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(link_name) - os.symlink(source, link_name) - - -@contextlib.contextmanager -def mounted(remote_dir, local_dir): - local_dir = os.path.abspath(local_dir) - try: - subprocess.check_output( - ["/bin/nfs-mount", remote_dir, local_dir], - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - raise OSError(("mount operation failed: " - "command={}, retcode={}, output='{}'").format( - e.cmd, e.returncode, e.output)) - try: - yield - finally: - try: - subprocess.check_output( - ["/bin/umount", local_dir], - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - raise OSError(("umount operation failed: " - "command={}, retcode={}, output='{}'").format( - e.cmd, e.returncode, e.output)) - - -def timestamped(dname, fmt='{dname}_%Y-%m-%d-%H-%M-%S'): - return datetime.datetime.now().strftime(fmt).format(dname=dname) - - -def host_has_function(host, function): - return function in (host.get('subfunctions') or - host.get('personality') or '') - - -@memoized -def is_virtual(): - '''Determines if the system is virtualized or not''' - subp = subprocess.Popen(['facter', 'is_virtual'], - stdout=subprocess.PIPE) - if subp.wait(): - raise Exception("Failed to read virtualization status from facter") - output = subp.stdout.readlines() - if len(output) != 1: - raise Exception("Unexpected number of lines: %d" % len(output)) - result = output[0].strip() - return bool(result == 'true') - - -def is_virtual_compute(ihost): - if not(os.path.isdir("/etc/inventory/.virtual_compute_nodes")): - return False - try: - ip = ihost['mgmt_ip'] - return os.path.isfile("/etc/inventory/.virtual_compute_nodes/%s" % - ip) - except AttributeError: - return False - - -def is_low_core_system(ihost, dba): - """Determine whether a low core cpu count system. 
-
-    Determine if the host's core count is less than or equal to that of a
-    Xeon-D CPU; used with get_required_platform_reserved_memory to set the
-    required platform memory for Xeon-D systems
-    """
-    cpu_list = dba.cpu_get_by_host(ihost['uuid'])
-    number_physical_cores = 0
-    for cpu in cpu_list:
-        if int(cpu['thread']) == 0:
-            number_physical_cores += 1
-    return number_physical_cores <= constants.NUMBER_CORES_XEOND
-
-
-def get_minimum_platform_reserved_memory(ihost, numa_node):
-    """Returns the minimum amount of memory to be reserved by the platform
-
-    For a given NUMA node. Compute nodes require reserved memory because the
-    balance of the memory is allocated to VM instances. Other node types
-    have exclusive use of the memory so no explicit reservation is
-    required. Memory required by platform core is not included here.
-    """
-    reserved = 0
-    if numa_node is None:
-        return reserved
-    if is_virtual() or is_virtual_compute(ihost):
-        # minimal memory requirements for VirtualBox
-        if host_has_function(ihost, k_host.COMPUTE):
-            if numa_node == 0:
-                reserved += 1200
-                if host_has_function(ihost, k_host.CONTROLLER):
-                    reserved += 5000
-            else:
-                reserved += 500
-    else:
-        if host_has_function(ihost, k_host.COMPUTE):
-            # Engineer 2G per numa node for disk IO RSS overhead
-            reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB
-    return reserved
-
-
-def get_required_platform_reserved_memory(ihost, numa_node, low_core=False):
-    """Returns the amount of memory to be reserved by the platform.
-
-    For a given NUMA node. Compute nodes require reserved memory because the
-    balance of the memory is allocated to VM instances. Other node types
-    have exclusive use of the memory so no explicit reservation is
-    required.
-    """
-    required_reserved = 0
-    if numa_node is None:
-        return required_reserved
-    if is_virtual() or is_virtual_compute(ihost):
-        # minimal memory requirements for VirtualBox
-        required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX
-        if host_has_function(ihost, k_host.COMPUTE):
-            if numa_node == 0:
-                required_reserved += \
-                    constants.PLATFORM_CORE_MEMORY_RESERVED_MIB_VBOX
-                if host_has_function(ihost, k_host.CONTROLLER):
-                    required_reserved += \
-                        constants.COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_VBOX  # noqa
-                else:
-                    # If not a controller, add overhead for
-                    # metadata and vrouters
-                    required_reserved += \
-                        constants.NETWORK_METADATA_OVERHEAD_MIB_VBOX
-            else:
-                required_reserved += \
-                    constants.DISK_IO_RESIDENT_SET_SIZE_MIB_VBOX
-    else:
-        if host_has_function(ihost, k_host.COMPUTE):
-            # Engineer 2G per numa node for disk IO RSS overhead
-            required_reserved += constants.DISK_IO_RESIDENT_SET_SIZE_MIB
-            if numa_node == 0:
-                # Engineer 2G for compute to give some headroom;
-                # typically requires 650 MB PSS
-                required_reserved += \
-                    constants.PLATFORM_CORE_MEMORY_RESERVED_MIB
-                if host_has_function(ihost, k_host.CONTROLLER):
-                    # Over-engineer controller memory.
-                    # Typically require 5GB PSS; accommodate 2GB headroom.
-                    # Controller memory usage depends on number of workers.
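# A rough worked example (MiB figures assumed from the comments above and
# below, not read from the constants module): on a bare-metal combined
# controller+compute host, numa_node 0 accumulates ~2048 MiB disk IO RSS
# + ~2048 MiB platform core + a combined-node controller reservation of
# roughly 5 GiB PSS plus 2 GiB headroom, i.e. on the order of 11 GiB;
# every other NUMA node carries only the ~2048 MiB disk IO reservation.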
- if low_core: - required_reserved += \ - constants.COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB_XEOND # noqa - else: - required_reserved += \ - constants.COMBINED_NODE_CONTROLLER_MEMORY_RESERVED_MIB # noqa - else: - # If not a controller, - # add overhead for metadata and vrouters - required_reserved += \ - constants.NETWORK_METADATA_OVERHEAD_MIB - return required_reserved - - -def get_network_type_list(interface): - if interface['networktype']: - return [n.strip() for n in interface['networktype'].split(",")] - else: - return [] - - -def is_pci_network_types(networktypelist): - """Check whether pci network types in list - - Check if the network type consists of the combined PCI passthrough - and SRIOV network types. - """ - return (len(constants.PCI_NETWORK_TYPES) == len(networktypelist) and - all(i in networktypelist for i in constants.PCI_NETWORK_TYPES)) - - -def get_sw_version(): - return SW_VERSION - - -class ISO(object): - - def __init__(self, iso_path, mount_dir): - self.iso_path = iso_path - self.mount_dir = mount_dir - self._iso_mounted = False - self._mount_iso() - - def __del__(self): - if self._iso_mounted: - self._umount_iso() - - def _mount_iso(self): - with open(os.devnull, "w") as fnull: - subprocess.check_call(['mkdir', '-p', self.mount_dir], - stdout=fnull, - stderr=fnull) - subprocess.check_call(['mount', '-r', '-o', 'loop', self.iso_path, - self.mount_dir], - stdout=fnull, - stderr=fnull) - self._iso_mounted = True - - def _umount_iso(self): - try: - # Do a lazy unmount to handle cases where a file in the mounted - # directory is open when the umount is done. - subprocess.check_call(['umount', '-l', self.mount_dir]) - self._iso_mounted = False - except subprocess.CalledProcessError as e: - # If this fails for some reason, there's not a lot we can do - # Just log the exception and keep going - LOG.exception(e) - - -def get_active_load(loads): - active_load = None - for db_load in loads: - if db_load.state == constants.ACTIVE_LOAD_STATE: - active_load = db_load - - if active_load is None: - raise exception.InventoryException(_("No active load found")) - - return active_load - - -def get_imported_load(loads): - imported_load = None - for db_load in loads: - if db_load.state == constants.IMPORTED_LOAD_STATE: - imported_load = db_load - - if imported_load is None: - raise exception.InventoryException(_("No imported load found")) - - return imported_load - - -def validate_loads_for_import(loads): - for db_load in loads: - if db_load.state == constants.IMPORTED_LOAD_STATE: - raise exception.InventoryException(_("Imported load exists.")) - - -def validate_load_for_delete(load): - if not load: - raise exception.InventoryException(_("Load not found")) - - valid_delete_states = [ - constants.IMPORTED_LOAD_STATE, - constants.ERROR_LOAD_STATE, - constants.DELETING_LOAD_STATE - ] - - if load.state not in valid_delete_states: - raise exception.InventoryException( - _("Only a load in an imported or error state can be deleted")) - - -def gethostbyname(hostname): - return socket.getaddrinfo(hostname, None)[0][4][0] - - -def get_local_controller_hostname(): - try: - local_hostname = socket.gethostname() - except Exception as e: - raise exception.InventoryException(_( - "Failed to get the local hostname: %s") % str(e)) - return local_hostname - - -def get_mate_controller_hostname(hostname=None): - if not hostname: - try: - hostname = socket.gethostname() - except Exception as e: - raise exception.InventoryException(_( - "Failed to get the local hostname: %s") % str(e)) - - if hostname == 
k_host.CONTROLLER_0_HOSTNAME:
-        mate_hostname = k_host.CONTROLLER_1_HOSTNAME
-    elif hostname == k_host.CONTROLLER_1_HOSTNAME:
-        mate_hostname = k_host.CONTROLLER_0_HOSTNAME
-    else:
-        raise exception.InventoryException(_(
-            "Unknown local hostname: %s") % hostname)
-
-    return mate_hostname
-
-
-def format_address_name(hostname, network_type):
-    return "%s-%s" % (hostname, network_type)
-
-
-def validate_yes_no(name, value):
-    if value.lower() not in ['y', 'n']:
-        raise wsme.exc.ClientSideError((
-            "Parameter '%s' must be a y/n value." % name))
-
-
-def get_interface_os_ifname(interface, interfaces, ports):
-    """Returns the operating system name for an interface.
-
-    The user is allowed to override the inventory DB interface name for
-    convenience, but that name is not used at the operating system level for
-    all interface types.
-    For ethernet and VLAN interfaces the name follows the native interface
-    names while for AE interfaces the user-defined name is used.
-    """
-    if interface['iftype'] == constants.INTERFACE_TYPE_VLAN:
-        # VLAN interface names are built using the o/s name of the lower
-        # interface object.
-        lower_iface = interfaces[interface['uses'][0]]
-        lower_ifname = get_interface_os_ifname(lower_iface, interfaces, ports)
-        return '{}.{}'.format(lower_ifname, interface['vlan_id'])
-    elif interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET:
-        # Ethernet interface names are always based on the port name, which
-        # is just the normal o/s name of the original network interface
-        lower_ifname = ports[interface['id']]['name']
-        return lower_ifname
-    else:
-        # All other interfaces default to the user-defined name
-        return interface['ifname']
-
-
-def get_dhcp_cid(hostname, network_type, mac):
-    """Create the CID for use with dnsmasq.
-
-    We use a unique identifier for a client since different networks can
-    operate over the same device (and hence same MAC addr) when VLAN
-    interfaces are involved. The format differs based on network type
-    because the mgmt network CID uses a default, since it needs to exist
-    before the board is handled by inventory (i.e., the CID needs to exist
-    in the dhclient.conf file at build time), while the infra network CID
-    is built dynamically to avoid colliding with the mgmt CID.
-
-    Example:
-    Format = 'id:' + colon-separated-hex(hostname:network_type) + ":" + mac
-    """
-    if network_type == constants.NETWORK_TYPE_INFRA:
-        prefix = '{}:{}'.format(hostname, network_type)
-        prefix = ':'.join(x.encode('hex') for x in prefix)
-    elif network_type == constants.NETWORK_TYPE_MGMT:
-        # Our default dhclient.conf file requests a prefix of '00:03:00' to
-        # which dhclient adds a hardware address type of 01 to make a final
-        # prefix of '00:03:00:01'.
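# Sketch of the resulting CIDs (MAC value hypothetical, and assuming the
# network type constants are the strings 'infra' and 'mgmt'): on the infra
# path above, 'controller-0:infra' is hex-encoded per character, so the
# prefix starts '63:6f:6e:74:...'; on the mgmt path below,
# get_dhcp_cid('controller-0', 'mgmt', '08:00:27:12:34:56') returns
# '00:03:00:01:08:00:27:12:34:56'.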
- prefix = '00:03:00:01' - else: - raise Exception("Network type {} does not support DHCP".format( - network_type)) - return '{}:{}'.format(prefix, mac) - - -def get_personalities(host_obj): - """Determine the personalities from host_obj""" - personalities = host_obj.subfunctions.split(',') - if k_host.LOWLATENCY in personalities: - personalities.remove(k_host.LOWLATENCY) - return personalities - - -def is_cpe(host_obj): - return (host_has_function(host_obj, k_host.CONTROLLER) and - host_has_function(host_obj, k_host.COMPUTE)) - - -def output_to_dict(output): - dict = {} - output = filter(None, output.split('\n')) - - for row in output: - values = row.split() - if len(values) != 2: - raise Exception("The following output does not respect the " - "format: %s" % row) - dict[values[1]] = values[0] - - return dict - - -def bytes_to_GiB(bytes_number): - return bytes_number / float(1024 ** 3) - - -def bytes_to_MiB(bytes_number): - return bytes_number / float(1024 ** 2) - - -def synchronized(name, external=True): - if external: - lock_path = constants.INVENTORY_LOCK_PATH - else: - lock_path = None - return lockutils.synchronized(name, - lock_file_prefix='inventory-', - external=external, - lock_path=lock_path) - - -def skip_udev_partition_probe(function): - def wrapper(*args, **kwargs): - """Decorator to skip partition rescanning in udev (fix for CGTS-8957) - - When reading partitions we have to avoid rescanning them as this - will temporarily delete their dev nodes causing devastating effects - for commands that rely on them (e.g. ceph-disk). - - UDEV triggers a partition rescan when a device node opened in write - mode is closed. To avoid this, we have to acquire a shared lock on the - device before other close operations do. - - Since both parted and sgdisk always open block devices in RW mode we - must disable udev from triggering the rescan when we just need to get - partition information. - - This happens due to a change in udev v214. For details see: - http://tracker.ceph.com/issues/14080 - http://tracker.ceph.com/issues/15176 - https://github.com/systemd/systemd/commit/02ba8fb3357 - daf57f6120ac512fb464a4c623419 - - :param device_node: dev node or path of the device - :returns decorated function - """ - device_node = kwargs.get('device_node', None) - if device_node: - with open(device_node, 'r') as f: - fcntl.flock(f, fcntl.LOCK_SH | fcntl.LOCK_NB) - try: - return function(*args, **kwargs) - finally: - # Since events are asynchronous we have to wait for udev - # to pick up the change. - time.sleep(0.1) - fcntl.flock(f, fcntl.LOCK_UN) - else: - return function(*args, **kwargs) - return wrapper - - -def disk_is_gpt(device_node): - """Checks if a device node is of GPT format. - - :param device_node: the disk's device node - :returns: True if partition table on disk is GPT - False if partition table on disk is not GPT - """ - parted_command = '{} {} {}'.format('parted -s', device_node, 'print') - parted_process = subprocess.Popen( - parted_command, stdout=subprocess.PIPE, shell=True) - parted_output = parted_process.stdout.read() - if re.search('Partition Table: gpt', parted_output): - return True - - return False - - -def partitions_are_in_order(disk_partitions, requested_partitions): - """Check if the disk partitions are in order with requested. - - Determine if a list of requested partitions can be created on a disk - with other existing partitions. 
- """ - - partitions_nr = [] - - for dp in disk_partitions: - part_number = re.match('.*?([0-9]+)$', dp.get('device_path')).group(1) - partitions_nr.append(int(part_number)) - - for rp in requested_partitions: - part_number = re.match('.*?([0-9]+)$', rp.get('device_path')).group(1) - partitions_nr.append(int(part_number)) - - return sorted(partitions_nr) == range(min(partitions_nr), - max(partitions_nr) + 1) - - -# TODO(oponcea): Remove once sm supports in-service configuration reload. -def is_single_controller(dbapi): - # Check the number of provisioned/provisioning hosts. If there is - # only one then we have a single controller (AIO-SX, single AIO-DX, or - # single std controller). If this is the case reset sm after adding - # cinder so that cinder DRBD/processes are managed. - hosts = dbapi.ihost_get_list() - prov_hosts = [h for h in hosts - if h.invprovision in [k_host.PROVISIONED, - k_host.PROVISIONING]] - if len(prov_hosts) == 1: - return True - return False - - -def is_partition_the_last(dbapi, partition): - """Check that the partition is the last partition. - - Used on check prior to delete. - """ - idisk_uuid = partition.get('idisk_uuid') - onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid) - part_number = re.match('.*?([0-9]+)$', - partition.get('device_path')).group(1) - - if int(part_number) != len(onidisk_parts): - return False - - return True - - -def _check_upgrade(dbapi, host_obj=None): - """Check whether partition operation may be allowed. - - If there is an upgrade in place, reject the operation if the - host was not created after upgrade start. - """ - try: - upgrade = dbapi.software_upgrade_get_one() - except exception.NotFound: - return - - if host_obj: - if host_obj.created_at > upgrade.created_at: - LOG.info("New host %s created after upgrade, allow partition" % - host_obj.hostname) - return - - raise wsme.exc.ClientSideError( - _("ERROR: Disk partition operations are not allowed during a " - "software upgrade. Try again after the upgrade is completed.")) - - -def disk_wipe(device): - """Wipe GPT table entries. - - We ignore exit codes in case disk is toasted or not present. - Note: Assumption is that entire disk is used - :param device: disk device node or device path - """ - LOG.info("Wiping device: %s " % device) - - # Wipe well known GPT table entries, if any. - trycmd('wipefs', '-f', '-a', device) - execute('udevadm', 'settle') - - # Wipe any other tables at the beginning of the device. - out, err = trycmd( - 'dd', 'if=/dev/zero', - 'of=%s' % device, - 'bs=512', 'count=2048', - 'conv=fdatasync') - LOG.info("Wiped beginning of disk: %s - %s" % (out, err)) - - # Get size of disk. - size, __ = trycmd('blockdev', '--getsz', - device) - size = size.rstrip() - - if size and size.isdigit(): - # Wipe at the end of device. 
- out, err = trycmd( - 'dd', 'if=/dev/zero', - 'of=%s' % device, - 'bs=512', 'count=2048', - 'seek=%s' % (int(size) - 2048), - 'conv=fdatasync') - LOG.info("Wiped end of disk: %s - %s" % (out, err)) - - LOG.info("Device %s zapped" % device) - - -def get_dhcp_client_iaid(mac_address): - """Retrieves the client IAID from its MAC address.""" - hwaddr = list(int(byte, 16) for byte in mac_address.split(':')) - return hwaddr[2] << 24 | hwaddr[3] << 16 | hwaddr[4] << 8 | hwaddr[5] - - -def get_cgts_vg_free_space(): - """Determine free space in cgts-vg""" - - try: - # Determine space in cgts-vg in GiB - vg_free_str = subprocess.check_output( - ['vgdisplay', '-C', '--noheadings', '--nosuffix', - '-o', 'vg_free', '--units', 'g', 'cgts-vg'], - close_fds=True).rstrip() - cgts_vg_free = int(float(vg_free_str)) - except subprocess.CalledProcessError: - LOG.error("Command vgdisplay failed") - raise Exception("Command vgdisplay failed") - - return cgts_vg_free - - -def read_filtered_directory_content(dirpath, *filters): - """Reads the content of a directory, filtered on glob like expressions. - - Returns a dictionary, with the "key" being the filename - and the "value" being the content of that file. - """ - def filter_directory_files(dirpath, *filters): - return it.chain.from_iterable(glob.iglob(dirpath + '/' + filter) - for filter in filters) - - content_dict = {} - for filename in filter_directory_files(dirpath, *filters): - content = "" - with open(os.path.join(filename), 'rb') as obj: - content = obj.read() - try: - # If the filter specified binary files then - # these will need to be base64 encoded so that - # they can be transferred over RPC and stored in DB - content.decode('utf-8') - except UnicodeError: - content = content.encode('base64') - content_dict['base64_encoded_files'] = \ - content_dict.get("base64_encoded_files", []) + [filename] - - content_dict[filename] = content - return content_dict - - -def get_disk_capacity_mib(device_node): - # Run command - fdisk_command = 'fdisk -l %s | grep "^Disk %s:"' % ( - device_node, device_node) - - try: - fdisk_output, _ = execute(fdisk_command, check_exit_code=[0], - run_as_root=True, attempts=3, - shell=True) - except exception.ProcessExecutionError: - LOG.error("Error running fdisk command: %s" % - fdisk_command) - return 0 - - # Parse output - second_half = fdisk_output.split(',')[1] - size_bytes = second_half.split()[0].strip() - - # Convert bytes to MiB (1 MiB = 1024*1024 bytes) - int_size = int(size_bytes) - size_mib = int_size / (1024 ** 2) - - return int(size_mib) - - -def format_range_set(items): - # Generate a pretty-printed value of ranges, such as 3-6,8-9,12-17 - ranges = [] - for k, iterable in it.groupby(enumerate(sorted(items)), - lambda x: x[1] - x[0]): - rng = list(iterable) - if len(rng) == 1: - s = str(rng[0][1]) - else: - s = "%s-%s" % (rng[0][1], rng[-1][1]) - ranges.append(s) - return ','.join(ranges) - - -def get_numa_index_list(obj): - """Create map of objects indexed by numa node""" - obj_lists = collections.defaultdict(list) - for index, o in enumerate(obj): - o["_index"] = index - obj_lists[o.numa_node].append(o) - return obj_lists - - -def compare(a, b): - return (a > b) - (a < b) diff --git a/inventory/inventory/inventory/common/vim_api.py b/inventory/inventory/inventory/common/vim_api.py deleted file mode 100644 index 7a12ad50..00000000 --- a/inventory/inventory/inventory/common/vim_api.py +++ /dev/null @@ -1,156 +0,0 @@ -# -# Copyright (c) 2015-2018 Wind River Systems, Inc. 
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from inventory.common import constants
-from inventory.common import k_host
-from inventory import objects
-import json
-from keystoneauth1.access import service_catalog as k_service_catalog
-from oslo_log import log
-from rest_api import rest_api_request
-
-LOG = log.getLogger(__name__)
-
-
-def _get_region(context):
-    system = objects.System.get_one(context)
-    return system.region_name
-
-
-def _get_endpoint(context):
-    # service_type, service_name, interface = \
-    #     CONF.nfv.catalog_info.split(':')
-    region_name = _get_region(context)
-    sc = k_service_catalog.ServiceCatalogV2(context.service_catalog)
-    service_parameters = {'service_type': 'nfv',
-                          'service_name': 'vim',
-                          'interface': 'internalURL',
-                          'region_name': region_name}
-    endpoint = sc.url_for(**service_parameters)
-    LOG.info("NFV endpoint=%s" % endpoint)
-    return endpoint
-
-
-def vim_host_add(context, uuid, hostname, subfunctions,
-                 administrative, operational, availability,
-                 subfunction_oper, subfunction_avail,
-                 timeout=constants.VIM_DEFAULT_TIMEOUT_IN_SECS):
-    """
-    Requests VIM to add a host.
-    """
-    LOG.info("vim_host_add hostname=%s, subfunctions=%s "
-             "%s-%s-%s subfunction_oper=%s subfunction_avail=%s" %
-             (hostname, subfunctions, administrative, operational,
-              availability, subfunction_oper, subfunction_avail))
-
-    api_cmd = _get_endpoint(context)
-    api_cmd += "/nfvi-plugins/v1/hosts/"
-
-    api_cmd_headers = dict()
-    api_cmd_headers['Content-type'] = "application/json"
-    api_cmd_headers['User-Agent'] = "inventory/1.0"
-
-    api_cmd_payload = dict()
-    api_cmd_payload['uuid'] = uuid
-    api_cmd_payload['hostname'] = hostname
-    api_cmd_payload['subfunctions'] = subfunctions
-    api_cmd_payload['administrative'] = administrative
-    api_cmd_payload['operational'] = operational
-    api_cmd_payload['availability'] = availability
-    api_cmd_payload['subfunction_oper'] = subfunction_oper
-    api_cmd_payload['subfunction_avail'] = subfunction_avail
-
-    LOG.warn("vim_host_add api_cmd=%s headers=%s payload=%s" %
-             (api_cmd, api_cmd_headers, api_cmd_payload))
-
-    response = rest_api_request(context, "POST", api_cmd, api_cmd_headers,
-                                json.dumps(api_cmd_payload), timeout)
-    return response
-
-
-def vim_host_action(context, uuid, hostname, action,
-                    timeout=constants.VIM_DEFAULT_TIMEOUT_IN_SECS):
-    """
-    Request VIM to perform host action.
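-
-    Only the unlock, lock and force-lock actions are accepted; any other
-    action is logged as an error and ignored.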
- """ - - response = None - _valid_actions = [k_host.ACTION_UNLOCK, - k_host.ACTION_LOCK, - k_host.ACTION_FORCE_LOCK] - - if action not in _valid_actions: - LOG.error("Unrecognized vim_host_action=%s" % action) - return response - - LOG.warn("vim_host_action hostname=%s, action=%s" % (hostname, action)) - - api_cmd = _get_endpoint(context) - api_cmd += "/nfvi-plugins/v1/hosts/%s" % uuid - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - api_cmd_payload = dict() - api_cmd_payload['uuid'] = uuid - api_cmd_payload['hostname'] = hostname - api_cmd_payload['action'] = action - - LOG.warn("vim_host_action hostname=%s, action=%s api_cmd=%s " - "headers=%s payload=%s" % - (hostname, action, api_cmd, api_cmd_headers, api_cmd_payload)) - - response = rest_api_request(context, "PATCH", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - return response - - -def vim_host_delete(context, uuid, hostname, - timeout=constants.VIM_DEFAULT_TIMEOUT_IN_SECS): - """ - Asks VIM to delete a host - """ - - api_cmd = _get_endpoint(context) - api_cmd += "/nfvi-plugins/v1/hosts/%s" % uuid - - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - api_cmd_payload = dict() - api_cmd_payload['uuid'] = uuid - api_cmd_payload['hostname'] = hostname - api_cmd_payload['action'] = 'delete' - - response = rest_api_request(context, "DELETE", api_cmd, - api_cmd_headers, - json.dumps(api_cmd_payload), - timeout=timeout) - return response - - -def vim_host_get_instances(context, uuid, hostname, - timeout=constants.VIM_DEFAULT_TIMEOUT_IN_SECS): - """ - Returns instance counts for a given host - """ - - response = None - - api_cmd = _get_endpoint(context) - api_cmd += "/nfvi-plugins/v1/hosts" - api_cmd_headers = dict() - api_cmd_headers['Content-type'] = "application/json" - api_cmd_headers['User-Agent'] = "inventory/1.0" - - api_cmd_payload = dict() - api_cmd_payload['uuid'] = uuid - api_cmd_payload['hostname'] = hostname - - response = rest_api_request(context, "GET", api_cmd, api_cmd_headers, - json.dumps(api_cmd_payload), timeout) - return response diff --git a/inventory/inventory/inventory/conductor/__init__.py b/inventory/inventory/inventory/conductor/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/conductor/base_manager.py b/inventory/inventory/inventory/conductor/base_manager.py deleted file mode 100644 index 404685ef..00000000 --- a/inventory/inventory/inventory/conductor/base_manager.py +++ /dev/null @@ -1,118 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -"""Base agent manager functionality.""" - -import inspect - -import futurist -from futurist import periodics -from futurist import rejection -from oslo_config import cfg -from oslo_log import log - -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.db import api as dbapi - -LOG = log.getLogger(__name__) - - -class BaseConductorManager(object): - - def __init__(self, host, topic): - super(BaseConductorManager, self).__init__() - if not host: - host = cfg.CONF.host - self.host = host - self.topic = topic - self._started = False - - def init_host(self, admin_context=None): - """Initialize the conductor host. - - :param admin_context: the admin context to pass to periodic tasks. - :raises: RuntimeError when conductor is already running. 
- """ - if self._started: - raise RuntimeError(_('Attempt to start an already running ' - 'conductor manager')) - - self.dbapi = dbapi.get_instance() - - rejection_func = rejection.reject_when_reached(64) - # CONF.conductor.workers_pool_size) - self._executor = futurist.GreenThreadPoolExecutor( - 64, check_and_reject=rejection_func) - """Executor for performing tasks async.""" - - # Collect driver-specific periodic tasks. - # Conductor periodic tasks accept context argument, - LOG.info('Collecting periodic tasks') - self._periodic_task_callables = [] - self._collect_periodic_tasks(self, (admin_context,)) - - self._periodic_tasks = periodics.PeriodicWorker( - self._periodic_task_callables, - executor_factory=periodics.ExistingExecutor(self._executor)) - - # Start periodic tasks - self._periodic_tasks_worker = self._executor.submit( - self._periodic_tasks.start, allow_empty=True) - self._periodic_tasks_worker.add_done_callback( - self._on_periodic_tasks_stop) - - self._started = True - - def del_host(self, deregister=True): - # Conductor deregistration fails if called on non-initialized - # conductor (e.g. when rpc server is unreachable). - if not hasattr(self, 'conductor'): - return - - self._periodic_tasks.stop() - self._periodic_tasks.wait() - self._executor.shutdown(wait=True) - self._started = False - - def _collect_periodic_tasks(self, obj, args): - """Collect periodic tasks from a given object. - - Populates self._periodic_task_callables with tuples - (callable, args, kwargs). - - :param obj: object containing periodic tasks as methods - :param args: tuple with arguments to pass to every task - """ - for name, member in inspect.getmembers(obj): - if periodics.is_periodic(member): - LOG.debug('Found periodic task %(owner)s.%(member)s', - {'owner': obj.__class__.__name__, - 'member': name}) - self._periodic_task_callables.append((member, args, {})) - - def _on_periodic_tasks_stop(self, fut): - try: - fut.result() - except Exception as exc: - LOG.critical('Periodic tasks worker has failed: %s', exc) - else: - LOG.info('Successfully shut down periodic tasks') - - def _spawn_worker(self, func, *args, **kwargs): - - """Create a greenthread to run func(*args, **kwargs). - - Spawns a greenthread if there are free slots in pool, otherwise raises - exception. Execution control returns immediately to the caller. - - :returns: Future object. - :raises: NoFreeConductorWorker if worker pool is currently full. - - """ - try: - return self._executor.submit(func, *args, **kwargs) - except futurist.RejectedSubmission: - raise exception.NoFreeConductorWorker() diff --git a/inventory/inventory/inventory/conductor/manager.py b/inventory/inventory/inventory/conductor/manager.py deleted file mode 100644 index 3cde5722..00000000 --- a/inventory/inventory/inventory/conductor/manager.py +++ /dev/null @@ -1,1930 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2013 International Business Machines Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-"""Conduct all activity related to Inventory.
-
-A single instance of :py:class:`inventory.conductor.manager.ConductorManager`
-is created within the inventory-conductor process, and is responsible for
-performing actions for hosts managed by inventory.
-
-Commands are received via RPC calls.
-"""
-
-import grp
-import os
-import oslo_messaging as messaging
-import pwd
-import socket
-import subprocess
-import tsconfig.tsconfig as tsc
-
-from fm_api import constants as fm_constants
-from fm_api import fm_api
-from futurist import periodics
-from inventory.agent import rpcapi as agent_rpcapi
-from inventory.api.controllers.v1 import cpu_utils
-from inventory.api.controllers.v1 import utils
-from inventory.common import constants
-from inventory.common import exception
-from inventory.common import fm
-from inventory.common.i18n import _
-from inventory.common import k_host
-from inventory.common import k_lldp
-from inventory.common import mtce_api
-from inventory.common import rpc as inventory_oslo_rpc
-from inventory.common import utils as cutils
-from inventory.conductor import base_manager
-from inventory.conductor import openstack
-from inventory.db import api as dbapi
-from inventory import objects
-from inventory.systemconfig import plugin as systemconfig_plugin
-from netaddr import IPAddress
-from netaddr import IPNetwork
-from oslo_config import cfg
-from oslo_log import log
-
-MANAGER_TOPIC = 'inventory.conductor_manager'
-
-LOG = log.getLogger(__name__)
-
-conductor_opts = [
-    cfg.StrOpt('api_url',
-               default=None,
-               help=('URL of the Inventory API service. If not set, '
-                     'Inventory can get the current value from the '
-                     'Keystone service catalog.')),
-    cfg.IntOpt('audit_interval',
-               default=60,
-               help='Interval to run conductor audit'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(conductor_opts, 'conductor')
-MTC_ADDRESS = 'localhost'
-MTC_PORT = 2112
-
-
-class ConductorManager(base_manager.BaseConductorManager):
-    """Inventory Conductor service main class."""
-
-    # Must be in sync with rpcapi.ConductorAPI's
-    RPC_API_VERSION = '1.0'
-    my_host_id = None
-
-    target = messaging.Target(version=RPC_API_VERSION)
-
-    def __init__(self, host, topic):
-        super(ConductorManager, self).__init__(host, topic)
-        self.dbapi = None
-        self.fm_api = None
-        self.fm_log = None
-        self.sc_op = None
-
-        self._openstack = None
-        self._api_token = None
-        self._mtc_address = MTC_ADDRESS
-        self._mtc_port = MTC_PORT
-
-    def start(self):
-        self._start()
-        LOG.info("Start inventory-conductor")
-
-    def init_host(self, admin_context=None):
-        super(ConductorManager, self).init_host(admin_context)
-        self._start(admin_context)
-
-    def del_host(self, deregister=True):
-        return
-
-    def _start(self, context=None):
-        self.dbapi = dbapi.get_instance()
-        self.fm_api = fm_api.FaultAPIs()
-        self.fm_log = fm.FmCustomerLog()
-        self.sc_op = systemconfig_plugin.SystemConfigPlugin(
-            invoke_kwds={'context': context})
-        self._openstack = openstack.OpenStackOperator(self.dbapi)
-
-        # create /var/run/inventory if required. On DOR, the manifests
-        # may not run to create this volatile directory.
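-        # (DOR: Dead Office Recovery, i.e. a simultaneous restart of all
-        # hosts after a power outage.)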
- self._create_volatile_dir() - - system = self._populate_default_system(context) - - inventory_oslo_rpc.init(cfg.CONF) - LOG.info("inventory-conductor start system=%s" % system.as_dict()) - - def periodic_tasks(self, context, raise_on_error=False): - """Periodic tasks are run at pre-specified intervals. """ - return self.run_periodic_tasks(context, raise_on_error=raise_on_error) - - @periodics.periodic(spacing=CONF.conductor.audit_interval) - def _conductor_audit(self, context): - # periodically, perform audit of inventory - LOG.info("Inventory Conductor running periodic audit task.") - - system = self._populate_default_system(context) - LOG.info("Inventory Conductor from systemconfig system=%s" % - system.as_dict()) - - hosts = objects.Host.list(context) - - for host in hosts: - self._audit_install_states(host) - - if not host.personality: - continue - # audit of configured hosts - self._audit_host_action(host) - - LOG.debug("Inventory Conductor audited hosts=%s" % hosts) - - @staticmethod - def _create_volatile_dir(): - """Create the volatile directory required for inventory service""" - if not os.path.isdir(constants.INVENTORY_LOCK_PATH): - try: - uid = pwd.getpwnam(constants.INVENTORY_USERNAME).pw_uid - gid = grp.getgrnam(constants.INVENTORY_GRPNAME).gr_gid - os.makedirs(constants.INVENTORY_LOCK_PATH) - os.chown(constants.INVENTORY_LOCK_PATH, uid, gid) - LOG.info("Created directory=%s" % - constants.INVENTORY_LOCK_PATH) - except OSError as e: - LOG.exception("makedir %s OSError=%s encountered" % - (constants.INVENTORY_LOCK_PATH, e)) - pass - - def _populate_default_system(self, context): - """Populate the default system tables""" - - try: - system = self.dbapi.system_get_one() - # TODO(sc) return system # system already configured - except exception.NotFound: - pass # create default system - - # Get the system from systemconfig - system = self.sc_op.system_get_one() - LOG.info("system retrieved from systemconfig=%s" % system.as_dict()) - - if not system: - # The audit will need to populate system - return - - values = { - 'uuid': system.uuid, - 'name': system.name, - 'system_mode': system.system_mode, - 'region_name': system.region_name, - 'software_version': cutils.get_sw_version(), - 'capabilities': {}} - - try: - system = self.dbapi.system_create(values) - except exception.SystemAlreadyExists: - system = self.dbapi.system_update(system.uuid, values) - - return system - - def _using_static_ip(self, ihost, personality=None, hostname=None): - using_static = False - if ihost: - ipersonality = ihost['personality'] - ihostname = ihost['hostname'] or "" - else: - ipersonality = personality - ihostname = hostname or "" - - if ipersonality and ipersonality == k_host.CONTROLLER: - using_static = True - elif ipersonality and ipersonality == k_host.STORAGE: - # only storage-0 and storage-1 have static (later storage-2) - if (ihostname[:len(k_host.STORAGE_0_HOSTNAME)] in - [k_host.STORAGE_0_HOSTNAME, - k_host.STORAGE_1_HOSTNAME]): - using_static = True - - return using_static - - def handle_dhcp_lease(self, context, tags, mac, ip_address, cid=None): - """Synchronously, have a conductor handle a DHCP lease update. - - Handling depends on the interface: - - management interface: do nothing - - infrastructure interface: do nothing - - pxeboot interface: create i_host - - :param cid: - :param context: request context. 
- :param tags: specifies the interface type (mgmt or infra) - :param mac: MAC for the lease - :param ip_address: IP address for the lease - """ - - LOG.info("receiving dhcp_lease: %s %s %s %s %s" % - (context, tags, mac, ip_address, cid)) - # Get the first field from the tags - first_tag = tags.split()[0] - - if 'pxeboot' == first_tag: - mgmt_network = \ - self.sc_op.network_get_by_type( - constants.NETWORK_TYPE_MGMT) - if not mgmt_network.dynamic: - return - - # This is a DHCP lease for a node on the pxeboot network - # Create the ihost (if necessary). - ihost_dict = {'mgmt_mac': mac} - self.create_host(context, ihost_dict, reason='dhcp pxeboot') - - def handle_dhcp_lease_from_clone(self, context, mac): - """Handle dhcp request from a cloned controller-1. - If MAC address in DB is still set to well known - clone label, then this is the first boot of the - other controller. Real MAC address from PXE request - is updated in the DB. - """ - controller_hosts = \ - self.dbapi.host_get_by_personality(k_host.CONTROLLER) - for host in controller_hosts: - if (constants.CLONE_ISO_MAC in host.mgmt_mac and - host.personality == k_host.CONTROLLER and - host.administrative == k_host.ADMIN_LOCKED): - LOG.info("create_host (clone): Host found: {}:{}:{}->{}" - .format(host.hostname, host.personality, - host.mgmt_mac, mac)) - values = {'mgmt_mac': mac} - self.dbapi.host_update(host.uuid, values) - host.mgmt_mac = mac - self._configure_controller_host(context, host) - if host.personality and host.hostname: - ihost_mtc = host.as_dict() - ihost_mtc['operation'] = 'modify' - ihost_mtc = cutils.removekeys_nonmtce(ihost_mtc) - mtce_api.host_modify( - self._api_token, self._mtc_address, - self._mtc_port, ihost_mtc, - constants.MTC_DEFAULT_TIMEOUT_IN_SECS) - return host - return None - - def create_host(self, context, values, reason=None): - """Create an ihost with the supplied data. - - This method allows an ihost to be created. - - :param reason: - :param context: an admin context - :param values: initial values for new ihost object - :returns: updated ihost object, including all fields. - """ - - if 'mgmt_mac' not in values: - raise exception.InventoryException(_( - "Invalid method call: create_host requires mgmt_mac.")) - - try: - mgmt_update_required = False - mac = values['mgmt_mac'] - mac = mac.rstrip() - mac = cutils.validate_and_normalize_mac(mac) - ihost = self.dbapi.host_get_by_mgmt_mac(mac) - LOG.info("Not creating ihost for mac: %s because it " - "already exists with uuid: %s" % (values['mgmt_mac'], - ihost['uuid'])) - mgmt_ip = values.get('mgmt_ip') or "" - - if mgmt_ip and not ihost.mgmt_ip: - LOG.info("%s create_host setting mgmt_ip to %s" % - (ihost.uuid, mgmt_ip)) - mgmt_update_required = True - elif mgmt_ip and ihost.mgmt_ip and \ - (ihost.mgmt_ip.strip() != mgmt_ip.strip()): - # Changing the management IP on an already configured - # host should not occur nor be allowed. - LOG.error("DANGER %s create_host mgmt_ip dnsmasq change " - "detected from %s to %s." 
% - (ihost.uuid, ihost.mgmt_ip, mgmt_ip)) - - if mgmt_update_required: - ihost = self.dbapi.host_update(ihost.uuid, values) - - if ihost.personality and ihost.hostname: - ihost_mtc = ihost.as_dict() - ihost_mtc['operation'] = 'modify' - ihost_mtc = cutils.removekeys_nonmtce(ihost_mtc) - LOG.info("%s create_host update mtce %s " % - (ihost.hostname, ihost_mtc)) - mtce_api.host_modify( - self._api_token, self._mtc_address, self._mtc_port, - ihost_mtc, - constants.MTC_DEFAULT_TIMEOUT_IN_SECS) - - return ihost - except exception.HostNotFound: - # If host is not found, check if this is cloning scenario. - # If yes, update management MAC in the DB and create PXE config. - clone_host = self.handle_dhcp_lease_from_clone(context, mac) - if clone_host: - return clone_host - - # assign default system - system = self.dbapi.system_get_one() - values.update({'system_id': system.id}) - values.update({k_host.HOST_ACTION_STATE: - k_host.HAS_REINSTALLING}) - - # get tboot value from the active controller - active_controller = None - hosts = self.dbapi.host_get_by_personality(k_host.CONTROLLER) - for h in hosts: - if utils.is_host_active_controller(h): - active_controller = h - break - - if active_controller is not None: - tboot_value = active_controller.get('tboot') - if tboot_value is not None: - values.update({'tboot': tboot_value}) - - host = objects.Host(context, **values).create() - - # A host is being created, generate discovery log. - self._log_host_create(host, reason) - - ihost_id = host.get('uuid') - LOG.info("RPC create_host called and created ihost %s." % ihost_id) - - return host - - def update_host(self, context, ihost_obj): - """Update an ihost with the supplied data. - - This method allows an ihost to be updated. - - :param context: an admin context - :param ihost_obj: a changed (but not saved) ihost object - :returns: updated ihost object, including all fields. 
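-
-        Note that changes to 'id' or 'uuid' are rejected; all other changed
-        fields are persisted via ihost_obj.save().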
- """ - - delta = ihost_obj.obj_what_changed() - if ('id' in delta) or ('uuid' in delta): - raise exception.InventoryException(_( - "Invalid method call: update_host cannot change id or uuid ")) - - ihost_obj.save(context) - return ihost_obj - - def _dnsmasq_host_entry_to_string(self, ip_addr, hostname, - mac_addr=None, cid=None): - if IPNetwork(ip_addr).version == constants.IPV6_FAMILY: - ip_addr = "[%s]" % ip_addr - if cid: - line = "id:%s,%s,%s,1d\n" % (cid, hostname, ip_addr) - elif mac_addr: - line = "%s,%s,%s,1d\n" % (mac_addr, hostname, ip_addr) - else: - line = "%s,%s\n" % (hostname, ip_addr) - return line - - def _dnsmasq_addn_host_entry_to_string(self, ip_addr, hostname, - aliases=[]): - line = "%s %s" % (ip_addr, hostname) - for alias in aliases: - line = "%s %s" % (line, alias) - line = "%s\n" % line - return line - - def get_my_host_id(self): - if not ConductorManager.my_host_id: - local_hostname = socket.gethostname() - controller = self.dbapi.host_get(local_hostname) - ConductorManager.my_host_id = controller['id'] - return ConductorManager.my_host_id - - def get_dhcp_server_duid(self): - """Retrieves the server DUID from the local DHCP server lease file.""" - lease_filename = tsc.CONFIG_PATH + 'dnsmasq.leases' - with open(lease_filename, 'r') as lease_file: - for columns in (line.strip().split() for line in lease_file): - if len(columns) != 2: - continue - keyword, value = columns - if keyword.lower() == "duid": - return value - - def _dhcp_release(self, interface, ip_address, mac_address, cid=None): - """Release a given DHCP lease""" - params = [interface, ip_address, mac_address] - if cid: - params += [cid] - if IPAddress(ip_address).version == 6: - params = ["--ip", ip_address, - "--iface", interface, - "--server-id", self.get_dhcp_server_duid(), - "--client-id", cid, - "--iaid", str(cutils.get_dhcp_client_iaid(mac_address))] - LOG.warning("Invoking dhcp_release6 for {}".format(params)) - subprocess.call(["dhcp_release6"] + params) - else: - LOG.warning("Invoking dhcp_release for {}".format(params)) - subprocess.call(["dhcp_release"] + params) - - def _find_networktype_for_address(self, ip_address): - LOG.info("SC to be queried from systemconfig") - # TODO(sc) query from systemconfig - - def _find_local_interface_name(self, network_type): - """Lookup the local interface name for a given network type.""" - host_id = self.get_my_host_id() - interface_list = self.dbapi.iinterface_get_all(host_id, expunge=True) - ifaces = dict((i['ifname'], i) for i in interface_list) - port_list = self.dbapi.port_get_all(host_id) - ports = dict((p['interface_id'], p) for p in port_list) - for interface in interface_list: - if interface.networktype == network_type: - return cutils.get_interface_os_ifname(interface, ifaces, ports) - - def _find_local_mgmt_interface_vlan_id(self): - """Lookup the local interface name for a given network type.""" - host_id = self.get_my_host_id() - interface_list = self.dbapi.iinterface_get_all(host_id, expunge=True) - for interface in interface_list: - if interface.networktype == constants.NETWORK_TYPE_MGMT: - if 'vlan_id' not in interface: - return 0 - else: - return interface['vlan_id'] - - def _remove_leases_by_mac_address(self, mac_address): - """Remove any leases that were added without a CID that we were not - able to delete. This is specifically looking for leases on the pxeboot - network that may still be present but will also handle the unlikely - event of deleting an old host during an upgrade. 
Hosts on previous - releases did not register a CID on the mgmt interface. - """ - lease_filename = tsc.CONFIG_PATH + 'dnsmasq.leases' - try: - with open(lease_filename, 'r') as lease_file: - for columns in (line.strip().split() for line in lease_file): - if len(columns) != 5: - continue - timestamp, address, ip_address, hostname, cid = columns - if address != mac_address: - continue - network_type = self._find_networktype_for_address( - ip_address) - if not network_type: - # Not one of our managed networks - LOG.warning("Lease for unknown network found in " - "dnsmasq.leases file: {}".format(columns)) - continue - interface_name = self._find_local_interface_name( - network_type - ) - self._dhcp_release(interface_name, ip_address, mac_address) - except Exception as e: - LOG.error("Failed to remove leases for %s: %s" % (mac_address, - str(e))) - - def configure_host(self, context, host_obj, - do_compute_apply=False): - """Configure a host. - - :param context: an admin context - :param host_obj: the host object - :param do_compute_apply: configure the compute subfunctions of the host - """ - - LOG.info("rpc conductor configure_host %s" % host_obj.uuid) - - # Request systemconfig plugin to configure_host - sc_host = self.sc_op.host_configure( - host_uuid=host_obj.uuid, - do_compute_apply=do_compute_apply) - - LOG.info("sc_op sc_host=%s" % sc_host) - - if sc_host: - return sc_host.as_dict() - - def unconfigure_host(self, context, host_obj): - """Unconfigure a host. - - :param context: an admin context. - :param host_obj: a host object. - """ - LOG.info("unconfigure_host %s." % host_obj.uuid) - - # Request systemconfig plugin to unconfigure_host - self.sc_op.host_unconfigure(host_obj.uuid) - - def port_update_by_host(self, context, - host_uuid, inic_dict_array): - """Create iports for an ihost with the supplied data. - - This method allows records for iports for ihost to be created. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param inic_dict_array: initial values for iport objects - :returns: pass or fail - """ - - LOG.debug("Entering port_update_by_host %s %s" % - (host_uuid, inic_dict_array)) - host_uuid.strip() - try: - ihost = self.dbapi.host_get(host_uuid) - except exception.HostNotFound: - LOG.exception("Invalid host_uuid %s" % host_uuid) - return - - for inic in inic_dict_array: - LOG.info("Processing inic %s" % inic) - bootp = None - port = None - # ignore port if no MAC address present, this will - # occur for data port after they are configured via DPDK driver - if not inic['mac']: - continue - - try: - inic_dict = {'host_id': ihost['id']} - inic_dict.update(inic) - if cutils.is_valid_mac(inic['mac']): - # Is this the port that the management interface is on? 
- if inic['mac'].strip() == ihost['mgmt_mac'].strip(): - # SKIP auto create management/pxeboot network - # was for all nodes but the active controller - bootp = 'True' - inic_dict.update({'bootp': bootp}) - - try: - LOG.debug("Attempting to create new port %s on host %s" % - (inic_dict, ihost['id'])) - - port = self.dbapi.ethernet_port_get_by_mac(inic['mac']) - # update existing port with updated attributes - try: - port_dict = { - 'sriov_totalvfs': inic['sriov_totalvfs'], - 'sriov_numvfs': inic['sriov_numvfs'], - 'sriov_vfs_pci_address': - inic['sriov_vfs_pci_address'], - 'driver': inic['driver'], - 'dpdksupport': inic['dpdksupport'], - 'speed': inic['speed'], - } - - LOG.info("port %s update attr: %s" % - (port.uuid, port_dict)) - self.dbapi.ethernet_port_update(port.uuid, port_dict) - except Exception: - LOG.exception("Failed to update port %s" % inic['mac']) - pass - - except Exception: - # adjust for field naming differences between the NIC - # dictionary returned by the agent and the Port model - port_dict = inic_dict.copy() - port_dict['name'] = port_dict.pop('pname', None) - port_dict['namedisplay'] = port_dict.pop('pnamedisplay', - None) - - LOG.info("Attempting to create new port %s " - "on host %s" % (inic_dict, ihost.uuid)) - port = self.dbapi.ethernet_port_create( - ihost.uuid, port_dict) - - except exception.HostNotFound: - raise exception.InventoryException( - _("Invalid host_uuid: host not found: %s") % - host_uuid) - - except Exception: - pass - - if ihost.invprovision not in [k_host.PROVISIONED, - k_host.PROVISIONING]: - value = {'invprovision': k_host.UNPROVISIONED} - self.dbapi.host_update(host_uuid, value) - - def lldp_tlv_dict(self, agent_neighbour_dict): - tlv_dict = {} - for k, v in agent_neighbour_dict.iteritems(): - if v is not None and k in k_lldp.LLDP_TLV_VALID_LIST: - tlv_dict.update({k: v}) - return tlv_dict - - def lldp_agent_tlv_update(self, tlv_dict, agent): - tlv_update_list = [] - tlv_create_list = [] - agent_id = agent['id'] - agent_uuid = agent['uuid'] - - tlvs = self.dbapi.lldp_tlv_get_by_agent(agent_uuid) - for k, v in tlv_dict.iteritems(): - for tlv in tlvs: - if tlv['type'] == k: - tlv_value = tlv_dict.get(tlv['type']) - entry = {'type': tlv['type'], - 'value': tlv_value} - if tlv['value'] != tlv_value: - tlv_update_list.append(entry) - break - else: - tlv_create_list.append({'type': k, - 'value': v}) - - if tlv_update_list: - try: - tlvs = self.dbapi.lldp_tlv_update_bulk(tlv_update_list, - agentid=agent_id) - except Exception as e: - LOG.exception("Error during bulk TLV update for agent %s: %s", - agent_id, str(e)) - raise - if tlv_create_list: - try: - self.dbapi.lldp_tlv_create_bulk(tlv_create_list, - agentid=agent_id) - except Exception as e: - LOG.exception("Error during bulk TLV create for agent %s: %s", - agent_id, str(e)) - raise - - def lldp_neighbour_tlv_update(self, tlv_dict, neighbour): - tlv_update_list = [] - tlv_create_list = [] - neighbour_id = neighbour['id'] - neighbour_uuid = neighbour['uuid'] - - tlvs = self.dbapi.lldp_tlv_get_by_neighbour(neighbour_uuid) - for k, v in tlv_dict.iteritems(): - for tlv in tlvs: - if tlv['type'] == k: - tlv_value = tlv_dict.get(tlv['type']) - entry = {'type': tlv['type'], - 'value': tlv_value} - if tlv['value'] != tlv_value: - tlv_update_list.append(entry) - break - else: - tlv_create_list.append({'type': k, - 'value': v}) - - if tlv_update_list: - try: - tlvs = self.dbapi.lldp_tlv_update_bulk( - tlv_update_list, - neighbourid=neighbour_id) - except Exception as e: - LOG.exception("Error during bulk 
TLV update for neighbour"
-                              " %s: %s", neighbour_id, str(e))
-                raise
-        if tlv_create_list:
-            try:
-                self.dbapi.lldp_tlv_create_bulk(tlv_create_list,
-                                                neighbourid=neighbour_id)
-            except Exception as e:
-                LOG.exception("Error during bulk TLV create for neighbour"
-                              " %s: %s",
-                              neighbour_id, str(e))
-                raise
-
-    def lldp_agent_update_by_host(self, context,
-                                  host_uuid, agent_dict_array):
-        """Create or update lldp agents for a host with the supplied data.
-
-        This method allows records for lldp agents for ihost to be created or
-        updated.
-
-        :param context: an admin context
-        :param host_uuid: host uuid unique id
-        :param agent_dict_array: initial values for lldp agent objects
-        :returns: pass or fail
-        """
-        LOG.debug("Entering lldp_agent_update_by_host %s %s" %
-                  (host_uuid, agent_dict_array))
-        host_uuid.strip()
-        try:
-            db_host = self.dbapi.host_get(host_uuid)
-        except exception.HostNotFound:
-            raise exception.InventoryException(_(
-                "Invalid host_uuid: %s") % host_uuid)
-
-        try:
-            db_ports = self.dbapi.port_get_by_host(host_uuid)
-        except Exception:
-            raise exception.InventoryException(_(
-                "Error getting ports for host %s") % host_uuid)
-
-        try:
-            db_agents = self.dbapi.lldp_agent_get_by_host(host_uuid)
-        except Exception:
-            raise exception.InventoryException(_(
-                "Error getting LLDP agents for host %s") % host_uuid)
-
-        for agent in agent_dict_array:
-            port_found = None
-            for db_port in db_ports:
-                if (db_port['name'] == agent['name_or_uuid'] or
-                        db_port['uuid'] == agent['name_or_uuid']):
-                    port_found = db_port
-                    break
-
-            if not port_found:
-                LOG.debug("Could not find port for agent %s",
-                          agent['name_or_uuid'])
-                return
-
-            hostid = db_host['id']
-            portid = db_port['id']
-
-            agent_found = None
-            for db_agent in db_agents:
-                if db_agent['port_id'] == portid:
-                    agent_found = db_agent
-                    break
-
-            LOG.debug("Processing agent %s" % agent)
-
-            agent_dict = {'host_id': hostid,
-                          'port_id': portid,
-                          'status': agent['status']}
-            update_tlv = False
-            try:
-                if not agent_found:
-                    LOG.info("Attempting to create new LLDP agent "
-                             "%s on host %s" % (agent_dict, hostid))
-                    if agent['state'] != k_lldp.LLDP_AGENT_STATE_REMOVED:
-                        db_agent = self.dbapi.lldp_agent_create(portid,
-                                                                hostid,
-                                                                agent_dict)
-                        update_tlv = True
-                else:
-                    # If the agent exists, try to update some of the fields
-                    # or remove it
-                    agent_uuid = db_agent['uuid']
-                    if agent['state'] == k_lldp.LLDP_AGENT_STATE_REMOVED:
-                        db_agent = self.dbapi.lldp_agent_destroy(agent_uuid)
-                    else:
-                        attr = {'status': agent['status'],
-                                'system_name': agent['system_name']}
-                        db_agent = self.dbapi.lldp_agent_update(agent_uuid,
-                                                                attr)
-                        update_tlv = True
-
-                if update_tlv:
-                    tlv_dict = self.lldp_tlv_dict(agent)
-                    self.lldp_agent_tlv_update(tlv_dict, db_agent)
-
-            except exception.InvalidParameterValue:
-                raise exception.InventoryException(_(
-                    "Failed to update/delete non-existing "
-                    "lldp agent %s") % agent_uuid)
-            except exception.LLDPAgentExists:
-                raise exception.InventoryException(_(
-                    "Failed to add LLDP agent %s. 
" - "Already exists") % agent_uuid) - except exception.HostNotFound: - raise exception.InventoryException(_( - "Invalid host_uuid: host not found: %s") % - host_uuid) - except exception.PortNotFound: - raise exception.InventoryException(_( - "Invalid port id: port not found: %s") % - portid) - except Exception as e: - raise exception.InventoryException(_( - "Failed to update lldp agent: %s") % e) - - def lldp_neighbour_update_by_host(self, context, - host_uuid, neighbour_dict_array): - """Create or update lldp neighbours for an ihost with the supplied data. - - This method allows records for lldp neighbours for ihost to be created - or updated. - - :param context: an admin context - :param host_uuid: host uuid unique id - :param neighbour_dict_array: initial values for lldp neighbour objects - :returns: pass or fail - """ - LOG.debug("Entering lldp_neighbour_update_by_host %s %s" % - (host_uuid, neighbour_dict_array)) - host_uuid.strip() - try: - db_host = self.dbapi.host_get(host_uuid) - except Exception: - raise exception.InventoryException(_( - "Invalid host_uuid: %s") % host_uuid) - - try: - db_ports = self.dbapi.port_get_by_host(host_uuid) - except Exception: - raise exception.InventoryException(_( - "Error getting ports for host %s") % host_uuid) - - try: - db_neighbours = self.dbapi.lldp_neighbour_get_by_host(host_uuid) - except Exception: - raise exception.InventoryException(_( - "Error getting LLDP neighbours for host %s") % host_uuid) - - reported = set([(d['msap']) for d in neighbour_dict_array]) - stale = [d for d in db_neighbours if (d['msap']) not in reported] - for neighbour in stale: - db_neighbour = self.dbapi.lldp_neighbour_destroy( - neighbour['uuid']) - - for neighbour in neighbour_dict_array: - port_found = None - for db_port in db_ports: - if (db_port['name'] == neighbour['name_or_uuid'] or - db_port['uuid'] == neighbour['name_or_uuid']): - port_found = db_port - break - - if not port_found: - LOG.debug("Could not find port for neighbour %s", - neighbour['name']) - return - - LOG.debug("Processing lldp neighbour %s" % neighbour) - - hostid = db_host['id'] - portid = db_port['id'] - msap = neighbour['msap'] - state = neighbour['state'] - - neighbour_dict = {'host_id': hostid, - 'port_id': portid, - 'msap': msap} - - neighbour_found = False - for db_neighbour in db_neighbours: - if db_neighbour['msap'] == msap: - neighbour_found = db_neighbour - break - - update_tlv = False - try: - if not neighbour_found: - LOG.info("Attempting to create new lldp neighbour " - "%r on host %s" % (neighbour_dict, hostid)) - db_neighbour = self.dbapi.lldp_neighbour_create( - portid, hostid, neighbour_dict) - update_tlv = True - else: - # If the neighbour exists, remove it if requested by - # the agent. Otherwise, trigger a TLV update. There - # are currently no neighbour attributes that need to - # be updated. - if state == k_lldp.LLDP_NEIGHBOUR_STATE_REMOVED: - db_neighbour = self.dbapi.lldp_neighbour_destroy( - db_neighbour['uuid']) - else: - update_tlv = True - if update_tlv: - tlv_dict = self.lldp_tlv_dict(neighbour) - self.lldp_neighbour_tlv_update(tlv_dict, - db_neighbour) - except exception.InvalidParameterValue: - raise exception.InventoryException(_( - "Failed to update/delete lldp neighbour. " - "Invalid parameter: %r") % tlv_dict) - except exception.LLDPNeighbourExists: - raise exception.InventoryException(_( - "Failed to add lldp neighbour %r. 
" - "Already exists") % neighbour_dict) - except exception.HostNotFound: - raise exception.InventoryException(_( - "Invalid host_uuid: host not found: %s") % - host_uuid) - except exception.PortNotFound: - raise exception.InventoryException( - _("Invalid port id: port not found: %s") % - portid) - except Exception as e: - raise exception.InventoryException(_( - "Couldn't update LLDP neighbour: %s") % e) - - def pci_device_update_by_host(self, context, - host_uuid, pci_device_dict_array): - """Create devices for an ihost with the supplied data. - - This method allows records for devices for ihost to be created. - - :param context: an admin context - :param host_uuid: host uuid unique id - :param pci_device_dict_array: initial values for device objects - :returns: pass or fail - """ - LOG.debug("Entering device_update_by_host %s %s" % - (host_uuid, pci_device_dict_array)) - host_uuid.strip() - try: - host = self.dbapi.host_get(host_uuid) - except exception.HostNotFound: - LOG.exception("Invalid host_uuid %s" % host_uuid) - return - for pci_dev in pci_device_dict_array: - LOG.debug("Processing dev %s" % pci_dev) - try: - pci_dev_dict = {'host_id': host['id']} - pci_dev_dict.update(pci_dev) - dev_found = None - try: - dev = self.dbapi.pci_device_get(pci_dev['pciaddr'], - hostid=host['id']) - dev_found = dev - if not dev: - LOG.info("Attempting to create new device " - "%s on host %s" % (pci_dev_dict, host['id'])) - dev = self.dbapi.pci_device_create(host['id'], - pci_dev_dict) - except Exception: - LOG.info("Attempting to create new device " - "%s on host %s" % (pci_dev_dict, host['id'])) - dev = self.dbapi.pci_device_create(host['id'], - pci_dev_dict) - - # If the device exists, try to update some of the fields - if dev_found: - try: - attr = { - 'pclass_id': pci_dev['pclass_id'], - 'pvendor_id': pci_dev['pvendor_id'], - 'pdevice_id': pci_dev['pdevice_id'], - 'pclass': pci_dev['pclass'], - 'pvendor': pci_dev['pvendor'], - 'psvendor': pci_dev['psvendor'], - 'psdevice': pci_dev['psdevice'], - 'sriov_totalvfs': pci_dev['sriov_totalvfs'], - 'sriov_numvfs': pci_dev['sriov_numvfs'], - 'sriov_vfs_pci_address': - pci_dev['sriov_vfs_pci_address'], - 'driver': pci_dev['driver']} - LOG.info("attr: %s" % attr) - dev = self.dbapi.pci_device_update(dev['uuid'], attr) - except Exception: - LOG.exception("Failed to update port %s" % - dev['pciaddr']) - pass - - except exception.HostNotFound: - raise exception.InventoryException( - _("Invalid host_uuid: host not found: %s") % - host_uuid) - except Exception: - pass - - def numas_update_by_host(self, context, - host_uuid, inuma_dict_array): - """Create inumas for an ihost with the supplied data. - - This method allows records for inumas for ihost to be created. - Updates the port node_id once its available. 
- - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param inuma_dict_array: initial values for inuma objects - :returns: pass or fail - """ - - host_uuid.strip() - try: - ihost = self.dbapi.host_get(host_uuid) - except exception.HostNotFound: - LOG.exception("Invalid host_uuid %s" % host_uuid) - return - - try: - # Get host numa nodes which may already be in db - mynumas = self.dbapi.node_get_by_host(host_uuid) - except exception.HostNotFound: - raise exception.InventoryException(_( - "Invalid host_uuid: host not found: %s") % host_uuid) - - mynuma_nodes = [n.numa_node for n in mynumas] - - # perform update for ports - ports = self.dbapi.ethernet_port_get_by_host(host_uuid) - for i in inuma_dict_array: - if 'numa_node' in i and i['numa_node'] in mynuma_nodes: - LOG.info("Already in db numa_node=%s mynuma_nodes=%s" % - (i['numa_node'], mynuma_nodes)) - continue - - try: - inuma_dict = {'host_id': ihost['id']} - - inuma_dict.update(i) - - inuma = self.dbapi.node_create(ihost['id'], inuma_dict) - - for port in ports: - port_node = port['numa_node'] - if port_node == -1: - port_node = 0 # special handling - - if port_node == inuma['numa_node']: - attr = {'node_id': inuma['id']} - self.dbapi.ethernet_port_update(port['uuid'], attr) - - except exception.HostNotFound: - raise exception.InventoryException( - _("Invalid host_uuid: host not found: %s") % - host_uuid) - except Exception: # this info may have been posted previously - pass - - def _get_default_platform_cpu_count(self, ihost, node, - cpu_count, hyperthreading): - """Return the initial number of reserved logical cores for platform - use. This can be overridden later by the end user. - """ - cpus = 0 - if cutils.host_has_function(ihost, k_host.COMPUTE) and node == 0: - cpus += 1 if not hyperthreading else 2 - if cutils.host_has_function(ihost, k_host.CONTROLLER): - cpus += 1 if not hyperthreading else 2 - return cpus - - def _get_default_vswitch_cpu_count(self, ihost, node, - cpu_count, hyperthreading): - """Return the initial number of reserved logical cores for vswitch use. - This can be overridden later by the end user. - """ - if cutils.host_has_function(ihost, k_host.COMPUTE) and node == 0: - physical_cores = (cpu_count / 2) if hyperthreading else cpu_count - system_mode = self.dbapi.system_get_one().system_mode - if system_mode == constants.SYSTEM_MODE_SIMPLEX: - return 1 if not hyperthreading else 2 - else: - if physical_cores > 4: - return 2 if not hyperthreading else 4 - elif physical_cores > 1: - return 1 if not hyperthreading else 2 - return 0 - - def _get_default_shared_cpu_count(self, ihost, node, - cpu_count, hyperthreading): - """Return the initial number of reserved logical cores for shared - use. This can be overridden later by the end user. - """ - return 0 - - def _sort_by_socket_and_coreid(self, icpu_dict): - """Sort a list of cpu dict objects such that lower numbered sockets - appear first and that threads of the same core are adjacent in the - list with the lowest thread number appearing first. 
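-
-        Intended for use as the key function of sorted(); it returns the
-        (numa_node, core, thread) tuple for each cpu dict.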
- """ - return int(icpu_dict['numa_node']), int(icpu_dict['core']), int(icpu_dict['thread']) # noqa - - def _get_hyperthreading_enabled(self, cpu_list): - """Determine if hyperthreading is enabled based on whether any threads - exist with a threadId greater than 0 - """ - for cpu in cpu_list: - if int(cpu['thread']) > 0: - return True - return False - - def _get_node_cpu_count(self, cpu_list, node): - count = 0 - for cpu in cpu_list: - count += 1 if int(cpu['numa_node']) == node else 0 - return count - - def _get_default_cpu_functions(self, host, node, cpu_list, hyperthreading): - """Return the default list of CPU functions to be reserved for this - host on the specified numa node. - """ - functions = [] - cpu_count = self._get_node_cpu_count(cpu_list, node) - # Determine how many platform cpus need to be reserved - count = self._get_default_platform_cpu_count( - host, node, cpu_count, hyperthreading) - for i in range(0, count): - functions.append(constants.PLATFORM_FUNCTION) - # Determine how many vswitch cpus need to be reserved - count = self._get_default_vswitch_cpu_count( - host, node, cpu_count, hyperthreading) - for i in range(0, count): - functions.append(constants.VSWITCH_FUNCTION) - # Determine how many shared cpus need to be reserved - count = self._get_default_shared_cpu_count( - host, node, cpu_count, hyperthreading) - for i in range(0, count): - functions.append(constants.SHARED_FUNCTION) - # Assign the default function to the remaining cpus - for i in range(0, (cpu_count - len(functions))): - functions.append(cpu_utils.get_default_function(host)) - return functions - - def print_cpu_topology(self, hostname=None, subfunctions=None, - reference=None, - sockets=None, cores=None, threads=None): - """Print logical cpu topology table (for debug reasons). - - :param hostname: hostname - :param subfunctions: subfunctions - :param reference: reference label - :param sockets: dictionary of socket_ids, sockets[cpu_id] - :param cores: dictionary of core_ids, cores[cpu_id] - :param threads: dictionary of thread_ids, threads[cpu_id] - :returns: None - """ - if sockets is None or cores is None or threads is None: - LOG.error("print_cpu_topology: topology not defined. " - "sockets=%s, cores=%s, threads=%s" - % (sockets, cores, threads)) - return - - # calculate overall cpu topology stats - n_sockets = len(set(sockets.values())) - n_cores = len(set(cores.values())) - n_threads = len(set(threads.values())) - if n_sockets < 1 or n_cores < 1 or n_threads < 1: - LOG.error("print_cpu_topology: unexpected topology. " - "n_sockets=%d, n_cores=%d, n_threads=%d" - % (n_sockets, n_cores, n_threads)) - return - - # build each line of output - ll = '' - s = '' - c = '' - t = '' - for cpu in sorted(cores.keys()): - ll += '%3d' % cpu - s += '%3d' % sockets[cpu] - c += '%3d' % cores[cpu] - t += '%3d' % threads[cpu] - - LOG.info('Logical CPU topology: host:%s (%s), ' - 'sockets:%d, cores/socket=%d, threads/core=%d, reference:%s' - % (hostname, subfunctions, n_sockets, n_cores, n_threads, - reference)) - LOG.info('%9s : %s' % ('cpu_id', ll)) - LOG.info('%9s : %s' % ('socket_id', s)) - LOG.info('%9s : %s' % ('core_id', c)) - LOG.info('%9s : %s' % ('thread_id', t)) - - def update_cpu_config(self, context, host_uuid): - LOG.info("TODO send to systemconfig update_cpu_config") - - def cpus_update_by_host(self, context, - host_uuid, icpu_dict_array, - force_grub_update=False): - """Create cpus for an ihost with the supplied data. - - This method allows records for cpus for ihost to be created. 
- - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param icpu_dict_array: initial values for cpu objects - :param force_grub_update: bool value to force grub update - :returns: pass or fail - """ - - host_uuid.strip() - try: - ihost = self.dbapi.host_get(host_uuid) - except exception.HostNotFound: - LOG.exception("Invalid host_uuid %s" % host_uuid) - return - - host_id = ihost['id'] - ihost_inodes = self.dbapi.node_get_by_host(host_uuid) - - icpus = self.dbapi.cpu_get_by_host(host_uuid) - - num_cpus_dict = len(icpu_dict_array) - num_cpus_db = len(icpus) - - # Capture 'current' topology in dictionary format - cs = {} - cc = {} - ct = {} - if num_cpus_dict > 0: - for icpu in icpu_dict_array: - cpu_id = icpu.get('cpu') - cs[cpu_id] = icpu.get('numa_node') - cc[cpu_id] = icpu.get('core') - ct[cpu_id] = icpu.get('thread') - - # Capture 'previous' topology in dictionary format - ps = {} - pc = {} - pt = {} - if num_cpus_db > 0: - for icpu in icpus: - cpu_id = icpu.get('cpu') - core_id = icpu.get('core') - thread_id = icpu.get('thread') - node_id = icpu.get('node_id') - socket_id = None - for inode in ihost_inodes: - if node_id == inode.get('id'): - socket_id = inode.get('numa_node') - break - ps[cpu_id] = socket_id - pc[cpu_id] = core_id - pt[cpu_id] = thread_id - - if num_cpus_dict > 0 and num_cpus_db == 0: - self.print_cpu_topology(hostname=ihost.get('hostname'), - subfunctions=ihost.get('subfunctions'), - reference='current (initial)', - sockets=cs, cores=cc, threads=ct) - - if num_cpus_dict > 0 and num_cpus_db > 0: - LOG.debug("num_cpus_dict=%d num_cpus_db= %d. " - "icpud_dict_array= %s cpus.as_dict= %s" % - (num_cpus_dict, num_cpus_db, icpu_dict_array, icpus)) - - # Skip update if topology has not changed - if ps == cs and pc == cc and pt == ct: - self.print_cpu_topology(hostname=ihost.get('hostname'), - subfunctions=ihost.get('subfunctions'), - reference='current (unchanged)', - sockets=cs, cores=cc, threads=ct) - if ihost.administrative == k_host.ADMIN_LOCKED and \ - force_grub_update: - self.update_cpu_config(context, host_uuid) - return - - self.print_cpu_topology(hostname=ihost.get('hostname'), - subfunctions=ihost.get('subfunctions'), - reference='previous', - sockets=ps, cores=pc, threads=pt) - self.print_cpu_topology(hostname=ihost.get('hostname'), - subfunctions=ihost.get('subfunctions'), - reference='current (CHANGED)', - sockets=cs, cores=cc, threads=ct) - - # there has been an update. Delete db entries and replace. 
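-        # (Re-creating the rows also re-derives the default platform/
-        # vswitch/shared function assignment for each logical cpu.)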
-        for icpu in icpus:
-            self.dbapi.cpu_destroy(icpu.uuid)
-
-        # sort the list of cpus by socket and coreid
-        cpu_list = sorted(icpu_dict_array, key=self._sort_by_socket_and_coreid)
-
-        # determine if hyperthreading is enabled
-        hyperthreading = self._get_hyperthreading_enabled(cpu_list)
-
-        # build the list of functions to be assigned to each cpu
-        functions = {}
-        for n in ihost_inodes:
-            numa_node = int(n.numa_node)
-            functions[numa_node] = self._get_default_cpu_functions(
-                ihost, numa_node, cpu_list, hyperthreading)
-
-        for data in cpu_list:
-            try:
-                node_id = None
-                for n in ihost_inodes:
-                    numa_node = int(n.numa_node)
-                    if numa_node == int(data['numa_node']):
-                        node_id = n['id']
-                        break
-
-                cpu_dict = {'host_id': host_id,
-                            'node_id': node_id,
-                            'allocated_function': functions[numa_node].pop(0)}
-
-                cpu_dict.update(data)
-
-                self.dbapi.cpu_create(host_id, cpu_dict)
-
-            except exception.HostNotFound:
-                raise exception.InventoryException(
-                    _("Invalid host_uuid: host not found: %s") %
-                    host_uuid)
-            except Exception:
-                # info may have already been posted
-                pass
-
-        # if it is the first controller wait for the initial config to
-        # be completed
-        if ((utils.is_host_simplex_controller(ihost) and
-                os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG)) or
-                (not utils.is_host_simplex_controller(ihost) and
-                 ihost.administrative == k_host.ADMIN_LOCKED)):
-            LOG.info("Update CPU grub config, host_uuid (%s), name (%s)"
-                     % (host_uuid, ihost.get('hostname')))
-            self.update_cpu_config(context, host_uuid)
-
-        return
-
-    def _get_platform_reserved_memory(self, ihost, node):
-        low_core = cutils.is_low_core_system(ihost, self.dbapi)
-        reserved = cutils.get_required_platform_reserved_memory(
-            ihost, node, low_core)
-        return {'platform_reserved_mib': reserved} if reserved else {}
-
-    def memory_update_by_host(self, context,
-                              host_uuid, imemory_dict_array,
-                              force_update):
-        """Create or update memory for a host with the supplied data.
-
-        This method allows records for memory for host to be created,
-        or updated.
-
-        :param context: an admin context
-        :param host_uuid: ihost uuid unique id
-        :param imemory_dict_array: initial values for memory objects
-        :param force_update: force host memory update
-        :returns: pass or fail
-        """
-
-        host_uuid.strip()
-        try:
-            ihost = self.dbapi.host_get(host_uuid)
-        except exception.ServerNotFound:
-            LOG.exception("Invalid host_uuid %s" % host_uuid)
-            return
-
-        if ihost['administrative'] == k_host.ADMIN_LOCKED and \
-                ihost['invprovision'] == k_host.PROVISIONED and \
-                not force_update:
-            LOG.debug("Ignore the host memory audit after the host is locked")
-            return
-
-        forihostid = ihost['id']
-        ihost_inodes = self.dbapi.node_get_by_host(host_uuid)
-
-        for i in imemory_dict_array:
-            forinodeid = None
-            inode_uuid = None
-            for n in ihost_inodes:
-                numa_node = int(n.numa_node)
-                if numa_node == int(i['numa_node']):
-                    forinodeid = n['id']
-                    inode_uuid = n['uuid']
-                    inode_uuid.strip()
-                    break
-            else:
-                # not found in host_nodes, do not add memory element
-                continue
-
-            mem_dict = {'forihostid': forihostid,
-                        'forinodeid': forinodeid}
-
-            mem_dict.update(i)
-
-            # Do not allow updates to the amounts of reserved memory.
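-            # (platform_reserved_mib is only seeded when a memory record is
-            # first created, via _get_platform_reserved_memory() below.)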
-            mem_dict.pop('platform_reserved_mib', None)
-
-            # numa_node is not stored against imemory table
-            mem_dict.pop('numa_node', None)
-
-            # clear the pending hugepage number for unlocked nodes
-            if ihost.administrative == k_host.ADMIN_UNLOCKED:
-                mem_dict['vm_hugepages_nr_2M_pending'] = None
-                mem_dict['vm_hugepages_nr_1G_pending'] = None
-
-            try:
-                imems = self.dbapi.memory_get_by_host_node(host_uuid,
-                                                           inode_uuid)
-                if not imems:
-                    # Set the amount of memory reserved for platform use.
-                    mem_dict.update(self._get_platform_reserved_memory(
-                        ihost, i['numa_node']))
-                    self.dbapi.memory_create(forihostid, mem_dict)
-                else:
-                    for imem in imems:
-                        # Include 4K pages in the displayed VM memtotal
-                        if imem.vm_hugepages_nr_4K is not None:
-                            vm_4K_mib = \
-                                (imem.vm_hugepages_nr_4K /
-                                 constants.NUM_4K_PER_MiB)
-                            mem_dict['memtotal_mib'] += vm_4K_mib
-                            mem_dict['memavail_mib'] += vm_4K_mib
-                        self.dbapi.memory_update(imem['uuid'],
-                                                 mem_dict)
-            except Exception:
-                # Set the amount of memory reserved for platform use.
-                mem_dict.update(self._get_platform_reserved_memory(
-                    ihost, i['numa_node']))
-                self.dbapi.memory_create(forihostid, mem_dict)
-                pass
-
-        return
-
-    def _get_disk_available_mib(self, disk, agent_disk_dict):
-        partitions = self.dbapi.partition_get_by_idisk(disk['uuid'])
-
-        if not partitions:
-            LOG.debug("Disk %s has no partitions" % disk.uuid)
-            return agent_disk_dict['available_mib']
-
-        available_mib = agent_disk_dict['available_mib']
-        for part in partitions:
-            if (part.status in
-                    [constants.PARTITION_CREATE_IN_SVC_STATUS,
-                     constants.PARTITION_CREATE_ON_UNLOCK_STATUS]):
-                available_mib = available_mib - part.size_mib
-
-        LOG.debug("Disk available mib host - %s disk - %s av - %s" %
-                  (disk.host_id, disk.device_node, available_mib))
-        return available_mib
-
-    def platform_update_by_host(self, context,
-                                host_uuid, imsg_dict):
-        """Update platform inventory state for a host with the supplied data.
-
-        This method allows the host action state and iSCSI initiator
-        name records for a host to be updated.
-
-        This method is invoked on initialization once. Note, swact also
-        results in restart, but not of inventory-agent?
- - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param imsg_dict: inventory message - :returns: pass or fail - """ - - host_uuid.strip() - try: - ihost = self.dbapi.host_get(host_uuid) - except exception.HostNotFound: - LOG.exception("Invalid host_uuid %s" % host_uuid) - return - - availability = imsg_dict.get('availability') - - val = {} - action_state = imsg_dict.get(k_host.HOST_ACTION_STATE) - if action_state and action_state != ihost.action_state: - LOG.info("%s updating action_state=%s" % (ihost.hostname, - action_state)) - val[k_host.HOST_ACTION_STATE] = action_state - - iscsi_initiator_name = imsg_dict.get('iscsi_initiator_name') - if (iscsi_initiator_name and - ihost.iscsi_initiator_name is None): - LOG.info("%s updating iscsi initiator=%s" % - (ihost.hostname, iscsi_initiator_name)) - val['iscsi_initiator_name'] = iscsi_initiator_name - - if val: - ihost = self.dbapi.host_update(host_uuid, val) - - if not availability: - return - - if cutils.host_has_function(ihost, k_host.COMPUTE): - if availability == k_host.VIM_SERVICES_ENABLED: - # TODO(sc) report to systemconfig platform available, it will - # independently also update with config applied - LOG.info("Need report to systemconfig iplatform available " - "for ihost=%s imsg=%s" - % (host_uuid, imsg_dict)) - elif availability == k_host.AVAILABILITY_OFFLINE: - # TODO(sc) report to systemconfig platform AVAILABILITY_OFFLINE - LOG.info("Need report iplatform not available for host=%s " - "imsg= %s" % (host_uuid, imsg_dict)) - - if ((ihost.personality == k_host.STORAGE and - ihost.hostname == k_host.STORAGE_0_HOSTNAME) or - (ihost.personality == k_host.CONTROLLER)): - # TODO(sc) report to systemconfig platform available - LOG.info("TODO report to systemconfig platform available") - - def subfunctions_update_by_host(self, context, - host_uuid, subfunctions): - """Update subfunctions for a host. - - This method allows records for subfunctions to be updated. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param subfunctions: subfunctions provided by the ihost - :returns: pass or fail - """ - host_uuid.strip() - - # Create the host entry in neutron to allow for data interfaces to - # be configured on a combined node - if (k_host.CONTROLLER in subfunctions and - k_host.COMPUTE in subfunctions): - try: - ihost = self.dbapi.host_get(host_uuid) - except exception.HostNotFound: - LOG.exception("Invalid host_uuid %s" % host_uuid) - return - - try: - neutron_host_id = \ - self._openstack.get_neutron_host_id_by_name( - context, ihost['hostname']) - if not neutron_host_id: - self._openstack.create_neutron_host(context, - host_uuid, - ihost['hostname']) - elif neutron_host_id != host_uuid: - self._openstack.delete_neutron_host(context, - neutron_host_id) - self._openstack.create_neutron_host(context, - host_uuid, - ihost['hostname']) - except Exception: # TODO(sc) Needs better exception - LOG.exception("Failed in neutron stuff") - - ihost_val = {'subfunctions': subfunctions} - self.dbapi.host_update(host_uuid, ihost_val) - - def get_host_by_macs(self, context, host_macs): - """Finds ihost db entry based upon the mac list - - This method returns an ihost if it matches a mac - - :param context: an admin context - :param host_macs: list of mac addresses - :returns: ihost object, including all fields. 
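-
-        Returns None when no host matches any of the supplied MACs.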
- """ - - ihosts = objects.Host.list(context) - - LOG.debug("Checking ihost db for macs: %s" % host_macs) - for mac in host_macs: - try: - mac = mac.rstrip() - mac = cutils.validate_and_normalize_mac(mac) - except Exception: - LOG.warn("get_host_by_macs invalid mac: %s" % mac) - continue - - for host in ihosts: - if host.mgmt_mac == mac: - LOG.info("Host found ihost db for macs: %s" % - host.hostname) - return host - LOG.debug("RPC get_host_by_macs called but found no ihost.") - - def get_host_by_hostname(self, context, hostname): - """Finds host db entry based upon the host hostname - - This method returns a host if it matches the host - hostname. - - :param context: an admin context - :param hostname: host hostname - :returns: host object, including all fields. - """ - - try: - return objects.Host.get_by_filters_one(context, - {'hostname': hostname}) - except exception.HostNotFound: - pass - - LOG.info("RPC host_get_by_hostname called but found no host.") - - def _audit_host_action(self, host): - """Audit whether the host_action needs to be terminated or escalated. - """ - - if host.administrative == k_host.ADMIN_UNLOCKED: - host_action_str = host.host_action or "" - - if (host_action_str.startswith(k_host.ACTION_FORCE_LOCK) or - host_action_str.startswith(k_host.ACTION_LOCK)): - - task_str = host.task or "" - if (('--' in host_action_str and - host_action_str.startswith( - k_host.ACTION_FORCE_LOCK)) or - ('----------' in host_action_str and - host_action_str.startswith(k_host.ACTION_LOCK))): - ihost_mtc = host.as_dict() - keepkeys = ['host_action', 'vim_progress_status'] - ihost_mtc = cutils.removekeys_nonmtce(ihost_mtc, - keepkeys) - - if host_action_str.startswith( - k_host.ACTION_FORCE_LOCK): - timeout_in_secs = 6 - ihost_mtc['operation'] = 'modify' - ihost_mtc['action'] = k_host.ACTION_FORCE_LOCK - ihost_mtc['task'] = k_host.FORCE_LOCKING - LOG.warn("host_action override %s" % - ihost_mtc) - mtce_api.host_modify( - self._api_token, self._mtc_address, self._mtc_port, - ihost_mtc, timeout_in_secs) - - # need time for FORCE_LOCK mtce to clear - if '----' in host_action_str: - host_action_str = "" - else: - host_action_str += "-" - - if (task_str.startswith(k_host.FORCE_LOCKING) or - task_str.startswith(k_host.LOCKING)): - val = {'task': "", - 'host_action': host_action_str, - 'vim_progress_status': ""} - else: - val = {'host_action': host_action_str, - 'vim_progress_status': ""} - else: - host_action_str += "-" - if (task_str.startswith(k_host.FORCE_LOCKING) or - task_str.startswith(k_host.LOCKING)): - task_str += "-" - val = {'task': task_str, - 'host_action': host_action_str} - else: - val = {'host_action': host_action_str} - - self.dbapi.host_update(host.uuid, val) - else: # Administrative locked already - task_str = host.task or "" - if (task_str.startswith(k_host.FORCE_LOCKING) or - task_str.startswith(k_host.LOCKING)): - val = {'task': ""} - self.dbapi.host_update(host.uuid, val) - - vim_progress_status_str = host.get('vim_progress_status') or "" - if (vim_progress_status_str and - (vim_progress_status_str != k_host.VIM_SERVICES_ENABLED) and - (vim_progress_status_str != k_host.VIM_SERVICES_DISABLED)): - if '..' in vim_progress_status_str: - LOG.info("Audit clearing vim_progress_status=%s" % - vim_progress_status_str) - vim_progress_status_str = "" - else: - vim_progress_status_str += ".." 
- - val = {'vim_progress_status': vim_progress_status_str} - self.dbapi.host_update(host.uuid, val) - - def _audit_install_states(self, host): - # A node could shut down during its installation and the install_state - # for example could get stuck at the value "installing". To avoid - # this situation we audit the sanity of the states by appending the - # character '+' to the states in the database. After 15 minutes of the - # states not changing, set the install_state to failed. - - # The audit's interval is 60sec - MAX_COUNT = 15 - - # Allow longer duration for booting phase - MAX_COUNT_BOOTING = 40 - - LOG.info("Auditing %s, install_state is %s", - host.hostname, host.install_state) - LOG.debug("Auditing %s, availability is %s", - host.hostname, host.availability) - - if (host.administrative == k_host.ADMIN_LOCKED and - host.install_state is not None): - - install_state = host.install_state.rstrip('+') - - if host.install_state != constants.INSTALL_STATE_FAILED: - if (install_state == constants.INSTALL_STATE_BOOTING and - host.availability != - k_host.AVAILABILITY_OFFLINE): - host.install_state = constants.INSTALL_STATE_COMPLETED - - if (install_state != constants.INSTALL_STATE_INSTALLED and - install_state != - constants.INSTALL_STATE_COMPLETED): - if (install_state == - constants.INSTALL_STATE_INSTALLING and - host.install_state_info is not None): - if host.install_state_info.count('+') >= MAX_COUNT: - LOG.info( - "Auditing %s, install_state changed from " - "'%s' to '%s'", host.hostname, - host.install_state, - constants.INSTALL_STATE_FAILED) - host.install_state = \ - constants.INSTALL_STATE_FAILED - else: - host.install_state_info += "+" - else: - if (install_state == - constants.INSTALL_STATE_BOOTING): - max_count = MAX_COUNT_BOOTING - else: - max_count = MAX_COUNT - if host.install_state.count('+') >= max_count: - LOG.info( - "Auditing %s, install_state changed from " - "'%s' to '%s'", host.hostname, - host.install_state, - constants.INSTALL_STATE_FAILED) - host.install_state = \ - constants.INSTALL_STATE_FAILED - else: - host.install_state += "+" - - # It is possible we get stuck in an install failed state. For - # example, if a node gets powered down during an install booting - # state and then powered on again. Clear it if the node is - # online. - elif (host.availability == k_host.AVAILABILITY_ONLINE and - host.install_state == constants.INSTALL_STATE_FAILED): - host.install_state = constants.INSTALL_STATE_COMPLETED - - self.dbapi.host_update(host.uuid, - {'install_state': host.install_state, - 'install_state_info': - host.install_state_info}) - - def configure_systemname(self, context, systemname): - """Configure the systemname with the supplied data. - - :param context: an admin context. - :param systemname: the systemname - """ - - LOG.debug("configure_systemname: sending systemname to agent(s)") - rpcapi = agent_rpcapi.AgentAPI() - rpcapi.configure_systemname(context, systemname=systemname) - - return - - @staticmethod - def _get_fm_entity_instance_id(host_obj): - """ - Create 'entity_instance_id' from host_obj data - """ - - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - host_obj.hostname) - return entity_instance_id - - def _log_host_create(self, host, reason=None): - """ - Create host discovery event customer log. - """ - if host.hostname: - hostid = host.hostname - else: - hostid = host.mgmt_mac - - if reason is not None: - reason_text = ("%s has been 'discovered' on the network.
(%s)" % - (hostid, reason)) - else: - reason_text = ("%s has been 'discovered'." % hostid) - - # action event -> FM_ALARM_TYPE_4 = 'equipment' - # FM_ALARM_SEVERITY_CLEAR to be consistent with 200.x series Info - log_data = {'hostid': hostid, - 'event_id': fm_constants.FM_LOG_ID_HOST_DISCOVERED, - 'entity_type': fm_constants.FM_ENTITY_TYPE_HOST, - 'entity': 'host=%s.event=discovered' % hostid, - 'fm_severity': fm_constants.FM_ALARM_SEVERITY_CLEAR, - 'fm_event_type': fm_constants.FM_ALARM_TYPE_4, - 'reason_text': reason_text, - } - self.fm_log.customer_log(log_data) - - def _update_subfunctions(self, context, ihost_obj): - """Update subfunctions.""" - - ihost_obj.invprovision = k_host.PROVISIONED - ihost_obj.save(context) - - def notify_subfunctions_config(self, context, - host_uuid, ihost_notify_dict): - """ - Notify inventory of host subfunctions configuration status - """ - - subfunctions_configured = ihost_notify_dict.get( - 'subfunctions_configured') or "" - try: - ihost_obj = self.dbapi.host_get(host_uuid) - except Exception as e: - LOG.exception("notify_subfunctions_config e=%s " - "ihost=%s subfunctions=%s" % - (e, host_uuid, subfunctions_configured)) - return False - - if not subfunctions_configured: - self._update_subfunctions(context, ihost_obj) - - def _add_port_to_list(self, interface_id, networktype, port_list): - info = {} - ports = self.dbapi.port_get_all(interfaceid=interface_id) - if ports: - info['name'] = ports[0]['name'] - info['numa_node'] = ports[0]['numa_node'] - info['networktype'] = networktype - if info not in port_list: - port_list.append(info) - return port_list - - def bm_deprovision_by_host(self, context, host_uuid, ibm_msg_dict): - """Update ihost upon notification of board management controller - deprovisioning. - - This method also allows a dictionary of values to be passed in to - affort additional controls, if and as needed. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param ibm_msg_dict: values for additional controls or changes - :returns: pass or fail - """ - LOG.info("bm_deprovision_by_host=%s msg=%s" % - (host_uuid, ibm_msg_dict)) - - isensorgroups = self.dbapi.sensorgroup_get_by_host(host_uuid) - - for isensorgroup in isensorgroups: - isensors = self.dbapi.sensor_get_by_sensorgroup(isensorgroup.uuid) - for isensor in isensors: - self.dbapi.sensor_destroy(isensor.uuid) - - self.dbapi.sensorgroup_destroy(isensorgroup.uuid) - - isensors = self.dbapi.sensor_get_by_host(host_uuid) - if isensors: - LOG.info("bm_deprovision_by_host=%s Non-group sensors=%s" % - (host_uuid, isensors)) - for isensor in isensors: - self.dbapi.sensor_destroy(isensor.uuid) - - isensors = self.dbapi.sensor_get_by_host(host_uuid) - - return True - - def configure_ttys_dcd(self, context, uuid, ttys_dcd): - """Notify agent to configure the dcd with the supplied data. - - :param context: an admin context. 
- :param uuid: the host uuid - :param ttys_dcd: the flag to enable/disable dcd - """ - - LOG.debug("ConductorManager.configure_ttys_dcd: sending dcd update %s " - "%s to agents" % (ttys_dcd, uuid)) - rpcapi = agent_rpcapi.AgentAPI() - rpcapi.configure_ttys_dcd(context, uuid=uuid, ttys_dcd=ttys_dcd) - - def get_host_ttys_dcd(self, context, ihost_id): - """ - Retrieve the serial line carrier detect state for a given host - """ - ihost = self.dbapi.host_get(ihost_id) - if ihost: - return ihost.ttys_dcd - else: - LOG.error("Host: %s not found in database" % ihost_id) - return None - - def _get_cinder_address_name(self, network_type): - ADDRESS_FORMAT_ARGS = (k_host.CONTROLLER_HOSTNAME, - network_type) - return "%s-cinder-%s" % ADDRESS_FORMAT_ARGS - - def create_barbican_secret(self, context, name, payload): - """Calls Barbican API to create a secret - - :param context: request context. - :param name: secret name - :param payload: secret payload - """ - self._openstack.create_barbican_secret(context=context, - name=name, payload=payload) - - def delete_barbican_secret(self, context, name): - """Calls Barbican API to delete a secret - - :param context: request context. - :param name: secret name - """ - self._openstack.delete_barbican_secret(context=context, name=name) diff --git a/inventory/inventory/inventory/conductor/openstack.py b/inventory/inventory/inventory/conductor/openstack.py deleted file mode 100644 index 9ae0bccb..00000000 --- a/inventory/inventory/inventory/conductor/openstack.py +++ /dev/null @@ -1,772 +0,0 @@ -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -""" Inventory Openstack Utilities and helper functions.""" - -from barbicanclient.v1 import client as barbican_client_v1 -from cinderclient.v2 import client as cinder_client_v2 -from inventory.common import constants -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.common.storage_backend_conf import StorageBackendConfig -from keystoneclient.auth.identity import v3 -from keystoneclient import exceptions as identity_exc -from keystoneclient import session -from keystoneclient.v3 import client as keystone_client -from neutronclient.v2_0 import client as neutron_client_v2_0 -from novaclient.v2 import client as nova_client_v2 -from oslo_config import cfg -from oslo_log import log -from sqlalchemy.orm import exc - - -LOG = log.getLogger(__name__) - -keystone_opts = [ - cfg.StrOpt('auth_host', - default='controller', - help=_("Authentication host server")), - cfg.IntOpt('auth_port', - default=5000, - help=_("Authentication host port number")), - cfg.StrOpt('auth_protocol', - default='http', - help=_("Authentication protocol")), - cfg.StrOpt('admin_user', - default='admin', - help=_("Admin user")), - cfg.StrOpt('admin_password', - default='admin', # this is usually some value - help=_("Admin password"), - secret=True), - cfg.StrOpt('admin_tenant_name', - default='services', - help=_("Admin tenant name")), - cfg.StrOpt('auth_uri', - default='http://192.168.204.2:5000/', - help=_("Authentication URI")), - cfg.StrOpt('auth_url', - default='http://127.0.0.1:5000/', - help=_("Admin Authentication URI")), - cfg.StrOpt('region_name', - default='RegionOne', - help=_("Region Name")), - cfg.StrOpt('neutron_region_name', - default='RegionOne', - help=_("Neutron Region Name")), - cfg.StrOpt('cinder_region_name', - default='RegionOne', - help=_("Cinder Region Name")), - 
cfg.StrOpt('nova_region_name', - default='RegionOne', - help=_("Nova Region Name")), - cfg.StrOpt('barbican_region_name', - default='RegionOne', - help=_("Barbican Region Name")), - cfg.StrOpt('username', - default='inventory', - help=_("Inventory keystone user name")), - cfg.StrOpt('password', - default='inventory', - help=_("Inventory keystone user password")), - cfg.StrOpt('project_name', - default='services', - help=_("Inventory keystone user project name")), - cfg.StrOpt('user_domain_name', - default='Default', - help=_("Inventory keystone user domain name")), - cfg.StrOpt('project_domain_name', - default='Default', - help=_("Inventory keystone user project domain name")) -] - -# Register the configuration options -cfg.CONF.register_opts(keystone_opts, "KEYSTONE_AUTHTOKEN") - - -class OpenStackOperator(object): - """Class to encapsulate OpenStack operations for Inventory""" - - def __init__(self, dbapi): - self.dbapi = dbapi - self.barbican_client = None - self.cinder_client = None - self.keystone_client = None - self.keystone_session = None - self.nova_client = None - self.neutron_client = None - self._neutron_extension_list = [] - self.auth_url = cfg.CONF.KEYSTONE_AUTHTOKEN.auth_url + "/v3" - - ################# - # NEUTRON - ################# - - def _get_neutronclient(self): - if not self.neutron_client: # should not cache this forever - # neutronclient doesn't yet support v3 keystone auth - # use keystoneauth.session - self.neutron_client = neutron_client_v2_0.Client( - session=self._get_keystone_session(), - auth_url=self.auth_url, - endpoint_type='internalURL', - region_name=cfg.CONF.KEYSTONE_AUTHTOKEN.neutron_region_name) - return self.neutron_client - - def get_providernetworksdict(self, pn_names=None, quiet=False): - """ - Returns names and MTU values of neutron's providernetworks - """ - pn_dict = {} - - # Call neutron - try: - pn_list = self._get_neutronclient().list_providernets().get( - 'providernets', []) - except Exception as e: - if not quiet: - LOG.error("Failed to access Neutron client") - LOG.error(e) - return pn_dict - - # Get dict - # If no names specified, will add all providernets to dict - for pn in pn_list: - if pn_names and pn['name'] not in pn_names: - continue - else: - pn_dict.update({pn['name']: pn}) - - return pn_dict - - def neutron_extension_list(self, context): - """ - Send a request to neutron to query the supported extension list. - """ - if not self._neutron_extension_list: - client = self._get_neutronclient() - extensions = client.list_extensions().get('extensions', []) - self._neutron_extension_list = [e['alias'] for e in extensions] - return self._neutron_extension_list - - def bind_interface(self, context, host_uuid, interface_uuid, - network_type, providernets, mtu, - vlans=None, test=False): - """ - Send a request to neutron to bind an interface to a set of provider - networks, and inform neutron of some key attributes of the interface - for semantic checking purposes. - - Any remote exceptions from neutron are allowed to pass-through and are - expected to be handled by the caller.
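
The keystone_opts list above is registered under the KEYSTONE_AUTHTOKEN group and read back as cfg.CONF.KEYSTONE_AUTHTOKEN.<option> throughout this module. A minimal self-contained sketch of that oslo.config pattern (the DEMO group and its option are illustrative only):

    from oslo_config import cfg

    demo_opts = [
        cfg.StrOpt('region_name',
                   default='RegionOne',
                   help='Illustrative option only'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(demo_opts, group='DEMO')
    print(CONF.DEMO.region_name)  # -> RegionOne
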
- """ - client = self._get_neutronclient() - body = {'interface': {'uuid': interface_uuid, - 'providernets': providernets, - 'network_type': network_type, - 'mtu': mtu}} - if vlans: - body['interface']['vlans'] = vlans - if test: - body['interface']['test'] = True - client.host_bind_interface(host_uuid, body=body) - return True - - def unbind_interface(self, context, host_uuid, interface_uuid): - """ - Send a request to neutron to unbind an interface from a set of - provider networks. - - Any remote exceptions from neutron are allowed to pass-through and are - expected to be handled by the caller. - """ - client = self._get_neutronclient() - body = {'interface': {'uuid': interface_uuid}} - client.host_unbind_interface(host_uuid, body=body) - return True - - def get_neutron_host_id_by_name(self, context, name): - """ - Get a neutron host - """ - - client = self._get_neutronclient() - - hosts = client.list_hosts() - - if not hosts: - return "" - - for host in hosts['hosts']: - if host['name'] == name: - return host['id'] - - return "" - - def create_neutron_host(self, context, host_uuid, name, - availability='down'): - """ - Send a request to neutron to create a host - """ - client = self._get_neutronclient() - body = {'host': {'id': host_uuid, - 'name': name, - 'availability': availability - }} - client.create_host(body=body) - return True - - def delete_neutron_host(self, context, host_uuid): - """ - Delete a neutron host - """ - client = self._get_neutronclient() - - client.delete_host(host_uuid) - - return True - - ################# - # NOVA - ################# - - def _get_novaclient(self): - if not self.nova_client: # should not cache this forever - # novaclient doesn't yet support v3 keystone auth - # use keystoneauth.session - self.nova_client = nova_client_v2.Client( - session=self._get_keystone_session(), - auth_url=self.auth_url, - endpoint_type='internalURL', - direct_use=False, - region_name=cfg.CONF.KEYSTONE_AUTHTOKEN.nova_region_name) - return self.nova_client - - def try_interface_get_by_host(self, host_uuid): - try: - interfaces = self.dbapi.iinterface_get_by_ihost(host_uuid) - except exc.DetachedInstanceError: - # A rare DetachedInstanceError exception may occur, retry - LOG.exception("Detached Instance Error, retry " - "iinterface_get_by_ihost %s" % host_uuid) - interfaces = self.dbapi.iinterface_get_by_ihost(host_uuid) - - return interfaces - - def nova_host_available(self, ihost_uuid): - """ - Perform inventory driven nova operations for an available ihost - """ - # novaclient/v3 - # - # # On unlock, check whether exists: - # 1. nova aggregate-create provider_physnet0 nova - # cs.aggregates.create(args.name, args.availability_zone) - # e.g. create(provider_physnet0, None) - # - # can query it from do_aggregate_list - # ('Name', 'Availability Zone'); anyways it doesnt - # allow duplicates on Name. can be done prior to compute nodes? - # - # # On unlock, check whether exists: metadata is a key/value pair - # 2. nova aggregate-set-metadata provider_physnet0 \ - # provider:physical_network=physnet0 - # aggregate = _find_aggregate(cs, args.aggregate) - # metadata = _extract_metadata(args) - # cs.aggregates.set_metadata(aggregate.id, metadata) - # - # This can be run mutliple times regardless. - # - # 3. nova aggregate-add-host provider_physnet0 compute-0 - # cs.aggregates.add_host(aggregate.id, args.host) - # - # Can only be after nova knows about this resource!!! - # Doesnt allow duplicates,therefore agent must trigger conductor - # to perform the function. 
A single sync call upon init. - # On every unlock try for about 5 minutes? or check admin state - # and skip it. it needs to try several time though or needs to - # know that nova is up and running before sending it. - # e.g. agent audit look for and transitions - # /etc/platform/.initial_config_complete - # however, it needs to do this on every unlock may update - # - # Remove aggregates from provider network - on delete of host. - # 4. nova aggregate-remove-host provider_physnet0 compute-0 - # cs.aggregates.remove_host(aggregate.id, args.host) - # - # Do we ever need to do this? - # 5. nova aggregate-delete provider_physnet0 - # cs.aggregates.delete(aggregate) - # - # report to nova host aggregate groupings once node is available - - availability_zone = None - aggregate_name_prefix = 'provider_' - ihost_providernets = [] - - ihost_aggset_provider = set() - nova_aggset_provider = set() - - # determine which providernets are on this ihost - try: - iinterfaces = self.try_interface_get_by_host(ihost_uuid) - for interface in iinterfaces: - networktypelist = [] - if interface.networktype: - networktypelist = [ - network.strip() - for network in interface['networktype'].split(",")] - if constants.NETWORK_TYPE_DATA in networktypelist: - providernets = interface.providernetworks - for providernet in providernets.split(',') \ - if providernets else []: - ihost_aggset_provider.add(aggregate_name_prefix + - providernet) - - ihost_providernets = list(ihost_aggset_provider) - except Exception: - LOG.exception("AGG iinterfaces_get failed for %s." % ihost_uuid) - - try: - aggregates = self._get_novaclient().aggregates.list() - except Exception: - self.nova_client = None # password may have updated - aggregates = self._get_novaclient().aggregates.list() - pass - - for aggregate in aggregates: - nova_aggset_provider.add(aggregate.name) - - if ihost_providernets: - agglist_missing = \ - list(ihost_aggset_provider - nova_aggset_provider) - LOG.debug("AGG agglist_missing = %s." % agglist_missing) - - for i in agglist_missing: - # 1. nova aggregate-create provider_physnet0 - # use None for the availability zone - # cs.aggregates.create(args.name, args.availability_zone) - try: - aggregate = self._get_novaclient().aggregates.create( - i, availability_zone) - aggregates.append(aggregate) - LOG.debug("AGG6 aggregate= %s. aggregates= %s" % - (aggregate, aggregates)) - except Exception: - # do not continue i, redo as potential race condition - LOG.error("AGG6 EXCEPTION aggregate i=%s, aggregates=%s" % - (i, aggregates)) - - # let it try again, so it can rebuild the aggregates list - return False - - # 2. nova aggregate-set-metadata provider_physnet0 \ - # provider:physical_network=physnet0 - # aggregate = _find_aggregate(cs, args.aggregate) - # metadata = _extract_metadata(args) - # cs.aggregates.set_metadata(aggregate.id, metadata) - try: - metadata = {} - key = 'provider:physical_network' - metadata[key] = i[9:] - - # pre-check: only add/modify if aggregate is valid - if aggregate_name_prefix + metadata[key] == aggregate.name: - LOG.debug("AGG8 aggregate metadata = %s." % metadata) - aggregate = \ - self._get_novaclient().aggregates.set_metadata( - aggregate.id, metadata) - except Exception: - LOG.error("AGG8 EXCEPTION aggregate") - pass - - # 3. 
nova aggregate-add-host provider_physnet0 compute-0 - # cs.aggregates.add_host(aggregate.id, args.host) - - # aggregates = self._get_novaclient().aggregates.list() - ihost = self.dbapi.ihost_get(ihost_uuid) - - for i in aggregates: - if i.name in ihost_providernets: - metadata = self._get_novaclient().aggregates.get(int(i.id)) - - nhosts = [] - if hasattr(metadata, 'hosts'): - nhosts = metadata.hosts or [] - - if ihost.hostname in nhosts: - LOG.warn("host=%s is already in aggregate id=%s" % - (ihost.hostname, i.id)) - else: - try: - metadata = \ - self._get_novaclient().aggregates.add_host( - i.id, ihost.hostname) - except Exception: - LOG.warn("AGG10 EXCEPTION aggregate id = %s " - "ihost= %s." - % (i.id, ihost.hostname)) - return False - else: - LOG.warn("AGG ihost_providernets empty %s." % ihost_uuid) - - def nova_host_offline(self, ihost_uuid): - """ - Perform inventory driven nova operations for an unavailable ihost, - such as may occur when a host is locked, since its providers - may change before it is unlocked again. - """ - # novaclient/v3 - # - # # On delete, check whether exists: - # - # Remove aggregates from provider network - on delete of host. - # 4. nova aggregate-remove-host provider_physnet0 compute-0 - # cs.aggregates.remove_host(aggregate.id, args.host) - # - # Do we ever need to do this? - # 5. nova aggregate-delete provider_physnet0 - # cs.aggregates.delete(aggregate) - # - - aggregate_name_prefix = 'provider_' - ihost_providernets = [] - - ihost_aggset_provider = set() - nova_aggset_provider = set() - - # determine which providernets are on this ihost - try: - iinterfaces = self.try_interface_get_by_host(ihost_uuid) - for interface in iinterfaces: - networktypelist = [] - if interface.networktype: - networktypelist = [network.strip() for network in - interface['networktype'].split(",")] - if constants.NETWORK_TYPE_DATA in networktypelist: - providernets = interface.providernetworks - for providernet in ( - providernets.split(',') if providernets else []): - ihost_aggset_provider.add(aggregate_name_prefix + - providernet) - ihost_providernets = list(ihost_aggset_provider) - except Exception: - LOG.exception("AGG iinterfaces_get failed for %s." % ihost_uuid) - - try: - aggregates = self._get_novaclient().aggregates.list() - except Exception: - self.nova_client = None # password may have updated - aggregates = self._get_novaclient().aggregates.list() - - if ihost_providernets: - for aggregate in aggregates: - nova_aggset_provider.add(aggregate.name) - else: - LOG.debug("AGG ihost_providernets empty %s." % ihost_uuid) - - # Remove aggregates from provider network. Anything with host in list. - # 4. nova aggregate-remove-host provider_physnet0 compute-0 - # cs.aggregates.remove_host(aggregate.id, args.host) - - ihost = self.dbapi.ihost_get(ihost_uuid) - - for aggregate in aggregates: - try: - LOG.debug("AGG10 remove aggregate id = %s ihost= %s." % - (aggregate.id, ihost.hostname)) - self._get_novaclient().aggregates.remove_host( - aggregate.id, ihost.hostname) - except Exception: - LOG.debug("AGG10 EXCEPTION remove aggregate") - pass - - return True - - ################# - # Keystone - ################# - def _get_keystone_session(self): - if not self.keystone_session: - auth = v3.Password(auth_url=self.auth_url, - username=cfg.CONF.KEYSTONE_AUTHTOKEN.username, - password=cfg.CONF.KEYSTONE_AUTHTOKEN.password, - user_domain_name=cfg.CONF.KEYSTONE_AUTHTOKEN. - user_domain_name, - project_name=cfg.CONF.KEYSTONE_AUTHTOKEN.
- project_name, - project_domain_name=cfg.CONF.KEYSTONE_AUTHTOKEN. - project_domain_name) - self.keystone_session = session.Session(auth=auth) - return self.keystone_session - - def _get_keystoneclient(self): - if not self.keystone_client: # should not cache this forever - self.keystone_client = keystone_client.Client( - username=cfg.CONF.KEYSTONE_AUTHTOKEN.username, - user_domain_name=cfg.CONF.KEYSTONE_AUTHTOKEN.user_domain_name, - project_name=cfg.CONF.KEYSTONE_AUTHTOKEN.project_name, - project_domain_name=cfg.CONF.KEYSTONE_AUTHTOKEN - .project_domain_name, - password=cfg.CONF.KEYSTONE_AUTHTOKEN.password, - auth_url=self.auth_url, - region_name=cfg.CONF.KEYSTONE_AUTHTOKEN.region_name) - return self.keystone_client - - def _get_identity_id(self): - try: - LOG.debug("Search service id for : (%s)" % - constants.SERVICE_TYPE_IDENTITY) - service = self._get_keystoneclient().services.find( - type=constants.SERVICE_TYPE_IDENTITY) - except identity_exc.NotFound: - LOG.error("Could not find service id for (%s)" % - constants.SERVICE_TYPE_IDENTITY) - return None - except identity_exc.NoUniqueMatch: - LOG.error("Multiple service matches found for (%s)" % - constants.SERVICE_TYPE_IDENTITY) - return None - return service.id - - ################# - # Cinder - ################# - def _get_cinder_endpoints(self): - endpoint_list = [] - try: - # get region one name from platform.conf - region1_name = get_region_name('region_1_name') - if region1_name is None: - region1_name = 'RegionOne' - service_list = self._get_keystoneclient().services.list() - for s in service_list: - if s.name.find(constants.SERVICE_TYPE_CINDER) != -1: - endpoint_list += self._get_keystoneclient().endpoints.list( - service=s, region=region1_name) - except Exception: - LOG.error("Failed to get keystone endpoints for cinder.") - return endpoint_list - - def _get_cinderclient(self): - if not self.cinder_client: - self.cinder_client = cinder_client_v2.Client( - session=self._get_keystone_session(), - auth_url=self.auth_url, - endpoint_type='internalURL', - region_name=cfg.CONF.KEYSTONE_AUTHTOKEN.cinder_region_name) - - return self.cinder_client - - def get_cinder_pools(self): - pools = {} - - # Check to see if cinder is present - # TODO(rchurch): Need to refactor with storage backend - if ((StorageBackendConfig.has_backend_configured( - self.dbapi, constants.CINDER_BACKEND_CEPH)) or - (StorageBackendConfig.has_backend_configured( - self.dbapi, constants.CINDER_BACKEND_LVM))): - try: - pools = self._get_cinderclient().pools.list(detailed=True) - except Exception as e: - LOG.error("get_cinder_pools: Failed to access " - "Cinder client: %s" % e) - - return pools - - def get_cinder_volumes(self): - volumes = [] - - # Check to see if cinder is present - # TODO(rchurch): Need to refactor with storage backend - if ((StorageBackendConfig.has_backend_configured( - self.dbapi, constants.CINDER_BACKEND_CEPH)) or - (StorageBackendConfig.has_backend_configured( - self.dbapi, constants.CINDER_BACKEND_LVM))): - search_opts = { - 'all_tenants': 1 - } - try: - volumes = self._get_cinderclient().volumes.list( - search_opts=search_opts) - except Exception as e: - LOG.error("get_cinder_volumes: Failed to access " - "Cinder client: %s" % e) - - return volumes - - def get_cinder_services(self): - service_list = [] - - # Check to see if cinder is present - # TODO(rchurch): Need to refactor with storage backend - if ((StorageBackendConfig.has_backend_configured( - self.dbapi, constants.CINDER_BACKEND_CEPH)) or - (StorageBackendConfig.has_backend_configured( - 
self.dbapi, constants.CINDER_BACKEND_LVM))): - try: - service_list = self._get_cinderclient().services.list() - except Exception as e: - LOG.error("get_cinder_services:Failed to access " - "Cinder client: %s" % e) - - return service_list - - def get_cinder_volume_types(self): - """Obtain the current list of volume types.""" - volume_types_list = [] - - if StorageBackendConfig.is_service_enabled(self.dbapi, - constants.SB_SVC_CINDER, - filter_shared=True): - try: - volume_types_list = \ - self._get_cinderclient().volume_types.list() - except Exception as e: - LOG.error("get_cinder_volume_types: Failed to access " - "Cinder client: %s" % e) - - return volume_types_list - - ################# - # Barbican - ################# - def _get_barbicanclient(self): - if not self.barbican_client: - self.barbican_client = barbican_client_v1.Client( - session=self._get_keystone_session(), - auth_url=self.auth_url, - endpoint_type='internalURL', - region_name=cfg.CONF.KEYSTONE_AUTHTOKEN.barbican_region_name) - return self.barbican_client - - def get_barbican_secret_by_name(self, context, name): - try: - client = self._get_barbicanclient() - secret_list = client.secrets.list(name=name) - secret = next(iter(secret_list), None) - return secret - except Exception: - LOG.error("Unable to find Barbican secret %s", name) - return None - - def create_barbican_secret(self, context, name, payload): - if not payload: - LOG.error("Empty password is passed to Barbican %s" % name) - return None - try: - client = self._get_barbicanclient() - secret = self.get_barbican_secret_by_name(context, name) - if secret: - client.secrets.delete(secret.secret_ref) - secret = client.secrets.create(name, payload) - secret.store() - return secret.secret_ref - except Exception: - LOG.error("Unable to create Barbican secret %s" % name) - return None - - def delete_barbican_secret(self, context, name): - try: - client = self._get_barbicanclient() - secret = self.get_barbican_secret_by_name(context=context, - name=name) - if not secret: - LOG.error("Unable to delete unknown Barbican secret %s" % name) - return False - client.secrets.delete(secret_ref=secret.secret_ref) - return True - except Exception: - LOG.error("Unable to delete Barbican secret %s" % name) - return False - - ######################### - # Primary Region Inventory - # Region specific methods - ######################### - def _get_primary_cgtsclient(self): - # import the module in the function that uses it - # as the cgtsclient is only installed on the controllers - from cgtsclient.v1 import client as cgts_client - # get region one name from platform.conf - region1_name = get_region_name('region_1_name') - if region1_name is None: - region1_name = 'RegionOne' - auth_ref = self._get_keystoneclient().auth_ref - if auth_ref is None: - raise exception.InventoryException( - _("Unable to get auth ref from keystone client")) - auth_token = auth_ref.service_catalog.get_token() - endpoint = (auth_ref.service_catalog. 
- get_endpoints(service_type='platform', - endpoint_type='internal', - region_name=region1_name)) - endpoint = endpoint['platform'][0] - version = 1 - return cgts_client.Client(version=version, - endpoint=endpoint['url'], - auth_url=self.auth_url, - token=auth_token['id']) - - def get_ceph_mon_info(self): - ceph_mon_info = dict() - try: - cgtsclient = self._get_primary_cgtsclient() - clusters = cgtsclient.cluster.list() - if clusters: - ceph_mon_info['cluster_id'] = clusters[0].cluster_uuid - else: - LOG.error("Unable to get the cluster from the primary region") - return None - ceph_mon_ips = cgtsclient.ceph_mon.ip_addresses() - if ceph_mon_ips: - ceph_mon_info['ceph-mon-0-ip'] = ceph_mon_ips.get( - 'ceph-mon-0-ip', '') - ceph_mon_info['ceph-mon-1-ip'] = ceph_mon_ips.get( - 'ceph-mon-1-ip', '') - ceph_mon_info['ceph-mon-2-ip'] = ceph_mon_ips.get( - 'ceph-mon-2-ip', '') - else: - LOG.error("Unable to get the ceph mon IPs from the primary " - "region") - return None - except Exception as e: - LOG.error("Unable to get ceph info from the " - "primary region: %s" % e) - return None - return ceph_mon_info - - def region_has_ceph_backend(self): - ceph_present = False - try: - backend_list = \ - self._get_primary_cgtsclient().storage_backend.list() - for backend in backend_list: - if backend.backend == constants.CINDER_BACKEND_CEPH: - ceph_present = True - break - except Exception as e: - LOG.error("Unable to get storage backend list from the primary " - "region: %s" % e) - return ceph_present - - -def get_region_name(region): - # get region name from platform.conf - lines = [line.rstrip('\n') for line in - open('/etc/platform/platform.conf')] - for line in lines: - values = line.split('=') - if values[0] == region: - return values[1] - LOG.error("Unable to get %s from the platform.conf." % region) - return None diff --git a/inventory/inventory/inventory/conductor/rpcapi.py b/inventory/inventory/inventory/conductor/rpcapi.py deleted file mode 100644 index e46b5a98..00000000 --- a/inventory/inventory/inventory/conductor/rpcapi.py +++ /dev/null @@ -1,560 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 - -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -""" -Client side of the conductor RPC API. -""" - -from oslo_log import log -import oslo_messaging as messaging - -from inventory.common import rpc -from inventory.objects import base as objects_base - -LOG = log.getLogger(__name__) - -MANAGER_TOPIC = 'inventory.conductor_manager' - - -class ConductorAPI(object): - """Client side of the conductor RPC API. - - API version history: - - 1.0 - Initial version. - """ - - RPC_API_VERSION = '1.0' - - # The default namespace, which can be overridden in a subclass. 
- RPC_API_NAMESPACE = None - - def __init__(self, topic=None): - super(ConductorAPI, self).__init__() - self.topic = topic - if self.topic is None: - self.topic = MANAGER_TOPIC - target = messaging.Target(topic=self.topic, - version='1.0') - serializer = objects_base.InventoryObjectSerializer() - # release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version) - # version_cap = (release_ver['rpc'] if release_ver - # else self.RPC_API_VERSION) - version_cap = self.RPC_API_VERSION - self.client = rpc.get_client(target, - version_cap=version_cap, - serializer=serializer) - - @staticmethod - def make_namespaced_msg(method, namespace, **kwargs): - return {'method': method, 'namespace': namespace, 'args': kwargs} - - def make_msg(self, method, **kwargs): - return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE, - **kwargs) - - # This is to be in inventory? However, it'll need to know the ip_address! - def handle_dhcp_lease(self, context, tags, mac, ip_address, cid=None, - topic=None): - """Synchronously, have a conductor handle a DHCP lease update. - - Handling depends on the interface: - - management interface: creates an ihost - - infrastructure interface: just updates the dnsmasq config - - :param context: request context. - :param tags: specifies the interface type (mgmt or infra) - :param mac: MAC for the lease - :param ip_address: IP address for the lease - :param cid: Client ID for the lease - """ - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'handle_dhcp_lease', - tags=tags, - mac=mac, - ip_address=ip_address, - cid=cid) - - def create_host(self, context, values, topic=None): - """Synchronously, have a conductor create an ihost. - - Create an ihost in the database and return an object. - - :param context: request context. - :param values: dictionary with initial values for new ihost object - :returns: created ihost object, including all fields. - """ - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'create_host', - values=values) - - def update_host(self, context, ihost_obj, topic=None): - """Synchronously, have a conductor update the host's information. - - Update the ihost's information in the database and return an object. - - :param context: request context. - :param ihost_obj: a changed (but not saved) ihost object. - :returns: updated ihost object, including all fields. - """ - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'update_host', - ihost_obj=ihost_obj) - - def configure_host(self, context, host_obj, - do_compute_apply=False, - topic=None): - """Synchronously, have a conductor configure an ihost. - - Does the following tasks: - - invoke systemconfig to perform host configuration - - Update puppet hiera configuration files for the ihost. - - Add (or update) a host entry in the dnsmasq.conf file. - - Set up PXE configuration to run installer - - :param context: request context. - :param host_obj: an ihost object. - :param do_compute_apply: apply the newly created compute manifests. - """ - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'configure_host', - host_obj=host_obj, - do_compute_apply=do_compute_apply) - - def unconfigure_host(self, context, host_obj, topic=None): - """Synchronously, have a conductor unconfigure a host. - - Does the following tasks: - - Remove hiera config files for the host.
- - Remove the host entry from the dnsmasq.conf file. - - Remove the PXE configuration - - :param context: request context. - :param host_obj: a host object. - """ - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'unconfigure_host', - host_obj=host_obj) - - def get_host_by_macs(self, context, host_macs, topic=None): - """Finds the host db entry based upon the mac list - - This method returns a host if it matches a mac - - :param context: an admin context - :param host_macs: list of mac addresses - :returns: host object - """ - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - - return cctxt.call(context, - 'get_host_by_macs', - host_macs=host_macs) - - def get_host_by_hostname(self, context, hostname, topic=None): - """Finds host db entry based upon the ihost hostname - - This method returns an ihost if it matches the - hostname. - - :param context: an admin context - :param hostname: host hostname - :returns: host object, including all fields. - """ - - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'get_host_by_hostname', - hostname=hostname) - - def platform_update_by_host(self, context, - host_uuid, imsg_dict, topic=None): - """Update platform status for an ihost with the supplied data. - - This method allows platform status attributes for an ihost to be - updated. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param imsg_dict: inventory message dict - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, - version='1.0') - return cctxt.call(context, - 'platform_update_by_host', - host_uuid=host_uuid, - imsg_dict=imsg_dict) - - def subfunctions_update_by_host(self, context, - host_uuid, subfunctions, topic=None): - """Update the subfunctions for an ihost with the supplied - data. - - This method allows the subfunctions record for an ihost to be - updated.
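
Every method in this client follows the same oslo.messaging shape: prepare() pins the topic and version, then call() blocks for the conductor's reply (vim_host_add instead uses cast(), which is fire-and-forget). A structural sketch of that pattern with an illustrative method name, exercised here against a mock transport so it runs standalone:

    from unittest import mock

    class ExampleAPI(object):
        def __init__(self, client, topic='inventory.conductor_manager'):
            self.client = client
            self.topic = topic

        def example_update(self, context, host_uuid, topic=None):
            cctxt = self.client.prepare(topic=topic or self.topic,
                                        version='1.0')
            # call() is synchronous; cast() would return immediately
            return cctxt.call(context, 'example_update',
                              host_uuid=host_uuid)

    api = ExampleAPI(client=mock.MagicMock())
    api.example_update(context=None, host_uuid='uuid-1234')
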
- - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param subfunctions: subfunctions of the host - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'subfunctions_update_by_host', - host_uuid=host_uuid, - subfunctions=subfunctions) - - def mgmt_ip_set_by_host(self, - context, - host_uuid, - mgmt_ip, - topic=None): - """Call inventory to update host mgmt_ip (removes previous entry if - necessary) - - :param context: an admin context - :param host_uuid: ihost uuid - :param mgmt_ip: mgmt_ip - :returns: Address - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'mgmt_ip_set_by_host', - host_uuid=host_uuid, - mgmt_ip=mgmt_ip) - - def infra_ip_set_by_host(self, - context, - host_uuid, - infra_ip, topic=None): - """Call inventory to update host infra_ip (removes previous entry if - necessary) - - :param context: an admin context - :param host_uuid: ihost uuid - :param infra_ip: infra_ip - :returns: Address - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'infra_ip_set_by_host', - host_uuid=host_uuid, - infra_ip=infra_ip) - - def vim_host_add(self, context, api_token, host_uuid, - hostname, subfunctions, administrative, - operational, availability, - subfunction_oper, subfunction_avail, timeout, topic=None): - """ - Asynchronously, notify VIM of host add - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.cast(context, - 'vim_host_add', - api_token=api_token, - host_uuid=host_uuid, - hostname=hostname, - personality=subfunctions, - administrative=administrative, - operational=operational, - availability=availability, - subfunction_oper=subfunction_oper, - subfunction_avail=subfunction_avail, - timeout=timeout) - - def notify_subfunctions_config(self, context, - host_uuid, ihost_notify_dict, topic=None): - """ - Synchronously, notify inventory of host subfunctions config status - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'notify_subfunctions_config', - host_uuid=host_uuid, - ihost_notify_dict=ihost_notify_dict) - - def bm_deprovision_by_host(self, context, - host_uuid, ibm_msg_dict, topic=None): - """Update ihost upon notification of board management controller - deprovisioning. - - This method also allows a dictionary of values to be passed in to - afford additional controls, if and as needed. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param ibm_msg_dict: values for additional controls or changes - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'bm_deprovision_by_host', - host_uuid=host_uuid, - ibm_msg_dict=ibm_msg_dict) - - def configure_ttys_dcd(self, context, uuid, ttys_dcd, topic=None): - """Synchronously, have a conductor configure the dcd. - - Does the following tasks: - - sends a message to the conductor - - the conductor sends a message to all inventory agents - - the agent whose host matches the uuid updates the dcd - - :param context: request context.
- :param uuid: the host uuid - :param ttys_dcd: the flag to enable/disable dcd - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - LOG.debug("ConductorApi.configure_ttys_dcd: sending (%s %s) to " - "conductor" % (uuid, ttys_dcd)) - return cctxt.call(context, - 'configure_ttys_dcd', - uuid=uuid, - ttys_dcd=ttys_dcd) - - def get_host_ttys_dcd(self, context, ihost_id, topic=None): - """Synchronously, have an agent collect the carrier detect state for - this ihost. - - :param context: request context. - :param ihost_id: id of this host - :returns: ttys_dcd. - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'get_host_ttys_dcd', - ihost_id=ihost_id) - - def port_update_by_host(self, context, - host_uuid, inic_dict_array, topic=None): - """Create iports for an ihost with the supplied data. - - This method allows records for iports for ihost to be created. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param inic_dict_array: initial values for iport objects - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'port_update_by_host', - host_uuid=host_uuid, - inic_dict_array=inic_dict_array) - - def lldp_agent_update_by_host(self, context, host_uuid, agent_dict_array, - topic=None): - """Create lldp_agents for an ihost with the supplied data. - - This method allows records for lldp_agents for a host to be created. - - :param context: an admin context - :param host_uuid: host uuid unique id - :param agent_dict_array: initial values for lldp_agent objects - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'lldp_agent_update_by_host', - host_uuid=host_uuid, - agent_dict_array=agent_dict_array) - - def lldp_neighbour_update_by_host(self, context, - host_uuid, neighbour_dict_array, - topic=None): - """Create lldp_neighbours for an ihost with the supplied data. - - This method allows records for lldp_neighbours for a host to be - created. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param neighbour_dict_array: initial values for lldp_neighbour objects - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call( - context, - 'lldp_neighbour_update_by_host', - host_uuid=host_uuid, - neighbour_dict_array=neighbour_dict_array) - - def pci_device_update_by_host(self, context, - host_uuid, pci_device_dict_array, - topic=None): - """Create pci_devices for an ihost with the supplied data. - - This method allows records for pci_devices for ihost to be created. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param pci_device_dict_array: initial values for device objects - :returns: pass or fail - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'pci_device_update_by_host', - host_uuid=host_uuid, - pci_device_dict_array=pci_device_dict_array) - - def numas_update_by_host(self, - context, - host_uuid, - inuma_dict_array, - topic=None): - """Create inumas for an ihost with the supplied data. - - This method allows records for inumas for ihost to be created.
- - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param inuma_dict_array: initial values for inuma objects - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'numas_update_by_host', - host_uuid=host_uuid, - inuma_dict_array=inuma_dict_array) - - def cpus_update_by_host(self, - context, - host_uuid, - icpu_dict_array, - force_grub_update, - topic=None): - """Create cpus for an ihost with the supplied data. - - This method allows records for cpus for ihost to be created. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param icpu_dict_array: initial values for cpu objects - :param force_grub_update: bool value to force grub update - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'cpus_update_by_host', - host_uuid=host_uuid, - icpu_dict_array=icpu_dict_array, - force_grub_update=force_grub_update) - - def memory_update_by_host(self, context, - host_uuid, imemory_dict_array, - force_update=False, - topic=None): - """Create or update memory for an ihost with the supplied data. - - This method allows records for memory for ihost to be created, - or updated. - - :param context: an admin context - :param host_uuid: ihost uuid unique id - :param imemory_dict_array: initial values for memory objects - :param force_update: force a memory update - :returns: pass or fail - """ - - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'memory_update_by_host', - host_uuid=host_uuid, - imemory_dict_array=imemory_dict_array, - force_update=force_update) - - def update_cpu_config(self, context, topic=None): - """Synchronously, have the conductor update the cpu - configuration. - - :param context: request context. - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, 'update_cpu_config') - - def create_barbican_secret(self, context, name, payload, topic=None): - """Calls Barbican API to create a secret - - :param context: request context. - :param name: secret name - :param payload: secret payload - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'create_barbican_secret', - name=name, - payload=payload) - - def delete_barbican_secret(self, context, name, topic=None): - """Calls Barbican API to delete a secret - - :param context: request context. - :param name: secret name - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'delete_barbican_secret', - name=name) - - def reload_snmp_config(self, context, topic=None): - """Synchronously, have a conductor reload the SNMP configuration. - - Does the following tasks: - - sighup snmpd to reload the snmpd configuration. - - :param context: request context. 
- """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, - 'reload_snmp_config') - - def region_has_ceph_backend(self, context, topic=None): - """ - Send a request to primary region to see if ceph backend is configured - """ - cctxt = self.client.prepare(topic=topic or self.topic, version='1.0') - return cctxt.call(context, 'region_has_ceph_backend') diff --git a/inventory/inventory/inventory/conf/__init__.py b/inventory/inventory/inventory/conf/__init__.py deleted file mode 100644 index c6d440ed..00000000 --- a/inventory/inventory/inventory/conf/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventory.conf import default -from oslo_config import cfg - -CONF = cfg.CONF -default.register_opts(CONF) diff --git a/inventory/inventory/inventory/conf/database.py b/inventory/inventory/inventory/conf/database.py deleted file mode 100644 index b2fd16fd..00000000 --- a/inventory/inventory/inventory/conf/database.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2016 Intel Corporation -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from inventory.common.i18n import _ - -opts = [ - cfg.StrOpt('mysql_engine', - default='InnoDB', - help=_('MySQL engine to use.')), - cfg.StrOpt('sql_connection', - default='sqlite://', - help=_('sql connection to use.')) -] - - -def register_opts(conf): - conf.register_opts(opts, group='database') diff --git a/inventory/inventory/inventory/conf/default.py b/inventory/inventory/inventory/conf/default.py deleted file mode 100644 index 87b5569c..00000000 --- a/inventory/inventory/inventory/conf/default.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2016 Intel Corporation -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2013 Red Hat, Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import socket -import tempfile - -from inventory.common.i18n import _ -from oslo_config import cfg - - -api_opts = [ - cfg.StrOpt( - 'auth_strategy', - default='keystone', - choices=['noauth', 'keystone'], - help=_('Authentication strategy used by inventory-api. "noauth" ' - 'should not be used in a production environment because all ' - 'authentication will be disabled.')), - cfg.BoolOpt('debug_tracebacks_in_api', - default=False, - help=_('Return server tracebacks in the API response for any ' - 'error responses. WARNING: this is insecure ' - 'and should not be used in a production environment.')), - cfg.BoolOpt('pecan_debug', - default=False, - help=_('Enable pecan debug mode. WARNING: this is insecure ' - 'and should not be used in a production environment.')), - cfg.StrOpt('default_resource_class', - help=_('Resource class to use for new nodes when no resource ' - 'class is provided in the creation request.')), -] - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help=_('Used if there is a formatting error when generating ' - 'an exception message (a programming error). If True, ' - 'raise an exception; if False, use the unformatted ' - 'message.')), -] - -# NOTE(mariojv) By default, accessing this option when it's unset will return -# None, indicating no notifications will be sent. oslo.config returns None by -# default for options without set defaults that aren't required. -notification_opts = [ - cfg.StrOpt('notification_level', - choices=[('debug', _('"debug" level')), - ('info', _('"info" level')), - ('warning', _('"warning" level')), - ('error', _('"error" level')), - ('critical', _('"critical" level'))], - help=_('Specifies the minimum level for which to send ' - 'notifications. If not set, no notifications will ' - 'be sent. The default is for this option to be unset.')) -] - -path_opts = [ - cfg.StrOpt( - 'pybasedir', - default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), - sample_default='/usr/lib64/python/site-packages/inventory', - help=_('Directory where the inventory python module is ' - 'installed.')), - cfg.StrOpt('bindir', - default='$pybasedir/bin', - help=_('Directory where inventory binaries are installed.')), - cfg.StrOpt('state_path', - default='$pybasedir', - help=_("Top-level directory for maintaining inventory's " - "state.")), -] - -service_opts = [ - cfg.StrOpt('host', - default=socket.getfqdn(), - sample_default='localhost', - help=_('Name of this node. This can be an opaque identifier. ' - 'It is not necessarily a hostname, FQDN, or IP address. 
diff --git a/inventory/inventory/inventory/conf/opts.py b/inventory/inventory/inventory/conf/opts.py
deleted file mode 100644
index 63578f74..00000000
--- a/inventory/inventory/inventory/conf/opts.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from oslo_log import log
-
-
-def update_opt_defaults():
-    log.set_defaults(
-        default_log_levels=[
-            'amqp=WARNING',
-            'amqplib=WARNING',
-            'qpid.messaging=INFO',
-            # TODO(therve): when bug #1685148 is fixed in oslo.messaging, we
-            # should be able to remove one of those 2 lines.
-            'oslo_messaging=INFO',
-            'oslo.messaging=INFO',
-            'sqlalchemy=WARNING',
-            'stevedore=INFO',
-            'eventlet.wsgi.server=INFO',
-            'iso8601=WARNING',
-            'requests=WARNING',
-            'neutronclient=WARNING',
-            'urllib3.connectionpool=WARNING',
-            'keystonemiddleware.auth_token=INFO',
-            'keystoneauth.session=INFO',
-        ]
-    )
-
-# 'glanceclient=WARNING',
diff --git a/inventory/inventory/inventory/config-generator.conf b/inventory/inventory/inventory/config-generator.conf
deleted file mode 100644
index 7ae653a3..00000000
--- a/inventory/inventory/inventory/config-generator.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-[DEFAULT]
-output_file = inventory.conf.sample
-wrap_width = 79
-# namespace = inventory.api.config
-namespace = inventory.agent.manager
-namespace = inventory.conductor.manager
-namespace = inventory.conductor.openstack
-namespace = inventory.conf
-namespace = inventory.default
-# from setup.py
-namespace = inventory.common.config
-namespace = keystonemiddleware.auth_token
-namespace = oslo.middleware
-namespace = oslo.log
-namespace = oslo.policy
-namespace = oslo.db
-namespace = oslo.messaging
-namespace = oslo.service.service
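The config-generator.conf removed above is the input that the oslo-config-generator tool reads (through its --config-file flag) to emit inventory.conf.sample from the listed option namespaces. The opts.py helper, meanwhile, quiets chatty third-party loggers before logging is configured; the usual oslo.log call sequence looks roughly like the sketch below, where the log levels shown are a subset of the deleted list and 'inventory' is simply how the removed service named itself:

    from oslo_config import cfg
    from oslo_log import log

    CONF = cfg.CONF

    # Lower default verbosity for selected third-party loggers, then
    # register oslo.log's own options and configure logging.
    log.set_defaults(default_log_levels=['sqlalchemy=WARNING',
                                         'stevedore=INFO'])
    log.register_options(CONF)
    CONF([])
    log.setup(CONF, 'inventory')

    LOG = log.getLogger(__name__)
    LOG.info('logging configured')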
-""" - -import abc - -from oslo_config import cfg -from oslo_db import api as db_api -import six - -CONF = cfg.CONF - -_BACKEND_MAPPING = {'sqlalchemy': 'inventory.db.sqlalchemy.api'} -IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING, - lazy=True) - - -def get_instance(): - """Return a DB API instance.""" - return IMPL - - -def get_engine(): - return IMPL.get_engine() - - -def get_session(): - return IMPL.get_session() - - -@six.add_metaclass(abc.ABCMeta) -class Connection(object): - """Base class for database connections.""" - - @abc.abstractmethod - def __init__(self): - """Constructor.""" - - # TODO(sc) Enforcement of required methods for db api diff --git a/inventory/inventory/inventory/db/migration.py b/inventory/inventory/inventory/db/migration.py deleted file mode 100644 index eb360f51..00000000 --- a/inventory/inventory/inventory/db/migration.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# under the License. - -"""Database setup and migration commands.""" - -from inventory.db.sqlalchemy import api as db_api -import os -from oslo_config import cfg -from oslo_db import options -from stevedore import driver - -options.set_defaults(cfg.CONF) - - -_IMPL = None - -MIGRATE_REPO_PATH = os.path.join( - os.path.abspath(os.path.dirname(__file__)), - 'sqlalchemy', - 'migrate_repo', -) - - -def get_backend(): - global _IMPL - if not _IMPL: - _IMPL = driver.DriverManager("inventory.database.migration_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -def db_sync(version=None, engine=None): - """Migrate the database to `version` or the most recent version.""" - - if engine is None: - engine = db_api.get_engine() - return get_backend().db_sync(engine=engine, - abs_path=MIGRATE_REPO_PATH, - version=version - ) - - -def upgrade(version=None): - """Migrate the database to `version` or the most recent version.""" - return get_backend().upgrade(version) - - -def version(): - return get_backend().version() - - -def create_schema(): - return get_backend().create_schema() diff --git a/inventory/inventory/inventory/db/sqlalchemy/__init__.py b/inventory/inventory/inventory/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/db/sqlalchemy/api.py b/inventory/inventory/inventory/db/sqlalchemy/api.py deleted file mode 100644 index ff02de46..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/api.py +++ /dev/null @@ -1,2570 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-"""SQLAlchemy backend."""
-
-import eventlet
-import threading
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import session as db_session
-from oslo_db.sqlalchemy import utils as db_utils
-
-from oslo_log import log
-from oslo_utils import uuidutils
-
-from sqlalchemy import inspect
-from sqlalchemy import or_
-from sqlalchemy.orm.exc import MultipleResultsFound
-from sqlalchemy.orm.exc import NoResultFound
-
-from inventory.common import exception
-from inventory.common.i18n import _
-from inventory.common import utils
-from inventory.db import api
-from inventory.db.sqlalchemy import models
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-
-_LOCK = threading.Lock()
-_FACADE = None
-
-context_manager = enginefacade.transaction_context()
-context_manager.configure()
-
-
-def _create_facade_lazily():
-    global _LOCK
-    with _LOCK:
-        global _FACADE
-        if _FACADE is None:
-            _FACADE = db_session.EngineFacade(
-                CONF.database.connection,
-                **dict(CONF.database)
-            )
-        return _FACADE
-
-
-def get_engine():
-    facade = _create_facade_lazily()
-    return facade.get_engine()
-
-
-def get_session(**kwargs):
-    facade = _create_facade_lazily()
-    return facade.get_session(**kwargs)
-
-
-def get_backend():
-    """The backend is this module itself."""
-    return Connection()
-
-
-def _session_for_read():
-    # _context = threading.local()
-    _context = eventlet.greenthread.getcurrent()
-
-    return enginefacade.reader.using(_context)
-
-
-def _session_for_write():
-    _context = eventlet.greenthread.getcurrent()
-
-    return enginefacade.writer.using(_context)
-
-
-def _paginate_query(model, limit=None, marker=None, sort_key=None,
-                    sort_dir=None, query=None):
-    if not query:
-        query = model_query(model)
-
-    if not sort_key:
-        sort_keys = []
-    elif not isinstance(sort_key, list):
-        sort_keys = [sort_key]
-    else:
-        sort_keys = sort_key
-
-    if 'id' not in sort_keys:
-        sort_keys.append('id')
-    query = db_utils.paginate_query(query, model, limit, sort_keys,
-                                    marker=marker, sort_dir=sort_dir)
-    return query.all()
-
-
-def model_query(model, *args, **kwargs):
-    """Query helper for simpler session usage.
-
-    :param model: database model
-    :param session: if present, the session to use
-    """
-
-    session = kwargs.get('session')
-    if session:
-        query = session.query(model, *args)
-    else:
-        with _session_for_read() as session:
-            query = session.query(model, *args)
-    return query
-
-
-def add_identity_filter(query, value,
-                        use_ifname=False,
-                        use_ipaddress=False,
-                        use_community=False,
-                        use_key=False,
-                        use_name=False,
-                        use_cname=False,
-                        use_secname=False,
-                        use_sensorgroupname=False,
-                        use_sensorname=False,
-                        use_pciaddr=False):
-    """Adds an identity filter to a query.
-
-    Filters results by ID, if supplied value is a valid integer.
-    Otherwise attempts to filter results by UUID.
-
-    :param query: Initial query to add filter to.
-    :param value: Value for filtering results by.
-    :return: Modified query.
- """ - if utils.is_int_like(value): - return query.filter_by(id=value) - elif uuidutils.is_uuid_like(value): - return query.filter_by(uuid=value) - else: - if use_ifname: - return query.filter_by(ifname=value) - elif use_ipaddress: - return query.filter_by(ip_address=value) - elif use_community: - return query.filter_by(community=value) - elif use_name: - return query.filter_by(name=value) - elif use_cname: - return query.filter_by(cname=value) - elif use_secname: - return query.filter_by(secname=value) - elif use_key: - return query.filter_by(key=value) - elif use_pciaddr: - return query.filter_by(pciaddr=value) - elif use_sensorgroupname: - return query.filter_by(sensorgroupname=value) - elif use_sensorname: - return query.filter_by(sensorname=value) - else: - return query.filter_by(hostname=value) - - -def add_filter_by_many_identities(query, model, values): - """Adds an identity filter to a query for values list. - - Filters results by ID, if supplied values contain a valid integer. - Otherwise attempts to filter results by UUID. - - :param query: Initial query to add filter to. - :param model: Model for filter. - :param values: Values for filtering results by. - :return: tuple (Modified query, filter field name). - """ - if not values: - raise exception.InvalidIdentity(identity=values) - value = values[0] - if utils.is_int_like(value): - return query.filter(getattr(model, 'id').in_(values)), 'id' - elif uuidutils.is_uuid_like(value): - return query.filter(getattr(model, 'uuid').in_(values)), 'uuid' - else: - raise exception.InvalidIdentity(identity=value) - - -def add_node_filter_by_host(query, value): - if utils.is_int_like(value): - return query.filter_by(host_id=value) - else: - query = query.join(models.Hosts, - models.Nodes.host_id == models.Hosts.id) - return query.filter(models.Hosts.uuid == value) - - -def add_filter_by_host_node(query, ihostid, inodeid): - if utils.is_int_like(ihostid) and utils.is_int_like(inodeid): - return query.filter_by(host_id=ihostid, node_id=inodeid) - - if utils.is_uuid_like(ihostid) and utils.is_uuid_like(inodeid): - ihostq = model_query(models.Hosts).filter_by(uuid=ihostid).first() - inodeq = model_query(models.Nodes).filter_by(uuid=inodeid).first() - - query = query.filter_by(host_id=ihostq.id, - node_id=inodeq.id) - - return query - - -def add_cpu_filter_by_host(query, value): - if utils.is_int_like(value): - return query.filter_by(host_id=value) - else: - query = query.join(models.Hosts, - models.Cpus.host_id == models.Hosts.id) - return query.filter(models.Hosts.uuid == value) - - -def add_cpu_filter_by_host_node(query, ihostid, inodeid): - if utils.is_int_like(ihostid) and utils.is_int_like(inodeid): - return query.filter_by(host_id=ihostid, node_id=inodeid) - - # gives access to joined tables... 
nice to have unique col name - if utils.is_uuid_like(ihostid) and utils.is_uuid_like(inodeid): - query = query.join(models.Hosts, - models.Cpus.host_id == models.Hosts.id, - models.Nodes.host_id == models.Hosts.id) - - return query.filter(models.Hosts.uuid == ihostid, - models.Nodes.uuid == inodeid) - - LOG.error("cpu_filter_by_host_inode: No match for id int or ids uuid") - - -def add_cpu_filter_by_node(query, inodeid): - if utils.is_int_like(inodeid): - return query.filter_by(node_id=inodeid) - else: - query = query.join(models.Nodes, - models.Cpus.node_id == models.Nodes.id) - return query.filter(models.Nodes.uuid == inodeid) - - -def add_memory_filter_by_host(query, value): - if utils.is_int_like(value): - return query.filter_by(host_id=value) - else: - query = query.join(models.Hosts, - models.Memorys.host_id == models.Hosts.id) - return query.filter(models.Hosts.uuid == value) - - -def add_memory_filter_by_host_node(query, ihostid, inodeid): - if utils.is_int_like(ihostid) and utils.is_int_like(inodeid): - return query.filter_by(host_id=ihostid, node_id=inodeid) - - if utils.is_uuid_like(ihostid) and utils.is_uuid_like(inodeid): - ihostq = model_query(models.Hosts).filter_by(uuid=ihostid).first() - inodeq = model_query(models.Nodes).filter_by(uuid=inodeid).first() - - query = query.filter_by(host_id=ihostq.id, - node_id=inodeq.id) - - return query - - -def add_memory_filter_by_node(query, inodeid): - if utils.is_int_like(inodeid): - return query.filter_by(node_id=inodeid) - else: - query = query.join(models.Nodes, - models.Memorys.node_id == models.Nodes.id) - return query.filter(models.Nodes.uuid == inodeid) - - -def add_device_filter_by_host(query, hostid): - """Adds a device-specific ihost filter to a query. - - Filters results by host id if supplied value is an integer, - otherwise attempts to filter results by host uuid. - - :param query: Initial query to add filter to. - :param hostid: host id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(hostid): - return query.filter_by(host_id=hostid) - - elif utils.is_uuid_like(hostid): - query = query.join(models.Hosts) - return query.filter(models.Hosts.uuid == hostid) - - -def add_port_filter_by_numa_node(query, nodeid): - """Adds a port-specific numa node filter to a query. - - Filters results by numa node id if supplied nodeid is an integer, - otherwise attempts to filter results by numa node uuid. - - :param query: Initial query to add filter to. - :param nodeid: numa node id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(nodeid): - return query.filter_by(node_id=nodeid) - - elif utils.is_uuid_like(nodeid): - query = query.join(models.Nodes) - return query.filter(models.Nodes.uuid == nodeid) - - LOG.debug("port_filter_by_numa_node: " - "No match for supplied filter id (%s)" % str(nodeid)) - - -def add_port_filter_by_host(query, hostid): - """Adds a port-specific host filter to a query. - - Filters results by host id if supplied value is an integer, - otherwise attempts to filter results by host uuid. - - :param query: Initial query to add filter to. - :param hostid: host id or uuid to filter results by. - :return: Modified query. 
- """ - if utils.is_int_like(hostid): - # Should not need join due to polymorphic ports table - # query = query.join(models.ports, - # models.EthernetPorts.id == models.ports.id) - # - # Query of ethernet_ports table should return data from - # corresponding ports table entry so should be able to - # use filter_by() rather than filter() - # - return query.filter_by(host_id=hostid) - - elif utils.is_uuid_like(hostid): - query = query.join(models.Hosts) - return query.filter(models.Hosts.uuid == hostid) - - LOG.debug("port_filter_by_host: " - "No match for supplied filter id (%s)" % str(hostid)) - - -def add_lldp_filter_by_host(query, hostid): - """Adds a lldp-specific ihost filter to a query. - - Filters results by host id if supplied value is an integer, - otherwise attempts to filter results by host uuid. - - :param query: Initial query to add filter to. - :param hostid: host id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(hostid): - return query.filter_by(host_id=hostid) - elif utils.is_uuid_like(hostid): - query = query.join(models.Hosts) - return query.filter(models.Hosts.uuid == hostid) - - LOG.debug("lldp_filter_by_host: " - "No match for supplied filter id (%s)" % str(hostid)) - - -def add_lldp_filter_by_port(query, portid): - """Adds a lldp-specific port filter to a query. - - Filters results by port id if supplied value is an integer, - otherwise attempts to filter results by port uuid. - - :param query: Initial query to add filter to. - :param portid: port id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(portid): - return query.filter_by(port_id=portid) - elif utils.is_uuid_like(portid): - query = query.join(models.Ports) - return query.filter(models.Ports.uuid == portid) - - -def add_lldp_filter_by_agent(query, value): - """Adds an lldp-specific filter to a query. - - Filters results by agent id if supplied value is an integer. - Filters results by agent UUID if supplied value is a UUID. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - if utils.is_int_like(value): - return query.filter(models.LldpAgents.id == value) - elif uuidutils.is_uuid_like(value): - return query.filter(models.LldpAgents.uuid == value) - - -def add_lldp_filter_by_neighbour(query, value): - """Adds an lldp-specific filter to a query. - - Filters results by neighbour id if supplied value is an integer. - Filters results by neighbour UUID if supplied value is a UUID. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - if utils.is_int_like(value): - return query.filter(models.LldpNeighbours.id == value) - elif uuidutils.is_uuid_like(value): - return query.filter(models.LldpNeighbours.uuid == value) - - -def add_lldp_tlv_filter_by_neighbour(query, neighbourid): - """Adds an lldp-specific filter to a query. - - Filters results by neighbour id if supplied value is an integer. - Filters results by neighbour UUID if supplied value is a UUID. - - :param query: Initial query to add filter to. - :param neighbourid: Value for filtering results by. - :return: Modified query. 
- """ - if utils.is_int_like(neighbourid): - return query.filter_by(neighbour_id=neighbourid) - elif uuidutils.is_uuid_like(neighbourid): - query = query.join( - models.LldpNeighbours, - models.LldpTlvs.neighbour_id == models.LldpNeighbours.id) - return query.filter(models.LldpNeighbours.uuid == neighbourid) - - -def add_lldp_tlv_filter_by_agent(query, agentid): - """Adds an lldp-specific filter to a query. - - Filters results by agent id if supplied value is an integer. - Filters results by agent UUID if supplied value is a UUID. - - :param query: Initial query to add filter to. - :param agentid: Value for filtering results by. - :return: Modified query. - """ - if utils.is_int_like(agentid): - return query.filter_by(agent_id=agentid) - elif uuidutils.is_uuid_like(agentid): - query = query.join(models.LldpAgents, - models.LldpTlvs.agent_id == models.LldpAgents.id) - return query.filter(models.LldpAgents.uuid == agentid) - - -# -# SENSOR FILTERS -# -def add_sensorgroup_filter(query, value): - """Adds a sensorgroup-specific filter to a query. - - Filters results by mac, if supplied value is a valid MAC - address. Otherwise attempts to filter results by identity. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - if uuidutils.is_uuid_like(value): - return query.filter(or_(models.SensorGroupsAnalog.uuid == value, - models.SensorGroupsDiscrete.uuid == value)) - elif utils.is_int_like(value): - return query.filter(or_(models.SensorGroupsAnalog.id == value, - models.SensorGroupsDiscrete.id == value)) - else: - return add_identity_filter(query, value, use_sensorgroupname=True) - - -def add_sensorgroup_filter_by_sensor(query, value): - """Adds an sensorgroup-specific filter to a query. - - Filters results by sensor id if supplied value is an integer. - Filters results by sensor UUID if supplied value is a UUID. - Otherwise attempts to filter results by name - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - query = query.join(models.Sensors) - if utils.is_int_like(value): - return query.filter(models.Sensors.id == value) - elif uuidutils.is_uuid_like(value): - return query.filter(models.Sensors.uuid == value) - else: - return query.filter(models.Sensors.name == value) - - -def add_sensorgroup_filter_by_host(query, value): - """Adds an sensorgroup-specific filter to a query. - - Filters results by hostid, if supplied value is an integer. - Otherwise attempts to filter results by UUID. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - if utils.is_int_like(value): - return query.filter_by(host_id=value) - else: - query = query.join(models.Hosts, - models.SensorGroups.host_id == models.Hosts.id) - return query.filter(models.Hosts.uuid == value) - - -def add_sensor_filter(query, value): - """Adds a sensor-specific filter to a query. - - Filters results by identity. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. - """ - return add_identity_filter(query, value, use_sensorname=True) - - -def add_sensor_filter_by_host(query, hostid): - """Adds a sensor-specific ihost filter to a query. - - Filters results by host id if supplied value is an integer, - otherwise attempts to filter results by host uuid. - - :param query: Initial query to add filter to. 
- :param hostid: host id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(hostid): - return query.filter_by(host_id=hostid) - elif utils.is_uuid_like(hostid): - query = query.join(models.Hosts) - return query.filter(models.Hosts.uuid == hostid) - - LOG.debug("sensor_filter_by_host: " - "No match for supplied filter id (%s)" % str(hostid)) - - -def add_sensor_filter_by_sensorgroup(query, sensorgroupid): - """Adds a sensor-specific sensorgroup filter to a query. - - Filters results by sensorgroup id if supplied value is an integer, - otherwise attempts to filter results by sensorgroup uuid. - - :param query: Initial query to add filter to. - :param sensorgroupid: sensorgroup id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(sensorgroupid): - return query.filter_by(sensorgroup_id=sensorgroupid) - - elif utils.is_uuid_like(sensorgroupid): - query = query.join(models.SensorGroups, - models.Sensors.sensorgroup_id == - models.SensorGroups.id) - - return query.filter(models.SensorGroups.uuid == sensorgroupid) - - LOG.warn("sensor_filter_by_sensorgroup: " - "No match for supplied filter id (%s)" % str(sensorgroupid)) - - -def add_sensor_filter_by_host_sensorgroup(query, hostid, sensorgroupid): - """Adds a sensor-specific host and sensorgroup filter to a query. - - Filters results by host id and sensorgroup id if supplied hostid and - sensorgroupid are integers, otherwise attempts to filter results by - host uuid and sensorgroup uuid. - - :param query: Initial query to add filter to. - :param hostid: host id or uuid to filter results by. - :param sensorgroupid: sensorgroup id or uuid to filter results by. - :return: Modified query. - """ - if utils.is_int_like(hostid) and utils.is_int_like(sensorgroupid): - return query.filter_by(host_id=hostid, sensorgroup_id=sensorgroupid) - - elif utils.is_uuid_like(hostid) and utils.is_uuid_like(sensorgroupid): - query = query.join(models.Hosts, - models.SensorGroups) - return query.filter(models.Hosts.uuid == hostid, - models.SensorGroups.uuid == sensorgroupid) - - LOG.debug("sensor_filter_by_host_isensorgroup: " - "No match for supplied filter ids (%s, %s)" - % (str(hostid), str(sensorgroupid))) - - -class Connection(api.Connection): - """SQLAlchemy connection.""" - - def __init__(self): - pass - - def get_session(self, autocommit=True): - return get_session(autocommit) - - def get_engine(self): - return get_engine() - - def _system_get(self, system): - query = model_query(models.Systems) - query = add_identity_filter(query, system) - try: - result = query.one() - except NoResultFound: - raise exception.SystemNotFound(system=system) - return result - - def system_create(self, values): - if not values.get('uuid'): - # The system uuid comes from systemconfig - raise exception.SystemNotFound(system=values) - system = models.Systems() - system.update(values) - with _session_for_write() as session: - try: - session.add(system) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.SystemAlreadyExists(uuid=values['uuid']) - return self._system_get(values['uuid']) - - def system_get(self, system): - return self._system_get(system) - - def system_get_one(self): - query = model_query(models.Systems) - try: - return query.one() - except NoResultFound: - raise exception.NotFound() - - def system_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Systems) - return _paginate_query(models.Systems, limit, marker, - sort_key, sort_dir, 
query) - - def system_update(self, system, values): - with _session_for_write() as session: - query = model_query(models.Systems, session=session) - query = add_identity_filter(query, system) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.SystemNotFound(system=system) - return query.one() - - def system_delete(self, system): - with _session_for_write() as session: - query = model_query(models.Systems, session=session) - query = add_identity_filter(query, system) - try: - query.one() - except NoResultFound: - raise exception.SystemNotFound(system=system) - query.delete() - - # - # Hosts - # - - def _add_hosts_filters(self, query, filters): - if filters is None: - filters = dict() - supported_filters = {'hostname', - 'invprovision', - 'mgmt_mac', - 'personality', - } - unsupported_filters = set(filters).difference(supported_filters) - if unsupported_filters: - msg = _("SqlAlchemy API does not support " - "filtering by %s") % ', '.join(unsupported_filters) - raise ValueError(msg) - - for field in supported_filters: - if field in filters: - query = query.filter_by(**{field: filters[field]}) - - return query - - def _host_get(self, host): - query = model_query(models.Hosts) - if utils.is_uuid_like(host): - host.strip() - query = add_identity_filter(query, host) - try: - return query.one() - except NoResultFound: - raise exception.HostNotFound(host=host) - - def host_create(self, values): - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - host = models.Hosts() - host.update(values) - with _session_for_write() as session: - try: - session.add(host) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.HostAlreadyExists(uuid=values['uuid']) - return self._host_get(values['uuid']) - - def host_get(self, host): - return self._host_get(host) - - def host_get_list(self, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Hosts) - query = self._add_hosts_filters(query, filters) - return _paginate_query(models.Hosts, limit, marker, - sort_key, sort_dir, query) - - def host_get_by_filters_one(self, filters): - query = model_query(models.Hosts) - query = self._add_hosts_filters(query, filters) - try: - return query.one() - except NoResultFound: - raise exception.HostNotFound(host=filters) - - def host_get_by_hostname(self, hostname): - query = model_query(models.Hosts) - query = query.filter_by(hostname=hostname) - try: - return query.one() - except NoResultFound: - raise exception.HostNotFound(host=hostname) - - def host_get_by_personality(self, personality, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Hosts) - query = query.filter_by(personality=personality) - return _paginate_query(models.Hosts, limit, marker, - sort_key, sort_dir, query) - - def host_get_by_mgmt_mac(self, mgmt_mac): - try: - mgmt_mac = mgmt_mac.rstrip() - mgmt_mac = utils.validate_and_normalize_mac(mgmt_mac) - except exception.InventoryException: - raise exception.HostNotFound(host=mgmt_mac) - - query = model_query(models.Hosts) - query = query.filter_by(mgmt_mac=mgmt_mac) - - try: - return query.one() - except NoResultFound: - raise exception.HostNotFound(host=mgmt_mac) - - def host_update(self, host, values, context=None): - with _session_for_write() as session: - query = model_query(models.Hosts, session=session) - query = add_identity_filter(query, host) - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise 
exception.HostNotFound(host=host) - return self._host_get(host) - - def host_destroy(self, host): - with _session_for_write() as session: - query = model_query(models.Hosts, session=session) - query = add_identity_filter(query, host) - try: - query.one() - except NoResultFound: - raise exception.HostNotFound(host=host) - query.delete() - - # - # Ports - # - - def _port_get(self, portid, hostid=None): - query = model_query(models.Ports) - - if hostid: - query = query.filter_by(host_id=hostid) - - query = add_identity_filter(query, portid, use_name=True) - - try: - return query.one() - except NoResultFound: - raise exception.PortNotFound(port=portid) - - def port_get(self, portid, hostid=None): - return self._port_get(portid, hostid) - - def port_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.Ports, limit, marker, - sort_key, sort_dir) - - def port_get_all(self, hostid=None, interfaceid=None): - query = model_query(models.Ports, read_deleted="no") - if hostid: - query = query.filter_by(host_id=hostid) - if interfaceid: - query = query.filter_by(interface_id=interfaceid) - return query.all() - - def port_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Ports) - query = add_port_filter_by_host(query, host) - return _paginate_query(models.Ports, limit, marker, - sort_key, sort_dir, query) - - def port_get_by_numa_node(self, node, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Ports) - query = add_port_filter_by_numa_node(query, node) - return _paginate_query(models.Ports, limit, marker, - sort_key, sort_dir, query) - - def _ethernet_port_get(self, portid, hostid=None): - query = model_query(models.EthernetPorts) - - if hostid: - query = query.filter_by(host_id=hostid) - - query = add_identity_filter(query, portid, use_name=True) - - try: - return query.one() - except NoResultFound: - raise exception.PortNotFound(port=portid) - - def ethernet_port_create(self, hostid, values): - if utils.is_int_like(hostid): - host = self.host_get(int(hostid)) - elif utils.is_uuid_like(hostid): - host = self.host_get(hostid.strip()) - elif isinstance(hostid, models.Hosts): - host = hostid - else: - raise exception.HostNotFound(host=hostid) - - values['host_id'] = host['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - ethernet_port = models.EthernetPorts() - ethernet_port.update(values) - with _session_for_write() as session: - try: - session.add(ethernet_port) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add port %s (uuid: %s), port with MAC " - "address %s on host %s already exists" % - (values['name'], - values['uuid'], - values['mac'], - values['host_id'])) - raise exception.MACAlreadyExists(mac=values['mac'], - host=values['host_id']) - - return self._ethernet_port_get(values['uuid']) - - def ethernet_port_get(self, portid, hostid=None): - return self._ethernet_port_get(portid, hostid) - - def ethernet_port_get_by_mac(self, mac): - query = model_query(models.EthernetPorts).filter_by(mac=mac) - try: - return query.one() - except NoResultFound: - raise exception.PortNotFound(port=mac) - - def ethernet_port_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.EthernetPorts, limit, marker, - sort_key, sort_dir) - - def ethernet_port_get_all(self, hostid=None): - query = model_query(models.EthernetPorts, read_deleted="no") - if 
hostid: - query = query.filter_by(host_id=hostid) - return query.all() - - def ethernet_port_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.EthernetPorts) - query = add_port_filter_by_host(query, host) - return _paginate_query(models.EthernetPorts, limit, marker, - sort_key, sort_dir, query) - - def ethernet_port_get_by_numa_node(self, node, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.EthernetPorts) - query = add_port_filter_by_numa_node(query, node) - return _paginate_query(models.EthernetPorts, limit, marker, - sort_key, sort_dir, query) - - def ethernet_port_update(self, portid, values): - with _session_for_write() as session: - # May need to reserve in multi controller system; ref sysinv - query = model_query(models.EthernetPorts, read_deleted="no", - session=session) - query = add_identity_filter(query, portid) - - try: - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for port %s" % portid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for port %s" % portid) - - return query.one() - - def ethernet_port_destroy(self, portid): - with _session_for_write() as session: - # Delete port which should cascade to delete EthernetPort - if uuidutils.is_uuid_like(portid): - model_query(models.Ports, read_deleted="no", - session=session).\ - filter_by(uuid=portid).\ - delete() - else: - model_query(models.Ports, read_deleted="no", - session=session).\ - filter_by(id=portid).\ - delete() - - # - # Nodes - # - - def _node_get(self, node_id): - query = model_query(models.Nodes) - query = add_identity_filter(query, node_id) - - try: - result = query.one() - except NoResultFound: - raise exception.NodeNotFound(node=node_id) - - return result - - def node_create(self, host_id, values): - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - values['host_id'] = int(host_id) - node = models.Nodes() - node.update(values) - with _session_for_write() as session: - try: - session.add(node) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.NodeAlreadyExists(uuid=values['uuid']) - - return self._node_get(values['uuid']) - - def node_get_all(self, host_id=None): - query = model_query(models.Nodes, read_deleted="no") - if host_id: - query = query.filter_by(host_id=host_id) - return query.all() - - def node_get(self, node_id): - return self._node_get(node_id) - - def node_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.Nodes, limit, marker, - sort_key, sort_dir) - - def node_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Nodes) - query = add_node_filter_by_host(query, host) - return _paginate_query(models.Nodes, limit, marker, - sort_key, sort_dir, query) - - def node_update(self, node_id, values): - with _session_for_write() as session: - # May need to reserve in multi controller system; ref sysinv - query = model_query(models.Nodes, read_deleted="no", - session=session) - query = add_identity_filter(query, node_id) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.NodeNotFound(node=node_id) - return query.one() - - def node_destroy(self, node_id): - with _session_for_write() as session: - # Delete physically since it has unique 
columns - if uuidutils.is_uuid_like(node_id): - model_query(models.Nodes, read_deleted="no", - session=session).\ - filter_by(uuid=node_id).\ - delete() - else: - model_query(models.Nodes, read_deleted="no").\ - filter_by(id=node_id).\ - delete() - - # - # Cpus - # - - def _cpu_get(self, cpu_id, host_id=None): - query = model_query(models.Cpus) - - if host_id: - query = query.filter_by(host_id=host_id) - - query = add_identity_filter(query, cpu_id) - - try: - result = query.one() - except NoResultFound: - raise exception.CPUNotFound(cpu=cpu_id) - - return result - - def cpu_create(self, host_id, values): - - if utils.is_int_like(host_id): - values['host_id'] = int(host_id) - else: - # this is not necessary if already integer following not work - host = self.host_get(host_id.strip()) - values['host_id'] = host['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - cpu = models.Cpus() - cpu.update(values) - - with _session_for_write() as session: - try: - session.add(cpu) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.CPUAlreadyExists(cpu=values['cpu']) - return self._cpu_get(values['uuid']) - - def cpu_get_all(self, host_id=None, fornodeid=None): - query = model_query(models.Cpus, read_deleted="no") - if host_id: - query = query.filter_by(host_id=host_id) - if fornodeid: - query = query.filter_by(fornodeid=fornodeid) - return query.all() - - def cpu_get(self, cpu_id, host_id=None): - return self._cpu_get(cpu_id, host_id) - - def cpu_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.Cpus, limit, marker, - sort_key, sort_dir) - - def cpu_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Cpus) - query = add_cpu_filter_by_host(query, host) - return _paginate_query(models.Cpus, limit, marker, - sort_key, sort_dir, query) - - def cpu_get_by_node(self, node, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Cpus) - query = add_cpu_filter_by_node(query, node) - return _paginate_query(models.Cpus, limit, marker, - sort_key, sort_dir, query) - - def cpu_get_by_host_node(self, host, node, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Cpus) - query = add_cpu_filter_by_host_node(query, host, node) - return _paginate_query(models.Cpus, limit, marker, - sort_key, sort_dir, query) - - def cpu_update(self, cpu_id, values, host_id=None): - with _session_for_write() as session: - # May need to reserve in multi controller system; ref sysinv - query = model_query(models.Cpus, read_deleted="no", - session=session) - if host_id: - query = query.filter_by(host_id=host_id) - - query = add_identity_filter(query, cpu_id) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.CPUNotFound(cpu=cpu_id) - return query.one() - - def cpu_destroy(self, cpu_id): - with _session_for_write() as session: - # Delete physically since it has unique columns - if uuidutils.is_uuid_like(cpu_id): - model_query(models.Cpus, read_deleted="no", session=session).\ - filter_by(uuid=cpu_id).\ - delete() - else: - model_query(models.Cpus, read_deleted="no").\ - filter_by(id=cpu_id).\ - delete() - - # - # Memory - # - - def _memory_get(self, memory_id, host_id=None): - query = model_query(models.Memorys) - - if host_id: - query = query.filter_by(host_id=host_id) - - query = add_identity_filter(query, memory_id) - - try: - result = query.one() - 
except NoResultFound: - raise exception.MemoryNotFound(memory=memory_id) - - return result - - def memory_create(self, host_id, values): - if utils.is_int_like(host_id): - values['host_id'] = int(host_id) - else: - # this is not necessary if already integer following not work - host = self.host_get(host_id.strip()) - values['host_id'] = host['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - values.pop('numa_node', None) - - memory = models.Memorys() - memory.update(values) - with _session_for_write() as session: - try: - session.add(memory) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.MemoryAlreadyExists(uuid=values['uuid']) - return self._memory_get(values['uuid']) - - def memory_get_all(self, host_id=None, fornodeid=None): - query = model_query(models.Memorys, read_deleted="no") - if host_id: - query = query.filter_by(host_id=host_id) - if fornodeid: - query = query.filter_by(fornodeid=fornodeid) - return query.all() - - def memory_get(self, memory_id, host_id=None): - return self._memory_get(memory_id, host_id) - - def memory_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.Memorys, limit, marker, - sort_key, sort_dir) - - def memory_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Memorys) - query = add_memory_filter_by_host(query, host) - return _paginate_query(models.Memorys, limit, marker, - sort_key, sort_dir, query) - - def memory_get_by_node(self, node, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Memorys) - query = add_memory_filter_by_node(query, node) - return _paginate_query(models.Memorys, limit, marker, - sort_key, sort_dir, query) - - def memory_get_by_host_node(self, host, node, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.Memorys) - query = add_memory_filter_by_host_node(query, host, node) - return _paginate_query(models.Memorys, limit, marker, - sort_key, sort_dir, query) - - def memory_update(self, memory_id, values, host_id=None): - with _session_for_write() as session: - # May need to reserve in multi controller system; ref sysinv - query = model_query(models.Memorys, read_deleted="no", - session=session) - if host_id: - query = query.filter_by(host_id=host_id) - - query = add_identity_filter(query, memory_id) - - values.pop('numa_node', None) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.MemoryNotFound(memory=memory_id) - return query.one() - - def memory_destroy(self, memory_id): - with _session_for_write() as session: - # Delete physically since it has unique columns - if uuidutils.is_uuid_like(memory_id): - model_query(models.Memorys, read_deleted="no", - session=session).\ - filter_by(uuid=memory_id).\ - delete() - else: - model_query(models.Memorys, read_deleted="no", - session=session).\ - filter_by(id=memory_id).\ - delete() - - # - # PciDevices - # - - def pci_device_create(self, hostid, values): - - if utils.is_int_like(hostid): - host = self.host_get(int(hostid)) - elif utils.is_uuid_like(hostid): - host = self.host_get(hostid.strip()) - elif isinstance(hostid, models.Hosts): - host = hostid - else: - raise exception.HostNotFound(host=hostid) - - values['host_id'] = host['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - pci_device = models.PciDevices() - pci_device.update(values) - with 
_session_for_write() as session: - try: - session.add(pci_device) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add pci device %s:%s (uuid: %s), " - "device with PCI address %s on host %s " - "already exists" % - (values['vendor'], - values['device'], - values['uuid'], - values['pciaddr'], - values['host_id'])) - raise exception.PCIAddrAlreadyExists(pciaddr=values['pciaddr'], - host=values['host_id']) - - def pci_device_get_all(self, hostid=None): - query = model_query(models.PciDevices, read_deleted="no") - if hostid: - query = query.filter_by(host_id=hostid) - return query.all() - - def pci_device_get(self, deviceid, hostid=None): - query = model_query(models.PciDevices) - if hostid: - query = query.filter_by(host_id=hostid) - query = add_identity_filter(query, deviceid, use_pciaddr=True) - try: - result = query.one() - except NoResultFound: - raise exception.PCIDeviceNotFound(pcidevice_id=deviceid) - - return result - - def pci_device_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.PciDevices, limit, marker, - sort_key, sort_dir) - - def pci_device_get_by_host(self, host, limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.PciDevices) - query = add_device_filter_by_host(query, host) - return _paginate_query(models.PciDevices, limit, marker, - sort_key, sort_dir, query) - - def pci_device_update(self, device_id, values, host_id=None): - with _session_for_write() as session: - query = model_query(models.PciDevices, read_deleted="no", - session=session) - - if host_id: - query = query.filter_by(host_id=host_id) - - try: - query = add_identity_filter(query, device_id) - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for device %s" % device_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for device %s" % device_id) - - return query.one() - - def pci_device_destroy(self, device_id): - with _session_for_write() as session: - if uuidutils.is_uuid_like(device_id): - model_query(models.PciDevices, read_deleted="no", - session=session).\ - filter_by(uuid=device_id).\ - delete() - else: - model_query(models.PciDevices, read_deleted="no", - session=session).\ - filter_by(id=device_id).\ - delete() - - # - # LLDP - # - - def _lldp_agent_get(self, agentid, hostid=None): - query = model_query(models.LldpAgents) - - if hostid: - query = query.filter_by(host_id=hostid) - - query = add_lldp_filter_by_agent(query, agentid) - - try: - return query.one() - except NoResultFound: - raise exception.LldpAgentNotFound(agent=agentid) - - def lldp_agent_create(self, portid, hostid, values): - host = self.host_get(hostid) - port = self.port_get(portid) - - values['host_id'] = host['id'] - values['port_id'] = port['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - lldp_agent = models.LldpAgents() - lldp_agent.update(values) - with _session_for_write() as session: - try: - session.add(lldp_agent) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add lldp agent %s, on host %s:" - "already exists" % - (values['uuid'], - values['host_id'])) - raise exception.LLDPAgentExists(uuid=values['uuid'], - host=values['host_id']) - return self._lldp_agent_get(values['uuid']) - - def lldp_agent_get(self, agentid, hostid=None): - return self._lldp_agent_get(agentid, hostid) - - def 
lldp_agent_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.LldpAgents, limit, marker, - sort_key, sort_dir) - - def lldp_agent_get_all(self, hostid=None, portid=None): - query = model_query(models.LldpAgents, read_deleted="no") - if hostid: - query = query.filter_by(host_id=hostid) - if portid: - query = query.filter_by(port_id=portid) - return query.all() - - def lldp_agent_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.LldpAgents) - query = add_lldp_filter_by_host(query, host) - return _paginate_query(models.LldpAgents, limit, marker, - sort_key, sort_dir, query) - - def lldp_agent_get_by_port(self, port): - query = model_query(models.LldpAgents) - query = add_lldp_filter_by_port(query, port) - try: - return query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for agent on port %s" % port) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for agent on port %s" % port) - - def lldp_agent_update(self, uuid, values): - with _session_for_write(): - query = model_query(models.LldpAgents, read_deleted="no") - - try: - query = add_lldp_filter_by_agent(query, uuid) - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - return result - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for agent %s" % uuid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for agent %s" % uuid) - - def lldp_agent_destroy(self, agentid): - - with _session_for_write(): - query = model_query(models.LldpAgents, read_deleted="no") - query = add_lldp_filter_by_agent(query, agentid) - - try: - query.delete() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for agent %s" % agentid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for agent %s" % agentid) - - def _lldp_neighbour_get(self, neighbourid, hostid=None): - query = model_query(models.LldpNeighbours) - - if hostid: - query = query.filter_by(host_id=hostid) - - query = add_lldp_filter_by_neighbour(query, neighbourid) - - try: - return query.one() - except NoResultFound: - raise exception.LldpNeighbourNotFound(neighbour=neighbourid) - - def lldp_neighbour_create(self, portid, hostid, values): - if utils.is_int_like(hostid): - host = self.host_get(int(hostid)) - elif utils.is_uuid_like(hostid): - host = self.host_get(hostid.strip()) - elif isinstance(hostid, models.Hosts): - host = hostid - else: - raise exception.HostNotFound(host=hostid) - if utils.is_int_like(portid): - port = self.port_get(int(portid)) - elif utils.is_uuid_like(portid): - port = self.port_get(portid.strip()) - elif isinstance(portid, models.port): - port = portid - else: - raise exception.PortNotFound(port=portid) - - values['host_id'] = host['id'] - values['port_id'] = port['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - lldp_neighbour = models.LldpNeighbours() - lldp_neighbour.update(values) - with _session_for_write() as session: - try: - session.add(lldp_neighbour) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add lldp neighbour %s, on port %s:. 
" - "Already exists with msap %s" % - (values['uuid'], - values['port_id'], - values['msap'])) - raise exception.LLDPNeighbourExists(uuid=values['uuid']) - - return self._lldp_neighbour_get(values['uuid']) - - def lldp_neighbour_get(self, neighbourid, hostid=None): - return self._lldp_neighbour_get(neighbourid, hostid) - - def lldp_neighbour_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.LldpNeighbours, limit, marker, - sort_key, sort_dir) - - def lldp_neighbour_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.LldpNeighbours) - query = add_port_filter_by_host(query, host) - return _paginate_query(models.LldpNeighbours, limit, marker, - sort_key, sort_dir, query) - - def lldp_neighbour_get_by_port(self, port, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.LldpNeighbours) - query = add_lldp_filter_by_port(query, port) - return _paginate_query(models.LldpNeighbours, limit, marker, - sort_key, sort_dir, query) - - def lldp_neighbour_update(self, uuid, values): - with _session_for_write(): - query = model_query(models.LldpNeighbours, read_deleted="no") - - try: - query = add_lldp_filter_by_neighbour(query, uuid) - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - return result - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for uuid %s" % uuid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for uuid %s" % uuid) - - def lldp_neighbour_destroy(self, neighbourid): - with _session_for_write(): - query = model_query(models.LldpNeighbours, read_deleted="no") - query = add_lldp_filter_by_neighbour(query, neighbourid) - try: - query.delete() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for neighbour %s" % neighbourid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for neighbour %s" % - neighbourid) - - def _lldp_tlv_get(self, type, agentid=None, neighbourid=None, - session=None): - if not agentid and not neighbourid: - raise exception.InvalidParameterValue( - err="agent id and neighbour id not specified") - - query = model_query(models.LldpTlvs, session=session) - - if agentid: - query = query.filter_by(agent_id=agentid) - - if neighbourid: - query = query.filter_by(neighbour_id=neighbourid) - - query = query.filter_by(type=type) - - try: - return query.one() - except NoResultFound: - raise exception.LldpTlvNotFound(type=type) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found") - - def lldp_tlv_create(self, values, agentid=None, neighbourid=None): - if not agentid and not neighbourid: - raise exception.InvalidParameterValue( - err="agent id and neighbour id not specified") - - if agentid: - if utils.is_int_like(agentid): - agent = self.lldp_agent_get(int(agentid)) - elif utils.is_uuid_like(agentid): - agent = self.lldp_agent_get(agentid.strip()) - elif isinstance(agentid, models.lldp_agents): - agent = agentid - else: - raise exception.LldpAgentNotFound(agent=agentid) - - if neighbourid: - if utils.is_int_like(neighbourid): - neighbour = self.lldp_neighbour_get(int(neighbourid)) - elif utils.is_uuid_like(neighbourid): - neighbour = self.lldp_neighbour_get(neighbourid.strip()) - elif isinstance(neighbourid, models.lldp_neighbours): - neighbour = neighbourid - else: - raise 
exception.LldpNeighbourNotFound(neighbour=neighbourid) - - if agentid: - values['agent_id'] = agent['id'] - - if neighbourid: - values['neighbour_id'] = neighbour['id'] - - lldp_tlv = models.LldpTlvs() - lldp_tlv.update(values) - with _session_for_write() as session: - try: - session.add(lldp_tlv) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add lldp tlv %s" - "already exists" % (values['type'])) - raise exception.LLDPTlvExists(uuid=values['id']) - return self._lldp_tlv_get(values['type'], - agentid=values.get('agent_id'), - neighbourid=values.get('neighbour_id')) - - def lldp_tlv_create_bulk(self, values, agentid=None, neighbourid=None): - if not agentid and not neighbourid: - raise exception.InvalidParameterValue( - err="agent id and neighbour id not specified") - - if agentid: - if utils.is_int_like(agentid): - agent = self.lldp_agent_get(int(agentid)) - elif utils.is_uuid_like(agentid): - agent = self.lldp_agent_get(agentid.strip()) - elif isinstance(agentid, models.lldp_agents): - agent = agentid - else: - raise exception.LldpAgentNotFound(agent=agentid) - - if neighbourid: - if utils.is_int_like(neighbourid): - neighbour = self.lldp_neighbour_get(int(neighbourid)) - elif utils.is_uuid_like(neighbourid): - neighbour = self.lldp_neighbour_get(neighbourid.strip()) - elif isinstance(neighbourid, models.lldp_neighbours): - neighbour = neighbourid - else: - raise exception.LldpNeighbourNotFound(neighbour=neighbourid) - - tlvs = [] - with _session_for_write() as session: - for entry in values: - lldp_tlv = models.LldpTlvs() - if agentid: - entry['agent_id'] = agent['id'] - - if neighbourid: - entry['neighbour_id'] = neighbour['id'] - - lldp_tlv.update(entry) - session.add(lldp_tlv) - - lldp_tlv = self._lldp_tlv_get( - entry['type'], - agentid=entry.get('agent_id'), - neighbourid=entry.get('neighbour_id'), - session=session) - - tlvs.append(lldp_tlv) - - return tlvs - - def lldp_tlv_get(self, type, agentid=None, neighbourid=None): - return self._lldp_tlv_get(type, agentid, neighbourid) - - def lldp_tlv_get_by_id(self, id, agentid=None, neighbourid=None): - query = model_query(models.LldpTlvs) - - query = query.filter_by(id=id) - try: - result = query.one() - except NoResultFound: - raise exception.LldpTlvNotFound(id=id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found") - - return result - - def lldp_tlv_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.LldpTlvs, limit, marker, - sort_key, sort_dir) - - def lldp_tlv_get_all(self, agentid=None, neighbourid=None): - query = model_query(models.LldpTlvs, read_deleted="no") - if agentid: - query = query.filter_by(agent_id=agentid) - if neighbourid: - query = query.filter_by(neighbour_id=neighbourid) - return query.all() - - def lldp_tlv_get_by_agent(self, agent, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.LldpTlvs) - query = add_lldp_tlv_filter_by_agent(query, agent) - return _paginate_query(models.LldpTlvs, limit, marker, - sort_key, sort_dir, query) - - def lldp_tlv_get_by_neighbour(self, neighbour, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.LldpTlvs) - query = add_lldp_tlv_filter_by_neighbour(query, neighbour) - return _paginate_query(models.LldpTlvs, limit, marker, - sort_key, sort_dir, query) - - def lldp_tlv_update(self, values, agentid=None, neighbourid=None): - if not agentid and not neighbourid: - raise 
exception.InvalidParameterValue( - err="agent id and neighbour id not specified") - - with _session_for_write(): - query = model_query(models.LldpTlvs, read_deleted="no") - - if agentid: - query = add_lldp_tlv_filter_by_agent(query, agentid) - - if neighbourid: - query = add_lldp_tlv_filter_by_neighbour(query, - neighbourid) - - query = query.filter_by(type=values['type']) - - try: - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - return result - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for tlv") - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found") - - def lldp_tlv_update_bulk(self, values, agentid=None, neighbourid=None): - results = [] - - if not agentid and not neighbourid: - raise exception.InvalidParameterValue( - err="agent id and neighbour id not specified") - - with _session_for_write() as session: - for entry in values: - query = model_query(models.LldpTlvs, read_deleted="no") - - if agentid: - query = query.filter_by(agent_id=agentid) - - if neighbourid: - query = query.filter_by(neighbour_id=neighbourid) - - query = query.filter_by(type=entry['type']) - - try: - result = query.one() - result.update(entry) - session.merge(result) - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for tlv") - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found") - - results.append(result) - return results - - def lldp_tlv_destroy(self, id): - with _session_for_write(): - model_query(models.LldpTlvs, read_deleted="no").\ - filter_by(id=id).\ - delete() - - # - # SENSORS - # - - def _sensor_analog_create(self, hostid, values): - if utils.is_int_like(hostid): - host = self.host_get(int(hostid)) - elif utils.is_uuid_like(hostid): - host = self.host_get(hostid.strip()) - elif isinstance(hostid, models.Hosts): - host = hostid - else: - raise exception.HostNotFound(host=hostid) - - values['host_id'] = host['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - sensor_analog = models.SensorsAnalog() - sensor_analog.update(values) - - with _session_for_write() as session: - try: - session.add(sensor_analog) - session.flush() - except db_exc.DBDuplicateEntry: - exception.SensorAlreadyExists(uuid=values['uuid']) - return self._sensor_analog_get(values['uuid']) - - def _sensor_analog_get(self, sensorid, hostid=None): - query = model_query(models.SensorsAnalog) - - if hostid: - query = query.filter_by(host_id=hostid) - - query = add_sensor_filter(query, sensorid) - - try: - result = query.one() - except NoResultFound: - raise exception.SensorNotFound(sensor=sensorid) - - return result - - def _sensor_analog_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.SensorsAnalog, limit, marker, - sort_key, sort_dir) - - def _sensor_analog_get_all(self, hostid=None, sensorgroupid=None): - query = model_query(models.SensorsAnalog, read_deleted="no") - if hostid: - query = query.filter_by(host_id=hostid) - if sensorgroupid: - query = query.filter_by(sensorgroup_id=hostid) - return query.all() - - def _sensor_analog_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorsAnalog) - query = add_port_filter_by_host(query, host) - return _paginate_query(models.SensorsAnalog, limit, marker, - sort_key, sort_dir, query) - - def _sensor_analog_get_by_sensorgroup(self, 
def _sensor_analog_get_by_sensorgroup(self, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.SensorsAnalog) - query = add_sensor_filter_by_sensorgroup(query, sensorgroup) - return _paginate_query(models.SensorsAnalog, limit, marker, - sort_key, sort_dir, query) - - def _sensor_analog_get_by_host_sensorgroup(self, host, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorsAnalog) - query = add_sensor_filter_by_host_sensorgroup(query, - host, - sensorgroup) - return _paginate_query(models.SensorsAnalog, limit, marker, - sort_key, sort_dir, query) - - def _sensor_analog_update(self, sensorid, values, hostid=None): - with _session_for_write(): - # May need to reserve in multi controller system; ref sysinv - query = model_query(models.SensorsAnalog, read_deleted="no") - - if hostid: - query = query.filter_by(host_id=hostid) - - try: - query = add_sensor_filter(query, sensorid) - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensor %s" % sensorid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensor %s" % sensorid) - - return query.one() - - def _sensor_analog_destroy(self, sensorid): - with _session_for_write(): - # Delete sensor which should cascade to delete SensorsAnalog - if uuidutils.is_uuid_like(sensorid): - model_query(models.Sensors, read_deleted="no").\ - filter_by(uuid=sensorid).\ - delete() - else: - model_query(models.Sensors, read_deleted="no").\ - filter_by(id=sensorid).\ - delete() - - def sensor_analog_create(self, hostid, values): - return self._sensor_analog_create(hostid, values) - - def sensor_analog_get(self, sensorid, hostid=None): - return self._sensor_analog_get(sensorid, hostid) - - def sensor_analog_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_analog_get_list(limit, marker, sort_key, sort_dir) - - def sensor_analog_get_all(self, hostid=None, sensorgroupid=None): - return self._sensor_analog_get_all(hostid, sensorgroupid) - - def sensor_analog_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_analog_get_by_host(host, limit, marker, - sort_key, sort_dir) - - def sensor_analog_get_by_sensorgroup(self, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_analog_get_by_sensorgroup(sensorgroup, - limit, marker, - sort_key, sort_dir) - - def sensor_analog_get_by_host_sensorgroup(self, host, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_analog_get_by_host_sensorgroup(host, sensorgroup, - limit, marker, - sort_key, sort_dir) - - def sensor_analog_update(self, sensorid, values, hostid=None): - return self._sensor_analog_update(sensorid, values, hostid) - - def sensor_analog_destroy(self, sensorid): - return self._sensor_analog_destroy(sensorid) - -
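The analog helpers above and the discrete helpers that follow are line-for-line duplicates differing only in the model class. A sketch of how, for example, the pair of _get_all bodies could collapse into one helper parameterized on the class, reusing the module's own model_query (the helper name is illustrative):

def _sensor_variant_get_all(model_cls, hostid=None, sensorgroupid=None):
    # model_cls is models.SensorsAnalog or models.SensorsDiscrete
    query = model_query(model_cls, read_deleted="no")
    if hostid:
        query = query.filter_by(host_id=hostid)
    if sensorgroupid:
        query = query.filter_by(sensorgroup_id=sensorgroupid)
    return query.all()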
def _sensor_discrete_create(self, hostid, values): - if utils.is_int_like(hostid): - host = self.host_get(int(hostid)) - elif utils.is_uuid_like(hostid): - host = self.host_get(hostid.strip()) - elif isinstance(hostid, models.Hosts): - host = hostid - else: - raise exception.HostNotFound(host=hostid) - - values['host_id'] = host['id'] - - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - - sensor_discrete = models.SensorsDiscrete() - sensor_discrete.update(values) - with _session_for_write() as session: - try: - session.add(sensor_discrete) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.SensorAlreadyExists(uuid=values['uuid']) - return self._sensor_discrete_get(values['uuid']) - - def _sensor_discrete_get(self, sensorid, hostid=None): - query = model_query(models.SensorsDiscrete) - - if hostid: - query = query.filter_by(host_id=hostid) - - query = add_sensor_filter(query, sensorid) - - try: - result = query.one() - except NoResultFound: - raise exception.SensorNotFound(sensor=sensorid) - - return result - - def _sensor_discrete_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.SensorsDiscrete, limit, marker, - sort_key, sort_dir) - - def _sensor_discrete_get_all(self, hostid=None, sensorgroupid=None): - query = model_query(models.SensorsDiscrete, read_deleted="no") - if hostid: - query = query.filter_by(host_id=hostid) - if sensorgroupid: - query = query.filter_by(sensorgroup_id=sensorgroupid) - return query.all() - - def _sensor_discrete_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorsDiscrete) - query = add_sensor_filter_by_host(query, host) - return _paginate_query(models.SensorsDiscrete, limit, marker, - sort_key, sort_dir, query) - - def _sensor_discrete_get_by_sensorgroup(self, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.SensorsDiscrete) - query = add_sensor_filter_by_sensorgroup(query, sensorgroup) - return _paginate_query(models.SensorsDiscrete, limit, marker, - sort_key, sort_dir, query) - - def _sensor_discrete_get_by_host_sensorgroup(self, host, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorsDiscrete) - query = add_sensor_filter_by_host_sensorgroup(query, - host, - sensorgroup) - return _paginate_query(models.SensorsDiscrete, limit, marker, - sort_key, sort_dir, query) - - def _sensor_discrete_update(self, sensorid, values, hostid=None): - with _session_for_write(): - # May need to reserve in multi controller system; ref sysinv - query = model_query(models.SensorsDiscrete, read_deleted="no") - - if hostid: - query = query.filter_by(host_id=hostid) - - try: - query = add_sensor_filter(query, sensorid) - result = query.one() - for k, v in values.items(): - setattr(result, k, v) - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensor %s" % sensorid) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensor %s" % sensorid) - - return query.one() - - def _sensor_discrete_destroy(self, sensorid): - with _session_for_write(): - # Delete sensor which should cascade to delete SensorsDiscrete - if uuidutils.is_uuid_like(sensorid): - model_query(models.Sensors, read_deleted="no").\ - filter_by(uuid=sensorid).\ - delete() - else: - model_query(models.Sensors, read_deleted="no").\ - filter_by(id=sensorid).\ - delete() - - def sensor_discrete_create(self, hostid, values): - return self._sensor_discrete_create(hostid, values) - - def sensor_discrete_get(self, sensorid, hostid=None): - return self._sensor_discrete_get(sensorid, hostid) - - def sensor_discrete_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_discrete_get_list( - limit, marker, sort_key, sort_dir) - - def sensor_discrete_get_all(self, hostid=None, sensorgroupid=None): - return
self._sensor_discrete_get_all(hostid, sensorgroupid) - - def sensor_discrete_get_by_host(self, host, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_discrete_get_by_host(host, limit, marker, - sort_key, sort_dir) - - def sensor_discrete_get_by_sensorgroup(self, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_discrete_get_by_sensorgroup( - sensorgroup, limit, marker, sort_key, sort_dir) - - def sensor_discrete_get_by_host_sensorgroup(self, host, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensor_discrete_get_by_host_sensorgroup( - host, sensorgroup, limit, marker, sort_key, sort_dir) - - def sensor_discrete_update(self, sensorid, values, hostid=None): - return self._sensor_discrete_update(sensorid, values, hostid) - - def sensor_discrete_destroy(self, sensorid): - return self._sensor_discrete_destroy(sensorid) - - def _sensor_get(self, cls, sensor_id, ihost=None, obj=None): - session = None - if obj: - session = inspect(obj).session - query = model_query(cls, session=session) - query = add_sensor_filter(query, sensor_id) - if ihost: - query = add_sensor_filter_by_host(query, ihost) - - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensor %s" % sensor_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensor %s" % sensor_id) - - return result - - def _sensor_create(self, obj, host_id, values): - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - values['host_id'] = int(host_id) - - if 'sensor_profile' in values: - values.pop('sensor_profile') - - # The id is null for ae sensors with more than one member - # sensor - temp_id = obj.id - obj.update(values) - if obj.id is None: - obj.id = temp_id - - with _session_for_write() as session: - try: - session.add(obj) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add sensor %s (uuid: %s), a sensor " - "with name %s already exists on host %s" % - (values['sensorname'], - values['uuid'], - values['sensorname'], - values['host_id'])) - raise exception.SensorAlreadyExists(uuid=values['uuid']) - return self._sensor_get(type(obj), values['uuid']) - - def sensor_create(self, hostid, values): - if values['datatype'] == 'discrete': - sensor = models.SensorsDiscrete() - elif values['datatype'] == 'analog': - sensor = models.SensorsAnalog() - else: - sensor = models.SensorsAnalog() - LOG.error("default SensorsAnalog due to datatype=%s" % - values['datatype']) - - return self._sensor_create(sensor, hostid, values) - - def sensor_get(self, sensorid, hostid=None): - return self._sensor_get(models.Sensors, sensorid, hostid) - - def sensor_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(models.Sensors, limit, marker, - sort_key, sort_dir) - - def sensor_get_all(self, host_id=None, sensorgroupid=None): - query = model_query(models.Sensors, read_deleted="no") - - if host_id: - query = query.filter_by(host_id=host_id) - if sensorgroupid: - query = query.filter_by(sensorgroup_id=sensorgroupid) - return query.all() - - def sensor_get_by_host(self, ihost, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Sensors) - query = add_sensor_filter_by_host(query, ihost) - return _paginate_query(models.Sensors, limit, marker, - sort_key, sort_dir, query) - -
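sensor_create above picks the concrete model from the 'datatype' value with an if/elif chain that falls back to analog. The same dispatch as a lookup table, which keeps the fallback and its error log in one place (an illustrative sketch only, not the original code):

_SENSOR_MODELS = {
    'discrete': models.SensorsDiscrete,
    'analog': models.SensorsAnalog,
}


def _sensor_model_for(datatype):
    # unknown datatypes fall back to analog, exactly as sensor_create does
    model_cls = _SENSOR_MODELS.get(datatype)
    if model_cls is None:
        LOG.error("default SensorsAnalog due to datatype=%s" % datatype)
        model_cls = models.SensorsAnalog
    return model_cls()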
def _sensor_get_by_sensorgroup(self, cls, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(cls) - query = add_sensor_filter_by_sensorgroup(query, sensorgroup) - return _paginate_query(cls, limit, marker, sort_key, sort_dir, query) - - def sensor_get_by_sensorgroup(self, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Sensors) - query = add_sensor_filter_by_sensorgroup(query, sensorgroup) - return _paginate_query(models.Sensors, limit, marker, - sort_key, sort_dir, query) - - def sensor_get_by_host_sensorgroup(self, ihost, sensorgroup, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.Sensors) - query = add_sensor_filter_by_host(query, ihost) - query = add_sensor_filter_by_sensorgroup(query, sensorgroup) - return _paginate_query(models.Sensors, limit, marker, - sort_key, sort_dir, query) - - def _sensor_update(self, cls, sensor_id, values): - with _session_for_write(): - query = model_query(models.Sensors) - query = add_sensor_filter(query, sensor_id) - try: - result = query.one() - # obj = self._sensor_get(models.Sensors, sensor_id) - for k, v in values.items(): - if v == 'none': - v = None - setattr(result, k, v) - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensor %s" % sensor_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensor %s" % sensor_id) - - return query.one() - - def sensor_update(self, sensor_id, values): - with _session_for_write(): - query = model_query(models.Sensors, read_deleted="no") - query = add_sensor_filter(query, sensor_id) - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensor %s" % sensor_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensor %s" % sensor_id) - - if result.datatype == 'discrete': - return self._sensor_update(models.SensorsDiscrete, - sensor_id, values) - elif result.datatype == 'analog': - return self._sensor_update(models.SensorsAnalog, - sensor_id, values) - else: - return self._sensor_update(models.SensorsAnalog, - sensor_id, values) - - def _sensor_destroy(self, cls, sensor_id): - with _session_for_write(): - # Delete sensor which should cascade to delete derived sensors - if uuidutils.is_uuid_like(sensor_id): - model_query(cls, read_deleted="no").\ - filter_by(uuid=sensor_id).\ - delete() - else: - model_query(cls, read_deleted="no").\ - filter_by(id=sensor_id).\ - delete() - - def sensor_destroy(self, sensor_id): - return self._sensor_destroy(models.Sensors, sensor_id) - - # SENSOR GROUPS - def sensorgroup_create(self, host_id, values): - if values['datatype'] == 'discrete': - sensorgroup = models.SensorGroupsDiscrete() - elif values['datatype'] == 'analog': - sensorgroup = models.SensorGroupsAnalog() - else: - LOG.error("default SensorGroupsAnalog due to datatype=%s" % - values['datatype']) - - sensorgroup = models.SensorGroupsAnalog() - return self._sensorgroup_create(sensorgroup, host_id, values) - -
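In _sensor_update above (and in _sensorgroup_update further down), the literal string 'none' coming from the API layer is translated to SQL NULL one key at a time. The translation could be stated once, up front; a hypothetical helper, not in the original module:

def _normalize_none_sentinels(values, sentinel='none'):
    # map the API's 'none' marker to None so SQLAlchemy writes NULL
    return {k: (None if v == sentinel else v) for k, v in values.items()}

The update loop would then set attributes from _normalize_none_sentinels(values) without per-key sentinel checks.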
def _sensorgroup_get(self, cls, sensorgroup_id, ihost=None, obj=None): - query = model_query(cls) - query = add_sensorgroup_filter(query, sensorgroup_id) - if ihost: - query = add_sensorgroup_filter_by_host(query, ihost) - - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensorgroup %s" % sensorgroup_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensorgroup %s" % - sensorgroup_id) - - return result - - def sensorgroup_get(self, sensorgroup_id, ihost=None): - return self._sensorgroup_get(models.SensorGroups, - sensorgroup_id, - ihost) - - def sensorgroup_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorGroups) - return _paginate_query(models.SensorGroups, limit, marker, - sort_key, sort_dir, query) - - def sensorgroup_get_by_host_sensor(self, ihost, sensor, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorGroups) - query = add_sensorgroup_filter_by_host(query, ihost) - query = add_sensorgroup_filter_by_sensor(query, sensor) - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for host %s sensor %s" % (ihost, sensor)) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for host %s sensor %s" % - (ihost, sensor)) - - return result - - def sensorgroup_get_by_host(self, ihost, - limit=None, marker=None, - sort_key=None, sort_dir=None): - query = model_query(models.SensorGroups) - query = add_sensorgroup_filter_by_host(query, ihost) - return _paginate_query(models.SensorGroups, limit, marker, - sort_key, sort_dir, query) - - def sensorgroup_update(self, sensorgroup_id, values): - with _session_for_write(): - query = model_query(models.SensorGroups, read_deleted="no") - query = add_sensorgroup_filter(query, sensorgroup_id) - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensorgroup %s" % sensorgroup_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensorgroup %s" % - sensorgroup_id) - - if result.datatype == 'discrete': - return self._sensorgroup_update(models.SensorGroupsDiscrete, - sensorgroup_id, - values) - elif result.datatype == 'analog': - return self._sensorgroup_update(models.SensorGroupsAnalog, - sensorgroup_id, - values) - else: - return self._sensorgroup_update(models.SensorGroupsAnalog, - sensorgroup_id, - values) - - def sensorgroup_propagate(self, sensorgroup_id, values): - query = model_query(models.SensorGroups, read_deleted="no") - query = add_sensorgroup_filter(query, sensorgroup_id) - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensorgroup %s" % sensorgroup_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensorgroup %s" % - sensorgroup_id) - - sensors = self._sensor_get_by_sensorgroup(models.Sensors, - result.uuid) - for sensor in sensors: - LOG.info("sensorgroup update propagate sensor=%s val=%s" % - (sensor.sensorname, values)) - self._sensor_update(models.Sensors, sensor.uuid, values) - - def _sensorgroup_create(self, obj, host_id, values): - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - values['host_id'] = int(host_id) - - if 'sensorgroup_profile' in values: - values.pop('sensorgroup_profile') - - temp_id = obj.id - obj.update(values) - if obj.id is None: - obj.id = temp_id - with _session_for_write() as session: - try: - session.add(obj) - session.flush() - except db_exc.DBDuplicateEntry: - LOG.error("Failed to add sensorgroup %s (uuid: %s), a " - "sensorgroup with name %s already exists on host %s"
- % (values['sensorgroupname'], - values['uuid'], - values['sensorgroupname'], - values['host_id'])) - raise exception.SensorGroupAlreadyExists(uuid=values['uuid']) - return self._sensorgroup_get(type(obj), values['uuid']) - - def _sensorgroup_get_all(self, cls, host_id=None): - query = model_query(cls, read_deleted="no") - if utils.is_int_like(host_id): - query = query.filter_by(host_id=host_id) - return query.all() - - def _sensorgroup_get_list(self, cls, limit=None, marker=None, - sort_key=None, sort_dir=None): - return _paginate_query(cls, limit, marker, sort_key, sort_dir) - - def _sensorgroup_get_by_host_sensor(self, cls, ihost, sensor, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(cls).join(models.Sensors) - query = add_sensorgroup_filter_by_host(query, ihost) - query = add_sensorgroup_filter_by_sensor(query, sensor) - return _paginate_query(cls, limit, marker, sort_key, sort_dir, query) - - def _sensorgroup_get_by_host(self, cls, ihost, - limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(cls) - query = add_sensorgroup_filter_by_host(query, ihost) - return _paginate_query(cls, limit, marker, sort_key, sort_dir, query) - - def _sensorgroup_update(self, cls, sensorgroup_id, values): - with _session_for_write() as session: - # query = model_query(models.SensorGroups, read_deleted="no") - query = model_query(cls, read_deleted="no") - try: - query = add_sensorgroup_filter(query, sensorgroup_id) - result = query.one() - - # obj = self._sensorgroup_get(models.SensorGroups, - obj = self._sensorgroup_get(cls, sensorgroup_id) - - for k, v in values.items(): - if k == 'algorithm' and v == 'none': - v = None - if k == 'actions_critical_choices' and v == 'none': - v = None - if k == 'actions_major_choices' and v == 'none': - v = None - if k == 'actions_minor_choices' and v == 'none': - v = None - setattr(result, k, v) - - except NoResultFound: - raise exception.InvalidParameterValue( - err="No entry found for sensorgroup %s" % sensorgroup_id) - except MultipleResultsFound: - raise exception.InvalidParameterValue( - err="Multiple entries found for sensorgroup %s" % - sensorgroup_id) - try: - session.add(obj) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.SensorGroupAlreadyExists(uuid=values['uuid']) - return query.one() - - def _sensorgroup_destroy(self, cls, sensorgroup_id): - with _session_for_write(): - # Delete sensorgroup which should cascade to - # delete derived sensorgroups - if uuidutils.is_uuid_like(sensorgroup_id): - model_query(cls, read_deleted="no").\ - filter_by(uuid=sensorgroup_id).\ - delete() - else: - model_query(cls, read_deleted="no").\ - filter_by(id=sensorgroup_id).\ - delete() - - def sensorgroup_destroy(self, sensorgroup_id): - return self._sensorgroup_destroy(models.SensorGroups, sensorgroup_id) - - def sensorgroup_analog_create(self, host_id, values): - sensorgroup = models.SensorGroupsAnalog() - return self._sensorgroup_create(sensorgroup, host_id, values) - - def sensorgroup_analog_get_all(self, host_id=None): - return self._sensorgroup_get_all(models.SensorGroupsAnalog, host_id) - - def sensorgroup_analog_get(self, sensorgroup_id): - return self._sensorgroup_get(models.SensorGroupsAnalog, - sensorgroup_id) - - def sensorgroup_analog_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensorgroup_get_list(models.SensorGroupsAnalog, - limit, marker, - sort_key, sort_dir) - - def sensorgroup_analog_get_by_host(self, ihost, - limit=None, 
marker=None, - sort_key=None, sort_dir=None): - return self._sensorgroup_get_by_host(models.SensorGroupsAnalog, - ihost, - limit, marker, - sort_key, sort_dir) - - def sensorgroup_analog_update(self, sensorgroup_id, values): - return self._sensorgroup_update(models.SensorGroupsAnalog, - sensorgroup_id, - values) - - def sensorgroup_analog_destroy(self, sensorgroup_id): - return self._sensorgroup_destroy(models.SensorGroupsAnalog, - sensorgroup_id) - - def sensorgroup_discrete_create(self, host_id, values): - sensorgroup = models.SensorGroupsDiscrete() - return self._sensorgroup_create(sensorgroup, host_id, values) - - def sensorgroup_discrete_get_all(self, host_id=None): - return self._sensorgroup_get_all(models.SensorGroupsDiscrete, host_id) - - def sensorgroup_discrete_get(self, sensorgroup_id): - return self._sensorgroup_get(models.SensorGroupsDiscrete, - sensorgroup_id) - - def sensorgroup_discrete_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensorgroup_get_list(models.SensorGroupsDiscrete, - limit, marker, - sort_key, sort_dir) - - def sensorgroup_discrete_get_by_host(self, ihost, - limit=None, marker=None, - sort_key=None, sort_dir=None): - return self._sensorgroup_get_by_host(models.SensorGroupsDiscrete, - ihost, - limit, marker, sort_key, sort_dir) - - def sensorgroup_discrete_update(self, sensorgroup_id, values): - return self._sensorgroup_update(models.SensorGroupsDiscrete, - sensorgroup_id, values) - - def sensorgroup_discrete_destroy(self, sensorgroup_id): - return self._sensorgroup_destroy(models.SensorGroupsDiscrete, - sensorgroup_id) diff --git a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/README b/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/README deleted file mode 100644 index 54745cf4..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. - -More information at: - https://github.com/openstack/sqlalchemy-migrate diff --git a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/__init__.py b/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/manage.py b/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index 1d50f84d..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,11 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from migrate.versioning.shell import main - - -if __name__ == '__main__': - main(debug='False', repository='.') diff --git a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/migrate.cfg b/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index 2790b4be..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,21 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=inventory - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. 
-version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] - diff --git a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/versions/001_init.py b/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/versions/001_init.py deleted file mode 100644 index 51c998d5..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/versions/001_init.py +++ /dev/null @@ -1,605 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from sqlalchemy import Column, MetaData, String, Table, UniqueConstraint -from sqlalchemy import Boolean, Integer, Enum, Text, ForeignKey, DateTime - -ENGINE = 'InnoDB' -CHARSET = 'utf8' - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - systems = Table( - 'systems', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True, index=True), - - Column('system_type', String(255)), - Column('system_mode', String(255)), - - Column('name', String(255), unique=True), - Column('contact', String(255)), - Column('location', String(255)), - - Column('description', String(255), unique=True), - Column('timezone', String(255)), - Column('region_name', Text), - Column('services', Integer, default=72), - Column('service_project_name', Text), - Column('distributed_cloud_role', String(255)), - Column('security_profile', String(255)), - Column('security_feature', String(255)), - Column('software_version', String(255)), - Column('capabilities', Text), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - # Hosts Enum definitions - recordtype_enum = Enum('standard', - 'reserve1', - 'reserve2', - name='recordtype_enum') - - personality_enum = Enum('controller', - 'compute', - 'storage', - 'reserve1', - 'reserve2', - name='personality_enum') - - admin_enum = Enum('locked', - 'unlocked', - 'reserve1', - 'reserve2', - name='admin_enum') - - operational_enum = Enum('disabled', - 'enabled', - 'reserve1', - 'reserve2', - name='operational_enum') - - availability_enum = Enum('available', - 'intest', - 'degraded', - 'failed', - 'power-off', - 'offline', - 'offduty', - 'online', - 'dependency', - 'not-installed', - 'reserve1', - 'reserve2', - name='availability_enum') - - action_enum = Enum('none', - 'lock', - 'force-lock', - 'unlock', - 'reset', - 'swact', - 'force-swact', - 'reboot', - 'power-on', - 'power-off', - 'reinstall', - 'reserve1', - 'reserve2', - name='action_enum') - - hosts = Table( - 'hosts', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - - Column('hostname', String(255), unique=True, index=True), - Column('recordtype', recordtype_enum, default="standard"), - Column('reserved', Boolean), - - Column('invprovision', String(64), 
default="unprovisioned"), - - Column('mgmt_mac', String(255), unique=True), - Column('mgmt_ip', String(255), unique=True), - - # Board Management database members - Column('bm_ip', String(255)), - Column('bm_mac', String(255)), - Column('bm_type', String(255)), - Column('bm_username', String(255)), - - Column('personality', personality_enum), - Column('subfunctions', String(255)), - Column('subfunction_oper', String(255)), - Column('subfunction_avail', String(255)), - - Column('serialid', String(255)), - Column('location', Text), - Column('administrative', admin_enum, default="locked"), - Column('operational', operational_enum, default="disabled"), - Column('availability', availability_enum, default="offline"), - Column('action', action_enum, default="none"), - Column('host_action', String(255)), - Column('action_state', String(255)), - Column('mtce_info', String(255)), - Column('install_state', String(255)), - Column('install_state_info', String(255)), - Column('vim_progress_status', String(255)), - Column('task', String(64)), - Column('uptime', Integer), - Column('capabilities', Text), - - Column('boot_device', String(255)), - Column('rootfs_device', String(255)), - Column('install_output', String(255)), - Column('console', String(255)), - Column('tboot', String(64)), - Column('ttys_dcd', Boolean), - Column('iscsi_initiator_name', String(64)), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - nodes = Table( - 'nodes', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - - # numaNode from /sys/devices/system/node/nodeX/cpulist or cpumap - Column('numa_node', Integer), - Column('capabilities', Text), - - Column('host_id', Integer, - ForeignKey('hosts.id', ondelete='CASCADE')), - UniqueConstraint('numa_node', 'host_id', name='u_hostnuma'), - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - cpus = Table( - 'cpus', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - Column('cpu', Integer), - Column('core', Integer), - Column('thread', Integer), - Column('cpu_family', String(255)), - Column('cpu_model', String(255)), - Column('capabilities', Text), - - Column('host_id', Integer, - ForeignKey('hosts.id', ondelete='CASCADE')), - Column('node_id', Integer, - ForeignKey('nodes.id', ondelete='CASCADE')), - UniqueConstraint('cpu', 'host_id', name='u_hostcpu'), - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - memorys = Table( - 'memorys', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, - primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - - # per NUMA: /sys/devices/system/node/node/meminfo - Column('memtotal_mib', Integer), - Column('memavail_mib', Integer), - Column('platform_reserved_mib', Integer), - - Column('hugepages_configured', Boolean), # if hugepages_configured - - Column('vswitch_hugepages_size_mib', Integer), - Column('vswitch_hugepages_reqd', Integer), - Column('vswitch_hugepages_nr', Integer), - Column('vswitch_hugepages_avail', Integer), - - Column('vm_hugepages_nr_2M', Integer), - Column('vm_hugepages_nr_1G', Integer), - Column('vm_hugepages_use_1G', Boolean), - Column('vm_hugepages_possible_2M', Integer), - 
Column('vm_hugepages_possible_1G', Integer), - - Column('vm_hugepages_nr_2M_pending', Integer), # To be removed - Column('vm_hugepages_nr_1G_pending', Integer), # To be removed - Column('vm_hugepages_avail_2M', Integer), - Column('vm_hugepages_avail_1G', Integer), - - Column('vm_hugepages_nr_4K', Integer), - - Column('node_memtotal_mib', Integer), - - Column('capabilities', Text), - - # psql requires unique FK - Column('host_id', Integer, - ForeignKey('hosts.id', ondelete='CASCADE')), - Column('node_id', Integer, ForeignKey('nodes.id')), - UniqueConstraint('host_id', 'node_id', name='u_hostnode'), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - ports = Table( - 'ports', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - - Column('host_id', Integer, ForeignKey('hosts.id', - ondelete='CASCADE')), - Column('node_id', Integer, ForeignKey('nodes.id', - ondelete='SET NULL')), - - Column('type', String(255)), - Column('name', String(255)), - Column('namedisplay', String(255)), - Column('pciaddr', String(255)), - Column('dev_id', Integer), - Column('sriov_totalvfs', Integer), - Column('sriov_numvfs', Integer), - Column('sriov_vfs_pci_address', String(1020)), - Column('driver', String(255)), - - Column('pclass', String(255)), - Column('pvendor', String(255)), - Column('pdevice', String(255)), - Column('psvendor', String(255)), - Column('psdevice', String(255)), - Column('dpdksupport', Boolean, default=False), - Column('numa_node', Integer), - Column('capabilities', Text), - - UniqueConstraint('pciaddr', 'dev_id', 'host_id', - name='u_pciaddr_dev_host_id'), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - ethernet_ports = Table( - 'ethernet_ports', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True, nullable=False), - - Column('mac', String(255)), - Column('mtu', Integer), - Column('speed', Integer), - Column('link_mode', String(255)), - Column('duplex', String(255)), - Column('autoneg', String(255)), - Column('bootp', String(255)), - Column('capabilities', Text), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - pci_devices = Table( - 'pci_devices', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(255), unique=True, index=True), - Column('host_id', Integer, ForeignKey('hosts.id', - ondelete='CASCADE')), - Column('name', String(255)), - Column('pciaddr', String(255)), - Column('pclass_id', String(6)), - Column('pvendor_id', String(4)), - Column('pdevice_id', String(4)), - Column('pclass', String(255)), - Column('pvendor', String(255)), - Column('pdevice', String(255)), - Column('psvendor', String(255)), - Column('psdevice', String(255)), - Column('numa_node', Integer), - Column('driver', String(255)), - Column('sriov_totalvfs', Integer), - Column('sriov_numvfs', Integer), - Column('sriov_vfs_pci_address', String(1020)), - Column('enabled', Boolean), - Column('extra_info', Text), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - lldp_agents = Table( - 'lldp_agents', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, 
nullable=False), - Column('uuid', String(36), unique=True), - Column('host_id', Integer, ForeignKey('hosts.id', - ondelete='CASCADE')), - Column('port_id', Integer, ForeignKey('ports.id', - ondelete='CASCADE')), - Column('status', String(255)), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - lldp_neighbours = Table( - 'lldp_neighbours', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - Column('host_id', Integer, ForeignKey('hosts.id', - ondelete='CASCADE')), - Column('port_id', Integer, ForeignKey('ports.id', - ondelete='CASCADE')), - - Column('msap', String(511), nullable=False), - - UniqueConstraint('msap', 'port_id', - name='u_msap_port_id'), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - lldp_tlvs = Table( - 'lldp_tlvs', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('agent_id', Integer, - ForeignKey('lldp_agents.id', ondelete="CASCADE"), - nullable=True), - Column('neighbour_id', Integer, - ForeignKey('lldp_neighbours.id', ondelete="CASCADE"), - nullable=True), - Column('type', String(255)), - Column('value', String(255)), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - sensorgroups = Table( - 'sensorgroups', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - - Column('uuid', String(36), unique=True), - Column('host_id', Integer, - ForeignKey('hosts.id', ondelete='CASCADE')), - - Column('sensorgroupname', String(255)), - Column('path', String(255)), - Column('datatype', String(255)), # polymorphic 'analog'/'discrete - Column('sensortype', String(255)), - Column('description', String(255)), - Column('state', String(255)), # enabled or disabled - Column('possible_states', String(255)), - Column('audit_interval_group', Integer), - Column('record_ttl', Integer), - - Column('algorithm', String(255)), - Column('actions_critical_choices', String(255)), - Column('actions_major_choices', String(255)), - Column('actions_minor_choices', String(255)), - Column('actions_minor_group', String(255)), - Column('actions_major_group', String(255)), - Column('actions_critical_group', String(255)), - - Column('suppress', Boolean), # True, disables the action - - Column('capabilities', Text), - - UniqueConstraint('sensorgroupname', 'path', 'host_id', - name='u_sensorgroupname_path_hostid'), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - # polymorphic on datatype 'discrete' - sensorgroups_discrete = Table( - 'sensorgroups_discrete', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, - ForeignKey('sensorgroups.id', ondelete="CASCADE"), - primary_key=True, nullable=False), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - # polymorphic on datatype 'analog' - sensorgroups_analog = Table( - 'sensorgroups_analog', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, - ForeignKey('sensorgroups.id', ondelete="CASCADE"), - primary_key=True, nullable=False), - - Column('unit_base_group', String(255)), # revolutions - Column('unit_modifier_group', String(255)), # 100 - Column('unit_rate_group', 
String(255)), # minute - - Column('t_minor_lower_group', String(255)), - Column('t_minor_upper_group', String(255)), - Column('t_major_lower_group', String(255)), - Column('t_major_upper_group', String(255)), - Column('t_critical_lower_group', String(255)), - Column('t_critical_upper_group', String(255)), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - sensors = Table( - 'sensors', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('uuid', String(36), unique=True), - - Column('host_id', Integer, - ForeignKey('hosts.id', ondelete='CASCADE')), - - Column('sensorgroup_id', Integer, - ForeignKey('sensorgroups.id', ondelete='SET NULL')), - - Column('sensorname', String(255)), - Column('path', String(255)), - - Column('datatype', String(255)), # polymorphic on datatype - Column('sensortype', String(255)), - - Column('status', String(255)), # ok, minor, major, critical, disabled - Column('state', String(255)), # enabled, disabled - Column('state_requested', String(255)), - - Column('sensor_action_requested', String(255)), - - Column('audit_interval', Integer), - Column('algorithm', String(255)), - Column('actions_minor', String(255)), - Column('actions_major', String(255)), - Column('actions_critical', String(255)), - - Column('suppress', Boolean), # True, disables the action - - Column('capabilities', Text), - - UniqueConstraint('sensorname', 'path', 'host_id', - name='u_sensorname_path_host_id'), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - # discrete sensor - sensors_discrete = Table( - 'sensors_discrete', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, - ForeignKey('sensors.id', ondelete="CASCADE"), - primary_key=True, nullable=False), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - # analog sensor - sensors_analog = Table( - 'sensors_analog', - meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, - ForeignKey('sensors.id', ondelete="CASCADE"), - primary_key=True, nullable=False), - - Column('unit_base', String(255)), # revolutions - Column('unit_modifier', String(255)), # 10^2 - Column('unit_rate', String(255)), # minute - - Column('t_minor_lower', String(255)), - Column('t_minor_upper', String(255)), - Column('t_major_lower', String(255)), - Column('t_major_upper', String(255)), - Column('t_critical_lower', String(255)), - Column('t_critical_upper', String(255)), - - mysql_engine=ENGINE, - mysql_charset=CHARSET, - ) - - # TODO(sc) disks - tables = ( - systems, - hosts, - nodes, - cpus, - memorys, - pci_devices, - ports, - ethernet_ports, - lldp_agents, - lldp_neighbours, - lldp_tlvs, - sensorgroups, - sensorgroups_discrete, - sensorgroups_analog, - sensors, - sensors_discrete, - sensors_analog, - ) - - for index, table in enumerate(tables): - try: - table.create() - except Exception: - # If an error occurs, drop all tables created so far to return - # to the previously existing state. 
- meta.drop_all(tables=tables[:index]) - raise diff --git a/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/versions/__init__.py b/inventory/inventory/inventory/db/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/db/sqlalchemy/migration.py b/inventory/inventory/inventory/db/sqlalchemy/migration.py deleted file mode 100644 index 24e8292f..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/migration.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import os -import sqlalchemy - -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository -from oslo_db.sqlalchemy import enginefacade - -from inventory.common import exception -from inventory.common.i18n import _ -from inventory.db import migration - -_REPOSITORY = None - -get_engine = enginefacade.get_legacy_facade().get_engine - - -def db_sync(version=None): - if version is not None: - try: - version = int(version) - except ValueError: - raise exception.ApiError(_("version should be an integer")) - - current_version = db_version() - repository = _find_migrate_repo() - if version is None or version > current_version: - return versioning_api.upgrade(get_engine(), repository, version) - else: - return versioning_api.downgrade(get_engine(), repository, - version) - - -def db_version(): - repository = _find_migrate_repo() - try: - return versioning_api.db_version(get_engine(), repository) - except versioning_exceptions.DatabaseNotControlledError: - meta = sqlalchemy.MetaData() - engine = get_engine() - meta.reflect(bind=engine) - tables = meta.tables - if len(tables) == 0: - db_version_control(migration.INIT_VERSION) - return versioning_api.db_version(get_engine(), repository) - - -def db_version_control(version=None): - repository = _find_migrate_repo() - versioning_api.version_control(get_engine(), repository, version) - return version - - -def _find_migrate_repo(): - """Get the path for the migrate repository.""" - global _REPOSITORY - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'migrate_repo') - assert os.path.exists(path) - if _REPOSITORY is None: - _REPOSITORY = Repository(path) - return _REPOSITORY diff --git a/inventory/inventory/inventory/db/sqlalchemy/models.py b/inventory/inventory/inventory/db/sqlalchemy/models.py deleted file mode 100644 index ee748d78..00000000 --- a/inventory/inventory/inventory/db/sqlalchemy/models.py +++ /dev/null @@ -1,589 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. 
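The initial migration deleted above creates its tables in dependency order and, if any create fails, drops the ones already created so a failed upgrade cannot leave a half-built schema. The core of that pattern as a standalone sketch (assuming, as in the migration, that meta is a SQLAlchemy MetaData already bound to the engine):

def create_all_or_nothing(meta, tables):
    # tables must be ordered so that referenced tables come first
    for index, table in enumerate(tables):
        try:
            table.create()
        except Exception:
            # undo only what this run created, then re-raise
            meta.drop_all(tables=tables[:index])
            raise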
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import json -import urlparse - -from oslo_config import cfg -from oslo_db.sqlalchemy import models -from sqlalchemy import Column, Enum, ForeignKey, Integer, Boolean -from sqlalchemy import UniqueConstraint, String, Text -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.ext.declarative import declared_attr -from sqlalchemy.types import TypeDecorator, VARCHAR -from sqlalchemy.orm import relationship, backref - - -def table_args(): - engine_name = urlparse.urlparse(cfg.CONF.database_connection).scheme - if engine_name == 'mysql': - return {'mysql_engine': 'InnoDB', - 'mysql_charset': "utf8"} - return None - - -class JSONEncodedDict(TypeDecorator): - """Represents an immutable structure as a json-encoded string.""" - - impl = VARCHAR - - def process_bind_param(self, value, dialect): - if value is not None: - value = json.dumps(value) - return value - - def process_result_value(self, value, dialect): - if value is not None: - value = json.loads(value) - return value - - -class InventoryBase(models.TimestampMixin, models.ModelBase): - - metadata = None - - def as_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - -Base = declarative_base(cls=InventoryBase) - - -class Systems(Base): - __tablename__ = 'systems' - - # The reference for system is from systemconfig - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36), unique=True) - - name = Column(String(255), unique=True) - system_type = Column(String(255)) - system_mode = Column(String(255)) - description = Column(String(255)) - capabilities = Column(JSONEncodedDict) - contact = Column(String(255)) - location = Column(String(255)) - services = Column(Integer, default=72) - software_version = Column(String(255)) - timezone = Column(String(255)) - security_profile = Column(String(255)) - region_name = Column(Text) - service_project_name = Column(Text) - distributed_cloud_role = Column(String(255)) - security_feature = Column(String(255)) - - -class Hosts(Base): - recordTypeEnum = Enum('standard', - 'profile', - 'sprofile', - 'reserve1', - 'reserve2', - name='recordtypeEnum') - - adminEnum = Enum('locked', - 'unlocked', - 'reserve1', - 'reserve2', - name='administrativeEnum') - - operEnum = Enum('disabled', - 'enabled', - 'reserve1', - 'reserve2', - name='operationalEnum') - - availEnum = Enum('available', - 'intest', - 'degraded', - 'failed', - 'power-off', - 'offline', - 'offduty', - 'online', - 'dependency', - 'not-installed', - 'reserve1', - 'reserve2', - name='availabilityEnum') - - actionEnum = Enum('none', - 'lock', - 'force-lock', - 'unlock', - 'reset', - 'swact', - 'force-swact', - 'reboot', - 'power-on', - 'power-off', - 'reinstall', - 'reserve1', - 'reserve2', - name='actionEnum') - - __tablename__ = 'hosts' - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36), unique=True) - - hostname = Column(String(255), unique=True, index=True) - recordtype = Column(recordTypeEnum, default="standard") - reserved = Column(Boolean, default=False) - - invprovision = Column(String(64), default="unprovisioned") - - mgmt_mac = Column(String(255), unique=True) - mgmt_ip = Column(String(255)) - - # board management IP address, MAC, type and username - bm_ip = Column(String(255)) - bm_mac = Column(String(255)) - bm_type = Column(String(255)) - bm_username = Column(String(255)) - - personality = Column(String(255)) - subfunctions = Column(String(255)) - subfunction_oper =
Column(operEnum, default="disabled") - subfunction_avail = Column(availEnum, default="not-installed") - serialid = Column(String(255)) - location = Column(JSONEncodedDict) - administrative = Column(adminEnum, default="locked") - operational = Column(operEnum, default="disabled") - availability = Column(availEnum, default="offline") - action = Column(actionEnum, default="none") - host_action = Column(String(255)) - action_state = Column(String(255)) - mtce_info = Column(String(255)) - install_state = Column(String(255)) - install_state_info = Column(String(255)) - vim_progress_status = Column(String(255)) - task = Column(String(64)) - uptime = Column(Integer, default=0) - capabilities = Column(JSONEncodedDict) - - boot_device = Column(String(255), default="sda") - rootfs_device = Column(String(255), default="sda") - install_output = Column(String(255), default="text") - console = Column(String(255), default="ttyS0,115200") - tboot = Column(String(64), default="") - ttys_dcd = Column(Boolean) - iscsi_initiator_name = Column(String(64)) - - -class Nodes(Base): - __tablename__ = 'nodes' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36), unique=True) - - numa_node = Column(Integer) - capabilities = Column(JSONEncodedDict) - - host_id = Column(Integer, ForeignKey('hosts.id', ondelete='CASCADE')) - host = relationship("Hosts", - backref="nodes", lazy="joined", join_depth=1) - - UniqueConstraint('numa_node', 'host_id', name='u_hostnuma') - - -class Cpus(Base): - __tablename__ = 'cpus' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36), unique=True) - - cpu = Column(Integer) - core = Column(Integer) - thread = Column(Integer) - cpu_family = Column(String(255)) - cpu_model = Column(String(255)) - # allocated_function = Column(String(255)) # systemconfig allocates - capabilities = Column(JSONEncodedDict) - host_id = Column(Integer, ForeignKey('hosts.id', ondelete='CASCADE')) - node_id = Column(Integer, ForeignKey('nodes.id', ondelete='CASCADE')) - - host = relationship("Hosts", - backref="cpus", lazy="joined", join_depth=1) - node = relationship("Nodes", - backref="cpus", lazy="joined", join_depth=1) - - UniqueConstraint('cpu', 'host_id', name='u_hostcpu') - - -class Memorys(Base): - __tablename__ = 'memorys' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36), unique=True) - - memtotal_mib = Column(Integer) - memavail_mib = Column(Integer) - platform_reserved_mib = Column(Integer) - node_memtotal_mib = Column(Integer) - - hugepages_configured = Column(Boolean, default=False) - - vswitch_hugepages_size_mib = Column(Integer) - vswitch_hugepages_reqd = Column(Integer) - vswitch_hugepages_nr = Column(Integer) - vswitch_hugepages_avail = Column(Integer) - - vm_hugepages_nr_2M_pending = Column(Integer) - vm_hugepages_nr_1G_pending = Column(Integer) - vm_hugepages_nr_2M = Column(Integer) - vm_hugepages_nr_1G = Column(Integer) - vm_hugepages_nr_4K = Column(Integer) - vm_hugepages_avail_2M = Column(Integer) - vm_hugepages_avail_1G = Column(Integer) - - vm_hugepages_use_1G = Column(Boolean, default=False) - vm_hugepages_possible_2M = Column(Integer) - vm_hugepages_possible_1G = Column(Integer) - capabilities = Column(JSONEncodedDict) - - host_id = Column(Integer, ForeignKey('hosts.id', ondelete='CASCADE')) - node_id = Column(Integer, ForeignKey('nodes.id')) - - host = relationship("Hosts", backref="memory", lazy="joined", join_depth=1) - node = relationship("Nodes", backref="memory", lazy="joined", 
join_depth=1) - - UniqueConstraint('host_id', 'node_id', name='u_hostnode') - - -class Ports(Base): - __tablename__ = 'ports' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36)) - host_id = Column(Integer, ForeignKey('hosts.id', ondelete='CASCADE')) - node_id = Column(Integer, ForeignKey('nodes.id')) - - type = Column(String(255)) - - name = Column(String(255)) - namedisplay = Column(String(255)) - pciaddr = Column(String(255)) - pclass = Column(String(255)) - pvendor = Column(String(255)) - pdevice = Column(String(255)) - psvendor = Column(String(255)) - psdevice = Column(String(255)) - dpdksupport = Column(Boolean, default=False) - numa_node = Column(Integer) - dev_id = Column(Integer) - sriov_totalvfs = Column(Integer) - sriov_numvfs = Column(Integer) - # Each PCI Address is 12 char, 1020 char is enough for 64 devices - sriov_vfs_pci_address = Column(String(1020)) - driver = Column(String(255)) - capabilities = Column(JSONEncodedDict) - - node = relationship("Nodes", backref="ports", lazy="joined", join_depth=1) - host = relationship("Hosts", backref="ports", lazy="joined", join_depth=1) - - UniqueConstraint('pciaddr', 'dev_id', 'host_id', name='u_pciaddrdevhost') - - __mapper_args__ = { - 'polymorphic_identity': 'port', - 'polymorphic_on': type - } - - -class EthernetPorts(Ports): - __tablename__ = 'ethernet_ports' - - id = Column(Integer, - ForeignKey('ports.id'), primary_key=True, nullable=False) - - mac = Column(String(255), unique=True) - mtu = Column(Integer) - speed = Column(Integer) - link_mode = Column(String(255)) - duplex = Column(String(255)) - autoneg = Column(String(255)) - bootp = Column(String(255)) - - UniqueConstraint('mac', name='u_mac') - - __mapper_args__ = { - 'polymorphic_identity': 'ethernet' - } - - -class LldpAgents(Base): - __tablename__ = 'lldp_agents' - - id = Column('id', Integer, primary_key=True, nullable=False) - uuid = Column('uuid', String(36)) - host_id = Column('host_id', Integer, ForeignKey('hosts.id', - ondelete='CASCADE')) - port_id = Column('port_id', Integer, ForeignKey('ports.id', - ondelete='CASCADE')) - status = Column('status', String(255)) - - lldp_tlvs = relationship("LldpTlvs", - backref=backref("lldpagents", lazy="subquery"), - cascade="all") - - host = relationship("Hosts", lazy="joined", join_depth=1) - port = relationship("Ports", lazy="joined", join_depth=1) - - -class PciDevices(Base): - __tablename__ = 'pci_devices' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36)) - host_id = Column(Integer, ForeignKey('hosts.id', ondelete='CASCADE')) - name = Column(String(255)) - pciaddr = Column(String(255)) - pclass_id = Column(String(6)) - pvendor_id = Column(String(4)) - pdevice_id = Column(String(4)) - pclass = Column(String(255)) - pvendor = Column(String(255)) - pdevice = Column(String(255)) - psvendor = Column(String(255)) - psdevice = Column(String(255)) - numa_node = Column(Integer) - sriov_totalvfs = Column(Integer) - sriov_numvfs = Column(Integer) - sriov_vfs_pci_address = Column(String(1020)) - driver = Column(String(255)) - enabled = Column(Boolean) - extra_info = Column(Text) - - host = relationship("Hosts", lazy="joined", join_depth=1) - - UniqueConstraint('pciaddr', 'host_id', name='u_pciaddrhost') - - -class LldpNeighbours(Base): - __tablename__ = 'lldp_neighbours' - - id = Column('id', Integer, primary_key=True, nullable=False) - uuid = Column('uuid', String(36)) - host_id = Column('host_id', Integer, ForeignKey('hosts.id', - ondelete='CASCADE')) - port_id 
= Column('port_id', Integer, ForeignKey('ports.id', - ondelete='CASCADE')) - msap = Column('msap', String(511)) - - lldp_tlvs = relationship( - "LldpTlvs", - backref=backref("lldpneighbours", lazy="subquery"), - cascade="all") - - host = relationship("Hosts", lazy="joined", join_depth=1) - port = relationship("Ports", lazy="joined", join_depth=1) - - UniqueConstraint('msap', 'port_id', name='u_msap_port_id') - - -class LldpTlvs(Base): - __tablename__ = 'lldp_tlvs' - - id = Column('id', Integer, primary_key=True, nullable=False) - agent_id = Column('agent_id', Integer, ForeignKey('lldp_agents.id', - ondelete='CASCADE'), nullable=True) - neighbour_id = Column('neighbour_id', Integer, - ForeignKey('lldp_neighbours.id', ondelete='CASCADE'), - nullable=True) - type = Column('type', String(255)) - value = Column('value', String(255)) - - lldp_agent = relationship("LldpAgents", - backref=backref("lldptlvs", lazy="subquery"), - cascade="all", - lazy="joined") - - lldp_neighbour = relationship( - "LldpNeighbours", - backref=backref("lldptlvs", lazy="subquery"), - cascade="all", - lazy="joined") - - UniqueConstraint('type', 'agent_id', - name='u_type@agent') - - UniqueConstraint('type', 'neighbour_id', - name='u_type@neighbour') - - -class SensorGroups(Base): - __tablename__ = 'sensorgroups' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36)) - host_id = Column(Integer, ForeignKey('hosts.id', ondelete='CASCADE')) - - sensortype = Column(String(255)) - datatype = Column(String(255)) # polymorphic - sensorgroupname = Column(String(255)) - path = Column(String(255)) - description = Column(String(255)) - - state = Column(String(255)) - possible_states = Column(String(255)) - algorithm = Column(String(255)) - audit_interval_group = Column(Integer) - record_ttl = Column(Integer) - - actions_minor_group = Column(String(255)) - actions_major_group = Column(String(255)) - actions_critical_group = Column(String(255)) - - suppress = Column(Boolean, default=False) - - capabilities = Column(JSONEncodedDict) - - actions_critical_choices = Column(String(255)) - actions_major_choices = Column(String(255)) - actions_minor_choices = Column(String(255)) - - host = relationship("Hosts", lazy="joined", join_depth=1) - - UniqueConstraint('sensorgroupname', 'path', 'host_id', - name='u_sensorgroupname_path_host_id') - - __mapper_args__ = { - 'polymorphic_identity': 'sensorgroup', - 'polymorphic_on': datatype - } - - -class SensorGroupsCommon(object): - @declared_attr - def id(cls): - return Column(Integer, - ForeignKey('sensorgroups.id', ondelete="CASCADE"), - primary_key=True, nullable=False) - - -class SensorGroupsDiscrete(SensorGroupsCommon, SensorGroups): - __tablename__ = 'sensorgroups_discrete' - - __mapper_args__ = { - 'polymorphic_identity': 'discrete', - } - - -class SensorGroupsAnalog(SensorGroupsCommon, SensorGroups): - __tablename__ = 'sensorgroups_analog' - - unit_base_group = Column(String(255)) - unit_modifier_group = Column(String(255)) - unit_rate_group = Column(String(255)) - - t_minor_lower_group = Column(String(255)) - t_minor_upper_group = Column(String(255)) - t_major_lower_group = Column(String(255)) - t_major_upper_group = Column(String(255)) - t_critical_lower_group = Column(String(255)) - t_critical_upper_group = Column(String(255)) - - __mapper_args__ = { - 'polymorphic_identity': 'analog', - } - - -class Sensors(Base): - __tablename__ = 'sensors' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36)) - host_id = Column(Integer, 
ForeignKey('hosts.id', ondelete='CASCADE')) - - sensorgroup_id = Column(Integer, - ForeignKey('sensorgroups.id', - ondelete='SET NULL')) - sensortype = Column(String(255)) # "watchdog", "temperature". - datatype = Column(String(255)) # "discrete" or "analog" - - sensorname = Column(String(255)) - path = Column(String(255)) - - status = Column(String(255)) - state = Column(String(255)) - state_requested = Column(String(255)) - - sensor_action_requested = Column(String(255)) - - audit_interval = Column(Integer) - algorithm = Column(String(255)) - actions_minor = Column(String(255)) - actions_major = Column(String(255)) - actions_critical = Column(String(255)) - - suppress = Column(Boolean, default=False) - - capabilities = Column(JSONEncodedDict) - - host = relationship("Hosts", lazy="joined", join_depth=1) - sensorgroup = relationship("SensorGroups", lazy="joined", join_depth=1) - - UniqueConstraint('sensorname', 'path', 'host_id', - name='u_sensorname_path_host_id') - - __mapper_args__ = { - 'polymorphic_identity': 'sensor', - 'polymorphic_on': datatype - # with_polymorphic is only supported in sqlalchemy.orm >= 0.8 - # 'with_polymorphic': '*' - } - - -class SensorsDiscrete(Sensors): - __tablename__ = 'sensors_discrete' - - id = Column(Integer, ForeignKey('sensors.id'), - primary_key=True, nullable=False) - - __mapper_args__ = { - 'polymorphic_identity': 'discrete' - } - - -class SensorsAnalog(Sensors): - __tablename__ = 'sensors_analog' - - id = Column(Integer, ForeignKey('sensors.id'), - primary_key=True, nullable=False) - - unit_base = Column(String(255)) - unit_modifier = Column(String(255)) - unit_rate = Column(String(255)) - - t_minor_lower = Column(String(255)) - t_minor_upper = Column(String(255)) - t_major_lower = Column(String(255)) - t_major_upper = Column(String(255)) - t_critical_lower = Column(String(255)) - t_critical_upper = Column(String(255)) - - __mapper_args__ = { - 'polymorphic_identity': 'analog' - } diff --git a/inventory/inventory/inventory/db/utils.py b/inventory/inventory/inventory/db/utils.py deleted file mode 100644 index d8f18d42..00000000 --- a/inventory/inventory/inventory/db/utils.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2015 Ericsson AB. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
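The sensor and sensor-group models above use SQLAlchemy joined-table polymorphic inheritance: the base table carries a datatype discriminator and each subclass stores its extra columns in a child table keyed by the parent id. Below is a minimal stand-alone sketch of the same pattern (illustrative names, plain SQLAlchemy rather than the inventory Base). One caveat worth noting: a bare UniqueConstraint(...) expression in a declarative class body, as written in the models above, is discarded by the mapper; the sketch attaches the constraint through __table_args__, which is the form that actually takes effect.

from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

ModelBase = declarative_base()


class Sensor(ModelBase):
    __tablename__ = 'sensors'

    id = Column(Integer, primary_key=True, nullable=False)
    host_id = Column(Integer)
    sensorname = Column(String(255))
    path = Column(String(255))
    datatype = Column(String(255))  # discriminator: 'discrete' or 'analog'

    # Placed in __table_args__ so the constraint is actually created;
    # a bare UniqueConstraint(...) statement in the class body is not.
    __table_args__ = (
        UniqueConstraint('sensorname', 'path', 'host_id',
                         name='u_sensorname_path_host_id'),
    )

    __mapper_args__ = {
        'polymorphic_identity': 'sensor',
        'polymorphic_on': datatype,
    }


class SensorAnalog(Sensor):
    __tablename__ = 'sensors_analog'

    # Joined-table inheritance: the child row shares the parent's id.
    id = Column(Integer, ForeignKey('sensors.id'),
                primary_key=True, nullable=False)
    unit_base = Column(String(255))

    __mapper_args__ = {'polymorphic_identity': 'analog'}

With this mapping, session.query(Sensor) returns SensorAnalog instances for rows whose datatype column reads 'analog'.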
- - -class LazyPluggable(object): - """A pluggable backend loaded lazily based on some value.""" - - def __init__(self, pivot, **backends): - self.__backends = backends - self.__pivot = pivot - self.__backend = None - - def __get_backend(self): - if not self.__backend: - backend_name = 'sqlalchemy' - backend = self.__backends[backend_name] - if isinstance(backend, tuple): - name = backend[0] - fromlist = backend[1] - else: - name = backend - fromlist = backend - - self.__backend = __import__(name, None, None, fromlist) - return self.__backend - - def __getattr__(self, key): - backend = self.__get_backend() - return getattr(backend, key) - - -IMPL = LazyPluggable('backend', sqlalchemy='inventory.db.sqlalchemy.api') - - -def purge_deleted(age, granularity='days'): - IMPL.purge_deleted(age, granularity) diff --git a/inventory/inventory/inventory/objects/__init__.py b/inventory/inventory/inventory/objects/__init__.py deleted file mode 100644 index 213fa432..00000000 --- a/inventory/inventory/inventory/objects/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(comstud): You may scratch your head as you see code that imports -# this module and then accesses attributes for objects such as Node, -# etc, yet you do not see these attributes in here. Never fear, there is -# a little bit of magic. When objects are registered, an attribute is set -# on this module automatically, pointing to the newest/latest version of -# the object. - - -def register_all(): - # NOTE(danms): You must make sure your object gets imported in this - # function in order for it to be registered by services that may - # need to receive it via RPC. - __import__('inventory.objects.cpu') - __import__('inventory.objects.host') - __import__('inventory.objects.lldp_agent') - __import__('inventory.objects.lldp_neighbour') - __import__('inventory.objects.lldp_tlv') - __import__('inventory.objects.memory') - __import__('inventory.objects.node') - __import__('inventory.objects.pci_device') - __import__('inventory.objects.port_ethernet') - __import__('inventory.objects.port') - __import__('inventory.objects.sensor_analog') - __import__('inventory.objects.sensor_discrete') - __import__('inventory.objects.sensorgroup_analog') - __import__('inventory.objects.sensorgroup_discrete') - __import__('inventory.objects.sensorgroup') - __import__('inventory.objects.sensor') - __import__('inventory.objects.system') diff --git a/inventory/inventory/inventory/objects/base.py b/inventory/inventory/inventory/objects/base.py deleted file mode 100644 index 173f0a3e..00000000 --- a/inventory/inventory/inventory/objects/base.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
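LazyPluggable defers importing the backend module until the first attribute access, which avoids import cycles between the db API and its callers and lets the backend be selected at runtime. A condensed, runnable sketch of the same idea, using a stdlib module as a stand-in backend (all names here are illustrative):

class LazyBackend(object):
    """Import a backend module on first attribute access."""

    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        # Only called for attributes not found on the instance itself,
        # i.e. for every delegated backend attribute.
        if self._module is None:
            self._module = __import__(self._module_name, fromlist=[''])
        return getattr(self._module, name)


IMPL = LazyBackend('json')          # stdlib stand-in for a db backend
print(IMPL.dumps({'lazy': True}))   # 'json' is imported only here

The register_all() function above relies on a related import-for-side-effect trick: merely importing each objects module runs its registration decorators, which publish the classes as module attributes.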
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventory import objects -from oslo_log import log -from oslo_utils import versionutils -from oslo_versionedobjects import base as object_base -# from oslo_versionedobjects import exception as ovo_exception - -from oslo_versionedobjects import fields as object_fields -LOG = log.getLogger(__name__) - - -class InventoryObjectRegistry(object_base.VersionedObjectRegistry): - def registration_hook(self, cls, index): - # NOTE(jroll): blatantly stolen from nova - # NOTE(danms): This is called when an object is registered, - # and is responsible for maintaining inventory.objects.$OBJECT - # as the highest-versioned implementation of a given object. - version = versionutils.convert_version_to_tuple(cls.VERSION) - if not hasattr(objects, cls.obj_name()): # noqa - setattr(objects, cls.obj_name(), cls) # noqa - else: - cur_version = versionutils.convert_version_to_tuple( - getattr(objects, cls.obj_name()).VERSION) # noqa - if version >= cur_version: - setattr(objects, cls.obj_name(), cls) # noqa - - -class InventoryObject(object_base.VersionedObject): - """Base class and object factory. - - This forms the base of all objects that can be remoted or instantiated - via RPC. Simply defining a class that inherits from this base class - will make it remotely instantiatable. Objects should implement the - necessary "get" classmethod routines as well as "save" object methods - as appropriate. - """ - - OBJ_SERIAL_NAMESPACE = 'inventory_object' - OBJ_PROJECT_NAMESPACE = 'inventory' - - fields = { - 'created_at': object_fields.DateTimeField(nullable=True), - 'updated_at': object_fields.DateTimeField(nullable=True), - } - - _foreign_fields = {} - _optional_fields = [] - - def _get_foreign_field(self, field, db_object): - """Retrieve data from a foreign relationship on a DB entry. - - Depending on how the field was described in _foreign_fields the data - may be retrieved by calling a function to do the work, or by accessing - the specified remote field name if specified as a string. - """ - accessor = self._foreign_fields[field] - if callable(accessor): - return accessor(field, db_object) - - # Split as "local object reference:remote field name" - local, remote = accessor.split(':') - try: - local_object = db_object[local] - if local_object: - return local_object[remote] - except KeyError: - pass # foreign relationships are not always available - return None - - def __getitem__(self, name): - return getattr(self, name) - - def __setitem__(self, name, value): - setattr(self, name, value) - - def as_dict(self): - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k)) - - @classmethod - def get_defaults(cls): - """Return a dict of its fields with their default value.""" - return dict((k, v(None)) - for k, v in cls.fields.iteritems() - if k != "id" and callable(v)) - - def get(self, key, value=None): - """For backwards-compatibility with dict-based objects. - - NOTE(danms): May be removed in the future. 
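The registration_hook above keeps inventory.objects.<Object> pointing at the highest-versioned implementation of each object name. A stripped-down sketch of that rule, assuming dotted version strings as in oslo.versionedobjects (register, _registry, and the sample class are illustrative, not the library API):

_registry = {}


def register(cls):
    """Keep the highest-versioned class registered under each name."""
    def as_tuple(version):
        return tuple(int(part) for part in version.split('.'))

    current = _registry.get(cls.__name__)
    if current is None or as_tuple(cls.VERSION) >= as_tuple(current.VERSION):
        _registry[cls.__name__] = cls
    return cls


@register
class Host(object):
    VERSION = '1.0'


@register
class Host(object):  # a newer definition displaces the older one
    VERSION = '1.1'


assert _registry['Host'].VERSION == '1.1'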
- """ - return self[key] - - def _set_from_db_object(self, context, cls_object, db_object, fields): - """Sets object fields. - - :param context: security context - :param db_object: A DB entity of the object - :param fields: list of fields to set on obj from values from db_object. - """ - - for field in cls_object.fields: - if field in cls_object._optional_fields: - if not hasattr(db_object, field): - continue - - if field in cls_object._foreign_fields: - setattr(self, field, - cls_object._get_foreign_field(field, db_object)) - continue - - setattr(self, field, db_object[field]) - # cls_object[field] = db_object[field] - - @staticmethod - def _from_db_object(context, obj, db_object, fields=None): - """Converts a database entity to a formal object. - - This always converts the database entity to the latest version - of the object. Note that the latest version is available at - object.__class__.VERSION. object.VERSION is the version of this - particular object instance; it is possible that it is not the latest - version. - - :param context: security context - :param obj: An object of the class. - :param db_object: A DB entity of the object - :param fields: list of fields to set on obj from values from db_object. - :return: The object of the class with the database entity added - :raises: ovo_exception.IncompatibleObjectVersion - """ - # objname = obj.obj_name() - # db_version = db_object['version'] - - # if not versionutils.is_compatible(db_version, obj.__class__.VERSION): - # raise ovo_exception.IncompatibleObjectVersion( - # objname=objname, objver=db_version, - # supported=obj.__class__.VERSION) - - obj._set_from_db_object(context, obj, db_object, fields) - - obj._context = context - - # NOTE(rloo). We now have obj, a versioned object that corresponds to - # its DB representation. A versioned object has an internal attribute - # ._changed_fields; this is a list of changed fields -- used, e.g., - # when saving the object to the DB (only those changed fields are - # saved to the DB). The obj.obj_reset_changes() clears this list - # since we didn't actually make any modifications to the object that - # we want saved later. - obj.obj_reset_changes() - - # if db_version != obj.__class__.VERSION: - # # convert to the latest version - # obj.VERSION = db_version - # obj.convert_to_version(obj.__class__.VERSION, - # remove_unavailable_fields=False) - return obj - - def _convert_to_version(self, target_version, - remove_unavailable_fields=True): - """Convert to the target version. - - Subclasses should redefine this method, to do the conversion of the - object to the target version. - - Convert the object to the target version. The target version may be - the same, older, or newer than the version of the object. This is - used for DB interactions as well as for serialization/deserialization. - - The remove_unavailable_fields flag is used to distinguish these two - cases: - - 1) For serialization/deserialization, we need to remove the unavailable - fields, because the service receiving the object may not know about - these fields. remove_unavailable_fields is set to True in this case. - - 2) For DB interactions, we need to set the unavailable fields to their - appropriate values so that these fields are saved in the DB. (If - they are not set, the VersionedObject magic will not know to - save/update them to the DB.) remove_unavailable_fields is set to - False in this case. 
- - :param target_version: the desired version of the object - :param remove_unavailable_fields: True to remove fields that are - unavailable in the target version; set this to True when - (de)serializing. False to set the unavailable fields to appropriate - values; set this to False for DB interactions. - """ - pass - - def convert_to_version(self, target_version, - remove_unavailable_fields=True): - """Convert this object to the target version. - - Convert the object to the target version. The target version may be - the same, older, or newer than the version of the object. This is - used for DB interactions as well as for serialization/deserialization. - - The remove_unavailable_fields flag is used to distinguish these two - cases: - - 1) For serialization/deserialization, we need to remove the unavailable - fields, because the service receiving the object may not know about - these fields. remove_unavailable_fields is set to True in this case. - - 2) For DB interactions, we need to set the unavailable fields to their - appropriate values so that these fields are saved in the DB. (If - they are not set, the VersionedObject magic will not know to - save/update them to the DB.) remove_unavailable_fields is set to - False in this case. - - _convert_to_version() does the actual work. - - :param target_version: the desired version of the object - :param remove_unavailable_fields: True to remove fields that are - unavailable in the target version; set this to True when - (de)serializing. False to set the unavailable fields to appropriate - values; set this to False for DB interactions. - """ - if self.VERSION != target_version: - self._convert_to_version( - target_version, - remove_unavailable_fields=remove_unavailable_fields) - if remove_unavailable_fields: - # NOTE(rloo): We changed the object, but don't keep track of - # any of these changes, since it is inaccurate anyway (because - # it doesn't keep track of any 'changed' unavailable fields). - self.obj_reset_changes() - - # NOTE(rloo): self.__class__.VERSION is the latest version that - # is supported by this service. self.VERSION is the version of - # this object instance -- it may get set via e.g. the - # serialization or deserialization process, or here. - if (self.__class__.VERSION != target_version or - self.VERSION != self.__class__.VERSION): - self.VERSION = target_version - - @classmethod - def get_target_version(cls): - return cls.VERSION - - def do_version_changes_for_db(self): - """Change the object to the version needed for the database. - - If needed, this changes the object (modifies object fields) to be in - the correct version for saving to the database. - - The version used to save the object in the DB is determined as follows: - - * If the object is pinned, we save the object in the pinned version. - Since it is pinned, we must not save in a newer version, in case - a rolling upgrade is happening and some services are still using the - older version of inventory, with no knowledge of this newer version. - * If the object isn't pinned, we save the object in the latest version. - - Because the object may be converted to a different object version, this - method must only be called just before saving the object to the DB. - - :returns: a dictionary of changed fields and their new values - (could be an empty dictionary). These are the fields/values - of the object that would be saved to the DB. 
- """ - target_version = self.get_target_version() - - if target_version != self.VERSION: - # Convert the object so we can save it in the target version. - self.convert_to_version(target_version, - remove_unavailable_fields=False) - - changes = self.obj_get_changes() - # NOTE(rloo): Since this object doesn't keep track of the version that - # is saved in the DB and we don't want to make a DB call - # just to find out, we always update 'version' in the DB. - changes['version'] = self.VERSION - - return changes - - @classmethod - def _from_db_object_list(cls, context, db_objects): - """Returns objects corresponding to database entities. - - Returns a list of formal objects of this class that correspond to - the list of database entities. - - :param cls: the VersionedObject class of the desired object - :param context: security context - :param db_objects: A list of DB models of the object - :returns: A list of objects corresponding to the database entities - """ - return [cls._from_db_object(context, cls(), db_obj) - for db_obj in db_objects] - - def save(self, context=None): - updates = {} - changes = self.do_version_changes_for_db() - - for field in changes: - if field == 'version': - continue - updates[field] = self[field] - - self.save_changes(context, updates) - self.obj_reset_changes() - - -class InventoryObjectSerializer(object_base.VersionedObjectSerializer): - # Base class to use for object hydration - OBJ_BASE_CLASS = InventoryObject - - -def obj_to_primitive(obj): - """Recursively turn an object into a python primitive. - - An InventoryObject becomes a dict, and anything that implements - ObjectListBase becomes a list. - """ - if isinstance(obj, object_base.ObjectListBase): - return [obj_to_primitive(x) for x in obj] - elif isinstance(obj, InventoryObject): - result = {} - for key, value in obj.iteritems(): - result[key] = obj_to_primitive(value) - return result - else: - return obj diff --git a/inventory/inventory/inventory/objects/cpu.py b/inventory/inventory/inventory/objects/cpu.py deleted file mode 100644 index bf187bf9..00000000 --- a/inventory/inventory/inventory/objects/cpu.py +++ /dev/null @@ -1,119 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class CPU(base.InventoryObject, object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(), - 'uuid': object_fields.StringField(nullable=True), - 'host_id': object_fields.IntegerField(), - 'host_uuid': object_fields.StringField(nullable=True), - 'node_id': object_fields.IntegerField(nullable=True), - 'node_uuid': object_fields.StringField(nullable=True), - 'numa_node': object_fields.IntegerField(nullable=True), - 'cpu': object_fields.IntegerField(), - 'core': object_fields.IntegerField(nullable=True), - 'thread': object_fields.IntegerField(nullable=True), - 'cpu_family': object_fields.StringField(nullable=True), - 'cpu_model': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - # part of config - # 'allocated_function': object_fields.StringField(nullable=True), - } - - _foreign_fields = {'host_uuid': 'host:uuid', - 'node_uuid': 'node:uuid', - 'numa_node': 'node:numa_node'} - - @classmethod - def get_by_uuid(cls, context, uuid): - db_cpu = cls.dbapi.cpu_get(uuid) - return cls._from_db_object(context, cls(), db_cpu) - - def save_changes(self, context, updates): - self.dbapi.cpu_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of CPU objects. - - :param cls: the :class:`CPU` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`CPU` object. - - """ - db_cpus = cls.dbapi.cpu_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_cpus) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_cpus = cls.dbapi.cpu_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_cpus) - - @classmethod - def get_by_node(cls, context, node_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_cpus = cls.dbapi.cpu_get_by_node( - node_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_cpus) - - @classmethod - def get_by_host_node(cls, context, host_uuid, node_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_cpus = cls.dbapi.cpu_get_by_host_node( - host_uuid, - node_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_cpus) - - def create(self, context=None): - """Create a CPU record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. 
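CPU shows the shape every remaining object in this module follows: classmethods fetch rows through the dbapi singleton and hydrate objects via _from_db_object, while create() and save_changes() push values back. A compact stand-alone sketch with an in-memory dict standing in for the database (FakeDB and all names are illustrative):

class FakeDB(object):
    """In-memory stand-in for inventory.db.api (illustrative only)."""

    def __init__(self):
        self.rows = {}

    def cpu_create(self, values):
        self.rows[values['uuid']] = dict(values)
        return self.rows[values['uuid']]

    def cpu_get(self, uuid):
        return self.rows[uuid]


class CPU(object):
    dbapi = FakeDB()

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def get_by_uuid(cls, uuid):
        # hydrate an object from the row, as _from_db_object does
        return cls(**cls.dbapi.cpu_get(uuid))

    def create(self):
        self.dbapi.cpu_create(vars(self))
        return self


CPU(uuid='u1', cpu=0).create()
print(CPU.get_by_uuid('u1').cpu)  # -> 0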
- """ - values = self.do_version_changes_for_db() - db_cpu = self.dbapi.cpu_create(values) - return self._from_db_object(self._context, self, db_cpu) diff --git a/inventory/inventory/inventory/objects/fields.py b/inventory/inventory/inventory/objects/fields.py deleted file mode 100644 index d85c5cae..00000000 --- a/inventory/inventory/inventory/objects/fields.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import ast -import hashlib -import inspect - -from oslo_versionedobjects import fields as object_fields -import six - -from inventory.common import utils - - -class IntegerField(object_fields.IntegerField): - pass - - -class UUIDField(object_fields.UUIDField): - pass - - -class StringField(object_fields.StringField): - pass - - -class StringAcceptsCallable(object_fields.String): - @staticmethod - def coerce(obj, attr, value): - if callable(value): - value = value() - return super(StringAcceptsCallable, StringAcceptsCallable).coerce( - obj, attr, value) - - -class StringFieldThatAcceptsCallable(object_fields.StringField): - """Custom StringField object that allows for functions as default - - In some cases we need to allow for dynamic defaults based on configuration - options, this StringField object allows for a function to be passed as a - default, and will only process it at the point the field is coerced - """ - - AUTO_TYPE = StringAcceptsCallable() - - def __repr__(self): - default = self._default - if (self._default != object_fields.UnspecifiedDefault and - callable(self._default)): - default = "%s-%s" % ( - self._default.__name__, - hashlib.md5(inspect.getsource( - self._default).encode()).hexdigest()) - return '%s(default=%s,nullable=%s)' % (self._type.__class__.__name__, - default, self._nullable) - - -class DateTimeField(object_fields.DateTimeField): - pass - - -class BooleanField(object_fields.BooleanField): - pass - - -class ListOfStringsField(object_fields.ListOfStringsField): - pass - - -class ObjectField(object_fields.ObjectField): - pass - - -class ListOfObjectsField(object_fields.ListOfObjectsField): - pass - - -class FlexibleDict(object_fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if isinstance(value, six.string_types): - value = ast.literal_eval(value) - return dict(value) - - -class FlexibleDictField(object_fields.AutoTypedField): - AUTO_TYPE = FlexibleDict() - - # TODO(lucasagomes): In our code we've always translated None to {}, - # this method makes this field to work like this. 
But probably won't - # be accepted as-is in the oslo_versionedobjects library - def _null(self, obj, attr): - if self.nullable: - return {} - super(FlexibleDictField, self)._null(obj, attr) - - -class EnumField(object_fields.EnumField): - pass - - -class NotificationLevel(object_fields.Enum): - DEBUG = 'debug' - INFO = 'info' - WARNING = 'warning' - ERROR = 'error' - CRITICAL = 'critical' - - ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) - - def __init__(self): - super(NotificationLevel, self).__init__( - valid_values=NotificationLevel.ALL) - - -class NotificationLevelField(object_fields.BaseEnumField): - AUTO_TYPE = NotificationLevel() - - -class NotificationStatus(object_fields.Enum): - START = 'start' - END = 'end' - ERROR = 'error' - SUCCESS = 'success' - - ALL = (START, END, ERROR, SUCCESS) - - def __init__(self): - super(NotificationStatus, self).__init__( - valid_values=NotificationStatus.ALL) - - -class NotificationStatusField(object_fields.BaseEnumField): - AUTO_TYPE = NotificationStatus() - - -class MACAddress(object_fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - return utils.validate_and_normalize_mac(value) - - -class MACAddressField(object_fields.AutoTypedField): - AUTO_TYPE = MACAddress() diff --git a/inventory/inventory/inventory/objects/host.py b/inventory/inventory/inventory/objects/host.py deleted file mode 100644 index e107dff7..00000000 --- a/inventory/inventory/inventory/objects/host.py +++ /dev/null @@ -1,118 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class Host(base.InventoryObject, object_base.VersionedObjectDictCompat): - - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - - 'recordtype': object_fields.StringField(nullable=True), - 'hostname': object_fields.StringField(nullable=True), - - 'personality': object_fields.StringField(nullable=True), - 'subfunctions': object_fields.StringField(nullable=True), - 'subfunction_oper': object_fields.StringField(nullable=True), - 'subfunction_avail': object_fields.StringField(nullable=True), - 'reserved': object_fields.StringField(nullable=True), - - 'invprovision': object_fields.StringField(nullable=True), - 'mgmt_mac': object_fields.StringField(nullable=True), - 'mgmt_ip': object_fields.StringField(nullable=True), - - # Board management members - 'bm_ip': object_fields.StringField(nullable=True), - 'bm_mac': object_fields.StringField(nullable=True), - 'bm_type': object_fields.StringField(nullable=True), - 'bm_username': object_fields.StringField(nullable=True), - - 'location': object_fields.FlexibleDictField(nullable=True), - 'serialid': object_fields.StringField(nullable=True), - 'administrative': object_fields.StringField(nullable=True), - 'operational': object_fields.StringField(nullable=True), - 'availability': object_fields.StringField(nullable=True), - 'host_action': object_fields.StringField(nullable=True), - 'action_state': object_fields.StringField(nullable=True), - 'mtce_info': object_fields.StringField(nullable=True), - 'vim_progress_status': object_fields.StringField(nullable=True), - 'action': object_fields.StringField(nullable=True), - 'task': object_fields.StringField(nullable=True), - 
'uptime': object_fields.IntegerField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - - 'boot_device': object_fields.StringField(nullable=True), - 'rootfs_device': object_fields.StringField(nullable=True), - 'install_output': object_fields.StringField(nullable=True), - 'console': object_fields.StringField(nullable=True), - 'tboot': object_fields.StringField(nullable=True), - 'ttys_dcd': object_fields.StringField(nullable=True), - 'install_state': object_fields.StringField(nullable=True), - 'install_state_info': object_fields.StringField(nullable=True), - 'iscsi_initiator_name': object_fields.StringField(nullable=True), - } - - @classmethod - def get_by_uuid(cls, context, uuid): - db_host = cls.dbapi.host_get(uuid) - return cls._from_db_object(context, cls(), db_host) - - @classmethod - def get_by_filters_one(cls, context, filters): - db_host = cls.dbapi.host_get_by_filters_one(filters) - return cls._from_db_object(context, cls(), db_host) - - def save_changes(self, context, updates): - self.dbapi.host_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of Host objects. - - :param cls: the :class:`Host` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`Host` object. - - """ - db_hosts = cls.dbapi.host_get_list(filters=filters, limit=limit, - marker=marker, sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_hosts) - - def create(self, context=None): - """Create a Host record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Host(context) - :raises: InvalidParameterValue if some property values are invalid. - """ - values = self.do_version_changes_for_db() - # self._validate_property_values(values.get('properties')) - db_host = self.dbapi.host_create(values) - return self._from_db_object(self._context, self, db_host) diff --git a/inventory/inventory/inventory/objects/lldp_agent.py b/inventory/inventory/inventory/objects/lldp_agent.py deleted file mode 100644 index 5735da4e..00000000 --- a/inventory/inventory/inventory/objects/lldp_agent.py +++ /dev/null @@ -1,122 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from oslo_versionedobjects import base as object_base - -from inventory.common import k_lldp -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields - - -def get_lldp_tlvs(field, db_object): - if hasattr(db_object, field): - return db_object[field] - if hasattr(db_object, 'lldptlvs'): - tlv_object = db_object['lldptlvs'] - if tlv_object: - for tlv in tlv_object: - if tlv['type'] == field: - return tlv['value'] - return None - - -@base.InventoryObjectRegistry.register -class LLDPAgent(base.InventoryObject, object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = {'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'status': object_fields.StringField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.StringField(nullable=True), - 'port_id': object_fields.IntegerField(nullable=True), - 'port_uuid': object_fields.UUIDField(nullable=True), - 'port_name': object_fields.StringField(nullable=True), - 'port_namedisplay': object_fields.StringField(nullable=True)} - - _foreign_fields = { - 'host_uuid': 'host:uuid', - 'port_uuid': 'port:uuid', - 'port_name': 'port:name', - 'port_namedisplay': 'port:namedisplay', - } - - for tlv in k_lldp.LLDP_TLV_VALID_LIST: - fields.update({tlv: object_fields.StringField(nullable=True)}) - _foreign_fields.update({tlv: get_lldp_tlvs}) - - @classmethod - def get_by_uuid(cls, context, uuid): - db_lldp_agent = cls.dbapi.lldp_agent_get(uuid) - return cls._from_db_object(context, cls(), db_lldp_agent) - - def save_changes(self, context, updates): - self.dbapi.lldp_agent_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of LLDPAgent objects. - - :param cls: the :class:`LLDPAgent` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`LLDPAgent` object. - - """ - db_lldp_agents = cls.dbapi.lldp_agent_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_agents) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_lldp_agents = cls.dbapi.lldp_agent_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_agents) - - @classmethod - def get_by_port(cls, context, port_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_lldp_agents = cls.dbapi.lldp_agent_get_by_port( - port_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_agents) - - def create(self, context, portid, hostid, values): - """Create a LLDPAgent record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. 
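LLDPAgent builds one object field per valid TLV name and routes each through the get_lldp_tlvs callable, which falls back to scanning the row's lldptlvs relationship. A runnable adaptation using a dict for the row (the TLV names below are samples; the real list lives in inventory.common.k_lldp):

# Sample names; the real list is inventory.common.k_lldp.LLDP_TLV_VALID_LIST.
LLDP_TLV_VALID_LIST = ['chassis_id', 'port_identifier', 'ttl']


def get_lldp_tlvs(field, db_object):
    if field in db_object:            # hasattr() on a real DB row
        return db_object[field]
    for tlv in db_object.get('lldptlvs', []):
        if tlv['type'] == field:
            return tlv['value']
    return None


row = {'lldptlvs': [{'type': 'ttl', 'value': '120'}]}
print(get_lldp_tlvs('ttl', row))  # -> '120'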
- :param portid: port id - :param hostid: host id - :param values: dictionary of values - """ - values = self.do_version_changes_for_db() - db_lldp_agent = self.dbapi.lldp_agent_create(portid, hostid, values) - return self._from_db_object(self._context, self, db_lldp_agent) diff --git a/inventory/inventory/inventory/objects/lldp_neighbour.py b/inventory/inventory/inventory/objects/lldp_neighbour.py deleted file mode 100644 index 5df7f0a8..00000000 --- a/inventory/inventory/inventory/objects/lldp_neighbour.py +++ /dev/null @@ -1,124 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from oslo_versionedobjects import base as object_base - -from inventory.common import k_lldp -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields - - -def get_lldp_tlvs(field, db_object): - if hasattr(db_object, field): - return db_object[field] - if hasattr(db_object, 'lldptlvs'): - tlv_object = db_object['lldptlvs'] - if tlv_object: - for tlv in tlv_object: - if tlv['type'] == field: - return tlv['value'] - return None - - -@base.InventoryObjectRegistry.register -class LLDPNeighbour(base.InventoryObject, - object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = {'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'msap': object_fields.StringField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - 'port_id': object_fields.IntegerField(nullable=True), - 'port_uuid': object_fields.UUIDField(nullable=True), - 'port_name': object_fields.StringField(nullable=True), - 'port_namedisplay': object_fields.StringField(nullable=True)} - - _foreign_fields = { - 'host_uuid': 'host:uuid', - 'port_uuid': 'port:uuid', - 'port_name': 'port:name', - 'port_namedisplay': 'port:namedisplay', - } - - for tlv in k_lldp.LLDP_TLV_VALID_LIST: - fields.update({tlv: object_fields.StringField(nullable=True)}) - _foreign_fields.update({tlv: get_lldp_tlvs}) - - @classmethod - def get_by_uuid(cls, context, uuid): - db_lldp_neighbour = cls.dbapi.lldp_neighbour_get(uuid) - return cls._from_db_object(context, cls(), db_lldp_neighbour) - - def save_changes(self, context, updates): - self.dbapi.lldp_neighbour_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of LLDPNeighbour objects. - - :param cls: the :class:`LLDPNeighbour` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`LLDPNeighbour` object. 
- - """ - db_lldp_neighbours = cls.dbapi.lldp_neighbour_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_neighbours) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_lldp_neighbours = cls.dbapi.lldp_neighbour_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_neighbours) - - @classmethod - def get_by_port(cls, context, port_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_lldp_neighbours = cls.dbapi.lldp_neighbour_get_by_port( - port_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_neighbours) - - def create(self, context, portid, hostid, values): - """Create a LLDPAgent record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. - :param portid: port id - :param hostid: host id - :param values: dictionary of values - """ - values = self.do_version_changes_for_db() - db_lldp_neighbour = self.dbapi.lldp_neighbour_create( - portid, hostid, values) - return self._from_db_object(self._context, self, db_lldp_neighbour) diff --git a/inventory/inventory/inventory/objects/lldp_tlv.py b/inventory/inventory/inventory/objects/lldp_tlv.py deleted file mode 100644 index a6f1b21f..00000000 --- a/inventory/inventory/inventory/objects/lldp_tlv.py +++ /dev/null @@ -1,114 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class LLDPTLV(base.InventoryObject, object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = {'id': object_fields.IntegerField(nullable=True), - 'agent_id': object_fields.IntegerField(nullable=True), - 'agent_uuid': object_fields.UUIDField(nullable=True), - 'neighbour_id': object_fields.IntegerField(nullable=True), - 'neighbour_uuid': object_fields.UUIDField(nullable=True), - 'type': object_fields.StringField(nullable=True), - 'value': object_fields.StringField(nullable=True)} - - _foreign_fields = { - 'agent_uuid': 'lldp_agent:uuid', - 'neighbour_uuid': 'lldp_neighbour:uuid', - } - - @classmethod - def get_by_id(cls, context, id): - db_lldp_tlv = cls.dbapi.lldp_tlv_get_by_id(id) - return cls._from_db_object(context, cls(), db_lldp_tlv) - - def save_changes(self, context, updates): - self.dbapi.lldp_tlv_update(self.id, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of LLDPTLV objects. - - :param cls: the :class:`LLDPTLV` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`LLDPTLV` object. 
- - """ - db_lldp_tlvs = cls.dbapi.lldp_tlv_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_tlvs) - - @classmethod - def get_by_neighbour(cls, context, neighbour_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_lldp_tlvs = cls.dbapi.lldp_tlv_get_by_neighbour( - neighbour_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_tlvs) - - @classmethod - def get_by_agent(cls, context, agent_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_lldp_tlvs = cls.dbapi.lldp_tlv_get_by_agent( - agent_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_lldp_tlvs) - - def create(self, values, context=None, agentid=None, neighbourid=None): - """Create a LLDPTLV record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. - :param agentid: agent id - :param neighbourid: neighbour id - :param values: dictionary of values - """ - values = self.do_version_changes_for_db() - db_lldp_tlv = self.dbapi.lldp_tlv_create( - values, agentid, neighbourid) - return self._from_db_object(self._context, self, db_lldp_tlv) - - def destroy(self, context=None): - """Delete the LLDPTLV from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Node(context) - """ - self.dbapi.lldp_tlv_destroy(self.id) - self.obj_reset_changes() diff --git a/inventory/inventory/inventory/objects/memory.py b/inventory/inventory/inventory/objects/memory.py deleted file mode 100644 index f4adf86a..00000000 --- a/inventory/inventory/inventory/objects/memory.py +++ /dev/null @@ -1,141 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class Memory(base.InventoryObject, object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'node_id': object_fields.IntegerField(nullable=True), - 'node_uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - 'numa_node': object_fields.IntegerField(nullable=True), - - 'memtotal_mib': object_fields.IntegerField(nullable=True), - 'memavail_mib': object_fields.IntegerField(nullable=True), - 'platform_reserved_mib': object_fields.IntegerField(nullable=True), - 'node_memtotal_mib': object_fields.IntegerField(nullable=True), - - 'hugepages_configured': object_fields.StringField(nullable=True), - - 'vswitch_hugepages_size_mib': - object_fields.IntegerField(nullable=True), - 'vswitch_hugepages_reqd': object_fields.IntegerField(nullable=True), - 'vswitch_hugepages_nr': object_fields.IntegerField(nullable=True), - 'vswitch_hugepages_avail': object_fields.IntegerField(nullable=True), - - 'vm_hugepages_nr_2M_pending': - object_fields.IntegerField(nullable=True), - 'vm_hugepages_nr_1G_pending': - object_fields.IntegerField(nullable=True), - 'vm_hugepages_nr_2M': object_fields.IntegerField(nullable=True), - 'vm_hugepages_avail_2M': object_fields.IntegerField(nullable=True), - 'vm_hugepages_nr_1G': object_fields.IntegerField(nullable=True), - 'vm_hugepages_avail_1G': object_fields.IntegerField(nullable=True), - 'vm_hugepages_nr_4K': object_fields.IntegerField(nullable=True), - - - 'vm_hugepages_use_1G': object_fields.StringField(nullable=True), - 'vm_hugepages_possible_2M': object_fields.IntegerField(nullable=True), - 'vm_hugepages_possible_1G': object_fields.IntegerField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - } - - _foreign_fields = {'host_uuid': 'host:uuid', - 'node_uuid': 'node:uuid', - 'numa_node': 'node:numa_node'} - - @classmethod - def get_by_uuid(cls, context, uuid): - db_memory = cls.dbapi.memory_get(uuid) - return cls._from_db_object(context, cls(), db_memory) - - def save_changes(self, context, updates): - self.dbapi.memory_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of Memory objects. - - :param cls: the :class:`Memory` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`Memory` object. 
- - """ - db_memorys = cls.dbapi.memory_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_memorys) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_memorys = cls.dbapi.memory_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_memorys) - - @classmethod - def get_by_host_node(cls, context, host_uuid, node_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_memorys = cls.dbapi.memory_get_by_host_node( - host_uuid, - node_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_memorys) - - @classmethod - def get_by_node(cls, context, node_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_memorys = cls.dbapi.memory_get_by_node( - node_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_memorys) - - def create(self, context=None): - """Create a Memory record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. - """ - values = self.do_version_changes_for_db() - db_memory = self.dbapi.memory_create(values) - return self._from_db_object(self._context, self, db_memory) diff --git a/inventory/inventory/inventory/objects/node.py b/inventory/inventory/inventory/objects/node.py deleted file mode 100644 index aa147683..00000000 --- a/inventory/inventory/inventory/objects/node.py +++ /dev/null @@ -1,73 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class Node(base.InventoryObject, - object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=False), - 'host_uuid': object_fields.StringField(nullable=True), - - 'numa_node': object_fields.IntegerField(nullable=False), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - } - - _foreign_fields = {'host_uuid': 'host:uuid'} - - @classmethod - def get_by_uuid(cls, context, uuid): - db_node = cls.dbapi.node_get(uuid) - return cls._from_db_object(context, cls(), db_node) - - def save_changes(self, context, updates): - self.dbapi.node_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of Memory objects. - - :param cls: the :class:`Memory` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`Memory` object. 
- - """ - db_nodes = cls.dbapi.node_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_nodes) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_nodes = cls.dbapi.node_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_nodes) diff --git a/inventory/inventory/inventory/objects/pci_device.py b/inventory/inventory/inventory/objects/pci_device.py deleted file mode 100644 index ccd1226e..00000000 --- a/inventory/inventory/inventory/objects/pci_device.py +++ /dev/null @@ -1,99 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class PCIDevice(base.InventoryObject, - object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - 'name': object_fields.StringField(nullable=True), - 'pciaddr': object_fields.StringField(nullable=True), - 'pclass_id': object_fields.StringField(nullable=True), - 'pvendor_id': object_fields.StringField(nullable=True), - 'pdevice_id': object_fields.StringField(nullable=True), - 'pclass': object_fields.StringField(nullable=True), - 'pvendor': object_fields.StringField(nullable=True), - 'pdevice': object_fields.StringField(nullable=True), - 'psvendor': object_fields.StringField(nullable=True), - 'psdevice': object_fields.StringField(nullable=True), - 'numa_node': object_fields.IntegerField(nullable=True), - 'sriov_totalvfs': object_fields.IntegerField(nullable=True), - 'sriov_numvfs': object_fields.IntegerField(nullable=True), - 'sriov_vfs_pci_address': object_fields.StringField(nullable=True), - 'driver': object_fields.StringField(nullable=True), - 'enabled': object_fields.BooleanField(nullable=True), - 'extra_info': object_fields.StringField(nullable=True), - } - - _foreign_fields = { - 'host_uuid': 'host:uuid' - } - - @classmethod - def get_by_uuid(cls, context, uuid): - db_pci_device = cls.dbapi.pci_device_get(uuid) - return cls._from_db_object(context, cls(), db_pci_device) - - def save_changes(self, context, updates): - self.dbapi.pci_device_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None): - """Return a list of CPU objects. - - :param cls: the :class:`CPU` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`CPU` object. 
- - """ - db_pci_devices = cls.dbapi.pci_device_get_list( - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_pci_devices) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_pci_devices = cls.dbapi.pci_device_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_pci_devices) - - def create(self, context=None): - """Create a CPU record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. - """ - values = self.do_version_changes_for_db() - db_pci_device = self.dbapi.pci_device_create(values) - return self._from_db_object(self._context, self, db_pci_device) diff --git a/inventory/inventory/inventory/objects/port.py b/inventory/inventory/inventory/objects/port.py deleted file mode 100644 index 4cd7e8fb..00000000 --- a/inventory/inventory/inventory/objects/port.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class Port(base.InventoryObject, object_base.VersionedObjectDictCompat): - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - 'node_id': object_fields.IntegerField(nullable=True), - 'node_uuid': object_fields.UUIDField(nullable=True), - - 'type': object_fields.StringField(nullable=True), - 'name': object_fields.StringField(nullable=True), - 'namedisplay': object_fields.StringField(nullable=True), - 'pciaddr': object_fields.StringField(nullable=True), - 'dev_id': object_fields.IntegerField(nullable=True), - 'pclass': object_fields.StringField(nullable=True), - 'pvendor': object_fields.StringField(nullable=True), - 'pdevice': object_fields.StringField(nullable=True), - 'psvendor': object_fields.StringField(nullable=True), - 'dpdksupport': object_fields.BooleanField(nullable=True), - 'psdevice': object_fields.StringField(nullable=True), - 'numa_node': object_fields.IntegerField(nullable=True), - 'sriov_totalvfs': object_fields.IntegerField(nullable=True), - 'sriov_numvfs': object_fields.IntegerField(nullable=True), - 'sriov_vfs_pci_address': object_fields.StringField(nullable=True), - 'driver': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - } - - # interface_uuid is in systemconfig - _foreign_fields = {'host_uuid': 'host:uuid', - 'node_uuid': 'node:uuid', - } - - @classmethod - def get_by_uuid(cls, context, uuid): - db_port = cls.dbapi.port_get(uuid) - return cls._from_db_object(context, cls(), db_port) - - def save_changes(self, context, updates): - self.dbapi.port_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of Port objects. - - :param cls: the :class:`Port` - :param context: Security context. 
- :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`Port` objects. - - """ - db_ports = cls.dbapi.port_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_ports) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_ports = cls.dbapi.port_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_ports) - - @classmethod - def get_by_numa_node(cls, context, node_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_ports = cls.dbapi.port_get_by_numa_node( - node_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_ports) - - def create(self, context=None): - """Create a Port record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. - :raises: InvalidParameterValue if some property values are invalid. - """ - values = self.do_version_changes_for_db() - db_port = self.dbapi.port_create(values) - return self._from_db_object(self._context, self, db_port) diff --git a/inventory/inventory/inventory/objects/port_ethernet.py b/inventory/inventory/inventory/objects/port_ethernet.py deleted file mode 100644 index c1284472..00000000 --- a/inventory/inventory/inventory/objects/port_ethernet.py +++ /dev/null @@ -1,93 +0,0 @@ -# -# Copyright (c) 2013-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.objects import base -from inventory.objects import fields as object_fields -from inventory.objects import port -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class EthernetPort(port.Port, object_base.VersionedObjectDictCompat): - - fields = dict({ - 'mac': object_fields.StringField(nullable=True), - 'mtu': object_fields.IntegerField(nullable=True), - 'speed': object_fields.IntegerField(nullable=True), - 'link_mode': object_fields.StringField(nullable=True), - 'duplex': object_fields.IntegerField(nullable=True), - 'autoneg': object_fields.StringField(nullable=True), - 'bootp': object_fields.StringField(nullable=True)}, - **port.Port.fields) - - @classmethod - def get_by_uuid(cls, context, uuid): - db_ethernet_port = cls.dbapi.ethernet_port_get(uuid) - return cls._from_db_object(context, cls(), db_ethernet_port) - - def save_changes(self, context, updates): - self.dbapi.ethernet_port_update(self.uuid, updates) - - @classmethod - def list(cls, context, limit=None, marker=None, sort_key=None, - sort_dir=None, filters=None): - """Return a list of EthernetPort objects. - - :param cls: the :class:`EthernetPort` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param filters: Filters to apply. - :returns: a list of :class:`EthernetPort` objects.
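
        A usage sketch of the filters parameter (illustrative only; the
        'host_uuid' filter key is an assumption made for the example, not a
        documented contract):

            ports = EthernetPort.list(context,
                                      filters={'host_uuid': host.uuid},
                                      sort_key='name')
            for p in ports:
                print(p.name, p.mac, p.mtu)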
- - """ - db_ethernet_ports = cls.dbapi.ethernet_port_get_list( - filters=filters, - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_ethernet_ports) - - @classmethod - def get_by_host(cls, context, host_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_ethernet_ports = cls.dbapi.ethernet_port_get_by_host( - host_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_ethernet_ports) - - @classmethod - def get_by_numa_node(cls, context, node_uuid, - limit=None, marker=None, - sort_key=None, sort_dir=None): - db_ethernet_ports = cls.dbapi.ethernet_port_get_by_numa_node( - node_uuid, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return cls._from_db_object_list(context, db_ethernet_ports) - - def create(self, context=None): - """Create a EthernetPort record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. - :raises: InvalidParameterValue if some property values are invalid. - """ - values = self.do_version_changes_for_db() - db_ethernet_port = self.dbapi.ethernet_port_create(values) - return self._from_db_object(self._context, self, db_ethernet_port) diff --git a/inventory/inventory/inventory/objects/sensor.py b/inventory/inventory/inventory/objects/sensor.py deleted file mode 100644 index a6928dc8..00000000 --- a/inventory/inventory/inventory/objects/sensor.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class Sensor(base.InventoryObject, object_base.VersionedObjectDictCompat): - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - 'sensorgroup_id': object_fields.IntegerField(nullable=True), - 'sensorgroup_uuid': object_fields.UUIDField(nullable=True), - - 'sensorname': object_fields.StringField(nullable=True), - 'path': object_fields.StringField(nullable=True), - 'datatype': object_fields.StringField(nullable=True), - 'sensortype': object_fields.StringField(nullable=True), - - 'status': object_fields.StringField(nullable=True), - 'state': object_fields.StringField(nullable=True), - 'state_requested': object_fields.IntegerField(nullable=True), - 'audit_interval': object_fields.IntegerField(nullable=True), - 'algorithm': object_fields.StringField(nullable=True), - 'sensor_action_requested': object_fields.StringField(nullable=True), - 'actions_minor': object_fields.StringField(nullable=True), - 'actions_major': object_fields.StringField(nullable=True), - 'actions_critical': object_fields.StringField(nullable=True), - - 'unit_base': object_fields.StringField(nullable=True), - 'unit_modifier': object_fields.StringField(nullable=True), - 'unit_rate': object_fields.StringField(nullable=True), - - 't_minor_lower': object_fields.StringField(nullable=True), - 't_minor_upper': object_fields.StringField(nullable=True), - 't_major_lower': 
object_fields.StringField(nullable=True), - 't_major_upper': object_fields.StringField(nullable=True), - 't_critical_lower': object_fields.StringField(nullable=True), - 't_critical_upper': object_fields.StringField(nullable=True), - - 'suppress': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True) - } - - _foreign_fields = { - 'host_uuid': 'host:uuid', - 'sensorgroup_uuid': 'sensorgroup:uuid', - } - - _optional_fields = [ - 'unit_base', - 'unit_modifier', - 'unit_rate', - - 't_minor_lower', - 't_minor_upper', - 't_major_lower', - 't_major_upper', - 't_critical_lower', - 't_critical_upper', - ] - - @object_base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.isensor_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.isensor_update(self.uuid, updates) diff --git a/inventory/inventory/inventory/objects/sensor_analog.py b/inventory/inventory/inventory/objects/sensor_analog.py deleted file mode 100644 index 3206593c..00000000 --- a/inventory/inventory/inventory/objects/sensor_analog.py +++ /dev/null @@ -1,72 +0,0 @@ -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class SensorAnalog(base.InventoryObject, - object_base.VersionedObjectDictCompat): - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - - 'sensorgroup_id': object_fields.IntegerField(nullable=True), - 'sensorgroup_uuid': object_fields.UUIDField(nullable=True), - - 'sensorname': object_fields.StringField(nullable=True), - 'path': object_fields.StringField(nullable=True), - 'datatype': object_fields.StringField(nullable=True), - 'sensortype': object_fields.StringField(nullable=True), - - 'status': object_fields.StringField(nullable=True), - 'state': object_fields.StringField(nullable=True), - 'state_requested': object_fields.IntegerField(nullable=True), - 'sensor_action_requested': object_fields.StringField(nullable=True), - 'audit_interval': object_fields.IntegerField(nullable=True), - 'algorithm': object_fields.StringField(nullable=True), - 'actions_minor': object_fields.StringField(nullable=True), - 'actions_major': object_fields.StringField(nullable=True), - 'actions_critical': object_fields.StringField(nullable=True), - - 'unit_base': object_fields.StringField(nullable=True), - 'unit_modifier': object_fields.StringField(nullable=True), - 'unit_rate': object_fields.StringField(nullable=True), - - 't_minor_lower': object_fields.StringField(nullable=True), - 't_minor_upper': object_fields.StringField(nullable=True), - 't_major_lower': object_fields.StringField(nullable=True), - 't_major_upper': object_fields.StringField(nullable=True), - 't_critical_lower': object_fields.StringField(nullable=True), - 't_critical_upper': object_fields.StringField(nullable=True), - - 'suppress': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - } - - _foreign_fields = { - 'host_uuid': 'host:uuid', - 'sensorgroup_uuid': 'sensorgroup:uuid', - } 
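The '_foreign_fields' mappings above use a 'relation:attribute' spec to fill an object attribute from a related database row. A minimal sketch of the resolution such a spec implies (a hypothetical helper shown only to illustrate the convention; the actual logic lives in the shared object base class):

    def resolve_foreign(db_obj, spec):
        # e.g. spec 'host:uuid' follows db_obj.host, then reads .uuid
        relation, _, attr = spec.partition(':')
        related = getattr(db_obj, relation, None)
        return getattr(related, attr, None) if related is not None else None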
- - @object_base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.isensor_analog_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.isensor_analog_update(self.uuid, updates) diff --git a/inventory/inventory/inventory/objects/sensor_discrete.py b/inventory/inventory/inventory/objects/sensor_discrete.py deleted file mode 100644 index 42e3dfef..00000000 --- a/inventory/inventory/inventory/objects/sensor_discrete.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class SensorDiscrete(base.InventoryObject, - object_base.VersionedObjectDictCompat): - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - - 'sensorgroup_id': object_fields.IntegerField(nullable=True), - 'sensorgroup_uuid': object_fields.UUIDField(nullable=True), - - 'sensorname': object_fields.StringField(nullable=True), - 'path': object_fields.StringField(nullable=True), - 'datatype': object_fields.StringField(nullable=True), - 'sensortype': object_fields.StringField(nullable=True), - - 'status': object_fields.StringField(nullable=True), - 'state': object_fields.StringField(nullable=True), - 'state_requested': object_fields.IntegerField(nullable=True), - 'audit_interval': object_fields.IntegerField(nullable=True), - 'algorithm': object_fields.StringField(nullable=True), - 'sensor_action_requested': object_fields.StringField(nullable=True), - 'actions_minor': object_fields.StringField(nullable=True), - 'actions_major': object_fields.StringField(nullable=True), - 'actions_critical': object_fields.StringField(nullable=True), - - 'suppress': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True) - } - - _foreign_fields = { - 'host_uuid': 'host:uuid', - 'sensorgroup_uuid': 'sensorgroup:uuid', - } - - @object_base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.isensor_discrete_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.isensor_discrete_update(self.uuid, updates) diff --git a/inventory/inventory/inventory/objects/sensorgroup.py b/inventory/inventory/inventory/objects/sensorgroup.py deleted file mode 100644 index 5955c44a..00000000 --- a/inventory/inventory/inventory/objects/sensorgroup.py +++ /dev/null @@ -1,86 +0,0 @@ -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class SensorGroup(base.InventoryObject, - object_base.VersionedObjectDictCompat): - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - 'host_uuid': object_fields.UUIDField(nullable=True), - - 'sensorgroupname': object_fields.StringField(nullable=True), - 'path': object_fields.StringField(nullable=True), - - 'datatype': object_fields.StringField(nullable=True), - 'sensortype': object_fields.StringField(nullable=True), - 'description': object_fields.StringField(nullable=True), - - 'state': object_fields.StringField(nullable=True), - 'possible_states': object_fields.StringField(nullable=True), - 'audit_interval_group': object_fields.IntegerField(nullable=True), - 'record_ttl': object_fields.StringField(nullable=True), - - 'algorithm': object_fields.StringField(nullable=True), - 'actions_minor_group': object_fields.StringField(nullable=True), - 'actions_major_group': object_fields.StringField(nullable=True), - 'actions_critical_group': object_fields.StringField(nullable=True), - - 'unit_base_group': object_fields.StringField(nullable=True), - 'unit_modifier_group': object_fields.StringField(nullable=True), - 'unit_rate_group': object_fields.StringField(nullable=True), - - 't_minor_lower_group': object_fields.StringField(nullable=True), - 't_minor_upper_group': object_fields.StringField(nullable=True), - 't_major_lower_group': object_fields.StringField(nullable=True), - 't_major_upper_group': object_fields.StringField(nullable=True), - 't_critical_lower_group': object_fields.StringField(nullable=True), - 't_critical_upper_group': object_fields.StringField(nullable=True), - - 'suppress': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - - 'actions_critical_choices': object_fields.StringField(nullable=True), - 'actions_major_choices': object_fields.StringField(nullable=True), - 'actions_minor_choices': object_fields.StringField(nullable=True) - } - - _foreign_fields = { - 'host_uuid': 'host:uuid' - } - - _optional_fields = [ - 'unit_base_group', - 'unit_modifier_group', - 'unit_rate_group', - - 't_minor_lower_group', - 't_minor_upper_group', - 't_major_lower_group', - 't_major_upper_group', - 't_critical_lower_group', - 't_critical_upper_group', - ] - - @object_base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.isensorgroup_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.isensorgroup_update(self.uuid, updates) diff --git a/inventory/inventory/inventory/objects/sensorgroup_analog.py b/inventory/inventory/inventory/objects/sensorgroup_analog.py deleted file mode 100644 index 1de49aaf..00000000 --- a/inventory/inventory/inventory/objects/sensorgroup_analog.py +++ /dev/null @@ -1,68 +0,0 @@ -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from oslo_versionedobjects import base as object_base - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields - - -@base.InventoryObjectRegistry.register -class SensorGroupAnalog(base.InventoryObject, - object_base.VersionedObjectDictCompat): - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - - 'sensorgroupname': object_fields.StringField(nullable=True), - 'path': object_fields.StringField(nullable=True), - - 'sensortype': object_fields.StringField(nullable=True), - 'datatype': object_fields.StringField(nullable=True), - 'description': object_fields.StringField(nullable=True), - - 'state': object_fields.StringField(nullable=True), - 'possible_states': object_fields.StringField(nullable=True), - 'audit_interval_group': object_fields.IntegerField(nullable=True), - 'record_ttl': object_fields.StringField(nullable=True), - - 'algorithm': object_fields.StringField(nullable=True), - 'actions_critical_choices': object_fields.StringField(nullable=True), - 'actions_major_choices': object_fields.StringField(nullable=True), - 'actions_minor_choices': object_fields.StringField(nullable=True), - 'actions_minor_group': object_fields.StringField(nullable=True), - 'actions_major_group': object_fields.StringField(nullable=True), - 'actions_critical_group': object_fields.StringField(nullable=True), - - 'unit_base_group': object_fields.StringField(nullable=True), - 'unit_modifier_group': object_fields.StringField(nullable=True), - 'unit_rate_group': object_fields.StringField(nullable=True), - - 't_minor_lower_group': object_fields.StringField(nullable=True), - 't_minor_upper_group': object_fields.StringField(nullable=True), - 't_major_lower_group': object_fields.StringField(nullable=True), - 't_major_upper_group': object_fields.StringField(nullable=True), - 't_critical_lower_group': object_fields.StringField(nullable=True), - 't_critical_upper_group': object_fields.StringField(nullable=True), - - 'suppress': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True) - } - - @object_base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.isensorgroup_analog_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.isensorgroup_analog_update(self.uuid, updates) diff --git a/inventory/inventory/inventory/objects/sensorgroup_discrete.py b/inventory/inventory/inventory/objects/sensorgroup_discrete.py deleted file mode 100644 index 53df768e..00000000 --- a/inventory/inventory/inventory/objects/sensorgroup_discrete.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from oslo_versionedobjects import base as object_base - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields - - -@base.InventoryObjectRegistry.register -class SensorGroupDiscrete(base.InventoryObject, - object_base.VersionedObjectDictCompat): - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'host_id': object_fields.IntegerField(nullable=True), - - 'sensorgroupname': object_fields.StringField(nullable=True), - 'path': object_fields.StringField(nullable=True), - - 'datatype': object_fields.StringField(nullable=True), - 'sensortype': object_fields.StringField(nullable=True), - 'description': object_fields.StringField(nullable=True), - - 'state': object_fields.StringField(nullable=True), - 'possible_states': object_fields.StringField(nullable=True), - 'audit_interval_group': object_fields.IntegerField(nullable=True), - 'record_ttl': object_fields.StringField(nullable=True), - - 'algorithm': object_fields.StringField(nullable=True), - 'actions_critical_choices': object_fields.StringField(nullable=True), - 'actions_major_choices': object_fields.StringField(nullable=True), - 'actions_minor_choices': object_fields.StringField(nullable=True), - 'actions_minor_group': object_fields.StringField(nullable=True), - 'actions_major_group': object_fields.StringField(nullable=True), - 'actions_critical_group': object_fields.StringField(nullable=True), - - 'suppress': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True) - - } - - @object_base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.isensorgroup_discrete_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.isensorgroup_discrete_update(self.uuid, updates) diff --git a/inventory/inventory/inventory/objects/system.py b/inventory/inventory/inventory/objects/system.py deleted file mode 100644 index dea0e8d6..00000000 --- a/inventory/inventory/inventory/objects/system.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventory.db import api as db_api -from inventory.objects import base -from inventory.objects import fields as object_fields -from oslo_versionedobjects import base as object_base - - -@base.InventoryObjectRegistry.register -class System(base.InventoryObject, object_base.VersionedObjectDictCompat): - - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': object_fields.IntegerField(nullable=True), - 'uuid': object_fields.UUIDField(nullable=True), - 'name': object_fields.StringField(nullable=True), - 'system_type': object_fields.StringField(nullable=True), - 'system_mode': object_fields.StringField(nullable=True), - 'description': object_fields.StringField(nullable=True), - 'capabilities': object_fields.FlexibleDictField(nullable=True), - 'contact': object_fields.StringField(nullable=True), - 'location': object_fields.StringField(nullable=True), - 'services': object_fields.IntegerField(nullable=True), - 'software_version': object_fields.StringField(nullable=True), - 'timezone': object_fields.StringField(nullable=True), - 'security_profile': object_fields.StringField(nullable=True), - 'region_name': object_fields.StringField(nullable=True), - 'service_project_name': object_fields.StringField(nullable=True), - 'distributed_cloud_role': object_fields.StringField(nullable=True), - 'security_feature': object_fields.StringField(nullable=True), - } - - @classmethod - def get_by_uuid(cls, context, uuid): - db_system = cls.dbapi.system_get(uuid) - return cls._from_db_object(context, cls(), db_system) - - @classmethod - def get_one(cls, context): - db_system = cls.dbapi.system_get_one() - system = cls._from_db_object(context, cls(), db_system) - return system - - @classmethod - def list(cls, context, - limit=None, marker=None, sort_key=None, sort_dir=None): - """Return a list of System objects. - - :param cls: the :class:`System` - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`System` object. - - """ - db_systems = cls.dbapi.system_get_list( - limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) - return cls._from_db_object_list(context, db_systems) - - def save_changes(self, context, updates): - self.dbapi.system_update(self.uuid, updates) - - def create(self, context=None): - """Create a System record in the DB. - - Column-wise updates will be made based on the result of - self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: System(context) - """ - values = self.do_version_changes_for_db() - db_system = self.dbapi.system_create(values) - return self._from_db_object(self._context, self, db_system) diff --git a/inventory/inventory/inventory/systemconfig/__init__.py b/inventory/inventory/inventory/systemconfig/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/systemconfig/config.py b/inventory/inventory/inventory/systemconfig/config.py deleted file mode 100644 index a3108c1a..00000000 --- a/inventory/inventory/inventory/systemconfig/config.py +++ /dev/null @@ -1,19 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from oslo_config import cfg -from oslo_utils._i18n import _ - -INVENTORY_CONFIG_OPTS = [ - cfg.ListOpt('drivers', - default=['systemconfig'], - help=_("SystemConfig driver " - "entrypoints to be loaded from the " - "inventory.systemconfig namespace.")), -] - -cfg.CONF.register_opts(INVENTORY_CONFIG_OPTS, group="configuration") diff --git a/inventory/inventory/inventory/systemconfig/drivers/__init__.py b/inventory/inventory/inventory/systemconfig/drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/systemconfig/drivers/base.py b/inventory/inventory/inventory/systemconfig/drivers/base.py deleted file mode 100644 index 5d1dabef..00000000 --- a/inventory/inventory/inventory/systemconfig/drivers/base.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class SystemConfigDriverBase(object): - """SystemConfig Driver Base Class.""" - - @abc.abstractmethod - def system_get_one(self): - pass - - @abc.abstractmethod - def network_get_by_type(self, network_type): - pass - - @abc.abstractmethod - def address_get_by_name(self, name): - pass - - @abc.abstractmethod - def host_configure_check(self, host_uuid): - pass - - @abc.abstractmethod - def host_configure(self, host_uuid, do_compute_apply=False): - pass - - @abc.abstractmethod - def host_unconfigure(self, host_uuid): - pass diff --git a/inventory/inventory/inventory/systemconfig/drivers/sysinv/__init__.py b/inventory/inventory/inventory/systemconfig/drivers/sysinv/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/systemconfig/drivers/sysinv/driver.py b/inventory/inventory/inventory/systemconfig/drivers/sysinv/driver.py deleted file mode 100644 index 47a9f0f0..00000000 --- a/inventory/inventory/inventory/systemconfig/drivers/sysinv/driver.py +++ /dev/null @@ -1,235 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventory.systemconfig.drivers import base -from inventory.systemconfig import plugin - -import cgtsclient as sysinv_client -from keystoneauth1.access import service_catalog as k_service_catalog -from keystoneclient.auth.identity import v3 -from keystoneclient import session -from oslo_config import cfg -from oslo_log import log - -CONF = cfg.CONF - -LOG = log.getLogger(__name__) - -sysinv_group = cfg.OptGroup( - 'sysinv', - title='SysInv Options', - help="Configuration options for the sysinv service") - -sysinv_opts = [ - cfg.StrOpt('catalog_info', - default='platform:sysinv:internalURL', - help="Service catalog lookup info."), - cfg.StrOpt('os_region_name', - default='RegionOne', - help="Region name of this node. It is used for catalog lookup.") -] - -CONF.register_group(sysinv_group) -CONF.register_opts(sysinv_opts, group=sysinv_group) - - -def _get_keystone_session(auth_url): - auth = v3.Password(auth_url=auth_url, - username=cfg.CONF.KEYSTONE_AUTHTOKEN.username, - password=cfg.CONF.KEYSTONE_AUTHTOKEN.password, - user_domain_name=cfg.CONF.KEYSTONE_AUTHTOKEN. - user_domain_name, - project_name=cfg.CONF.KEYSTONE_AUTHTOKEN. - project_name, - project_domain_name=cfg.CONF.KEYSTONE_AUTHTOKEN. - project_domain_name) - keystone_session = session.Session(auth=auth) - return keystone_session - - -def sysinvclient(context, version=1, endpoint=None): - """Constructs a sysinv client object for making API requests. - - :param context: The request context for auth. - :param version: API endpoint version. - :param endpoint: Optional. If the endpoint is not available, - it will be retrieved from the context. - """ - - region_name = CONF.sysinv.os_region_name - if not context.service_catalog: - # Obtain client via keystone session - auth_url = CONF.KEYSTONE_AUTHTOKEN.auth_url + "/v3" - session = _get_keystone_session(auth_url) - LOG.debug("sysinvclient auth_url=%s region_name=%s session=%s" % - (auth_url, region_name, session)) - - return sysinv_client.Client( - session=session, - version=version, - auth_url=auth_url, - endpoint_type='internalURL', - region_name=region_name) - - auth_token = context.auth_token - if endpoint is None: - sc = k_service_catalog.ServiceCatalogV2(context.service_catalog) - service_type, service_name, interface = \ - CONF.sysinv.catalog_info.split(':') - - service_parameters = {'service_type': service_type, - 'service_name': service_name, - 'interface': interface, - 'region_name': region_name} - endpoint = sc.url_for(**service_parameters) - - return sysinv_client.Client(version=version, - endpoint=endpoint, - auth_token=auth_token) - - -class SysinvSystemConfigDriver(base.SystemConfigDriverBase): - """Class to encapsulate SystemConfig driver operations""" - def __init__(self, **kwargs): - self.context = kwargs.get('context') - self.neighbours = [] - self.neighbour_audit_count = 0 - self._client = sysinvclient(self.context) - LOG.info("SysinvSystemConfigDriver kwargs=%s self.context=%s" % - (kwargs, self.context)) - - def initialize(self): - self.__init__() - - def system_get(self): - systems = self._client.isystem.list() - if not systems: - return None - return [plugin.System(n) for n in systems] - - def system_get_one(self): - systems = self._client.isystem.list() - if not systems: - return None - return [plugin.System(n) for n in systems][0] - - def host_interface_list(self, host_id): - interfaces = self._client.iinterface.list(host_id) - return [plugin.Interface(n) for n in interfaces] - - def host_interface_get(self,
interface_id): - interface = self._client.iinterface.get(interface_id) - if not interface: - raise ValueError( - 'No match found for interface_id "%s".' % interface_id) - return plugin.Interface(interface) - - def host_configure_check(self, host_uuid): - LOG.info("host_configure_check %s" % host_uuid) - # host = self._client.ihost.get(host_uuid) - capabilities = [] - host = self._client.ihost.configure_check(host_uuid, capabilities) - LOG.info("host_configure_check host=%s" % host) - if host: - return True - else: - return False - - def host_configure(self, host_uuid, do_compute_apply=False): - LOG.info("simulate host_configure") - # host = self._client.ihost.get(host_uuid) - # TODO(sc) for host configuration - host = self._client.ihost.configure(host_uuid, do_compute_apply) - if host: - return plugin.Host(host) - else: - return None - - def host_unconfigure(self, host_uuid): - LOG.info("simulate host_unconfigure") - # TODO(sc) invoke unconfigure when supported: - # host = self._client.ihost.unconfigure(host_uuid) - host = self._client.ihost.get(host_uuid) - if host: - return plugin.Host(host) - else: - return None - - def network_list(self): - networks = self._client.network.list() - return [plugin.Network(n) for n in networks] - - def network_get_by_type(self, network_type): - networks = self._client.network.list() - if networks: - return [plugin.Network(n) for n in networks - if n.type == network_type][0] - return [] - - def network_get(self, network_uuid): - network = self._client.network.get(network_uuid) - if not network: - raise ValueError( - 'No match found for network_uuid "%s".' % network_uuid) - return plugin.Network(network) - - def address_list_by_interface(self, interface_id): - addresses = self._client.address.list_by_interface(interface_id) - return [plugin.Address(n) for n in addresses] - - def address_list_by_field_value(self, field, value): - q = [{'field': field, - 'type': '', - 'value': value, - 'op': 'eq'}] - addresses = self._client.address.list(q) - return [plugin.Address(n) for n in addresses] - - def address_get(self, address_uuid): - address = self._client.address.get(address_uuid) - if not address: - raise ValueError( - 'No match found for address uuid "%s".' % address_uuid) - return plugin.Address(address) - - def address_pool_list(self): - pools = self._client.address_pool.list() - return [plugin.AddressPool(p) for p in pools] - - def address_pool_get(self, address_pool_uuid): - pool = self._client.address_pool.get(address_pool_uuid) - if not pool: - raise ValueError( - 'No match found for address pool uuid "%s".' % - address_pool_uuid) - return plugin.AddressPool(pool) - - def route_list_by_interface(self, interface_id): - routes = self._client.route.list_by_interface(interface_id) - return [plugin.Route(n) for n in routes] - - def route_get(self, route_uuid): - route = self._client.route.get(route_uuid) - if not route: - raise ValueError( - 'No match found for route uuid "%s".'
% route_uuid) - return plugin.Route(route) - - def address_get_by_name(self, name): - field = 'name' - value = name - addresses = self.address_list_by_field_value(field, value) - if len(addresses) == 1: - address = addresses[0] - LOG.info("address_get_by_name via systemconfig " - "name=%s address=%s" % - (address.name, address.address)) - return address - else: - LOG.error("Unexpected address_get_by_name %s %s" % - (name, addresses)) - return None diff --git a/inventory/inventory/inventory/systemconfig/manager.py b/inventory/inventory/inventory/systemconfig/manager.py deleted file mode 100644 index 6e7b3a3e..00000000 --- a/inventory/inventory/inventory/systemconfig/manager.py +++ /dev/null @@ -1,179 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -from inventory.common import exception -from oslo_config import cfg -from oslo_log import log -from stevedore.named import NamedExtensionManager - -LOG = log.getLogger(__name__) -cfg.CONF.import_opt('drivers', - 'inventory.systemconfig.config', - group='configuration') - - -class SystemConfigDriverManager(NamedExtensionManager):
 - """Implementation of Sysinv SystemConfig drivers.""" - - def __init__(self, invoke_kwds={}, - namespace='inventory.systemconfig.drivers'): - - # Registered configuration drivers, keyed by name. - self.drivers = {} - - # Ordered list of inventory configuration drivers, defining - # the order in which the drivers are called. - self.ordered_drivers = [] - - names = cfg.CONF.configuration.drivers - LOG.info("Configured inventory configuration drivers: args %s " - "names=%s" % - (invoke_kwds, names)) - - super(SystemConfigDriverManager, self).__init__( - namespace, - names, - invoke_kwds=invoke_kwds, - invoke_on_load=True, - name_order=True) - - LOG.info("Loaded systemconfig drivers: %s" % self.names()) - self._register_drivers() - - def _register_drivers(self): - """Register all configuration drivers. - - This method should only be called once in the - SystemConfigDriverManager constructor. - """ - for ext in self: - self.drivers[ext.name] = ext - self.ordered_drivers.append(ext) - LOG.info("Registered systemconfig drivers: %s", - [driver.name for driver in self.ordered_drivers]) - - def _call_drivers_and_return_array(self, method_name, attr=None, - raise_orig_exc=False): - """Helper method for calling a method across all drivers. - - :param method_name: name of the method to call - :param attr: an optional attribute to provide to the drivers - :param raise_orig_exc: whether or not to raise the original - driver exception, or use a general one - """ - ret = [] - for driver in self.ordered_drivers: - try: - method = getattr(driver.obj, method_name) - if attr: - ret = ret + method(attr) - else: - ret = ret + method() - except Exception as e: - LOG.exception(e) - LOG.error( - "Inventory SystemConfig driver '%(name)s' " - "failed in %(method)s", - {'name': driver.name, 'method': method_name} - ) - if raise_orig_exc: - raise - else: - raise exception.SystemConfigDriverError( - method=method_name - ) - return list(set(ret)) - - def _call_drivers(self, method_name, - raise_orig_exc=False, - return_first=True, - **kwargs): - """Helper method for calling a method across all drivers.
- - :param method_name: name of the method to call - :param raise_orig_exc: whether or not to raise the original - driver exception, or use a general one - :param return_first: whether to return the result from the - first driver that handles the call - :param kwargs: keyword arguments passed through to the - driver method - """ - for driver in self.ordered_drivers: - try: - method = getattr(driver.obj, method_name) - LOG.info("_call_drivers method_name=%s kwargs=%s" - % (method_name, kwargs)) - - ret = method(**kwargs) - if return_first: - return ret - - except Exception as e: - LOG.exception(e) - LOG.error( - "Inventory SystemConfig driver '%(name)s' " - "failed in %(method)s", - {'name': driver.name, 'method': method_name} - ) - if raise_orig_exc: - raise - else: - raise exception.SystemConfigDriverError( - method=method_name - ) - - def system_get_one(self): - try: - return self._call_drivers( - "system_get_one", - raise_orig_exc=True) - except Exception as e: - LOG.exception(e) - - def network_get_by_type(self, network_type): - try: - return self._call_drivers( - "network_get_by_type", - raise_orig_exc=True, - network_type=network_type) - except Exception as e: - LOG.exception(e) - - def address_get_by_name(self, name): - try: - return self._call_drivers( - "address_get_by_name", - raise_orig_exc=True, - name=name) - except Exception as e: - LOG.exception(e) - - def host_configure_check(self, host_uuid): - try: - return self._call_drivers("host_configure_check", - raise_orig_exc=True, - host_uuid=host_uuid) - except Exception as e: - LOG.exception(e) - - def host_configure(self, host_uuid, do_compute_apply=False): - try: - return self._call_drivers("host_configure", - raise_orig_exc=True, - host_uuid=host_uuid, - do_compute_apply=do_compute_apply) - except Exception as e: - LOG.exception(e) - - def host_unconfigure(self, host_uuid): - try: - return self._call_drivers("host_unconfigure", - raise_orig_exc=True, - host_uuid=host_uuid) - except Exception as e: - LOG.exception(e) diff --git a/inventory/inventory/inventory/systemconfig/plugin.py b/inventory/inventory/inventory/systemconfig/plugin.py deleted file mode 100644 index f9ed912c..00000000 --- a/inventory/inventory/inventory/systemconfig/plugin.py +++ /dev/null @@ -1,176 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc.
-# -# SPDX-License-Identifier: Apache-2.0 -# -from inventory.common import base -from inventory.common import exception -from inventory.systemconfig import manager -from oslo_log import log -from oslo_utils import excutils - -LOG = log.getLogger(__name__) - - -class SystemConfigPlugin(object): - """Implementation of the Plugin.""" - - def __init__(self, invoke_kwds): - self.manager = manager.SystemConfigDriverManager( - invoke_kwds=invoke_kwds) - - def system_get_one(self): - try: - system = self.manager.system_get_one() - except exception.SystemConfigDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("system_get failed") - - return system - - def network_get_by_type(self, network_type): - try: - network = self.manager.network_get_by_type( - network_type=network_type) - except exception.SystemConfigDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("network_get_by_type failed") - - return network - - def address_get_by_name(self, name): - try: - address = self.manager.address_get_by_name( - name=name) - except exception.SystemConfigDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("address_get_by_name failed") - - return address - - def host_configure_check(self, host_uuid): - try: - return self.manager.host_configure_check( - host_uuid=host_uuid) - except exception.SystemConfigDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("host_configure_check failed") - - def host_configure(self, host_uuid, do_compute_apply=False): - try: - host = self.manager.host_configure( - host_uuid=host_uuid, - do_compute_apply=do_compute_apply) - except exception.SystemConfigDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("host_configure failed") - - return host - - def host_unconfigure(self, host_uuid): - try: - host = self.manager.host_unconfigure( - host_uuid=host_uuid) - except exception.SystemConfigDriverError as e: - LOG.exception(e) - with excutils.save_and_reraise_exception(): - LOG.error("host_unconfigure failed") - - return host - - -class System(base.APIResourceWrapper): - """Wrapper for SystemConfig System""" - - _attrs = ['uuid', 'name', 'system_type', 'system_mode', 'description', - 'software_version', 'capabilities', 'region_name', - 'updated_at', 'created_at'] - - def __init__(self, apiresource): - super(System, self).__init__(apiresource) - - def get_short_software_version(self): - if self.software_version: - return self.software_version.split(" ")[0] - return None - - -class Host(base.APIResourceWrapper): - """Wrapper for Inventory Hosts""" - - _attrs = ['uuid', 'hostname', 'personality', - 'mgmt_mac', 'mgmt_ip', 'bm_ip', - 'subfunctions', - 'capabilities', - 'created_at', 'updated_at', - ] - - # Removed 'id', 'requires_reboot' - # Add this back to models, migrate_repo: peers - - def __init__(self, apiresource): - super(Host, self).__init__(apiresource) - self._personality = self.personality - self._capabilities = self.capabilities - - -class Interface(base.APIResourceWrapper): - """Wrapper for SystemConfig Interfaces""" - - _attrs = ['id', 'uuid', 'ifname', 'ifclass', 'iftype', - 'networktype', 'networks', 'vlan_id', - 'uses', 'used_by', 'ihost_uuid', - 'ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool', - 'sriov_numvfs', - # VLAN and virtual interfaces - 'imac', 'imtu', 'providernetworks', 'providernetworksdict', - # AE-only - 'aemode', 'txhashpolicy', 'schedpolicy', - 
] - - def __init__(self, apiresource): - super(Interface, self).__init__(apiresource) - if not self.ifname: - self.ifname = '(' + str(self.uuid)[-8:] + ')' - - -class Network(base.APIResourceWrapper): - """Wrapper for SystemConfig Networks""" - _attrs = ['id', 'uuid', 'type', 'name', 'dynamic', 'pool_uuid'] - - def __init__(self, apiresource): - super(Network, self).__init__(apiresource) - - -class Address(base.APIResourceWrapper): - """Wrapper for SystemConfig Addresses""" - - _attrs = ['uuid', 'name', 'interface_uuid', - 'address', 'prefix', 'enable_dad'] - - def __init__(self, apiresource): - super(Address, self).__init__(apiresource) - - -class AddressPool(base.APIResourceWrapper): - """Wrapper for SystemConfig Address Pools""" - - _attrs = ['uuid', 'name', 'network', 'family', 'prefix', 'order', 'ranges'] - - def __init__(self, apiresource): - super(AddressPool, self).__init__(apiresource) - - -class Route(base.APIResourceWrapper): - """Wrapper for SystemConfig Routes""" - - _attrs = ['uuid', 'interface_uuid', 'network', - 'prefix', 'gateway', 'metric'] - - def __init__(self, apiresource): - super(Route, self).__init__(apiresource) diff --git a/inventory/inventory/inventory/tests/__init__.py b/inventory/inventory/inventory/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/inventory/tests/base.py b/inventory/inventory/inventory/tests/base.py deleted file mode 100644 index 1c30cdb5..00000000 --- a/inventory/inventory/inventory/tests/base.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslotest import base - - -class TestCase(base.BaseTestCase): - - """Test case base class for all unit tests.""" diff --git a/inventory/inventory/inventory/tests/test_inventory.py b/inventory/inventory/inventory/tests/test_inventory.py deleted file mode 100644 index 8e77fbf1..00000000 --- a/inventory/inventory/inventory/tests/test_inventory.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -test_inventory --------------- - -Tests for `inventory` module.
-""" - -from inventory.tests import base - - -class TestInventory(base.TestCase): - - def test_something(self): - pass diff --git a/inventory/inventory/inventory/version.py b/inventory/inventory/inventory/version.py deleted file mode 100644 index cade1b2b..00000000 --- a/inventory/inventory/inventory/version.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('inventory') diff --git a/inventory/inventory/releasenotes/notes/.placeholder b/inventory/inventory/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/releasenotes/source/_static/.placeholder b/inventory/inventory/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/releasenotes/source/_templates/.placeholder b/inventory/inventory/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/inventory/inventory/releasenotes/source/conf.py b/inventory/inventory/releasenotes/source/conf.py deleted file mode 100644 index 7fe27dad..00000000 --- a/inventory/inventory/releasenotes/source/conf.py +++ /dev/null @@ -1,281 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. 
-source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'inventory Release Notes' -copyright = u'2018, StarlingX' - -# openstackdocstheme options -repository_name = 'openstack/inventory' -bug_project = '22952' -bug_tag = '' -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. 
-# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'inventoryReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'inventoryReleaseNotes.tex', - u'inventory Release Notes Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'inventoryrereleasenotes', - u'inventory Release Notes Documentation', - [u'OpenStack Foundation'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'inventory ReleaseNotes', - u'inventory Release Notes Documentation', - u'OpenStack Foundation', 'inventoryReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/inventory/inventory/releasenotes/source/index.rst b/inventory/inventory/releasenotes/source/index.rst deleted file mode 100644 index 5f58e0f6..00000000 --- a/inventory/inventory/releasenotes/source/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -======================= -inventory Release Notes -======================= - -.. toctree:: - :maxdepth: 1 - - unreleased diff --git a/inventory/inventory/releasenotes/source/unreleased.rst b/inventory/inventory/releasenotes/source/unreleased.rst deleted file mode 100644 index 875030f9..00000000 --- a/inventory/inventory/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. release-notes:: diff --git a/inventory/inventory/requirements.txt b/inventory/inventory/requirements.txt deleted file mode 100644 index 993f2242..00000000 --- a/inventory/inventory/requirements.txt +++ /dev/null @@ -1,46 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -pbr>=2.0 # Apache-2.0 -SQLAlchemy -amqplib>=0.6.1 -anyjson>=0.3.3 -argparse -eventlet==0.20.0 -futurist>=1.2.0 # Apache-2.0 -greenlet>=0.3.2 # MIT -kombu>=2.4.8 -lxml>=2.3 -WebOb>=1.7.1 # MIT -sqlalchemy-migrate>=0.7 -netaddr -iso8601>=0.1.4 -oslo.concurrency>=3.7.1 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.rootwrap>=5.0.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -oslo.db>=4.1.0 # Apache-2.0 -oslo.serialization>=1.10.0,!=2.19.1 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.5.0 # Apache-2.0 -osprofiler>=1.4.0 # Apache-2.0 -python-cinderclient>=3.1.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -retrying!=1.3.0,>=1.2.3 # Apache-2.0 -oslo.versionedobjects>=1.17.0 # Apache-2.0 -stevedore>=0.10 -pecan>=1.0.0 -six>=1.9.0 # MIT -jsonpatch>=1.1 # BSD -WSME>=0.8 # MIT -PyYAML>=3.10 -python-magnumclient>=2.0.0 # Apache-2.0 -psutil -simplejson>=2.2.0 # MIT diff --git a/inventory/inventory/scripts/inventory-agent-initd b/inventory/inventory/scripts/inventory-agent-initd deleted file mode 100755 index 6d7fe73a..00000000 --- a/inventory/inventory/scripts/inventory-agent-initd +++ /dev/null @@ -1,204 +0,0 @@ -#! /bin/sh -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# chkconfig: 2345 75 25 -# -### BEGIN INIT INFO -# Provides: inventory-agent -# Default-Start: 3 5 -# Default-Stop: 0 1 2 6 -# Short-Description: inventory-agent daemon -### END INIT INFO - -. /etc/init.d/functions -. /etc/build.info - - -PLATFORM_CONF="/etc/platform/platform.conf" -NODETYPE="" -DAEMON_NAME="inventory-agent" -INVENTORYAGENT="/usr/bin/${DAEMON_NAME}" -INVENTORY_CONF_DIR="/etc/inventory" -INVENTORY_CONF_FILE="${INVENTORY_CONF_DIR}/inventory.conf" -INVENTORY_CONF_DEFAULT_FILE="/opt/platform/inventory/${SW_VERSION}/inventory.conf.default" -INVENTORY_READY_FLAG=/var/run/.inventory_ready - -DELAY_SEC=20 - -daemon_pidfile="/var/run/${DAEMON_NAME}.pid" - -if [ -f ${PLATFORM_CONF} ] ; then - NODETYPE=`cat ${PLATFORM_CONF} | grep nodetype | cut -f2 -d'='` -else - logger "$0: ${PLATFORM_CONF} is missing" - exit 1 -fi - -if [ ! -e "${INVENTORYAGENT}" ] ; then - logger "$0: ${INVENTORYAGENT} is missing" - exit 1 -fi - -RETVAL=0 - -PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin -export PATH - -mount_and_copy_config_file() -{ - echo "Mount /opt/platform" - logger "$0: Info: nfs-mount controller:/opt/platform/inventory/${SW_VERSION} /mnt/inventory" - mkdir /mnt/inventory - timeout 10s nfs-mount controller:/opt/platform/inventory/${SW_VERSION} /mnt/inventory &> /dev/null - RETVAL=$? - # 0 = true - if [ ${RETVAL} -ne 0 ] ; then - logger "$0: Warn: nfs-mount controller:/opt/platform/inventory/${SW_VERSION} /mnt/inventory" - else - mkdir -p $INVENTORY_CONF_DIR - cp /mnt/inventory/inventory.conf.default ${INVENTORY_CONF_FILE} - RETVAL=$? - if [ $? -ne 0 ] ; then - logger "$0: Warn: cp /mnt/inventory/inventory.conf.default ${INVENTORY_CONF_FILE}" - fi - timeout 5s umount /mnt/inventory - rmdir /mnt/inventory - fi - - return ${RETVAL} -} - - -case "$1" in - start) - # Check for installation failure - if [ -f /etc/platform/installation_failed ] ; then - logger "$0: /etc/platform/installation_failed flag is set. Aborting." - exit 1 - fi - - echo -n "Setting up config for inventory-agent: " - if [ -e ${INVENTORY_READY_FLAG} ] ; then - # clear it on every restart, so agent can update it - rm -f ${INVENTORY_READY_FLAG} - fi - - if [ -f ${INVENTORY_CONF_FILE} ] ; then - logger "$0: ${INVENTORY_CONF_FILE} already exists" - RETVAL=0 - else - # Avoid self-mount due to potential nfs issues - echo "Checking for controller-platform-nfs " - - # try for DELAY_SEC seconds to reach controller-platform-nfs - START=`date +%s` - FOUND=0 - while [ $(date +%s) -lt $(( ${START} + ${DELAY_SEC} )) ] ; do - ping -c 1 controller-platform-nfs > /dev/null 2>&1 || ping6 -c 1 controller-platform-nfs > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - FOUND=1 - break - fi - sleep 1 - done - - CONF_COPIED=0 - if [ ${FOUND} -eq 0 ] ; then - # 'controller-platform-nfs' is not available; continue other setup - echo "controller-platform-nfs is not available" - else - # Only required if conf file does not already exist - if [ -f ${INVENTORY_CONF_DEFAULT_FILE} ] ; then - echo "Copying self inventory.conf without mount" - mkdir -p $INVENTORY_CONF_DIR - cp ${INVENTORY_CONF_DEFAULT_FILE} ${INVENTORY_CONF_FILE} - RETVAL=$? - if [ $? -ne 0 ] ; then - logger "$0: Warn: cp /mnt/inventory/inventory.conf.default ${INVENTORY_CONF_FILE} failed. Try mount." - else - CONF_COPIED=1 - fi - fi - if [ ${CONF_COPIED} -eq 0 ] ; then - CONF_COPY_COUNT=0 - while [ $CONF_COPY_COUNT -lt 3 ]; do - if mount_and_copy_config_file ; then - logger "$0: Info: Mount and copy config file PASSED. 
Attempt: ${CONF_COPY_COUNT}" - break - fi - let CONF_COPY_COUNT=CONF_COPY_COUNT+1 - logger "$0: Warn: Mount and copy config file failed. Attempt: ${CONF_COPY_COUNT}" - done - fi - fi - fi - - if [ -e ${daemon_pidfile} ] ; then - echo "Killing existing process before starting new" - pid=`cat ${daemon_pidfile}` - kill -TERM $pid - rm -f ${daemon_pidfile} - fi - - echo -n "Starting inventory-agent: " - /bin/sh -c "${INVENTORYAGENT}"' >> /dev/null 2>&1 & echo $!' > ${daemon_pidfile} - RETVAL=$? - if [ $RETVAL -eq 0 ] ; then - echo "OK" - touch /var/lock/subsys/${DAEMON_NAME} - else - echo "FAIL" - fi - ;; - - stop) - echo -n "Stopping inventory-agent: " - if [ -e ${daemon_pidfile} ] ; then - pid=`cat ${daemon_pidfile}` - kill -TERM $pid - rm -f ${daemon_pidfile} - rm -f /var/lock/subsys/${DAEMON_NAME} - echo "OK" - else - echo "FAIL" - fi - ;; - - restart) - $0 stop - sleep 1 - $0 start - ;; - - status) - if [ -e ${daemon_pidfile} ] ; then - pid=`cat ${daemon_pidfile}` - ps -p $pid | grep -v "PID TTY" >> /dev/null 2>&1 - if [ $? -eq 0 ] ; then - echo "inventory-agent is running" - RETVAL=0 - else - echo "inventory-agent is not running" - RETVAL=1 - fi - else - echo "inventory-agent is not running ; no pidfile" - RETVAL=1 - fi - ;; - - condrestart) - [ -f /var/lock/subsys/$DAEMON_NAME ] && $0 restart - ;; - - *) - echo "usage: $0 { start | stop | status | restart | condrestart | status }" - ;; -esac - -exit $RETVAL diff --git a/inventory/inventory/scripts/inventory-agent.service b/inventory/inventory/scripts/inventory-agent.service deleted file mode 100644 index c24285fc..00000000 --- a/inventory/inventory/scripts/inventory-agent.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Inventory Agent -After=nfscommon.service sw-patch.service -After=network-online.target systemd-udev-settle.service -Before=pmon.service - -[Service] -Type=forking -RemainAfterExit=yes -ExecStart=/etc/init.d/inventory-agent start -ExecStop=/etc/init.d/inventory-agent stop -PIDFile=/var/run/inventory-agent.pid - -[Install] -WantedBy=multi-user.target diff --git a/inventory/inventory/scripts/inventory-api b/inventory/inventory/scripts/inventory-api deleted file mode 100755 index d1bde8eb..00000000 --- a/inventory/inventory/scripts/inventory-api +++ /dev/null @@ -1,409 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# Purpose: This resource agent manages -# -# .... the STX Inventory REST API Service -# -# -# OCF instance parameters: -# OCF_RESKEY_binary -# OCF_RESKEY_client_binary -# OCF_RESKEY_config -# OCF_RESKEY_os_username -# OCF_RESKEY_os_tenant_name -# OCF_RESKEY_os_auth_url -# OCF_RESKEY_os_password -# OCF_RESKEY_user -# OCF_RESKEY_pid -# OCF_RESKEY_additional_parameters -# -# RA Spec: -# -# http://www.opencf.org/cgi-bin/viewcvs.cgi/specs/ra/resource-agent-api.txt?rev=HEAD -# -####################################################################### -# Initialization: - -: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} -. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - -process="inventory" -service="-api" -binname="${process}${service}" - -####################################################################### - -# Fill in some defaults if no values are specified -OCF_RESKEY_binary_default=${binname} -OCF_RESKEY_dbg_default="false" -OCF_RESKEY_user_default="inventory" -OCF_RESKEY_pid_default="/var/run/${binname}.pid" -OCF_RESKEY_config_default="/etc/inventory/inventory.conf" -OCF_RESKEY_client_binary_default="inventory" - -: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} -: ${OCF_RESKEY_dbg=${OCF_RESKEY_dbg_default}} -: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}} -: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} -: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} -: ${OCF_RESKEY_client_binary=${OCF_RESKEY_client_binary_default}} - -mydaemon="/usr/bin/${OCF_RESKEY_binary}" - -####################################################################### - -usage() { - cat < - - -1.0 - - -This 'inventory-api' is an OCF Compliant Resource Agent that manages start, stop -and in-service monitoring of the Inventory REST API Process - - - -Manages the Inventory REST API (inventory-api) process in the STX Platform. - - - - - - - -dbg = false ... info, warn and err logs sent to output stream (default) -dbg = true ... Additional debug logs are also sent to the output stream - -Service Debug Control Option - - - - - -User running Inventory API Service (inventory-api) - -Inventory API Service (inventory-api) user - - - - - - - - - - - - - - -END - return ${OCF_SUCCESS} -} - -inventory_api_validate() { - - local rc - - proc="${binname}:validate" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - check_binary ${OCF_RESKEY_binary} - - if [ ! -f ${OCF_RESKEY_config} ] ; then - ocf_log err "${OCF_RESKEY_binary} ini file missing (${OCF_RESKEY_config})" - return ${OCF_ERR_CONFIGURED} - fi - - getent passwd $OCF_RESKEY_user >/dev/null 2>&1 - rc=$? - if [ $rc -ne 0 ]; then - ocf_log err "User $OCF_RESKEY_user doesn't exist" - return ${OCF_ERR_CONFIGURED} - fi - - return ${OCF_SUCCESS} -} - -inventory_api_status() { - local pid - local rc - - proc="${binname}:status" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - if [ ! -f $OCF_RESKEY_pid ]; then - ocf_log info "${binname}:Inventory API (inventory-api) is not running" - return $OCF_NOT_RUNNING - else - pid=`cat $OCF_RESKEY_pid` - fi - - ocf_run -warn kill -s 0 $pid - rc=$? - if [ $rc -eq 0 ]; then - return $OCF_SUCCESS - else - ocf_log info "${binname}:Old PID file found, but Inventory API (inventory-api) is not running" - rm -f $OCF_RESKEY_pid - return $OCF_NOT_RUNNING - fi -} - -inventory_api_monitor () { - local rc - proc="${binname}:monitor" - - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - inventory_api_status - rc=$? - # If status returned anything but success, return that immediately - if [ $rc -ne $OCF_SUCCESS ]; then - return $rc - fi - return $OCF_SUCCESS - - if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then - ocf_run -q $OCF_RESKEY_client_binary \ - --os_username "$OCF_RESKEY_os_username" \ - --os_project_name "$OCF_RESKEY_os_tenant_name" \ - --os_auth_url "$OCF_RESKEY_os_auth_url" \ - --os_region_name "$OCF_RESKEY_os_region_name" \ - --system_url "$OCF_RESKEY_system_url" \ - show > /dev/null 2>&1 - rc=$? 
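# Editorial note on the deleted inventory_api_monitor above: the
# unconditional `return $OCF_SUCCESS` earlier in the function exits before
# this point is ever reached, so the `inventory show` connectivity probe
# above and the rc test below were unreachable dead code in the removed
# source. The code is preserved verbatim here; only this comment is added.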
- if [ $rc -ne 0 ]; then - ocf_log err "Failed to connect to the Inventory Service (inventory-api): $rc" - return $OCF_NOT_RUNNING - fi - fi - - ocf_log debug "Inventory Service (inventory-api) monitor succeeded" - - return $OCF_SUCCESS -} - -inventory_api_start () { - local rc - - proc="${binname}:start" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - # If running then issue a ping test - if [ -f ${OCF_RESKEY_pid} ] ; then - inventory_api_status - rc=$? - if [ $rc -ne ${OCF_SUCCESS} ] ; then - ocf_log err "${proc} ping test failed (rc=${rc})" - inventory_api_stop - else - return ${OCF_SUCCESS} - fi - fi - - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - RUN_OPT_DEBUG="--debug" - else - RUN_OPT_DEBUG="" - fi - - # switch to non-root user before starting service - su ${OCF_RESKEY_user} -g root -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=${OCF_RESKEY_config} ${RUN_OPT_DEBUG}"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid - rc=$? - if [ ${rc} -ne ${OCF_SUCCESS} ] ; then - ocf_log err "${proc} failed ${mydaemon} daemon (rc=$rc)" - return ${OCF_ERR_GENERIC} - else - if [ -f ${OCF_RESKEY_pid} ] ; then - pid=`cat ${OCF_RESKEY_pid}` - ocf_log info "${proc} running with pid ${pid}" - else - ocf_log info "${proc} with no pid file" - fi - fi - - # Record success or failure and return status - if [ ${rc} -eq $OCF_SUCCESS ] ; then - ocf_log info "Inventory Service (${OCF_RESKEY_binary}) started (pid=${pid})" - else - ocf_log err "Inventory Service (${OCF_RESKEY_binary}) failed to start (rc=${rc})" - rc=${OCF_NOT_RUNNING} - fi - - return ${rc} -} - -inventory_api_confirm_stop() { - local my_bin - local my_processes - - my_binary=`which ${OCF_RESKEY_binary}` - my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` - - if [ -n "${my_processes}" ] - then - ocf_log info "About to SIGKILL the following: ${my_processes}" - pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" - fi -} - -inventory_api_stop () { - local rc - local pid - - proc="${binname}:stop" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - inventory_api_status - rc=$? - if [ $rc -eq $OCF_NOT_RUNNING ]; then - ocf_log info "${proc} Inventory API (inventory-api) already stopped" - inventory_api_confirm_stop - return ${OCF_SUCCESS} - fi - - # Try SIGTERM - pid=`cat $OCF_RESKEY_pid` - ocf_run kill -s TERM $pid - rc=$? - if [ $rc -ne 0 ]; then - ocf_log err "${proc} Inventory API (inventory-api) couldn't be stopped" - inventory_api_confirm_stop - exit $OCF_ERR_GENERIC - fi - - # stop waiting - shutdown_timeout=15 - if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then - shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) - fi - count=0 - while [ $count -lt $shutdown_timeout ]; do - inventory_api_status - rc=$? - if [ $rc -eq $OCF_NOT_RUNNING ]; then - break - fi - count=`expr $count + 1` - sleep 1 - ocf_log info "${proc} Inventory API (inventory-api) still hasn't stopped yet. Waiting ..." - done - - inventory_api_status - rc=$? - if [ $rc -ne $OCF_NOT_RUNNING ]; then - # SIGTERM didn't help either, try SIGKILL - ocf_log info "${proc} Inventory API (inventory-api) failed to stop after ${shutdown_timeout}s using SIGTERM. Trying SIGKILL ..." - ocf_run kill -s KILL $pid - fi - inventory_api_confirm_stop - - ocf_log info "${proc} Inventory API (inventory-api) stopped." 
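# Summary of the stop path implemented above (descriptive comment only):
# send SIGTERM to the PID read from the pidfile, poll
# inventory_api_status once per second for up to shutdown_timeout seconds
# (15 by default, or OCF_RESKEY_CRM_meta_timeout converted from
# milliseconds to seconds minus 5 when set), escalate to SIGKILL if the
# daemon is still running, then sweep any stray python workers via
# inventory_api_confirm_stop before the pidfile is removed below.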
- - rm -f $OCF_RESKEY_pid - - return $OCF_SUCCESS - -} - -inventory_api_reload () { - local rc - - proc="${binname}:reload" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - inventory_api_stop - rc=$? - if [ $rc -eq ${OCF_SUCCESS} ] ; then - #sleep 1 - inventory_api_start - rc=$? - if [ $rc -eq ${OCF_SUCCESS} ] ; then - ocf_log info "Inventory (${OCF_RESKEY_binary}) process restarted" - fi - fi - - if [ ${rc} -ne ${OCF_SUCCESS} ] ; then - ocf_log err "Inventory (${OCF_RESKEY_binary}) process failed to restart (rc=${rc})" - fi - - return ${rc} -} - -case ${__OCF_ACTION} in - meta-data) meta_data - exit ${OCF_SUCCESS} - ;; - usage|help) usage - exit ${OCF_SUCCESS} - ;; -esac - -# Anything except meta-data and help must pass validation -inventory_api_validate || exit $? - -if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${binname}:${__OCF_ACTION} action" -fi - -case ${__OCF_ACTION} in - - start) inventory_api_start - ;; - stop) inventory_api_stop - ;; - status) inventory_api_status - ;; - reload) inventory_api_reload - ;; - monitor) inventory_api_monitor - ;; - validate-all) inventory_api_validate - ;; - *) usage - exit ${OCF_ERR_UNIMPLEMENTED} - ;; -esac diff --git a/inventory/inventory/scripts/inventory-api.service b/inventory/inventory/scripts/inventory-api.service deleted file mode 100644 index 7f7db8b9..00000000 --- a/inventory/inventory/scripts/inventory-api.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Inventory API -After=network-online.target syslog-ng.service config.service inventory-conductor.service - -[Service] -Type=simple -RemainAfterExit=yes -User=root -Environment=OCF_ROOT=/usr/lib/ocf -ExecStart=/usr/lib/ocf/resource.d/platform/inventory-api start -ExecStop=/usr/lib/ocf/resource.d/platform/inventory-api stop -PIDFile=/var/run/inventory-api.pid - -[Install] -WantedBy=multi-user.target diff --git a/inventory/inventory/scripts/inventory-conductor b/inventory/inventory/scripts/inventory-conductor deleted file mode 100755 index 61c880fd..00000000 --- a/inventory/inventory/scripts/inventory-conductor +++ /dev/null @@ -1,357 +0,0 @@ -#!/bin/sh -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# -# -# Support: www.windriver.com -# -# Purpose: This resource agent manages -# -# .... the Inventory Conductor Service -# -# RA Spec: -# -# http://www.opencf.org/cgi-bin/viewcvs.cgi/specs/ra/resource-agent-api.txt?rev=HEAD -# -####################################################################### -# Initialization: - -: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} -. 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs - -process="inventory" -service="-conductor" -binname="${process}${service}" - -####################################################################### - -# Fill in some defaults if no values are specified -OCF_RESKEY_binary_default=${binname} -OCF_RESKEY_dbg_default="false" -OCF_RESKEY_pid_default="/var/run/${binname}.pid" -OCF_RESKEY_config_default="/etc/inventory/inventory.conf" - - -: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} -: ${OCF_RESKEY_dbg=${OCF_RESKEY_dbg_default}} -: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}} -: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}} - -mydaemon="/usr/bin/${OCF_RESKEY_binary}" - -####################################################################### - -usage() { - cat < - - -1.0 - - -This 'inventory-conductor' is an OCF Compliant Resource Agent that manages start, stop and in-service monitoring of the Inventory Conductor - - - -Manages the Config (inventory-conductor) process. - - - - - - - -dbg = false ... info, warn and err logs sent to output stream (default) -dbg = true ... Additional debug logs are also sent to the output stream - -Service Debug Control Option - - - - - - - - - - - - - - -END - return ${OCF_SUCCESS} -} - -inventory_conductor_validate() { - - local rc - - proc="${binname}:validate" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - check_binary ${OCF_RESKEY_binary} - - if [ ! -f ${OCF_RESKEY_config} ] ; then - ocf_log err "${OCF_RESKEY_binary} ini file missing (${OCF_RESKEY_config})" - return ${OCF_ERR_CONFIGURED} - fi - - return ${OCF_SUCCESS} -} - -inventory_conductor_status() { - local pid - local rc - - proc="${binname}:status" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - if [ ! -f $OCF_RESKEY_pid ]; then - ocf_log info "${binname}:Inventory Conductor (inventory-conductor) is not running" - return $OCF_NOT_RUNNING - else - pid=`cat $OCF_RESKEY_pid` - fi - - ocf_run -warn kill -s 0 $pid - rc=$? - if [ $rc -eq 0 ]; then - return $OCF_SUCCESS - else - ocf_log info "${binname}:Old PID file found, but Inventory Conductor (inventory-conductor)is not running" - rm -f $OCF_RESKEY_pid - return $OCF_NOT_RUNNING - fi -} - -inventory_conductor_monitor () { - local rc - proc="${binname}:monitor" - - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - inventory_conductor_status - rc=$? - return ${rc} -} - -inventory_conductor_start () { - local rc - - proc="${binname}:start" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - # If running then issue a ping test - if [ -f ${OCF_RESKEY_pid} ] ; then - inventory_conductor_status - rc=$? - if [ $rc -ne ${OCF_SUCCESS} ] ; then - ocf_log err "${proc} ping test failed (rc=${rc})" - inventory_conductor_stop - else - return ${OCF_SUCCESS} - fi - fi - - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - RUN_OPT_DEBUG="--debug" - else - RUN_OPT_DEBUG="" - fi - - su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=${OCF_RESKEY_config} ${RUN_OPT_DEBUG}"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid - rc=$? 
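# Launch idiom used above: `su ... -c "<daemon> ... & echo $!"` backgrounds
# the conductor and writes the child's PID into ${OCF_RESKEY_pid}, so rc
# reflects whether su/sh spawned successfully, not whether the daemon
# stayed up. Editorial note: unlike the inventory-api agent, this script
# defines no OCF_RESKEY_user_default, so ${OCF_RESKEY_user} expands empty
# unless the caller provides it (su with no username then targets root).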
- if [ ${rc} -ne ${OCF_SUCCESS} ] ; then - ocf_log err "${proc} failed ${mydaemon} daemon (rc=$rc)" - return ${OCF_ERR_GENERIC} - else - if [ -f ${OCF_RESKEY_pid} ] ; then - pid=`cat ${OCF_RESKEY_pid}` - ocf_log info "${proc} running with pid ${pid}" - else - ocf_log info "${proc} with no pid file" - fi - fi - - # Record success or failure and return status - if [ ${rc} -eq $OCF_SUCCESS ] ; then - ocf_log info "Inventory Conductor Service (${OCF_RESKEY_binary}) started (pid=${pid})" - else - ocf_log err "Config Service (${OCF_RESKEY_binary}) failed to start (rc=${rc})" - rc=${OCF_NOT_RUNNING} - fi - - return ${rc} -} - -inventory_conductor_confirm_stop() { - local my_bin - local my_processes - - my_binary=`which ${OCF_RESKEY_binary}` - my_processes=`pgrep -l -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)"` - - if [ -n "${my_processes}" ] - then - ocf_log info "About to SIGKILL the following: ${my_processes}" - pkill -KILL -f "^(python|/usr/bin/python|/usr/bin/python2) ${my_binary}([^\w-]|$)" - fi -} - -inventory_conductor_stop () { - local rc - local pid - - proc="${binname}:stop" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - inventory_conductor_status - rc=$? - if [ $rc -eq $OCF_NOT_RUNNING ]; then - ocf_log info "${proc} Inventory Conductor (inventory-conductor) already stopped" - inventory_conductor_confirm_stop - return ${OCF_SUCCESS} - fi - - # Try SIGTERM - pid=`cat $OCF_RESKEY_pid` - ocf_run kill -s TERM $pid - rc=$? - if [ $rc -ne 0 ]; then - ocf_log err "${proc} Inventory Conductor (inventory-conductor) couldn't be stopped" - inventory_conductor_confirm_stop - exit $OCF_ERR_GENERIC - fi - - # stop waiting - shutdown_timeout=15 - if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then - shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5)) - fi - count=0 - while [ $count -lt $shutdown_timeout ]; do - inventory_conductor_status - rc=$? - if [ $rc -eq $OCF_NOT_RUNNING ]; then - break - fi - count=`expr $count + 1` - sleep 1 - ocf_log info "${proc} Inventory Conductor (inventory-conductor) still hasn't stopped yet. Waiting ..." - done - - inventory_conductor_status - rc=$? - if [ $rc -ne $OCF_NOT_RUNNING ]; then - # SIGTERM didn't help either, try SIGKILL - ocf_log info "${proc} Inventory Conductor (inventory-conductor) failed to stop after ${shutdown_timeout}s \ - using SIGTERM. Trying SIGKILL ..." - ocf_run kill -s KILL $pid - fi - inventory_conductor_confirm_stop - - ocf_log info "${proc} Inventory Conductor (inventory-conductor) stopped." - - rm -f $OCF_RESKEY_pid - - return $OCF_SUCCESS - -} - -inventory_conductor_reload () { - local rc - - proc="${binname}:reload" - if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${proc}" - fi - - inventory_conductor_stop - rc=$? - if [ $rc -eq ${OCF_SUCCESS} ] ; then - #sleep 1 - inventory_conductor_start - rc=$? - if [ $rc -eq ${OCF_SUCCESS} ] ; then - ocf_log info "Inventory (${OCF_RESKEY_binary}) process restarted" - fi - fi - - if [ ${rc} -ne ${OCF_SUCCESS} ] ; then - ocf_log info "Inventory (${OCF_RESKEY_binary}) process failed to restart (rc=${rc})" - fi - - return ${rc} -} - -case ${__OCF_ACTION} in - meta-data) meta_data - exit ${OCF_SUCCESS} - ;; - usage|help) usage - exit ${OCF_SUCCESS} - ;; -esac - -# Anything except meta-data and help must pass validation -inventory_conductor_validate || exit $? 
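# How this agent is driven: the systemd unit (inventory-conductor.service,
# later in this patch) exports OCF_ROOT and invokes the script with a
# single action argument, which the case statement below dispatches. An
# equivalent manual invocation for debugging might look like the following
# sketch (illustrative only, not from the original source; OCF_RESKEY_dbg
# is a real parameter of this agent, and the paths match the install
# locations used elsewhere in this patch):
#
#   OCF_ROOT=/usr/lib/ocf OCF_RESKEY_dbg=true \
#       /usr/lib/ocf/resource.d/platform/inventory-conductor monitor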
- -if [ ${OCF_RESKEY_dbg} = "true" ] ; then - ocf_log info "${binname}:${__OCF_ACTION} action" -fi - -case ${__OCF_ACTION} in - - start) inventory_conductor_start - ;; - stop) inventory_conductor_stop - ;; - status) inventory_conductor_status - ;; - reload) inventory_conductor_reload - ;; - monitor) inventory_conductor_monitor - ;; - validate-all) inventory_conductor_validate - ;; - *) usage - exit ${OCF_ERR_UNIMPLEMENTED} - ;; -esac diff --git a/inventory/inventory/scripts/inventory-conductor.service b/inventory/inventory/scripts/inventory-conductor.service deleted file mode 100644 index 31a6f63d..00000000 --- a/inventory/inventory/scripts/inventory-conductor.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Inventory Conductor -After=network-online.target syslog-ng.service config.service rabbitmq-server.service - -[Service] -Type=simple -RemainAfterExit=yes -User=root -Environment=OCF_ROOT=/usr/lib/ocf -ExecStart=/usr/lib/ocf/resource.d/platform/inventory-conductor start -ExecStop=/usr/lib/ocf/resource.d/platform/inventory-conductor stop -PIDFile=/var/run/inventory-conductor.pid - -[Install] -WantedBy=multi-user.target diff --git a/inventory/inventory/setup.cfg b/inventory/inventory/setup.cfg deleted file mode 100644 index 56d42c38..00000000 --- a/inventory/inventory/setup.cfg +++ /dev/null @@ -1,57 +0,0 @@ -[metadata] -name = inventory -summary = Inventory -description-file = - README.rst -author = StarlingX -author-email = starlingx-discuss@lists.starlingx.io -home-page = http://www.starlingx.io/ -classifier = - Environment :: StarlingX - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - -[files] -packages = - inventory - -[entry_points] -console_scripts = - inventory-api = inventory.cmd.api:main - inventory-agent = inventory.cmd.agent:main - inventory-conductor = inventory.cmd.conductor:main - inventory-dbsync = inventory.cmd.dbsync:main - inventory-dnsmasq-lease-update = inventory.cmd.dnsmasq_lease_update:main - -oslo.config.opts = - inventory.common.config = inventory.common.config:list_opts - inventory.common.api.api_config = inventory.api.api_config:list_opts - -inventory.database.migration_backend = - sqlalchemy = oslo_db.sqlalchemy.migration - -inventory.agent.lldp.drivers = - lldpd = inventory.agent.lldp.drivers.lldpd.driver:InventoryLldpdAgentDriver - ovs = inventory.agent.lldp.drivers.ovs.driver:InventoryOVSAgentDriver - -inventory.systemconfig.drivers = - systemconfig = inventory.systemconfig.drivers.sysinv.driver:SysinvSystemConfigDriver - -[compile_catalog] -directory = inventory/locale -domain = inventory - -[update_catalog] -domain = inventory -output_dir = inventory/locale -input_file = inventory/locale/inventory.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = inventory/locale/inventory.pot diff --git a/inventory/inventory/setup.py b/inventory/inventory/setup.py deleted file mode 100644 index 056c16c2..00000000 --- a/inventory/inventory/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/inventory/inventory/test-requirements.txt b/inventory/inventory/test-requirements.txt deleted file mode 100644 index 5ac3fee7..00000000 --- a/inventory/inventory/test-requirements.txt +++ /dev/null @@ -1,36 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -hacking>=0.12.0,<0.13 # Apache-2.0 - -coverage>=4.0,!=4.4 # Apache-2.0 -discover -fixtures>=0.3.14 -mock<1.1.0,>=1.0 -mox -MySQL-python -# passlib>=1.7.0 -psycopg2 -python-barbicanclient<3.1.0,>=3.0.1 -python-subunit>=0.0.18 # Apache-2.0/BSD -requests-mock>=0.6.0 # Apache-2.0 -sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 -oslosphinx<2.6.0,>=2.5.0 # Apache-2.0 -stestr>=1.0.0 # Apache-2.0 -testtools>=1.4.0 # MIT -oslotest>=1.10.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -testrepository>=0.0.18 -testtools!=1.2.0,>=0.9.36 -tempest-lib<0.5.0,>=0.4.0 -ipaddr -pytest -pyudev -libvirt-python>=1.2.5 -migrate -python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 -python-cephclient -python-ldap>=2.4.22,<3.0.0 -markupsafe -Babel>=0.9.6 diff --git a/inventory/inventory/tox.ini b/inventory/inventory/tox.ini deleted file mode 100644 index 2ddf29a1..00000000 --- a/inventory/inventory/tox.ini +++ /dev/null @@ -1,102 +0,0 @@ -[tox] -minversion = 2.0 -# envlist = pep8 -envlist = py27,pep8 - -# tox does not work if the path to the workdir is too long, so move it to /tmp -toxworkdir = /tmp/{env:USER}_inventorytox -cgcsdir = {toxinidir}/../../.. -wrsdir = {toxinidir}/../../../../../../../.. -distshare={toxworkdir}/.tox/distshare - -[testenv] -# sitepackages = True -install_command = pip install \ - -v -v -v \ - -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \ - {opts} {packages} -whitelist_externals = bash - find -setenv = - VIRTUAL_ENV={envdir} - PYTHONWARNINGS=default::DeprecationWarning - OS_STDOUT_CAPTURE=1 - OS_STDERR_CAPTURE=1 - OS_TEST_TIMEOUT=60 - INVENTORY_TEST_ENV=True - TOX_WORK_DIR={toxworkdir} - PYLINTHOME={toxworkdir} - -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -e{[tox]cgcsdir}/config/tsconfig/tsconfig - -e{[tox]cgcsdir}/config/sysinv/cgts-client/cgts-client - -e{[tox]cgcsdir}/fault/fm-api - -e{[tox]cgcsdir}/fault/python-fmclient/fmclient - -e{[tox]cgcsdir}/config/controllerconfig/controllerconfig - -e{[tox]cgcsdir}/update/cgcs-patch/cgcs-patch - -e{[tox]cgcsdir}/utilities/utilities/platform-util/platform-util - -commands = - find . -type f -name "*.pyc" -delete - find . 
-type f -name ".coverage\.*" -delete - coverage erase - -[testenv:py27] -basepython = python2.7 -commands = - {[testenv]commands} - stestr run {posargs} - stestr slowest - -[testenv:pep8] -commands = flake8 {posargs} - -[testenv:venv] -commands = {posargs} - -[testenv:cover] -deps = {[testenv]deps} - -e{[tox]cgcsdir}/config/tsconfig/tsconfig - -e{[tox]cgcsdir}/fault/fm-api - -e{[tox]cgcsdir}/fault/python-fmclient/fmclient - -e{[tox]cgcsdir}/config/controllerconfig/controllerconfig - -e{[tox]cgcsdir}/config/sysinv/cgts-client/cgts-client - -e{[tox]cgcsdir}/update/cgcs-patch/cgcs-patch - -e{[tox]cgcsdir}/utilities/utilities/platform-util/platform-util -setenv = - VIRTUAL_ENV={envdir} - PYTHON=coverage run --source inventory --parallel-mode -commands = - find . -type f -name "*.pyc" -delete - find . -type f -name ".coverage\.*" -delete - stestr run {posargs} - coverage erase - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - -[testenv:docs] -deps = -r{toxinidir}/doc/requirements.txt -commands = sphinx-build -W -b html doc/source doc/build/html - -[testenv:releasenotes] -deps = {[testenv:docs]deps} -commands = - sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:debug] -commands = oslo_debug_helper {posargs} - -[hacking] -import_exceptions = inventory.common.i18n - -[flake8] -# H102 Apache License format -# H233 Python 3.x incompatible use of print operator -# H404 multi line docstring should start without a leading new line -# H405 multi line docstring summary not separated with an empty line -ignore = H102,H233,H404,H405 -show-source = True -builtins = _ -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build diff --git a/inventory/opensuse/inventory.changes b/inventory/opensuse/inventory.changes deleted file mode 100644 index 37669dd6..00000000 --- a/inventory/opensuse/inventory.changes +++ /dev/null @@ -1,14 +0,0 @@ -------------------------------------------------------------------- -Wed Jul 31 20:18:37 UTC 2019 - Marcela Rosales - -- Remove tarball from OBS and use _service XML to get the source code. - -------------------------------------------------------------------- -Fri Jul 26 16:01:39 UTC 2019 - Marcela Rosales - -- Add runtime dependencies for the package to be installed correctly. 
- -------------------------------------------------------------------- -Thu Jun 27 23:23:54 UTC 2019 - Erich Cordoba - -- Initial build for inventory diff --git a/inventory/opensuse/inventory.rpmlintrc b/inventory/opensuse/inventory.rpmlintrc deleted file mode 100644 index adc59bf3..00000000 --- a/inventory/opensuse/inventory.rpmlintrc +++ /dev/null @@ -1 +0,0 @@ -setBadness('script-without-shebang', 2) diff --git a/inventory/opensuse/inventory.spec b/inventory/opensuse/inventory.spec deleted file mode 100644 index 8d45c918..00000000 --- a/inventory/opensuse/inventory.spec +++ /dev/null @@ -1,189 +0,0 @@ -Summary: StarlingX Inventory -Name: inventory -Version: 1.0.0 -Release: 1 -License: Apache-2.0 -Group: System/Base -URL: https://www.starlingx.io -Source0: %{name}-%{version}.tar.gz - -BuildRequires: cgts-client -BuildRequires: python-setuptools -BuildRequires: python-jsonpatch -BuildRequires: python-keystoneauth1 -BuildRequires: python-keystonemiddleware -BuildRequires: python-mock -BuildRequires: python-neutronclient -BuildRequires: python2-oslo.concurrency -BuildRequires: python2-oslo.config -BuildRequires: python2-oslo.context -BuildRequires: python2-oslo.db -BuildRequires: python2-oslo.i18n -BuildRequires: python2-oslo.log -BuildRequires: python2-oslo.messaging -BuildRequires: python2-oslo.middleware -BuildRequires: python2-oslo.policy -BuildRequires: python2-oslo.rootwrap -BuildRequires: python2-oslo.serialization -BuildRequires: python2-oslo.service -BuildRequires: python2-oslo.utils -BuildRequires: python2-oslo.versionedobjects -BuildRequires: python-oslotest -BuildRequires: python-osprofiler -BuildRequires: python-os-testr -BuildRequires: python-pbr -BuildRequires: python-pecan -BuildRequires: python-psutil -BuildRequires: python-requests -BuildRequires: python-retrying -BuildRequires: python-six -BuildRequires: python-sqlalchemy -BuildRequires: python-stevedore -BuildRequires: python-webob -BuildRequires: python2-WSME -BuildRequires: systemd -BuildRequires: systemd-devel -BuildRequires: fdupes - -Requires: python-pyudev -Requires: python-parted -Requires: python-ipaddr -Requires: python-paste -Requires: python-eventlet -Requires: python-futurist -Requires: python-jsonpatch -Requires: python-keystoneauth1 -Requires: python-keystonemiddleware -Requires: python-neutronclient -Requires: python2-oslo.concurrency -Requires: python2-oslo.config -Requires: python2-oslo.context -Requires: python2-oslo.db -Requires: python2-oslo.i18n -Requires: python2-oslo.log -Requires: python2-oslo.messaging -Requires: python2-oslo.middleware -Requires: python2-oslo.policy -Requires: python2-oslo.rootwrap -Requires: python2-oslo.serialization -Requires: python2-oslo.service -Requires: python2-oslo.utils -Requires: python2-oslo.versionedobjects -Requires: python2-osprofiler -Requires: python-pbr -Requires: python-pecan -Requires: python-psutil -Requires: python-requests -Requires: python-retrying -Requires: python-six -Requires: python-sqlalchemy -Requires: python-stevedore -Requires: python-webob -Requires: python2-WSME -Requires: tsconfig - -%description -The inventory service for StarlingX - -%define local_etc_goenabledd %{_sysconfdir}/goenabled.d/ -%define local_etc_inventory %{_sysconfdir}/inventory/ -%define local_etc_motdd %{_sysconfdir}/motd.d/ -%define pythonroot %{_libdir}/python2.7/site-packages -%define ocf_resourced %{_libdir}/ocf/resource.d - -%define local_etc_initd %{_sysconfdir}/init.d/ -%define local_etc_pmond %{_sysconfdir}/pmon.d/ - -%define debug_package %{nil} - -%prep -%setup -n 
%{name}-%{version}/%{name} - -# Remove bundled egg-info -rm -rf *.egg-info - -%build -export PBR_VERSION=%{version} -%{__python} setup.py build -PYTHONPATH=. oslo-config-generator --config-file=inventory/config-generator.conf - -%install -export PBR_VERSION=%{version} -%{__python} setup.py install --root=%{buildroot} \ - --install-lib=%{pythonroot} \ - --prefix=/usr \ - --install-data=/usr/share \ - --single-version-externally-managed - -install -d -m 755 %{buildroot}%{local_etc_goenabledd} -install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh %{buildroot}%{local_etc_goenabledd}/inventory_goenabled_check.sh - -install -d -m 755 %{buildroot}%{local_etc_inventory} -install -p -D -m 644 etc/inventory/policy.json %{buildroot}%{local_etc_inventory}/policy.json - -install -d -m 755 %{buildroot}%{local_etc_motdd} -install -p -D -m 755 etc/inventory/motd-system %{buildroot}%{local_etc_motdd}/10-system-config - -install -m 755 -p -D scripts/inventory-api %{buildroot}%{_libdir}/ocf/resource.d/platform/inventory-api -install -m 755 -p -D scripts/inventory-conductor %{buildroot}%{_libdir}/ocf/resource.d/platform/inventory-conductor - -install -m 644 -p -D scripts/inventory-api.service %{buildroot}%{_unitdir}/inventory-api.service -install -m 644 -p -D scripts/inventory-conductor.service %{buildroot}%{_unitdir}/inventory-conductor.service - -# Install sql migration -install -m 644 inventory/db/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{pythonroot}/inventory/db/sqlalchemy/migrate_repo/migrate.cfg -%fdupes %{buildroot}%{pythonroot}/inventory-1.0-py2.7.egg-info/ - -%pre -%service_add_pre inventory-api.service -%service_add_pre inventory-conductor.service - -%post -%service_add_post inventory-api.service -%service_add_post inventory-conductor.service -# TODO(jkung) activate inventory-agent -# /usr/bin/systemctl enable inventory-agent.service >/dev/null 2>&1 - -%preun -%service_del_preun inventory-api.service -%service_del_preun inventory-conductor.service - -%postun -%service_del_postun inventory-api.service -%service_del_postun inventory-conductor.service - - -%clean -rm -rf %{buildroot} - -%files -%defattr(-,root,root,-) -%doc LICENSE - -%{_bindir}/* -%{pythonroot}/%{name} -%{pythonroot}/%{name}-%{version}*.egg-info -%dir %{local_etc_goenabledd} -%dir %{local_etc_inventory} -%dir %{local_etc_motdd} -%dir %{_libdir}/ocf -%dir %{_libdir}/ocf/resource.d -%dir %{_libdir}/ocf/resource.d/platform -%config %{local_etc_inventory}/policy.json -%{local_etc_goenabledd}/* -%{local_etc_motdd}/* - -# SM OCF Start/Stop/Monitor Scripts -%{ocf_resourced}/platform/inventory-api -%{ocf_resourced}/platform/inventory-conductor - -# systemctl service files -%{_unitdir}/inventory-api.service -%{_unitdir}/inventory-conductor.service - -%{_bindir}/inventory-api -%{_bindir}/inventory-conductor -%{_bindir}/inventory-dbsync -%{_bindir}/inventory-dnsmasq-lease-update - -%changelog diff --git a/python-inventoryclient/PKG-INFO b/python-inventoryclient/PKG-INFO deleted file mode 100644 index 9c4108c4..00000000 --- a/python-inventoryclient/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: python-inventoryclient -Version: 1.0 -Summary: A python client library for Inventory -Home-page: https://wiki.openstack.org/wiki/StarlingX -Author: StarlingX -Author-email: starlingx-discuss@lists.starlingx.io -License: Apache-2.0 - -A python client library for Inventory - - -Platform: UNKNOWN diff --git a/python-inventoryclient/centos/build_srpm.data b/python-inventoryclient/centos/build_srpm.data deleted file 
mode 100644 index ad41cf59..00000000 --- a/python-inventoryclient/centos/build_srpm.data +++ /dev/null @@ -1,2 +0,0 @@ -SRC_DIR="inventoryclient" -TIS_PATCH_VER=2 diff --git a/python-inventoryclient/centos/python-inventoryclient.spec b/python-inventoryclient/centos/python-inventoryclient.spec deleted file mode 100644 index 2dad46a5..00000000 --- a/python-inventoryclient/centos/python-inventoryclient.spec +++ /dev/null @@ -1,70 +0,0 @@ -%global pypi_name inventoryclient - -Summary: A python client library for Inventory -Name: python-inventoryclient -Version: 1.0 -Release: %{tis_patch_ver}%{?_tis_dist} -License: Apache-2.0 -Group: base -Packager: Wind River -URL: unknown -Source0: %{name}-%{version}.tar.gz - -BuildRequires: git -BuildRequires: python-pbr >= 2.0.0 -BuildRequires: python-setuptools -BuildRequires: python2-pip - -Requires: python-keystoneauth1 >= 3.1.0 -Requires: python-pbr >= 2.0.0 -Requires: python-six >= 1.9.0 -Requires: python-oslo-i18n >= 2.1.0 -Requires: python-oslo-utils >= 3.20.0 -Requires: python-requests -Requires: bash-completion - -%description -A python client library for Inventory - -%define local_bindir /usr/bin/ -%define local_etc_bash_completiond /etc/bash_completion.d/ -%define pythonroot /usr/lib64/python2.7/site-packages - -%define debug_package %{nil} - -%prep -%autosetup -n %{name}-%{version} -S git - -# Remove bundled egg-info -rm -rf *.egg-info - -%build -echo "Start build" - -export PBR_VERSION=%{version} -%{__python} setup.py build - -%install -echo "Start install" -export PBR_VERSION=%{version} -%{__python} setup.py install --root=%{buildroot} \ - --install-lib=%{pythonroot} \ - --prefix=/usr \ - --install-data=/usr/share \ - --single-version-externally-managed - -install -d -m 755 %{buildroot}%{local_etc_bash_completiond} -install -p -D -m 664 tools/inventory.bash_completion %{buildroot}%{local_etc_bash_completiond}/inventory.bash_completion - - -%clean -echo "CLEAN CALLED" -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,root,root,-) -%doc LICENSE -%{local_bindir}/* -%{local_etc_bash_completiond}/* -%{pythonroot}/%{pypi_name}/* -%{pythonroot}/%{pypi_name}-%{version}*.egg-info diff --git a/python-inventoryclient/inventoryclient/.gitignore b/python-inventoryclient/inventoryclient/.gitignore deleted file mode 100644 index 78c457c6..00000000 --- a/python-inventoryclient/inventoryclient/.gitignore +++ /dev/null @@ -1,35 +0,0 @@ -# Compiled files -*.py[co] -*.a -*.o -*.so - -# Sphinx -_build -doc/source/api/ - -# Packages/installer info -*.egg -*.egg-info -dist -build -eggs -parts -var -sdist -develop-eggs -.installed.cfg - -# Other -*.DS_Store -.stestr -.testrepository -.tox -.venv -.*.swp -.coverage -bandit.xml -cover -AUTHORS -ChangeLog -*.sqlite diff --git a/python-inventoryclient/inventoryclient/.testr.conf b/python-inventoryclient/inventoryclient/.testr.conf deleted file mode 100644 index d42e8fe4..00000000 --- a/python-inventoryclient/inventoryclient/.testr.conf +++ /dev/null @@ -1,10 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./inventoryclient/tests} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list -# group tests when running concurrently -# This regex groups by classname -#group_regex=([^\.]+\.)+ diff --git a/python-inventoryclient/inventoryclient/LICENSE b/python-inventoryclient/inventoryclient/LICENSE deleted file mode 100644 
index 68c771a0..00000000 --- a/python-inventoryclient/inventoryclient/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/python-inventoryclient/inventoryclient/inventoryclient/__init__.py b/python-inventoryclient/inventoryclient/inventoryclient/__init__.py deleted file mode 100644 index 15ddbe1f..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -try: - import inventoryclient.client - Client = inventoryclient.client.get_client -except ImportError: - import warnings - warnings.warn("Could not import inventoryclient.client", ImportWarning) - -import pbr.version - -version_info = pbr.version.VersionInfo('inventoryclient') - -try: - __version__ = version_info.version_string() -except AttributeError: - __version__ = None diff --git a/python-inventoryclient/inventoryclient/inventoryclient/client.py b/python-inventoryclient/inventoryclient/inventoryclient/client.py deleted file mode 100644 index 9fec2f98..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/client.py +++ /dev/null @@ -1,92 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventoryclient.common.i18n import _ -from inventoryclient import exc -from keystoneauth1 import loading -from oslo_utils import importutils - - -SERVICE_TYPE = 'configuration' # TODO(jkung) This needs to be inventory - - -def get_client(version, endpoint=None, session=None, auth_token=None, - inventory_url=None, username=None, password=None, auth_url=None, - project_id=None, project_name=None, - region_name=None, timeout=None, - user_domain_id=None, user_domain_name=None, - project_domain_id=None, project_domain_name=None, - service_type=SERVICE_TYPE, endpoint_type=None, - **ignored_kwargs): - """Get an authenticated client, based on the credentials.""" - kwargs = {} - interface = endpoint_type or 'publicURL' - endpoint = endpoint or inventory_url - if auth_token and endpoint: - kwargs.update({ - 'token': auth_token, - }) - if timeout: - kwargs.update({ - 'timeout': timeout, - }) - elif auth_url: - auth_kwargs = {} - auth_type = 'password' - auth_kwargs.update({ - 'auth_url': auth_url, - 'project_id': project_id, - 'project_name': project_name, - 'user_domain_id': user_domain_id, - 'user_domain_name': user_domain_name, - 'project_domain_id': project_domain_id, - 'project_domain_name': project_domain_name, - }) - if username and password: - auth_kwargs.update({ - 'username': username, - 'password': password - }) - elif auth_token: - auth_type = 'token' - auth_kwargs.update({ - 'token': auth_token, - }) - - # Create new session only if it was not passed in - if not session: - loader = loading.get_plugin_loader(auth_type) - auth_plugin = loader.load_from_options(**auth_kwargs) - session = loading.session.Session().load_from_options( - auth=auth_plugin, timeout=timeout) - - exception_msg = _('Must provide Keystone credentials or user-defined ' - 'endpoint and token') - if not endpoint: - if session: - try: - endpoint = session.get_endpoint( - service_type=service_type, - interface=interface, - region_name=region_name - ) - except Exception as e: - raise exc.AuthSystem( - _('%(message)s, error was: %(error)s') % - {'message': exception_msg, 'error': e}) - else: - # Neither session, nor valid auth parameters provided - raise exc.AuthSystem(exception_msg) - - kwargs['endpoint_override'] = endpoint - kwargs['service_type'] = service_type - kwargs['interface'] = interface - kwargs['version'] = version - - inventory_module = importutils.import_versioned_module( - 'inventoryclient', version, 'client') - client_class = getattr(inventory_module, 'Client') - return client_class(endpoint, session=session, **kwargs) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/__init__.py b/python-inventoryclient/inventoryclient/inventoryclient/common/__init__.py deleted file mode 
100644 index e69de29b..00000000 diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/base.py b/python-inventoryclient/inventoryclient/inventoryclient/common/base.py deleted file mode 100644 index 885f5ac7..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/base.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2013 Wind River, Inc. -# Copyright 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base utilities to build API operation managers and objects on top of. -""" - -import copy -from inventoryclient import exc - - -def getid(obj): - """Abstracts the common pattern of allowing both an object or an - object's ID (UUID) as a parameter when dealing with relationships. - """ - try: - return obj.id - except AttributeError: - return obj - - -class Resource(object): - """A resource represents a particular instance of an object (tenant, user, - etc). This is pretty much just a bag for attributes. - - :param manager: Manager object - :param info: dictionary representing resource attributes - :param loaded: prevent lazy-loading if set to True - """ - def __init__(self, manager, info, loaded=False): - self.manager = manager - self._info = info - self._add_details(info) - self._loaded = loaded - - def _add_details(self, info): - for (k, v) in info.iteritems(): - setattr(self, k, v) - - def __getattr__(self, k): - if k not in self.__dict__: - # NOTE(bcwaldon): disallow lazy-loading if already loaded once - if not self.is_loaded(): - self.get() - return self.__getattr__(k) - - raise AttributeError(k) - else: - return self.__dict__[k] - - def __repr__(self): - reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and - k != 'manager') - info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) - return "<%s %s>" % (self.__class__.__name__, info) - - def get(self): - # set_loaded() first ... so if we have to bail, we know we tried. - self.set_loaded(True) - if not hasattr(self.manager, 'get'): - return - - new = self.manager.get(self.id) - if new: - self._add_details(new._info) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - if hasattr(self, 'id') and hasattr(other, 'id'): - return self.id == other.id - return self._info == other._info - - def is_loaded(self): - return self._loaded - - def set_loaded(self, val): - self._loaded = val - - def to_dict(self): - return copy.deepcopy(self._info) - - -class ResourceClassNone(Resource): - def __repr__(self): - return "" % self._info - - -class Manager(object): - """Managers interact with a particular type of API and provide CRUD - operations for them. 
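The deleted base.py above is built around two small pieces: getid(), which accepts either a resource object or a bare ID, and Resource, an attribute bag populated from a response dict. A minimal standalone sketch of that pattern (the host dict and values are made up; the lazy-loading and manager plumbing are omitted):

```python
def getid(obj):
    """Return obj.id when given a resource object, else assume obj
    is already an ID/UUID (mirrors base.getid above)."""
    try:
        return obj.id
    except AttributeError:
        return obj


class Resource(object):
    """Cut-down attribute bag in the spirit of base.Resource: every
    key of the info dict becomes an attribute."""

    def __init__(self, info):
        self._info = info
        for k, v in info.items():
            setattr(self, k, v)


host = Resource({'id': 42, 'hostname': 'controller-0'})
assert getid(host) == 42                        # object form
assert getid('a-bare-uuid') == 'a-bare-uuid'    # bare-ID form
```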
- """ - resource_class = ResourceClassNone - - def __init__(self, api): - self.api = api - - def _create(self, url, body): - resp, body = self.api.post(url, data=body) - if body: - if callable(self.resource_class): - return self.resource_class(self, body) - else: - raise exc.InvalidAttribute(url) - - def _upload(self, url, body, data=None): - files = {'file': ("for_upload", - body, - )} - resp = self.api.post(url, files=files, data=data) - return resp - - def _json_get(self, url): - """send a GET request and return a json serialized object""" - resp, body = self.api.get(url) - return body - - def _format_body_data(self, body, response_key): - if response_key: - try: - data = body[response_key] - except KeyError: - return [] - else: - data = body - - if not isinstance(data, list): - data = [data] - - return data - - def _list(self, url, response_key=None, obj_class=None, body=None): - resp, body = self.api.get(url) - - if obj_class is None: - obj_class = self.resource_class - - data = self._format_body_data(body, response_key) - return [obj_class(self, res, loaded=True) for res in data if res] - - def _update(self, url, **kwargs): - resp, body = self.api.patch(url, **kwargs) - # PATCH/PUT requests may not return a body - if body: - if callable(self.resource_class): - return self.resource_class(self, body) - else: - raise exc.InvalidAttribute(url) - - def _delete(self, url): - self.api.delete(url) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/cli_no_wrap.py b/python-inventoryclient/inventoryclient/inventoryclient/common/cli_no_wrap.py deleted file mode 100644 index 861a08c9..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/cli_no_wrap.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -The sole purpose of this module is to manage access to the _no_wrap variable -used by the wrapping_formatters module -""" - -_no_wrap = [False] - - -def is_nowrap_set(no_wrap=None): - """ - returns True if no wrapping desired. - determines this by either the no_wrap parameter - or if the global no_wrap flag is set - :param no_wrap: - :return: - """ - global _no_wrap - if no_wrap is True: - return True - if no_wrap is False: - return False - no_wrap = _no_wrap[0] - return no_wrap - - -def set_no_wrap(no_wrap): - """ - Sets the global nowrap flag - then returns result of call to is_nowrap_set(..) - :param no_wrap: - :return: - """ - global _no_wrap - if no_wrap is not None: - _no_wrap[0] = no_wrap - return is_nowrap_set(no_wrap) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/exceptions.py b/python-inventoryclient/inventoryclient/inventoryclient/common/exceptions.py deleted file mode 100644 index fefc6087..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/exceptions.py +++ /dev/null @@ -1,226 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import inspect -from inventoryclient.common.i18n import _ -import json -import six -from six.moves import http_client -import sys - - -class ClientException(Exception): - """An error occurred.""" - def __init__(self, message=None): - self.message = message - - def __str__(self): - return self.message or self.__class__.__doc__ - - -class InvalidEndpoint(ClientException): - """The provided endpoint is invalid.""" - - -class EndpointException(ClientException): - """Something is rotten in Service Catalog.""" - - -class CommunicationError(ClientException): - """Unable to communicate with server.""" - - -class Conflict(ClientException): - """HTTP 409 - Conflict. - - Indicates that the request could not be processed because of conflict - in the request, such as an edit conflict. - """ - http_status = http_client.CONFLICT - message = _("Conflict") - - -# _code_map contains all the classes that have http_status attribute. -_code_map = dict( - (getattr(obj, 'http_status', None), obj) - for name, obj in vars(sys.modules[__name__]).items() - if inspect.isclass(obj) and getattr(obj, 'http_status', False) -) - - -class HttpError(ClientException): - """The base exception class for all HTTP exceptions.""" - http_status = 0 - message = _("HTTP Error") - - def __init__(self, message=None, details=None, - response=None, request_id=None, - url=None, method=None, http_status=None): - self.http_status = http_status or self.http_status - self.message = message or self.message - self.details = details - self.request_id = request_id - self.response = response - self.url = url - self.method = method - formatted_string = "%s (HTTP %s)" % (self.message, self.http_status) - if request_id: - formatted_string += " (Request-ID: %s)" % request_id - super(HttpError, self).__init__(formatted_string) - - -class HTTPRedirection(HttpError): - """HTTP Redirection.""" - message = _("HTTP Redirection") - - -def _extract_error_json(body): - error_json = {} - try: - body_json = json.loads(body) - if 'error_message' in body_json: - raw_msg = body_json['error_message'] - error_json = json.loads(raw_msg) - except ValueError: - return {} - - return error_json - - -class HTTPClientError(HttpError): - """Client-side HTTP error. - - Exception for cases in which the client seems to have erred. - """ - message = _("HTTP Client Error") - - def __init__(self, message=None, details=None, - response=None, request_id=None, - url=None, method=None, http_status=None): - if method: - error_json = _extract_error_json(method) - message = error_json.get('faultstring') - super(HTTPClientError, self).__init__( - message=message, - details=details, - response=response, - request_id=request_id, - url=url, - method=method, - http_status=http_status) - - -class NotFound(HTTPClientError): - """HTTP 404 - Not Found. - - The requested resource could not be found but may be available again - in the future. - """ - http_status = 404 - message = "Not Found" - - -class HttpServerError(HttpError): - """Server-side HTTP error. - - Exception for cases in which the server is aware that it has - erred or is incapable of performing the request. 
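A detail of exceptions.py worth spelling out: `_code_map` is built by scanning the module's own namespace for classes that declare a truthy `http_status`, so defining a new exception class (above that point in the module) registers it automatically. The same introspection, runnable in isolation with illustrative classes:

```python
import inspect
import sys


class HttpError(Exception):
    http_status = 0


class Conflict(HttpError):
    http_status = 409


class ServiceUnavailable(HttpError):
    http_status = 503


# Same trick as exceptions.py: index every class in this module that
# carries a truthy http_status attribute by that status code.
_code_map = dict(
    (getattr(obj, 'http_status', None), obj)
    for name, obj in vars(sys.modules[__name__]).items()
    if inspect.isclass(obj) and getattr(obj, 'http_status', False)
)

assert _code_map[409] is Conflict
assert _code_map[503] is ServiceUnavailable
assert 0 not in _code_map  # HttpError's falsy status keeps it out
```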
- """ - message = _("HTTP Server Error") - - def __init__(self, message=None, details=None, - response=None, request_id=None, - url=None, method=None, http_status=None): - if method: - error_json = _extract_error_json(method) - message = error_json.get('faultstring') - super(HttpServerError, self).__init__( - message=message, - details=details, - response=response, - request_id=request_id, - url=url, - method=method, - http_status=http_status) - - -class ServiceUnavailable(HttpServerError): - """HTTP 503 - Service Unavailable. - - The server is currently unavailable. - """ - http_status = http_client.SERVICE_UNAVAILABLE - message = _("Service Unavailable") - - -class GatewayTimeout(HttpServerError): - """HTTP 504 - Gateway Timeout. - - The server was acting as a gateway or proxy and did not receive a timely - response from the upstream server. - """ - http_status = http_client.GATEWAY_TIMEOUT - message = "Gateway Timeout" - - -class HttpVersionNotSupported(HttpServerError): - """HTTP 505 - HttpVersion Not Supported. - - The server does not support the HTTP protocol version used in the request. - """ - http_status = http_client.HTTP_VERSION_NOT_SUPPORTED - message = "HTTP Version Not Supported" - - -def from_response(response, method, url=None): - """Returns an instance of :class:`HttpError` or subclass based on response. - - :param response: instance of `requests.Response` class - :param method: HTTP method used for request - :param url: URL used for request - """ - - req_id = response.headers.get("x-openstack-request-id") - kwargs = { - "http_status": response.status_code, - "response": response, - "method": method, - "url": url, - "request_id": req_id, - } - if "retry-after" in response.headers: - kwargs["retry_after"] = response.headers["retry-after"] - - content_type = response.headers.get("Content-Type", "") - if content_type.startswith("application/json"): - try: - body = response.json() - except ValueError: - pass - else: - if isinstance(body, dict): - error = body.get(list(body)[0]) - if isinstance(error, dict): - kwargs["message"] = (error.get("message") or - error.get("faultstring")) - kwargs["details"] = (error.get("details") or - six.text_type(body)) - elif content_type.startswith("text/"): - kwargs["details"] = getattr(response, 'text', '') - - try: - cls = _code_map[response.status_code] - except KeyError: - if 500 <= response.status_code < 600: - cls = HttpServerError - elif 400 <= response.status_code < 500: - cls = HTTPClientError - elif 404 == response.status_code: - cls = NotFound - else: - cls = HttpError - return cls(**kwargs) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/http.py b/python-inventoryclient/inventoryclient/inventoryclient/common/http.py deleted file mode 100644 index ab8a30ea..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/http.py +++ /dev/null @@ -1,402 +0,0 @@ -# Copyright 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import json -import logging -import six -import socket - -from keystoneauth1 import adapter -from keystoneauth1 import exceptions as ksa_exc - -import OpenSSL -from oslo_utils import encodeutils -from oslo_utils import importutils -from oslo_utils import netutils -import requests - - -from inventoryclient.common import exceptions as exc -from inventoryclient.common import utils - -osprofiler_web = importutils.try_import("osprofiler.web") - -LOG = logging.getLogger(__name__) - -DEFAULT_VERSION = '1' -USER_AGENT = 'python-inventoryclient' -CHUNKSIZE = 1024 * 64 # 64kB -REQ_ID_HEADER = 'X-OpenStack-Request-ID' - -API_VERSION = '/v1' -API_VERSION_SELECTED_STATES = ('user', 'negotiated', 'cached', 'default') - -SENSITIVE_HEADERS = ('X-Auth-Token',) - -SUPPORTED_ENDPOINT_SCHEME = ('http', 'https') - - -def encode_headers(headers): - """Encodes headers. - - Note: This should be used right before - sending anything out. - - :param headers: Headers to encode - :returns: Dictionary with encoded headers' - names and values - """ - return dict((encodeutils.safe_encode(h), encodeutils.safe_encode(v)) - for h, v in headers.items() if v is not None) - - -class _BaseHTTPClient(object): - - @staticmethod - def _chunk_body(body): - chunk = body - while chunk: - chunk = body.read(CHUNKSIZE) - if not chunk: - break - yield chunk - - def _set_common_request_kwargs(self, headers, kwargs, skip_dumps=False): - """Handle the common parameters used to send the request.""" - - # Default Content-Type is json - content_type = headers.get('Content-Type', 'application/json') - - # NOTE(jamielennox): remove this later. Managers should pass json= if - # they want to send json data. - data = kwargs.pop("data", None) - if data is not None and not isinstance(data, six.string_types): - try: - if not skip_dumps: - data = json.dumps(data) - content_type = 'application/json' - except TypeError: - # Here we assume it's - # a file-like object - # and we'll chunk it - data = self._chunk_body(data) - - if not skip_dumps: - headers['Content-Type'] = content_type - - return data - - def _handle_response(self, resp): - if not resp.ok: - LOG.error("Request returned failure status %s.", resp.status_code) - raise exc.from_response(resp, resp.content) - elif (resp.status_code == requests.codes.multiple_choices and - resp.request.path_url != '/versions'): - # NOTE(flaper87): Eventually, we'll remove the check on `versions` - # which is a bug (1491350) on the server. - raise exc.from_response(resp, resp.content) - - content_type = resp.headers.get('Content-Type') - - # Read body into string if it isn't obviously image data - if content_type == 'application/octet-stream': - # Do not read all response in memory when downloading an image. 
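`_chunk_body()` above streams a file-like request body in CHUNKSIZE pieces rather than reading it whole; the original loop seeds `chunk` with the body object itself, which reads oddly. A straightened-out, runnable equivalent:

```python
import io

CHUNKSIZE = 1024 * 64  # 64 kB, matching http.py


def chunk_body(body):
    """Yield successive CHUNKSIZE reads until the stream is exhausted."""
    while True:
        chunk = body.read(CHUNKSIZE)
        if not chunk:
            break
        yield chunk


stream = io.BytesIO(b'x' * (2 * CHUNKSIZE + 10))
assert [len(c) for c in chunk_body(stream)] == [CHUNKSIZE, CHUNKSIZE, 10]
```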
- body_iter = _close_after_stream(resp, CHUNKSIZE) - else: - content = resp.text - if content_type and content_type.startswith('application/json'): - # Let's use requests json method, it should take care of - # response encoding - body_iter = resp.json() - else: - body_iter = six.StringIO(content) - try: - body_iter = json.loads(''.join([c for c in body_iter])) - except ValueError: - body_iter = None - - return resp, body_iter - - def upload_request_with_data(self, url, auth_token, files, data): - headers = {"X-Auth-Token": auth_token} - req = requests.post(url, headers=headers, files=files, data=data) - return req.json() - - -class HTTPClient(_BaseHTTPClient): - - def __init__(self, endpoint, **kwargs): - self.endpoint = endpoint - self.identity_headers = kwargs.get('identity_headers') - self.auth_token = kwargs.get('token') - self.language_header = kwargs.get('language_header') - self.global_request_id = kwargs.get('global_request_id') - if self.identity_headers: - self.auth_token = self.identity_headers.pop('X-Auth-Token', - self.auth_token) - - self.session = requests.Session() - self.session.headers["User-Agent"] = USER_AGENT - - if self.language_header: - self.session.headers["Accept-Language"] = self.language_header - - self.timeout = float(kwargs.get('timeout', 600)) - - if self.endpoint.startswith("https"): - - if kwargs.get('insecure', False) is True: - self.session.verify = False - else: - if kwargs.get('cacert', None) is not '': - self.session.verify = kwargs.get('cacert', True) - - self.session.cert = (kwargs.get('cert_file'), - kwargs.get('key_file')) - - @staticmethod - def parse_endpoint(endpoint): - return netutils.urlsplit(endpoint) - - def log_curl_request(self, method, url, headers, data, kwargs): - curl = ['curl -g -i -X %s' % method] - - headers = copy.deepcopy(headers) - headers.update(self.session.headers) - - for (key, value) in headers.items(): - header = '-H \'%s: %s\'' % utils.safe_header(key, value) - curl.append(header) - - if not self.session.verify: - curl.append('-k') - else: - if isinstance(self.session.verify, six.string_types): - curl.append(' --cacert %s' % self.session.verify) - - if self.session.cert: - curl.append(' --cert %s --key %s' % self.session.cert) - - if data and isinstance(data, six.string_types): - curl.append('-d \'%s\'' % data) - - curl.append(url) - - msg = ' '.join([encodeutils.safe_decode(item, errors='ignore') - for item in curl]) - LOG.debug(msg) - - @staticmethod - def log_http_response(resp): - status = (resp.raw.version / 10.0, resp.status_code, resp.reason) - dump = ['\nHTTP/%.1f %s %s' % status] - headers = resp.headers.items() - dump.extend(['%s: %s' % utils.safe_header(k, v) for k, v in headers]) - dump.append('') - content_type = resp.headers.get('Content-Type') - - if content_type != 'application/octet-stream': - dump.extend([resp.text, '']) - LOG.debug('\n'.join([encodeutils.safe_decode(x, errors='ignore') - for x in dump])) - - def _request(self, method, url, **kwargs): - """Send an http request with the specified characteristics. - - Wrapper around httplib.HTTP(S)Connection.request to handle tasks such - as setting headers and error handling. 
- """ - # Copy the kwargs so we can reuse the original in case of redirects - headers = copy.deepcopy(kwargs.pop('headers', {})) - - if self.identity_headers: - for k, v in self.identity_headers.items(): - headers.setdefault(k, v) - - data = self._set_common_request_kwargs(headers, kwargs) - - # add identity header to the request - if not headers.get('X-Auth-Token'): - headers['X-Auth-Token'] = self.auth_token - - if self.global_request_id: - headers.setdefault(REQ_ID_HEADER, self.global_request_id) - - if osprofiler_web: - headers.update(osprofiler_web.get_trace_id_headers()) - - # Note(flaper87): Before letting headers / url fly, - # they should be encoded otherwise httplib will - # complain. - headers = encode_headers(headers) - - # Since some packages send sysinv endpoint with 'v1' and some don't, - # the postprocessing for both options will be done here - # Instead of doing a fix in each of these packages - if 'v1' in self.endpoint and 'v1' in url: - # remove the '/v1' from endpoint - self.endpoint = self.endpoint.replace('/v1', '', 1) - elif 'v1' not in self.endpoint and 'v1' not in url: - self.endpoint = self.endpoint.rstrip('/') + '/v1' - - if self.endpoint.endswith("/") or url.startswith("/"): - conn_url = "%s%s" % (self.endpoint, url) - else: - conn_url = "%s/%s" % (self.endpoint, url) - self.log_curl_request(method, conn_url, headers, data, kwargs) - - try: - resp = self.session.request(method, - conn_url, - data=data, - headers=headers, - **kwargs) - except requests.exceptions.Timeout as e: - message = ("Error communicating with %(url)s: %(e)s" % - dict(url=conn_url, e=e)) - raise exc.InvalidEndpoint(message=message) - except requests.exceptions.ConnectionError as e: - message = ("Error finding address for %(url)s: %(e)s" % - dict(url=conn_url, e=e)) - raise exc.CommunicationError(message=message) - except socket.gaierror as e: - message = "Error finding address for %s %s: %s" % ( - self.endpoint, conn_url, e) - raise exc.InvalidEndpoint(message=message) - except (socket.error, socket.timeout, IOError) as e: - endpoint = self.endpoint - message = ("Error communicating with %(endpoint)s %(e)s" % - {'endpoint': endpoint, 'e': e}) - raise exc.CommunicationError(message=message) - except OpenSSL.SSL.Error as e: - message = ("SSL Error communicating with %(url)s: %(e)s" % - {'url': conn_url, 'e': e}) - raise exc.CommunicationError(message=message) - - # log request-id for each api call - request_id = resp.headers.get('x-openstack-request-id') - if request_id: - LOG.debug('%(method)s call to image for ' - '%(url)s used request id ' - '%(response_request_id)s', - {'method': resp.request.method, - 'url': resp.url, - 'response_request_id': request_id}) - - resp, body_iter = self._handle_response(resp) - self.log_http_response(resp) - return resp, body_iter - - def head(self, url, **kwargs): - return self._request('HEAD', url, **kwargs) - - def get(self, url, **kwargs): - return self._request('GET', url, **kwargs) - - def post(self, url, **kwargs): - return self._request('POST', url, **kwargs) - - def put(self, url, **kwargs): - return self._request('PUT', url, **kwargs) - - def patch(self, url, **kwargs): - return self._request('PATCH', url, **kwargs) - - def delete(self, url, **kwargs): - return self._request('DELETE', url, **kwargs) - - -def _close_after_stream(response, chunk_size): - """Iterate over the content and ensure the response is closed after.""" - # Yield each chunk in the response body - for chunk in response.iter_content(chunk_size=chunk_size): - yield chunk - # Once 
we're done streaming the body, ensure everything is closed. - # This will return the connection to the HTTPConnectionPool in urllib3 - # and ideally reduce the number of HTTPConnectionPool full warnings. - response.close() - - -class SessionClient(adapter.Adapter, _BaseHTTPClient): - - def __init__(self, session, **kwargs): - kwargs.setdefault('user_agent', USER_AGENT) - self.global_request_id = kwargs.pop('global_request_id', None) - super(SessionClient, self).__init__(session, **kwargs) - - def request(self, url, method, **kwargs): - headers = kwargs.pop('headers', {}) - - if 'v1' in url: - # remove the '/v1' from endpoint - # TODO(jkung) Remove when service catalog is updated - url = url.replace('/v1', '', 1) - - if self.global_request_id: - headers.setdefault(REQ_ID_HEADER, self.global_request_id) - - kwargs['raise_exc'] = False - file_to_upload = kwargs.get('files') - if file_to_upload: - skip_dumps = True - else: - skip_dumps = False - - data = self._set_common_request_kwargs(headers, kwargs, - skip_dumps=skip_dumps) - try: - if file_to_upload: - auth_token = super(SessionClient, self).get_token() - endpoint = super(SessionClient, - self).get_endpoint() - url = endpoint + url - return self.upload_request_with_data(url, - auth_token, - file_to_upload, - data=data) - - # NOTE(pumaranikar): To avoid bug #1641239, no modification of - # headers should be allowed after encode_headers() is called. - resp = super(SessionClient, - self).request(url, - method, - headers=encode_headers(headers), - data=data, - **kwargs) - except ksa_exc.ConnectTimeout as e: - conn_url = self.get_endpoint(auth=kwargs.get('auth')) - conn_url = "%s/%s" % (conn_url.rstrip('/'), url.lstrip('/')) - message = ("Error communicating with %(url)s %(e)s" % - dict(url=conn_url, e=e)) - raise exc.InvalidEndpoint(message=message) - except ksa_exc.ConnectFailure as e: - conn_url = self.get_endpoint(auth=kwargs.get('auth')) - conn_url = "%s/%s" % (conn_url.rstrip('/'), url.lstrip('/')) - message = ("Error finding address for %(url)s: %(e)s" % - dict(url=conn_url, e=e)) - raise exc.CommunicationError(message=message) - - return self._handle_response(resp) - - -def get_http_client(endpoint=None, session=None, **kwargs): - if session: - return SessionClient(session, **kwargs) - elif endpoint: - return HTTPClient(endpoint, **kwargs) - else: - raise AttributeError('Constructing a client must contain either an ' - 'endpoint or a session') diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/i18n.py b/python-inventoryclient/inventoryclient/inventoryclient/common/i18n.py deleted file mode 100644 index 1f0f53b2..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/i18n.py +++ /dev/null @@ -1,13 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import oslo_i18n - -_translators = oslo_i18n.TranslatorFactory(domain='inventoryclient') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/options.py b/python-inventoryclient/inventoryclient/inventoryclient/common/options.py deleted file mode 100644 index cea260bf..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/options.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
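The module's entry point, get_http_client(), is a two-way dispatch: a keystone session yields the adapter-based SessionClient, a bare endpoint yields the legacy HTTPClient, and providing neither is an error. The sketch below keeps only that shape, with stub classes standing in for the real clients and a made-up endpoint URL:

```python
class HTTPClient(object):        # stub for the requests-based client
    def __init__(self, endpoint, **kwargs):
        self.endpoint = endpoint


class SessionClient(object):     # stub for the keystoneauth1 adapter
    def __init__(self, session, **kwargs):
        self.session = session


def get_http_client(endpoint=None, session=None, **kwargs):
    """Same selection logic as http.get_http_client above."""
    if session:
        return SessionClient(session, **kwargs)
    elif endpoint:
        return HTTPClient(endpoint, **kwargs)
    raise AttributeError('Constructing a client must contain either an '
                         'endpoint or a session')


assert isinstance(get_http_client(endpoint='http://10.0.0.1:6380'),
                  HTTPClient)
assert isinstance(get_http_client(session=object()), SessionClient)
```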
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import re - -from six.moves import urllib - -OP_LOOKUP = {'!=': 'ne', - '>=': 'ge', - '<=': 'le', - '>': 'gt', - '<': 'lt', - '=': 'eq'} - -OP_LOOKUP_KEYS = '|'.join(sorted(OP_LOOKUP.keys(), key=len, reverse=True)) -OP_SPLIT_RE = re.compile(r'(%s)' % OP_LOOKUP_KEYS) - -DATA_TYPE_RE = re.compile(r'^(string|integer|float|datetime|boolean)(::)(.+)$') - - -def build_url(path, q, params=None): - """Convert list of dicts and a list of params to query url format. - - This will convert the following: - "[{field=this,op=le,value=34}, - {field=that,op=eq,value=foo,type=string}], - ['foo=bar','sna=fu']" - to: - "?q.field=this&q.field=that& - q.op=le&q.op=eq& - q.type=&q.type=string& - q.value=34&q.value=foo& - foo=bar&sna=fu" - """ - if q: - query_params = {'q.field': [], - 'q.value': [], - 'q.op': [], - 'q.type': []} - - for query in q: - for name in ['field', 'op', 'value', 'type']: - query_params['q.%s' % name].append(query.get(name, '')) - - # Transform the dict to a sequence of two-element tuples in fixed - # order, then the encoded string will be consistent in Python 2&3. - new_qparams = sorted(query_params.items(), key=lambda x: x[0]) - path += "?" + urllib.parse.urlencode(new_qparams, doseq=True) - - if params: - for p in params: - path += '&%s' % p - elif params: - path += '?%s' % params[0] - for p in params[1:]: - path += '&%s' % p - return path - - -def cli_to_array(cli_query): - """Convert CLI list of queries to the Python API format. - - This will convert the following: - "this<=34;that=string::foo" - to - "[{field=this,op=le,value=34,type=''}, - {field=that,op=eq,value=foo,type=string}]" - - """ - - if cli_query is None: - return None - - def split_by_op(query): - """Split a single query string to field, operator, value.""" - - def _value_error(message): - raise ValueError('invalid query %(query)s: missing %(message)s' % - {'query': query, 'message': message}) - - try: - field, operator, value = OP_SPLIT_RE.split(query, maxsplit=1) - except ValueError: - _value_error('operator') - - if not len(field): - _value_error('field') - - if not len(value): - _value_error('value') - - return field.strip(), operator, value.strip() - - def split_by_data_type(query_value): - frags = DATA_TYPE_RE.match(query_value) - - # The second match is the separator. Return a list without it if - # a type identifier was found. 
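build_url()'s docstring above gives the exact transformation it performs; here is a self-contained copy rewritten against the Python 3 urllib (the deleted module goes through six.moves), with a check of the documented output shape:

```python
import urllib.parse


def build_url(path, q, params=None):
    """Standalone copy of options.build_url above (py3 urllib)."""
    if q:
        query_params = {'q.field': [], 'q.value': [],
                        'q.op': [], 'q.type': []}
        for query in q:
            for name in ['field', 'op', 'value', 'type']:
                query_params['q.%s' % name].append(query.get(name, ''))
        # Fixed key order keeps the encoded string deterministic.
        new_qparams = sorted(query_params.items(), key=lambda x: x[0])
        path += "?" + urllib.parse.urlencode(new_qparams, doseq=True)
        if params:
            for p in params:
                path += '&%s' % p
    elif params:
        path += '?%s' % params[0]
        for p in params[1:]:
            path += '&%s' % p
    return path


url = build_url('/hosts', [{'field': 'this', 'op': 'le', 'value': 34}],
                params=['foo=bar'])
assert url == '/hosts?q.field=this&q.op=le&q.type=&q.value=34&foo=bar'
```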
- return frags.group(1, 3) if frags else None - - opts = [] - queries = cli_query.split(';') - for q in queries: - query = split_by_op(q) - opt = {} - opt['field'] = query[0] - opt['op'] = OP_LOOKUP[query[1]] - - # Allow the data type of the value to be specified via ::, - # where type can be one of integer, string, float, datetime, boolean - value_frags = split_by_data_type(query[2]) - if not value_frags: - opt['value'] = query[2] - opt['type'] = '' - else: - opt['type'] = value_frags[0] - opt['value'] = value_frags[1] - opts.append(opt) - return opts diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/utils.py b/python-inventoryclient/inventoryclient/inventoryclient/common/utils.py deleted file mode 100644 index c6e16d2e..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/utils.py +++ /dev/null @@ -1,777 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# from __future__ import print_function - -import argparse -import copy -import dateutil -import hashlib -import os -import prettytable -import re -import six -import six.moves.urllib.parse as urlparse -import textwrap -import uuid - -from datetime import datetime -from dateutil import parser -from functools import wraps -from inventoryclient import exc - -from prettytable import ALL -from prettytable import FRAME -from prettytable import NONE - -import wrapping_formatters - - -SENSITIVE_HEADERS = ('X-Auth-Token', ) - - -class HelpFormatter(argparse.HelpFormatter): - def start_section(self, heading): - # Title-case the headings - heading = '%s%s' % (heading[0].upper(), heading[1:]) - super(HelpFormatter, self).start_section(heading) - - -def safe_header(name, value): - if value is not None and name in SENSITIVE_HEADERS: - h = hashlib.sha1(value) - d = h.hexdigest() - return name, "{SHA1}%s" % d - else: - return name, value - - -def strip_version(endpoint): - if not isinstance(endpoint, six.string_types): - raise ValueError("Expected endpoint") - version = None - # Get rid of trailing '/' if present - endpoint = endpoint.rstrip('/') - url_parts = urlparse.urlparse(endpoint) - (scheme, netloc, path, __, __, __) = url_parts - path = path.lstrip('/') - # regex to match 'v1' or 'v2.0' etc - if re.match('v\d+\.?\d*', path): - version = float(path.lstrip('v')) - endpoint = scheme + '://' + netloc - return endpoint, version - - -def endpoint_version_from_url(endpoint, default_version=None): - if endpoint: - endpoint, version = strip_version(endpoint) - return endpoint, version or default_version - else: - return None, default_version - - -def env(*vars, **kwargs): - """Search for the first defined of possibly many env vars - - Returns the first environment variable defined in vars, or - returns the default defined in kwargs. 
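The parsing core of cli_to_array() is the operator regex: candidate operators are sorted longest-first before being joined into an alternation, so '<=' can never be half-matched as '<'. Isolated and runnable:

```python
import re

OP_LOOKUP = {'!=': 'ne', '>=': 'ge', '<=': 'le',
             '>': 'gt', '<': 'lt', '=': 'eq'}

# Longest operators first, exactly as in options.py above.
OP_LOOKUP_KEYS = '|'.join(sorted(OP_LOOKUP.keys(), key=len, reverse=True))
OP_SPLIT_RE = re.compile(r'(%s)' % OP_LOOKUP_KEYS)

field, op, value = OP_SPLIT_RE.split('this<=34', maxsplit=1)
assert (field, OP_LOOKUP[op], value) == ('this', 'le', '34')

field, op, value = OP_SPLIT_RE.split('that=string::foo', maxsplit=1)
assert (field, OP_LOOKUP[op], value) == ('that', 'eq', 'string::foo')
```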
- """ - for v in vars: - value = os.environ.get(v, None) - if value: - return value - return kwargs.get('default', '') - - -# noinspection PyUnusedLocal -def _wrapping_formatter_callback_decorator(subparser, command, callback): - """ - - Adds the --nowrap option to a CLI command. - This option, when on, deactivates word wrapping. - - Decorates the command's callback function in order to process - the nowrap flag - - :param subparser: - :return: decorated callback - """ - - try: - subparser.add_argument('--nowrap', action='store_true', - help='No wordwrapping of output') - except Exception: - # exception happens when nowrap option already configured - # for command - so get out with callback undecorated - return callback - - def no_wrap_decorator_builder(callback): - - def process_callback_with_no_wrap(cc, args={}): - no_wrap = args.nowrap - # turn on/off wrapping formatters when outputting CLI results - wrapping_formatters.set_no_wrap(no_wrap) - return callback(cc, args=args) - - return process_callback_with_no_wrap - - decorated_callback = no_wrap_decorator_builder(callback) - return decorated_callback - - -def _does_command_need_no_wrap(callback): - if callback.__name__.startswith("do_") and \ - callback.__name__.endswith("_list"): - return True - - if callback.__name__ in \ - ['donot_config_ntp_list', - 'donot_config_ptp_list', - 'do_host_apply_memprofile', - 'do_host_apply_cpuprofile', - 'do_host_apply_ifprofile', - 'do_host_apply_profile', - 'do_host_apply_storprofile', - 'donot_config_oam_list', - 'donot_dns_list', - 'do_host_cpu_modify', - 'do_event_suppress', - 'do_event_unsuppress', - 'do_event_unsuppress_all']: - return True - return False - - -def get_terminal_size(): - """Returns a tuple (x, y) representing the width(x) and the height(x) - in characters of the terminal window. - """ - - def ioctl_GWINSZ(fd): - try: - import fcntl - import struct - import termios - cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, - '1234')) - except Exception: - return None - if cr == (0, 0): - return None - if cr == (0, 0): - return None - return cr - - cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - cr = ioctl_GWINSZ(fd) - os.close(fd) - except Exception: - pass - if not cr: - cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) - return int(cr[1]), int(cr[0]) - - -def normalize_field_data(obj, fields): - for f in fields: - if hasattr(obj, f): - data = getattr(obj, f, '') - try: - data = str(data) - except UnicodeEncodeError: - setattr(obj, f, data.encode('utf-8')) - - -# Decorator for cli-args -def arg(*args, **kwargs): - def _decorator(func): - # Because of the sematics of decorator composition if we just append - # to the options list positional options will appear to be backwards. - func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs)) - return func - - return _decorator - - -def define_command(subparsers, command, callback, cmd_mapper): - '''Define a command in the subparsers collection. 
- - :param subparsers: subparsers collection where the command will go - :param command: command name - :param callback: function that will be used to process the command - ''' - desc = callback.__doc__ or '' - help = desc.strip().split('\n')[0] - arguments = getattr(callback, 'arguments', []) - - subparser = subparsers.add_parser(command, help=help, - description=desc, - add_help=False, - formatter_class=HelpFormatter) - subparser.add_argument('-h', '--help', action='help', - help=argparse.SUPPRESS) - - # Are we a list command? - if _does_command_need_no_wrap(callback): - # then decorate it with wrapping data formatter functionality - func = _wrapping_formatter_callback_decorator( - subparser, command, callback) - else: - func = callback - - cmd_mapper[command] = subparser - for (args, kwargs) in arguments: - subparser.add_argument(*args, **kwargs) - subparser.set_defaults(func=func) - - -def define_commands_from_module(subparsers, command_module, cmd_mapper): - '''Find all methods beginning with 'do_' in a module, and add them - as commands into a subparsers collection. - ''' - for method_name in (a for a in dir(command_module) if a.startswith('do_')): - # Commands should be hyphen-separated instead of underscore-separated. - command = method_name[3:].replace('_', '-') - callback = getattr(command_module, method_name) - define_command(subparsers, command, callback, cmd_mapper) - - -def parse_date(string_data): - """Parses a date-like input string into a timezone-aware Python - datetime. - """ - - if not isinstance(string_data, six.string_types): - return string_data - - pattern = r'(\d{4}-\d{2}-\d{2}[T ])?\d{2}:\d{2}:\d{2}(\.\d{6})?Z?' - - def convert_date(matchobj): - formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d %H:%M:%S.%f", - "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", - "%Y-%m-%dT%H:%M:%SZ"] - datestring = matchobj.group(0) - if datestring: - for format in formats: - try: - datetime.strptime(datestring, format) - datestring += "+0000" - parsed = parser.parse(datestring) - converted = parsed.astimezone(dateutil.tz.tzlocal()) - converted = datetime.strftime(converted, format) - return converted - except Exception: - pass - return datestring - - return re.sub(pattern, convert_date, string_data) - - -def _sort_for_list(objs, fields, formatters={}, sortby=0, reversesort=False): - - # Sort only if necessary - if sortby is None: - return objs - - sort_field = fields[sortby] - # figure out sort key function - if sort_field in formatters: - field_formatter = formatters[sort_field] - if wrapping_formatters.WrapperFormatter.is_wrapper_formatter( - field_formatter): - def sort_key(x): - return field_formatter.\ - wrapper_formatter.get_unwrapped_field_value(x) - else: - def sort_key(x): - return field_formatter(x) - else: - def sort_key(x): - return getattr(x, sort_field, '') - - objs.sort(reverse=reversesort, key=sort_key) - - return objs - - -def str_height(text): - if not text: - return 1 - lines = str(text).split("\n") - height = len(lines) - return height - - -def row_height(texts): - if not texts or len(texts) == 0: - return 1 - height = max(str_height(text) for text in texts) - return height - - -class WRPrettyTable(prettytable.PrettyTable): - """A PrettyTable that allows word wrapping of its headers.""" - - def __init__(self, field_names=None, **kwargs): - super(WRPrettyTable, self).__init__(field_names, **kwargs) - - def _stringify_header(self, options): - """ - This overridden version of _stringify_header can wrap its - header data.
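str_height() and row_height() above supply the line-budget arithmetic the pager further down relies on: a row occupies as many terminal lines as its tallest (most-wrapped) cell. Standalone:

```python
def str_height(text):
    """Number of display lines a (possibly word-wrapped) cell occupies."""
    if not text:
        return 1
    return len(str(text).split("\n"))


def row_height(texts):
    """A table row is as tall as its tallest cell."""
    if not texts:
        return 1
    return max(str_height(text) for text in texts)


assert row_height(['one\ntwo\nthree', 'flat']) == 3
assert row_height([]) == 1
```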
It leverages the functionality in _stringify_row - to perform this task. - :returns string of header, including border text - """ - bits = [] - if options["border"]: - if options["hrules"] in (ALL, FRAME): - bits.append(self._hrule) - bits.append("\n") - # For tables with no data or field names - if not self._field_names: - if options["vrules"] in (ALL, FRAME): - bits.append(options["vertical_char"]) - bits.append(options["vertical_char"]) - else: - bits.append(" ") - bits.append(" ") - - header_row_data = [] - for field in self._field_names: - if options["fields"] and field not in options["fields"]: - continue - if self._header_style == "cap": - fieldname = field.capitalize() - elif self._header_style == "title": - fieldname = field.title() - elif self._header_style == "upper": - fieldname = field.upper() - elif self._header_style == "lower": - fieldname = field.lower() - else: - fieldname = field - header_row_data.append(fieldname) - - # output actual header row data, word wrap when necessary - bits.append(self._stringify_row(header_row_data, options)) - - if options["border"] and options["hrules"] != NONE: - bits.append("\n") - bits.append(self._hrule) - - return "".join(bits) - - -def prettytable_builder(field_names=None, **kwargs): - return WRPrettyTable(field_names, **kwargs) - - -def wordwrap_header(field, field_label, formatter): - """ - Given a field label (the header text for one column) and the word - wrapping formatter for a column, - this function asks the formatter for the desired column width and then - performs a wordwrap of field_label - - :param field: the field name associated with the field_label - :param field_label: field_label to word wrap - :param formatter: the field formatter - :return: word wrapped field_label - """ - if wrapping_formatters.is_nowrap_set(): - return field_label - - if not wrapping_formatters.WrapperFormatter.is_wrapper_formatter( - formatter): - return field_label - # go to the column's formatter and ask it what the width should be - wrapper_formatter = formatter.wrapper_formatter - actual_width = wrapper_formatter.get_actual_column_char_len( - wrapper_formatter.get_calculated_desired_width()) - # now word wrap based on column width - wrapped_header = textwrap.fill(field_label, actual_width) - return wrapped_header - - -def default_printer(s): - print(s) - - -def pt_builder(field_labels, fields, formatters, paging, - printer=default_printer): - """ - returns an object that 'fronts' a prettyTable object - that can handle paging as well as automatically falling back - to not word wrapping when word wrapping does not cause the - output to fit the terminal width. 
- """ - - class PT_Builder(object): - - def __init__(self, field_labels, fields, formatters, no_paging): - self.objs_in_pt = [] - self.unwrapped_field_labels = field_labels - self.fields = fields - self.formatters = formatters - self.header_height = 0 - self.terminal_width, self.terminal_height = get_terminal_size() - self.terminal_lines_left = self.terminal_height - self.paging = not no_paging - self.paged_rows_added = 0 - self.pt = None - self.quit = False - - def add_row(self, obj): - if self.quit: - return False - if not self.pt: - self.build_pretty_table() - return self._row_add(obj) - - def __add_row_and_obj(self, row, obj): - self.pt.add_row(row) - self.objs_in_pt.append(obj) - - def _row_add(self, obj): - - row = _build_row_from_object(self.fields, self.formatters, obj) - - if not paging: - self.__add_row_and_obj(row, obj) - return True - - rheight = row_height(row) - if (self.terminal_lines_left - rheight) >= 0 or \ - self.paged_rows_added == 0: - self.__add_row_and_obj(row, obj) - self.terminal_lines_left -= rheight - else: - printer(self.get_string()) - if self.terminal_lines_left > 0: - printer("\n" * (self.terminal_lines_left - 1)) - - s = six.moves.input( - "Press Enter to continue or 'q' to exit...") - if s == 'q': - self.quit = True - return False - self.terminal_lines_left = \ - self.terminal_height - self.header_height - self.build_pretty_table() - self.__add_row_and_obj(row, obj) - self.terminal_lines_left -= rheight - self.paged_rows_added += 1 - - def get_string(self): - if not self.pt: - self.build_pretty_table() - objs = copy.copy(self.objs_in_pt) - self.objs_in_pt = [] - output = self.pt.get_string() - if wrapping_formatters.is_nowrap_set(): - return output - output_width = wrapping_formatters.get_width(output) - if output_width <= self.terminal_width: - return output - # At this point pretty Table (self.pt) does not fit the terminal - # width so let's temporarily turn wrapping off, - # rebuild the pretty Table with the data unwrapped. 
- orig_no_wrap_settings = \ - wrapping_formatters.set_no_wrap_on_formatters( - True, self.formatters) - self.build_pretty_table() - for o in objs: - self.add_row(o) - wrapping_formatters.unset_no_wrap_on_formatters( - orig_no_wrap_settings) - return self.pt.get_string() - - def build_pretty_table(self): - field_labels = [wordwrap_header(field, field_label, formatter) - for field, field_label, formatter in - zip(self.fields, self.unwrapped_field_labels, [ - formatters.get(f, None) for f in self.fields])] - self.pt = prettytable_builder( - field_labels, caching=False, print_empty=False) - self.pt.align = 'l' - # 2 header border lines + 1 bottom border + 1 prompt - # + header data height - self.header_height = 2 + 1 + 1 + row_height(field_labels) - self.terminal_lines_left = \ - self.terminal_height - self.header_height - return self.pt - - def done(self): - if self.quit: - return - - if not self.paging or ( - self.terminal_lines_left < - self.terminal_height - self.header_height): - printer(self.get_string()) - - return PT_Builder(field_labels, fields, formatters, not paging) - - -def print_long_list(objs, fields, field_labels, - formatters={}, sortby=0, reversesort=False, - no_wrap_fields=[], no_paging=False, - printer=default_printer): - - formatters = wrapping_formatters.as_wrapping_formatters( - objs, fields, field_labels, formatters, - no_wrap_fields=no_wrap_fields) - - objs = _sort_for_list(objs, fields, - formatters=formatters, - sortby=sortby, - reversesort=reversesort) - - pt = pt_builder(field_labels, fields, formatters, not no_paging, - printer=printer) - - for o in objs: - pt.add_row(o) - - pt.done() - - -def print_dict(d, dict_property="Property", wrap=0): - pt = prettytable.PrettyTable([dict_property, 'Value'], - caching=False, print_empty=False) - pt.align = 'l' - for k, v in sorted(d.iteritems()): - v = parse_date(v) - # convert dict to str to check length - if isinstance(v, dict): - v = str(v) - if wrap > 0: - v = textwrap.fill(six.text_type(v), wrap) - # if value has a newline, add in multiple rows - # e.g. 
fault with stacktrace - if v and isinstance(v, str) and r'\n' in v: - lines = v.strip().split(r'\n') - col1 = k - for line in lines: - pt.add_row([col1, line]) - col1 = '' - else: - pt.add_row([k, v]) - - print(pt.get_string()) - - -def _build_row_from_object(fields, formatters, o): - """ - takes an object o and converts to an array of values - compatible with the input for prettyTable.add_row(row) - """ - row = [] - for field in fields: - if field in formatters: - data = parse_date(getattr(o, field, '')) - setattr(o, field, data) - data = formatters[field](o) - row.append(data) - else: - data = parse_date(getattr(o, field, '')) - row.append(data) - return row - - -def print_list(objs, fields, field_labels, formatters={}, sortby=0, - reversesort=False, no_wrap_fields=[], printer=default_printer): - # print_list() is the same as print_long_list() with paging turned off - return print_long_list( - objs, fields, field_labels, formatters=formatters, sortby=sortby, - reversesort=reversesort, no_wrap_fields=no_wrap_fields, - no_paging=True, printer=printer) - - -def find_resource(manager, name_or_id): - """Helper for the _find_* methods.""" - # first try to get entity as integer id - try: - if isinstance(name_or_id, int) or name_or_id.isdigit(): - return manager.get(int(name_or_id)) - except exc.NotFound: - pass - - # now try to get entity as uuid - try: - uuid.UUID(str(name_or_id)) - return manager.get(name_or_id) - except (ValueError, exc.NotFound): - pass - - # finally try to find entity by name - try: - return manager.find(name=name_or_id) - except exc.NotFound: - msg = "No %s with a name or ID of '%s' exists." % \ - (manager.resource_class.__name__.lower(), name_or_id) - raise exc.CommandError(msg) - - -def string_to_bool(arg): - return arg.strip().lower() in ('t', 'true', 'yes', '1') - - -def objectify(func): - """Mimic an object given a dictionary. - - Given a dictionary, create an object and make sure that each of its - keys are accessible via attributes. - Ignore everything if the given value is not a dictionary. - :param func: A dictionary or another kind of object. - :returns: Either the created object or the given value. - - >>> obj = {'old_key': 'old_value'} - >>> oobj = objectify(obj) - >>> oobj['new_key'] = 'new_value' - >>> print oobj['old_key'], oobj['new_key'], oobj.old_key, oobj.new_key - - >>> @objectify - ... def func(): - ... return {'old_key': 'old_value'} - >>> obj = func() - >>> obj['new_key'] = 'new_value' - >>> print obj['old_key'], obj['new_key'], obj.old_key, obj.new_key - - - """ - - def create_object(value): - if isinstance(value, dict): - # Build a simple generic object. - class Object(dict): - def __setitem__(self, key, val): - setattr(self, key, val) - return super(Object, self).__setitem__(key, val) - - # Create that simple generic object. - ret_obj = Object() - # Assign the attributes given the dictionary keys. - for key, val in value.iteritems(): - ret_obj[key] = val - setattr(ret_obj, key, val) - return ret_obj - else: - return value - - # If func is a function, wrap around and act like a decorator. - if hasattr(func, '__call__'): - @wraps(func) - def wrapper(*args, **kwargs): - """Wrapper function for the decorator. - - :returns: The return value of the decorated function. - - """ - value = func(*args, **kwargs) - return create_object(value) - - return wrapper - - # Else just try to objectify the value given. - else: - return create_object(func) - - -def is_uuid_like(val): - """Returns validation of a value as a UUID. 
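objectify() doubles as a plain converter and a decorator; its doctests are still written as Python 2 print statements. A simplified, Python 3-runnable sketch of the dict-wrapping half only (the function-decorator path is omitted):

```python
def objectify(value):
    """Dict-to-object conversion in the spirit of utils.objectify above."""
    if not isinstance(value, dict):
        return value

    class Object(dict):
        def __setitem__(self, key, val):
            setattr(self, key, val)  # keep the attribute view in sync
            return super(Object, self).__setitem__(key, val)

    obj = Object()
    for key, val in value.items():
        obj[key] = val
    return obj


o = objectify({'old_key': 'old_value'})
o['new_key'] = 'new_value'
assert o.old_key == 'old_value' and o.new_key == 'new_value'
assert o['old_key'] == 'old_value' and o['new_key'] == 'new_value'
```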
- - For our purposes, a UUID is a canonical-form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False - - -def args_array_to_dict(kwargs, key_to_convert): - values_to_convert = kwargs.get(key_to_convert) - if values_to_convert: - try: - kwargs[key_to_convert] = dict(v.split("=", 1) - for v in values_to_convert) - except ValueError: - raise exc.CommandError('%s must be a list of KEY=VALUE not "%s"' % - (key_to_convert, values_to_convert)) - return kwargs - - -def args_array_to_patch(op, attributes): - patch = [] - for attr in attributes: - # Sanitize - if not attr.startswith('/'): - attr = '/' + attr - - if op in ['add', 'replace']: - try: - path, value = attr.split("=", 1) - patch.append({'op': op, 'path': path, 'value': value}) - except ValueError: - raise exc.CommandError('Attributes must be a list of ' - 'PATH=VALUE not "%s"' % attr) - elif op == "remove": - # For remove only the key is needed - patch.append({'op': op, 'path': attr}) - else: - raise exc.CommandError('Unknown PATCH operation: %s' % op) - return patch - - -def dict_to_patch(values, op='replace'): - patch = [] - for key, value in values.iteritems(): - path = '/' + key - patch.append({'op': op, 'path': path, 'value': value}) - return patch - - -def print_tuple_list(tuples, tuple_labels=[], formatters={}): - pt = prettytable.PrettyTable(['Property', 'Value'], - caching=False, print_empty=False) - pt.align = 'l' - - if not tuple_labels: - for t in tuples: - if len(t) == 2: - f, v = t - v = parse_date(v) - if f in formatters: - v = formatters[f](v) - pt.add_row([f, v]) - else: - for t, l in zip(tuples, tuple_labels): - if len(t) == 2: - f, v = t - v = parse_date(v) - if f in formatters: - v = formatters[f](v) - pt.add_row([l, v]) - - print(pt.get_string()) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/common/wrapping_formatters.py b/python-inventoryclient/inventoryclient/inventoryclient/common/wrapping_formatters.py deleted file mode 100644 index 0bbb45a4..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/common/wrapping_formatters.py +++ /dev/null @@ -1,871 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Manages WrapperFormatter objects. - -WrapperFormatter objects can be used for wrapping CLI column celldata in order -for the CLI table (using prettyTable) to fit the terminal screen - -The basic idea is: - - Once celldata is retrieved and ready to display, first iterate through - the celldata and word wrap it so that it fits programmer-desired column widths. - The WrapperFormatter objects fill this role. - - Once the celldata is formatted to their desired widths, - then it can be passed to the existing prettyTable code base for rendering.
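args_array_to_patch() above is the bridge from CLI PATH=VALUE arguments to a JSON-patch list. A trimmed standalone copy, with the client's exc.CommandError swapped for a plain ValueError so it runs on its own:

```python
def args_array_to_patch(op, attributes):
    """Standalone copy of utils.args_array_to_patch above
    (CommandError replaced by ValueError)."""
    patch = []
    for attr in attributes:
        if not attr.startswith('/'):
            attr = '/' + attr  # sanitize: JSON-patch paths are absolute
        if op in ['add', 'replace']:
            path, value = attr.split("=", 1)
            patch.append({'op': op, 'path': path, 'value': value})
        elif op == "remove":
            patch.append({'op': op, 'path': attr})  # key only
        else:
            raise ValueError('Unknown PATCH operation: %s' % op)
    return patch


assert args_array_to_patch('replace', ['location=zone-a']) == \
    [{'op': 'replace', 'path': '/location', 'value': 'zone-a'}]
assert args_array_to_patch('remove', ['location']) == \
    [{'op': 'remove', 'path': '/location'}]
```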
- -""" -import copy -import re -import six -import textwrap - -from cli_no_wrap import is_nowrap_set -from cli_no_wrap import set_no_wrap -from prettytable import _get_size - -UUID_MIN_LENGTH = 36 - -# monkey patch (customize) how the textwrap module breaks text into chunks -wordsep_re = re.compile( - r'(\s+|' # any whitespace - r',|' - r'=|' - r'\.|' - r':|' - r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated word - r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash - -textwrap.TextWrapper.wordsep_re = wordsep_re - - -def get_width(value): - if value is None: - return 0 - - return _get_size(six.text_type(value))[0] # get width from [width,height] - - -def _get_terminal_width(): - from utils import get_terminal_size - result = get_terminal_size()[0] - return result - - -def is_uuid_field(field_name): - """ - :param field_name: - :return: True if field_name looks like a uuid name - """ - if field_name is not None and field_name in ["uuid", "UUID"] or \ - field_name.endswith("uuid"): - return True - return False - - -class WrapperContext(object): - """Context for the wrapper formatters - - Maintains a list of the current WrapperFormatters - being used to format the prettyTable celldata - - Allows wrappers access to its 'sibling' wrappers - contains convenience methods and attributes - for calculating current tableWidth. - """ - - def __init__(self): - self.wrappers = [] - self.wrappers_by_field = {} - self.non_data_chrs_used_by_table = 0 - self.num_columns = 0 - self.terminal_width = -1 - - def set_num_columns(self, num_columns): - self.num_columns = num_columns - self.non_data_chrs_used_by_table = (num_columns * 3) + 1 - - def add_column_formatter(self, field, wrapper): - self.wrappers.append(wrapper) - self.wrappers_by_field[field] = wrapper - - def get_terminal_width(self): - if self.terminal_width == -1: - self.terminal_width = _get_terminal_width() - return self.terminal_width - - def get_table_width(self): - """ - Calculates table width by looping through all - column formatters and summing up their widths - :return: total table width - """ - widths = [w.get_actual_column_char_len( - w.get_calculated_desired_width(), - check_remaining_row_chars=False) - for w in self.wrappers] - chars_used_by_data = sum(widths) - width = self.non_data_chrs_used_by_table + chars_used_by_data - return width - - def is_table_too_wide(self): - """ - :return: True if calculated table width is too wide - for the terminal width - """ - if self.get_terminal_width() < self.get_table_width(): - return True - return False - - -def field_value_function_factory(formatter, field): - """Builds function for getting a field value from table cell celldata - As a side-effect, attaches function as the 'get_field_value' attribute - of the formatter - :param formatter:the formatter to attach return function to - :param field: - :return: function that returns cell celldata - """ - - def field_value_function_builder(data): - if isinstance(data, dict): - formatter.get_field_value = \ - lambda celldata: celldata.get(field, None) - else: - formatter.get_field_value = \ - lambda celldata: getattr(celldata, field) - return formatter.get_field_value(data) - - return field_value_function_builder - - -class WrapperFormatter(object): - """Base (abstract) class definition of wrapping formatters""" - - def __init__(self, ctx, field): - self.ctx = ctx - self.add_blank_line = False - self.no_wrap = False - self.min_width = 0 - self.field = field - self.header_width = 0 - self.actual_column_char_len = -1 - self.textWrapper = None - - if 
self.field: - self.get_field_value = field_value_function_factory(self, field) - else: - self.get_field_value = lambda data: data - - def get_basic_desired_width(self): - return self.min_width - - def get_calculated_desired_width(self): - basic_desired_width = self.get_basic_desired_width() - if self.header_width > basic_desired_width: - return self.header_width - return basic_desired_width - - def get_sibling_wrappers(self): - """ - :return: a list of your sibling wrappers for the other fields - """ - others = [w for w in self.ctx.wrappers if w != self] - return others - - def get_remaining_row_chars(self): - used = [w.get_actual_column_char_len(w.get_calculated_desired_width(), - check_remaining_row_chars=False) - for w in self.get_sibling_wrappers()] - chrs_used_by_data = sum(used) - remaining_chrs_in_row = \ - (self.ctx.get_terminal_width() - - self.ctx.non_data_chrs_used_by_table) - chrs_used_by_data - return remaining_chrs_in_row - - def set_min_width(self, min_width): - self.min_width = min_width - - def set_actual_column_len(self, actual): - self.actual_column_char_len = actual - - def get_actual_column_char_len(self, desired_char_len, - check_remaining_row_chars=True): - """Utility method to adjust desired width to a width - that can actually be applied based on current table width - and current terminal width - - Will not allow actual width to be less than min_width - min_width is typically length of the column header text - or the longest 'word' in the celldata - - :param desired_char_len: - :param check_remaining_row_chars: - :return: - """ - if self.actual_column_char_len != -1: - return self.actual_column_char_len # already calculated - if desired_char_len < self.min_width: - actual = self.min_width - else: - actual = desired_char_len - if check_remaining_row_chars and actual > self.min_width: - remaining = self.get_remaining_row_chars() - if actual > remaining >= self.min_width: - actual = remaining - if check_remaining_row_chars: - self.set_actual_column_len(actual) - if self.ctx.is_table_too_wide(): - # Table too big can I shrink myself? - if actual > self.min_width: - # shrink column - while actual > self.min_width: - actual -= 1 # TODO(jkung): fix in next sprint - # each column needs to share in - # table shrinking - but this is good - # enough for now - also - why the loop? 
- self.set_actual_column_len(actual) - - return actual - - def _textwrap_fill(self, s, actual_width): - if not self.textWrapper: - self.textWrapper = textwrap.TextWrapper(actual_width) - else: - self.textWrapper.width = actual_width - return self.textWrapper.fill(s) - - def text_wrap(self, s, width): - """ - performs actual text wrap - :param s: - :param width: in characters - :return: formatted text - """ - if self.no_wrap: - return s - actual_width = self.get_actual_column_char_len(width) - new_s = self._textwrap_fill(s, actual_width) - wrapped = new_s != s - if self.add_blank_line and wrapped: - new_s += "\n".ljust(actual_width) - return new_s - - def format(self, data): - return str(self.get_field_value(data)) - - def get_unwrapped_field_value(self, data): - return self.get_field_value(data) - - def as_function(self): - def foo(data): - return self.format(data) - - foo.WrapperFormatterMarker = True - foo.wrapper_formatter = self - return foo - - @staticmethod - def is_wrapper_formatter(foo): - if not foo: - return False - return getattr(foo, "WrapperFormatterMarker", False) - - -class WrapperLambdaFormatter(WrapperFormatter): - """A wrapper formatter that adapts a function (callable) - to look like a WrapperFormatter - """ - - def __init__(self, ctx, field, format_function): - super(WrapperLambdaFormatter, self).__init__(ctx, field) - self.format_function = format_function - - def format(self, data): - return self.format_function(self.get_field_value(data)) - - -class WrapperFixedWidthFormatter(WrapperLambdaFormatter): - """A wrapper formatter that forces the text to wrap within - a specific width (in chars) - """ - - def __init__(self, ctx, field, width): - super(WrapperFixedWidthFormatter, self).__init__( - ctx, field, - lambda data: self.text_wrap( - str(data), self.get_calculated_desired_width())) - self.width = width - - def get_basic_desired_width(self): - return self.width - - -class WrapperPercentWidthFormatter(WrapperFormatter): - """A wrapper formatter that forces the text to wrap within - a specific percentage width of the current terminal width - """ - - def __init__(self, ctx, field, width_as_decimal): - super(WrapperPercentWidthFormatter, self).__init__(ctx, field) - self.width_as_decimal = width_as_decimal - - def get_basic_desired_width(self): - width = int((self.ctx.get_terminal_width() - - self.ctx.non_data_chrs_used_by_table) * - self.width_as_decimal) - return width - - def format(self, data): - width = self.get_calculated_desired_width() - field_value = self.get_field_value(data) - return self.text_wrap(str(field_value), width) - - -class WrapperWithCustomFormatter(WrapperLambdaFormatter): - """A wrapper formatter that allows the programmer to have a custom - formatter (in the form of a function) that is first applied - and then a wrapper function is applied to the result - - See wrapperFormatterFactory for a better explanation! 
:-) - """ - - # noinspection PyUnusedLocal - def __init__(self, ctx, field, custom_formatter, wrapper_formatter): - super(WrapperWithCustomFormatter, self).__init__( - ctx, None, - lambda data: wrapper_formatter.format(custom_formatter(data))) - self.wrapper_formatter = wrapper_formatter - self.custom_formatter = custom_formatter - - def get_unwrapped_field_value(self, data): - return self.custom_formatter(data) - - def __setattr__(self, name, value): - # - # Some attributes set onto this class need - # to be pushed down to the 'inner' wrapper_formatter - # - super(WrapperWithCustomFormatter, self).__setattr__(name, value) - if hasattr(self, "wrapper_formatter"): - if name == "no_wrap": - self.wrapper_formatter.no_wrap = value - if name == "add_blank_line": - self.wrapper_formatter.add_blank_line = value - if name == "header_width": - self.wrapper_formatter.header_width = value - - def set_min_width(self, min_width): - super(WrapperWithCustomFormatter, self).set_min_width(min_width) - self.wrapper_formatter.set_min_width(min_width) - - def set_actual_column_len(self, actual): - super(WrapperWithCustomFormatter, self).set_actual_column_len(actual) - self.wrapper_formatter.set_actual_column_len(actual) - - def get_basic_desired_width(self): - return self.wrapper_formatter.get_basic_desired_width() - - -def wrapper_formatter_factory(ctx, field, formatter): - """ - This function is a factory for building WrapperFormatter objects. - - The function needs to be called for each celldata column (field) - that will be displayed in the prettyTable. - - The function looks at the formatter parameter and based on its type, - determines what WrapperFormatter to construct per field (column). - - ex: - - formatter = 15 - type = int : Builds a WrapperFixedWidthFormatter that - will wrap at 15 chars - - formatter = .25 - type = int : Builds a WrapperPercentWidthFormatter that - will wrap at 25% terminal width - - formatter = type = callable : Builds a WrapperLambdaFormatter that - will call some arbitrary function - - formatter = type = dict : Builds a WrapperWithCustomFormatter that - will call some arbitrary function to format - and then apply a wrapping formatter - to the result - - ex: this dict - {"formatter" : captializeFunction,, - "wrapperFormatter": .12} - will apply the captializeFunction - to the column celldata and then - wordwrap at 12 % of terminal width - - :param ctx: the WrapperContext that the built WrapperFormatter will use - :param field: name of field (column_ that the WrapperFormatter - will execute on - :param formatter: specifies type and input for WrapperFormatter - that will be built - :return: WrapperFormatter - - """ - if isinstance(formatter, WrapperFormatter): - return formatter - if callable(formatter): - return WrapperLambdaFormatter(ctx, field, formatter) - if isinstance(formatter, int): - return WrapperFixedWidthFormatter(ctx, field, formatter) - if isinstance(formatter, float): - return WrapperPercentWidthFormatter(ctx, field, formatter) - if isinstance(formatter, dict): - if "wrapperFormatter" in formatter: - embedded_wrapper_formatter = wrapper_formatter_factory( - ctx, None, formatter["wrapperFormatter"]) - elif "hard_width" in formatter: - embedded_wrapper_formatter = WrapperFixedWidthFormatter( - ctx, field, formatter["hard_width"]) - embedded_wrapper_formatter.min_width = formatter["hard_width"] - else: - embedded_wrapper_formatter = WrapperFormatter( - ctx, None) # effectively a NOOP width formatter - if "formatter" not in formatter: - return 
embedded_wrapper_formatter - custom_formatter = formatter["formatter"] - wrapper = WrapperWithCustomFormatter( - ctx, field, custom_formatter, embedded_wrapper_formatter) - return wrapper - - raise Exception("Formatter Error! Unrecognized formatter {} " - "for field {}".format(formatter, field)) - - -def build_column_stats_for_best_guess_formatting(objs, fields, field_labels, - custom_formatters={}): - class ColumnStats(object): - def __init__(self, field, field_label, custom_formatter=None): - self.field = field - self.field_label = field_label - self.average_width = 0 - self.min_width = get_width(field_label) if field_label else 0 - self.max_width = get_width(field_label) if field_label else 0 - self.total_width = 0 - self.count = 0 - self.average_percent = 0 - self.max_percent = 0 - self.isUUID = is_uuid_field(field) - if custom_formatter: - self.get_field_value = custom_formatter - else: - self.get_field_value = \ - field_value_function_factory(self, field) - - def add_value(self, value): - if self.isUUID: - return - self.count += 1 - value_width = get_width(value) - self.total_width = self.total_width + value_width - if value_width < self.min_width: - self.min_width = value_width - if value_width > self.max_width: - self.max_width = value_width - if self.count > 0: - self.average_width = float( - self.total_width) / float(self.count) - - def set_max_percent(self, max_total_width): - if max_total_width > 0: - self.max_percent = float( - self.max_width) / float(max_total_width) - - def set_avg_percent(self, avg_total_width): - if avg_total_width > 0: - self.average_percent = float( - self.average_width) / float(avg_total_width) - - def __str__(self): - return str([self.field, - self.average_width, - self.min_width, - self.max_width, - self.total_width, - self.count, - self.average_percent, - self.max_percent, - self.isUUID]) - - def __repr__(self): - return str([self.field, - self.average_width, - self.min_width, - self.max_width, - self.total_width, - self.count, - self.average_percent, - self.max_percent, - self.isUUID]) - - if objs is None or len(objs) == 0: - return {"stats": {}, - "total_max_width": 0, - "total_avg_width": 0} - - stats = {} - for i in range(0, len(fields)): - stats[fields[i]] = ColumnStats( - fields[i], field_labels[i], custom_formatters.get(fields[i])) - - for obj in objs: - for field in fields: - column_stat = stats[field] - column_stat.add_value(column_stat.get_field_value(obj)) - - total_max_width = sum([s.max_width for s in stats.values()]) - total_avg_width = sum([s.average_width for s in stats.values()]) - return {"stats": stats, - "total_max_width": total_max_width, - "total_avg_width": total_avg_width} - - -def build_best_guess_formatters_using_average_widths( - objs, fields, field_labels, - custom_formatters={}, no_wrap_fields=[]): - - column_info = build_column_stats_for_best_guess_formatting( - objs, fields, field_labels, custom_formatters) - format_spec = {} - total_avg_width = float(column_info["total_avg_width"]) - if total_avg_width <= 0: - return format_spec - for f in [ff for ff in fields if ff not in no_wrap_fields]: - format_spec[f] = float( - column_info["stats"][f].average_width) / total_avg_width - custom_formatter = custom_formatters.get(f, None) - if custom_formatter: - format_spec[f] = {"formatter": custom_formatter, - "wrapperFormatter": format_spec[f]} - - # Handle no wrap fields by building formatters that will not wrap - for f in [ff for ff in fields if ff in no_wrap_fields]: - format_spec[f] = {"hard_width": 
column_info["stats"][f].max_width} - custom_formatter = custom_formatters.get(f, None) - if custom_formatter: - format_spec[f] = {"formatter": custom_formatter, - "wrapperFormatter": format_spec[f]} - return format_spec - - -def build_best_guess_formatters_using_max_widths(objs, fields, field_labels, - custom_formatters={}, - no_wrap_fields=[]): - column_info = build_column_stats_for_best_guess_formatting( - objs, fields, field_labels, custom_formatters) - format_spec = {} - for f in [ff for ff in fields if ff not in no_wrap_fields]: - format_spec[f] = \ - float(column_info["stats"][f].max_width) / float(column_info["total_max_width"]) # noqa - custom_formatter = custom_formatters.get(f, None) - if custom_formatter: - format_spec[f] = {"formatter": custom_formatter, - "wrapperFormatter": format_spec[f]} - - # Handle no wrap fields by building formatters that will not wrap - for f in [ff for ff in fields if ff in no_wrap_fields]: - format_spec[f] = {"hard_width": column_info["stats"][f].max_width} - custom_formatter = custom_formatters.get(f, None) - if custom_formatter: - format_spec[f] = {"formatter": custom_formatter, - "wrapperFormatter": format_spec[f]} - - return format_spec - - -def needs_wrapping_formatters(formatters, no_wrap=None): - no_wrap = is_nowrap_set(no_wrap) - if no_wrap: - return False - - # handle easy case: - if not formatters: - return True - - # If we have at least one wrapping formatter, - # then we assume we don't need to wrap - for f in formatters.values(): - if WrapperFormatter.is_wrapper_formatter(f): - return False - - # looks like we need wrapping - return True - - -def as_wrapping_formatters(objs, fields, field_labels, formatters, - no_wrap=None, no_wrap_fields=[]): - """This function is the entry point for building the "best guess" - word wrapping formatters. A best guess formatter guesses what the best - columns widths should be for the table celldata. It does this by - collecting various stats on the celldata (min, max average width of - column celldata) and from this celldata decides the desired widths - and the minimum widths. - - Given a list of formatters and the list of objects (objs), this - function first determines if we need to augment the passed formatters - with word wrapping formatters. - If the no_wrap parameter or global no_wrap flag is set, - then we do not build wrapping formatters. If any of the formatters - within formatters is a word wrapping formatter, - then it is assumed no more wrapping is required. - - :param objs: - :param fields: - :param field_labels: - :param formatters: - :param no_wrap: - :param no_wrap_fields: - :return: When no wrapping is required, the formatters parameter is returned - -- effectively a NOOP in this case - - When wrapping is required, best-guess word wrapping formatters - are returned with original parameter formatters embedded in the - word wrapping formatters - """ - no_wrap = is_nowrap_set(no_wrap) - - if not needs_wrapping_formatters(formatters, no_wrap): - return formatters - - format_spec = build_best_guess_formatters_using_average_widths( - objs, fields, field_labels, formatters, no_wrap_fields) - - formatters = build_wrapping_formatters( - objs, fields, field_labels, format_spec) - - return formatters - - -def build_wrapping_formatters(objs, fields, field_labels, format_spec, - add_blank_line=True, - no_wrap=None, - use_max=False): - """ - A convenience function for building all wrapper formatters that - will be used to format a CLI's output when its rendered - in a prettyTable object. 
- - It iterates through the keys of format_spec and - calls wrapperFormatterFactory to build - wrapperFormatter objects for each column. - - Its best to show by example parameters: - - field_labels = ['UUID', 'Time Stamp', 'State', 'Event Log ID', - 'Reason Text', 'Entity Instance ID', 'Severity'] - fields = ['uuid', 'timestamp', 'state', 'event_log_id', 'reason_text', - 'entity_instance_id', 'severity'] - format_spec = { - "uuid" : .10, - # float = so display as 10% of terminal width - "timestamp" : .08, - "state" : .08, - "event_log_id" : .07, - "reason_text" : .42, - "entity_instance_id" : .13, - "severity" : - {"formatter" : captializeFunction, - "wrapperFormatter": .12} - } - - :param objs: the actual celldata that will get word wrapped - :param fields: fields (attributes of the celldata) that will be - displayed in the table - :param field_labels: column (field headers) - :param format_spec: dict specify formatter for each column (field) - :param add_blank_line: default True, when tru adds blank line to column - if it wraps, aids readability - :param no_wrap: default False, when True turns wrapping off but does - not suppress other custom formatters - :param use_max - :return: wrapping formatters as functions - """ - - no_wrap = set_no_wrap(no_wrap) - - if objs is None or len(objs) == 0: - return {} - - biggest_word_pattern = re.compile("[\.:,;\!\?\\ =-\_]") - - def get_biggest_word(s): - return max(biggest_word_pattern.split(s), key=len) - - wrapping_formatters_as_functions = {} - - if len(fields) != len(field_labels): - raise Exception("Error in buildWrappingFormatters: " - "len(fields) = {}, len(field_labels) = {}," - " they must be the same length!".format( - len(fields), len(field_labels))) - field_to_label = {} - - for i in range(0, len(fields)): - field_to_label[fields[i]] = field_labels[i] - - ctx = WrapperContext() - ctx.set_num_columns(len(fields)) - - if not format_spec: - if use_max: - format_spec = build_best_guess_formatters_using_max_widths( - objs, fields, field_labels) - else: - format_spec = build_best_guess_formatters_using_average_widths( - objs, fields, field_labels) - - for k in format_spec.keys(): - if k not in fields: - raise Exception("Error in buildWrappingFormatters: format_spec " - "specifies a field {} that is not specified " - "in fields : {}".format(k, fields)) - - format_spec_for_k = copy.deepcopy(format_spec[k]) - if callable(format_spec_for_k): - format_spec_for_k = {"formatter": format_spec_for_k} - wrapper_formatter = wrapper_formatter_factory( - ctx, k, format_spec_for_k) - if wrapper_formatter.min_width <= 0: - # need to specify min-width so that - # column is not unnecessarily squashed - if is_uuid_field(k): # special case - wrapper_formatter.set_min_width(UUID_MIN_LENGTH) - else: - # column width cannot be smaller than the widest word - column_data = [ - str(wrapper_formatter.get_unwrapped_field_value(data)) - for data in objs] - widest_word_in_column = max( - [get_biggest_word(d) + " " - for d in column_data + [field_to_label[k]]], key=len) - wrapper_formatter.set_min_width(len(widest_word_in_column)) - wrapper_formatter.header_width = get_width(field_to_label[k]) - - wrapper_formatter.add_blank_line = add_blank_line - wrapper_formatter.no_wrap = no_wrap - wrapping_formatters_as_functions[k] = wrapper_formatter.as_function() - ctx.add_column_formatter(k, wrapper_formatter) - - return wrapping_formatters_as_functions - - -def set_no_wrap_on_formatters(no_wrap, formatters): - """ - Purpose of this function is to temporarily force - the no_wrap 
setting for the formatters parameter. - returns orig_no_wrap_settings defined for each formatter - Use unset_no_wrap_on_formatters(orig_no_wrap_settings) to undo what - this function does - """ - # handle easy case: - if not formatters: - return {} - - formatter_no_wrap_settings = {} - - global_orig_no_wrap = is_nowrap_set() - set_no_wrap(no_wrap) - - for k, f in formatters.iteritems(): - if WrapperFormatter.is_wrapper_formatter(f): - formatter_no_wrap_settings[k] = ( - f.wrapper_formatter.no_wrap, f.wrapper_formatter) - f.wrapper_formatter.no_wrap = no_wrap - - return {"global_orig_no_wrap": global_orig_no_wrap, - "formatter_no_wrap_settings": formatter_no_wrap_settings} - - -def unset_no_wrap_on_formatters(orig_no_wrap_settings): - """ - It only makes sense to call this function with the return value - from the last call to set_no_wrap_on_formatters(no_wrap, formatters). - It effectively undoes what set_no_wrap_on_formatters() does - """ - if not orig_no_wrap_settings: - return {} - - global_orig_no_wrap = orig_no_wrap_settings["global_orig_no_wrap"] - formatter_no_wrap_settings = \ - orig_no_wrap_settings["formatter_no_wrap_settings"] - - formatters = {} - - for k, v in formatter_no_wrap_settings.iteritems(): - formatters[k] = v[1] - formatters[k].no_wrap = v[0] - - set_no_wrap(global_orig_no_wrap) - - return formatters - - -def _simpleTestHarness(no_wrap): - - import utils - - def testFormatter(event): - return "*{}".format(event["state"]) - - def buildFormatter(field, width): - def f(dict): - if field == 'number': - return dict[field] - return "{}".format(dict[field]).replace("_", " ") - return {"formatter": f, "wrapperFormatter": width} - - set_no_wrap(no_wrap) - - field_labels = ['Time Stamp', 'State', 'Event Log ID', 'Reason Text', - 'Entity Instance ID', 'Severity', 'Number'] - fields = ['timestamp', 'state', 'event_log_id', 'reason_text', - 'entity_instance_id', 'severity', 'number'] - - formatterSpecX = {"timestamp": 10, - "state": 8, - "event_log_id": 70, - "reason_text": 30, - "entity_instance_id": 30, - "severity": 12, - "number": 4} - - formatterSpec = {} - for f in fields: - formatterSpec[f] = buildFormatter(f, formatterSpecX[f]) - - logs = [] - for i in range(0, 30): - log = {} - for f in fields: - if f == 'number': - log[f] = i - else: - log[f] = "{}{}".format(f, i) - logs.append(utils.objectify(log)) - - formatterSpec = formatterSpecX - - formatters = build_wrapping_formatters( - logs, fields, field_labels, formatterSpec) - - utils.print_list( - logs, fields, field_labels, formatters=formatters, sortby=6, - reversesort=True, no_wrap_fields=['entity_instance_id']) - - print("nowrap = {}".format(is_nowrap_set())) - - -if __name__ == "__main__": - _simpleTestHarness(True) - _simpleTestHarness(False) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/exc.py b/python-inventoryclient/inventoryclient/inventoryclient/exc.py deleted file mode 100644 index 5c550b8e..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/exc.py +++ /dev/null @@ -1,101 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
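
set_no_wrap_on_formatters() and unset_no_wrap_on_formatters() above form a save-and-restore pair around the per-formatter no_wrap flags. A usage sketch, with names taken from this module (print_list as used in the test harness; the try/finally is an addition for safety, not part of the removed code):

    saved = set_no_wrap_on_formatters(True, formatters)   # force wrapping off
    try:
        utils.print_list(logs, fields, field_labels, formatters=formatters)
    finally:
        unset_no_wrap_on_formatters(saved)                # restore each formatter's flag
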
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -class BaseException(Exception): - """An error occurred.""" - def __init__(self, message=None): - self.message = message - - def __str__(self): - return str(self.message) or self.__class__.__doc__ - - -class AuthSystem(BaseException): - """Could not obtain token and endpoint using provided credentials.""" - pass - - -class CommandError(BaseException): - """Invalid usage of CLI.""" - - -class InvalidEndpoint(BaseException): - """The provided endpoint is invalid.""" - - -class CommunicationError(BaseException): - """Unable to communicate with server.""" - - -class EndpointException(BaseException): - pass - - -class ClientException(Exception): - """DEPRECATED""" - - -class InvalidAttribute(ClientException): - pass - - -class InvalidAttributeValue(ClientException): - pass - - -class HTTPException(Exception): - """Base exception for all HTTP-derived exceptions.""" - code = 'N/A' - - def __init__(self, details=None): - self.details = details - - def __str__(self): - return str(self.details) or "%s (HTTP %s)" % (self.__class__.__name__, - self.code) - - -class HTTPMultipleChoices(HTTPException): - code = 300 - - def __str__(self): - self.details = "Requested version of INVENTORY API is not available." - return "%s (HTTP %s) %s" % (self.__class__.__name__, self.code, - self.details) - - -class Unauthorized(HTTPException): - code = 401 - - -class HTTPUnauthorized(Unauthorized): - pass - - -class NotFound(HTTPException): - """DEPRECATED.""" - code = 404 - - -class HTTPNotFound(NotFound): - pass - - -class HTTPMethodNotAllowed(HTTPException): - code = 405 - - -class HTTPInternalServerError(HTTPException): - code = 500 - - -class HTTPNotImplemented(HTTPException): - code = 501 - - -class HTTPBadGateway(HTTPException): - code = 502 diff --git a/python-inventoryclient/inventoryclient/inventoryclient/shell.py b/python-inventoryclient/inventoryclient/inventoryclient/shell.py deleted file mode 100644 index f3e040a6..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/shell.py +++ /dev/null @@ -1,326 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
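
The exception classes deleted above form a small hierarchy keyed by HTTP status code, with __str__ falling back to the class docstring or the status code when no details are given. A sketch of how callers typically consumed it (the client and uuid variables are assumed):

    try:
        host = client.host.get(uuid)
    except exc.HTTPNotFound:
        print('host not found: %s' % uuid)
    except exc.HTTPException as e:
        print('request failed: %s (HTTP %s)' % (e.details, e.code))
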
-# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Command-line interface for Inventory -""" - -import argparse -import httplib2 -import inventoryclient -from inventoryclient import client -from inventoryclient.common import utils -from inventoryclient import exc -import logging -from oslo_utils import importutils -import sys - - -class InventoryShell(object): - - def get_base_parser(self): - parser = argparse.ArgumentParser( - prog='inventory', - description=__doc__.strip(), - epilog='See "inventory help COMMAND" ' - 'for help on a specific command.', - add_help=False, - formatter_class=HelpFormatter, - ) - - # Global arguments - parser.add_argument('-h', '--help', - action='store_true', - help=argparse.SUPPRESS, - ) - - parser.add_argument('--version', - action='version', - version=inventoryclient.__version__) - - parser.add_argument('--debug', - default=bool(utils.env('INVENTORYCLIENT_DEBUG')), - action='store_true', - help='Defaults to env[INVENTORYCLIENT_DEBUG]') - - parser.add_argument('-v', '--verbose', - default=False, action="store_true", - help="Print more verbose output") - - parser.add_argument('--timeout', - default=600, - help='Number of seconds to wait for a response') - - parser.add_argument('--os-username', - default=utils.env('OS_USERNAME'), - help='Defaults to env[OS_USERNAME]') - - parser.add_argument('--os_username', - help=argparse.SUPPRESS) - - parser.add_argument('--os-password', - default=utils.env('OS_PASSWORD'), - help='Defaults to env[OS_PASSWORD]') - - parser.add_argument('--os_password', - help=argparse.SUPPRESS) - - parser.add_argument('--os-tenant-id', - default=utils.env('OS_TENANT_ID'), - help='Defaults to env[OS_TENANT_ID]') - - parser.add_argument('--os_tenant_id', - help=argparse.SUPPRESS) - - parser.add_argument('--os-tenant-name', - default=utils.env('OS_TENANT_NAME'), - help='Defaults to env[OS_TENANT_NAME]') - - parser.add_argument('--os_tenant_name', - help=argparse.SUPPRESS) - - parser.add_argument('--os-auth-url', - default=utils.env('OS_AUTH_URL'), - help='Defaults to env[OS_AUTH_URL]') - - parser.add_argument('--os_auth_url', - help=argparse.SUPPRESS) - - parser.add_argument('--os-region-name', - default=utils.env('OS_REGION_NAME'), - help='Defaults to env[OS_REGION_NAME]') - - parser.add_argument('--os_region_name', - help=argparse.SUPPRESS) - - parser.add_argument('--os-auth-token', - default=utils.env('OS_AUTH_TOKEN'), - help='Defaults to env[OS_AUTH_TOKEN]') - - parser.add_argument('--os_auth_token', - help=argparse.SUPPRESS) - - parser.add_argument('--inventory-url', - default=utils.env('INVENTORY_URL'), - help='Defaults to env[INVENTORY_URL]') - - parser.add_argument('--inventory_url', - help=argparse.SUPPRESS) - - parser.add_argument('--inventory-api-version', - default=utils.env( - 'INVENTORY_API_VERSION', default='1'), - help='Defaults to env[INVENTORY_API_VERSION] ' - 'or 1') - - parser.add_argument('--inventory_api_version', - help=argparse.SUPPRESS) - - parser.add_argument('--os-service-type', - default=utils.env('OS_SERVICE_TYPE', - default=client.SERVICE_TYPE), - help='Defaults to env[OS_SERVICE_TYPE]') - - parser.add_argument('--os_service_type', - help=argparse.SUPPRESS) - - parser.add_argument('--os-endpoint-type', - default=utils.env('OS_ENDPOINT_TYPE'), - help='Defaults to env[OS_ENDPOINT_TYPE]') - - parser.add_argument('--os_endpoint_type', - help=argparse.SUPPRESS) - - parser.add_argument('--os-user-domain-id', - default=utils.env('OS_USER_DOMAIN_ID'), - help='Defaults to env[OS_USER_DOMAIN_ID].') - - 
parser.add_argument('--os-user-domain-name', - default=utils.env('OS_USER_DOMAIN_NAME'), - help='Defaults to env[OS_USER_DOMAIN_NAME].') - - parser.add_argument('--os-project-id', - default=utils.env('OS_PROJECT_ID'), - help='Another way to specify tenant ID. ' - 'This option is mutually exclusive with ' - ' --os-tenant-id. ' - 'Defaults to env[OS_PROJECT_ID].') - - parser.add_argument('--os-project-name', - default=utils.env('OS_PROJECT_NAME'), - help='Another way to specify tenant name. ' - 'This option is mutually exclusive with ' - ' --os-tenant-name. ' - 'Defaults to env[OS_PROJECT_NAME].') - - parser.add_argument('--os-project-domain-id', - default=utils.env('OS_PROJECT_DOMAIN_ID'), - help='Defaults to env[OS_PROJECT_DOMAIN_ID].') - - parser.add_argument('--os-project-domain-name', - default=utils.env('OS_PROJECT_DOMAIN_NAME'), - help='Defaults to env[OS_PROJECT_DOMAIN_NAME].') - - return parser - - def get_subcommand_parser(self, version): - parser = self.get_base_parser() - - self.subcommands = {} - subparsers = parser.add_subparsers(metavar='') - submodule = importutils.import_versioned_module('inventoryclient', - version, 'shell') - submodule.enhance_parser(parser, subparsers, self.subcommands) - utils.define_commands_from_module(subparsers, self, self.subcommands) - self._add_bash_completion_subparser(subparsers) - return parser - - def _add_bash_completion_subparser(self, subparsers): - subparser = subparsers.add_parser( - 'bash_completion', - add_help=False, - formatter_class=HelpFormatter - ) - self.subcommands['bash_completion'] = subparser - subparser.set_defaults(func=self.do_bash_completion) - - def _setup_debugging(self, debug): - if debug: - logging.basicConfig( - format="%(levelname)s (%(module)s:%(lineno)d) %(message)s", - level=logging.DEBUG) - - httplib2.debuglevel = 1 - else: - logging.basicConfig(format="%(levelname)s %(message)s", - level=logging.CRITICAL) - - def main(self, argv): - # Parse args once to find version - parser = self.get_base_parser() - (options, args) = parser.parse_known_args(argv) - self._setup_debugging(options.debug) - - # build available subcommands based on version - api_version = options.inventory_api_version - subcommand_parser = self.get_subcommand_parser(api_version) - self.parser = subcommand_parser - - # Handle top-level --help/-h before attempting to parse - # a command off the command line - if options.help or not argv: - self.do_help(options) - return 0 - - # Parse args again and call whatever callback was selected - args = subcommand_parser.parse_args(argv) - - # Short-circuit and deal with help command right away. 
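
main() above relies on a common two-pass argparse pattern: a tolerant first pass discovers the global options (debug flag, API version) before the subcommand table exists, and a strict second pass parses the full command line against it. Reduced to its essentials:

    parser = self.get_base_parser()
    options, _ = parser.parse_known_args(argv)   # pass 1: unknown args tolerated
    subcommand_parser = self.get_subcommand_parser(
        options.inventory_api_version)
    args = subcommand_parser.parse_args(argv)    # pass 2: full, strict parse
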
- if args.func == self.do_help: - self.do_help(args) - return 0 - elif args.func == self.do_bash_completion: - self.do_bash_completion(args) - return 0 - - if not (args.os_auth_token and args.inventory_url): - if not args.os_username: - raise exc.CommandError("You must provide a username via " - "either --os-username or via " - "env[OS_USERNAME]") - - if not args.os_password: - raise exc.CommandError("You must provide a password via " - "either --os-password or via " - "env[OS_PASSWORD]") - - if not (args.os_project_id or args.os_project_name): - raise exc.CommandError("You must provide a project name via " - "either --os-project-name or via " - "env[OS_PROJECT_NAME]") - - if not args.os_auth_url: - raise exc.CommandError("You must provide an auth url via " - "either --os-auth-url or via " - "env[OS_AUTH_URL]") - - if not args.os_region_name: - raise exc.CommandError("You must provide an region name via " - "either --os-region-name or via " - "env[OS_REGION_NAME]") - - client_args = ( - 'os_auth_token', 'inventory_url', 'os_username', 'os_password', - 'os_auth_url', 'os_project_id', 'os_project_name', 'os_tenant_id', - 'os_tenant_name', 'os_region_name', 'os_user_domain_id', - 'os_user_domain_name', 'os_project_domain_id', - 'os_project_domain_name', 'os_service_type', 'os_endpoint_type', - 'timeout' - ) - kwargs = {} - for key in client_args: - client_key = key.replace("os_", "", 1) - kwargs[client_key] = getattr(args, key) - - client = inventoryclient.client.get_client(api_version, **kwargs) - - try: - args.func(client, args) - except exc.Unauthorized: - raise exc.CommandError("Invalid Identity credentials.") - - def do_bash_completion(self, args): - """Prints all of the commands and options to stdout. - """ - commands = set() - options = set() - for sc_str, sc in self.subcommands.items(): - commands.add(sc_str) - for option in list(sc._optionals._option_string_actions): - options.add(option) - - commands.remove('bash_completion') - print(' '.join(commands | options)) - - @utils.arg('command', metavar='', nargs='?', - help='Display help for ') - def do_help(self, args): - """Display help about this program or one of its subcommands.""" - if getattr(args, 'command', None): - if args.command in self.subcommands: - self.subcommands[args.command].print_help() - else: - raise exc.CommandError("'%s' is not a valid subcommand" % - args.command) - else: - self.parser.print_help() - - -class HelpFormatter(argparse.HelpFormatter): - def start_section(self, heading): - # Title-case the headings - heading = '%s%s' % (heading[0].upper(), heading[1:]) - super(HelpFormatter, self).start_section(heading) - - -def main(): - try: - InventoryShell().main(sys.argv[1:]) - - except KeyboardInterrupt as e: - print >> sys.stderr, ('caught: %r, aborting' % (e)) - sys.exit(0) - - except IOError as e: - sys.exit(0) - - except Exception as e: - print >> sys.stderr, e - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/python-inventoryclient/inventoryclient/inventoryclient/tests/__init__.py b/python-inventoryclient/inventoryclient/inventoryclient/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python-inventoryclient/inventoryclient/inventoryclient/tests/test_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/tests/test_shell.py deleted file mode 100644 index c63073ac..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/tests/test_shell.py +++ /dev/null @@ -1,92 +0,0 @@ -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. 
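
Before constructing the client, main() above maps parsed options onto constructor keyword arguments by stripping one leading 'os_' prefix. The same mapping, condensed (client_args abridged here):

    client_args = ('os_auth_token', 'inventory_url', 'os_username', 'timeout')
    kwargs = {key.replace('os_', '', 1): getattr(args, key) for key in client_args}
    # args.os_username -> kwargs['username']; unprefixed names pass through unchanged
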
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import cStringIO -import httplib2 -import re -import sys - -import fixtures -from inventoryclient import exc -from inventoryclient import shell as inventoryclient_shell -from inventoryclient.tests import utils -from testtools import matchers - -FAKE_ENV = {'OS_USERNAME': 'username', - 'OS_PASSWORD': 'password', - 'OS_TENANT_NAME': 'tenant_name', - 'OS_AUTH_URL': 'http://no.where'} - - -class ShellTest(utils.BaseTestCase): - re_options = re.DOTALL | re.MULTILINE - - # Patch os.environ to avoid required auth info. - def make_env(self, exclude=None): - env = dict((k, v) for k, v in FAKE_ENV.items() if k != exclude) - self.useFixture(fixtures.MonkeyPatch('os.environ', env)) - - def setUp(self): - super(ShellTest, self).setUp() - - def shell(self, argstr): - orig = sys.stdout - try: - sys.stdout = cStringIO.StringIO() - _shell = inventoryclient_shell.InventoryShell() - _shell.main(argstr.split()) - except SystemExit: - exc_type, exc_value, exc_traceback = sys.exc_info() - self.assertEqual(exc_value.code, 0) - finally: - out = sys.stdout.getvalue() - sys.stdout.close() - sys.stdout = orig - - return out - - def test_help_unknown_command(self): - self.assertRaises(exc.CommandError, self.shell, 'help foofoo') - - def test_debug(self): - httplib2.debuglevel = 0 - self.shell('--debug help') - self.assertEqual(httplib2.debuglevel, 1) - - def test_help(self): - required = [ - '.*?^usage: inventory', - '.*?^See "inventory help COMMAND" ' - 'for help on a specific command', - ] - for argstr in ['--help', 'help']: - help_text = self.shell(argstr) - for r in required: - self.assertThat(help_text, - matchers.MatchesRegex(r, - self.re_options)) - - def test_help_on_subcommand(self): - required = [ - '.*?^usage: inventory host-show ' - '', - ".*?^Show host attributes.", - '', - ".*?^Positional arguments:", - ".*?^ Name or ID of host", - ] - argstrings = [ - 'help host-show', - ] - for argstr in argstrings: - help_text = self.shell(argstr) - for r in required: - self.assertThat(help_text, - matchers.MatchesRegex(r, self.re_options)) - - def test_auth_param(self): - self.make_env(exclude='OS_USERNAME') - self.test_help() diff --git a/python-inventoryclient/inventoryclient/inventoryclient/tests/test_utils.py b/python-inventoryclient/inventoryclient/inventoryclient/tests/test_utils.py deleted file mode 100644 index 8e17c117..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/tests/test_utils.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2013 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
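
ShellTest.shell() above captures CLI output by swapping sys.stdout for an in-memory buffer for the duration of the call. The same pattern in Python 3 form (io.StringIO standing in for the cStringIO these Python 2 tests used):

    import io
    import sys

    def capture_stdout(fn, *args):
        orig, sys.stdout = sys.stdout, io.StringIO()
        try:
            fn(*args)
            return sys.stdout.getvalue()
        finally:
            sys.stdout = orig
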
- - -import cStringIO -import sys - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.tests import utils as test_utils - - -class UtilsTest(test_utils.BaseTestCase): - - def test_prettytable(self): - class Struct(object): - def __init__(self, **entries): - self.__dict__.update(entries) - - # test that the prettytable output is wellformatted (left-aligned) - saved_stdout = sys.stdout - try: - sys.stdout = output_dict = cStringIO.StringIO() - utils.print_dict({'K': 'k', 'Key': 'Value'}) - - finally: - sys.stdout = saved_stdout - - self.assertEqual(output_dict.getvalue(), '''\ -+----------+-------+ -| Property | Value | -+----------+-------+ -| K | k | -| Key | Value | -+----------+-------+ -''') - - def test_args_array_to_dict(self): - my_args = { - 'matching_metadata': ['metadata.key=metadata_value'], - 'other': 'value' - } - cleaned_dict = utils.args_array_to_dict(my_args, - "matching_metadata") - self.assertEqual(cleaned_dict, { - 'matching_metadata': {'metadata.key': 'metadata_value'}, - 'other': 'value' - }) - - def test_args_array_to_patch(self): - my_args = { - 'attributes': ['foo=bar', '/extra/bar=baz'], - 'op': 'add', - } - patch = utils.args_array_to_patch(my_args['op'], - my_args['attributes']) - self.assertEqual(patch, [{'op': 'add', - 'value': 'bar', - 'path': '/foo'}, - {'op': 'add', - 'value': 'baz', - 'path': '/extra/bar'}]) - - def test_args_array_to_patch_format_error(self): - my_args = { - 'attributes': ['foobar'], - 'op': 'add', - } - self.assertRaises(exc.CommandError, utils.args_array_to_patch, - my_args['op'], my_args['attributes']) - - def test_args_array_to_patch_remove(self): - my_args = { - 'attributes': ['/foo', 'extra/bar'], - 'op': 'remove', - } - patch = utils.args_array_to_patch(my_args['op'], - my_args['attributes']) - self.assertEqual(patch, [{'op': 'remove', 'path': '/foo'}, - {'op': 'remove', 'path': '/extra/bar'}]) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/tests/utils.py b/python-inventoryclient/inventoryclient/inventoryclient/tests/utils.py deleted file mode 100644 index cff9f7ba..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/tests/utils.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
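
The patch-conversion tests above pin down the CLI-to-JSON-patch translation; for quick reference, the behaviour they assert is:

    utils.args_array_to_patch('add', ['foo=bar', '/extra/bar=baz'])
    # -> [{'op': 'add', 'path': '/foo', 'value': 'bar'},
    #     {'op': 'add', 'path': '/extra/bar', 'value': 'baz'}]
    utils.args_array_to_patch('remove', ['/foo', 'extra/bar'])
    # -> [{'op': 'remove', 'path': '/foo'},
    #     {'op': 'remove', 'path': '/extra/bar'}]
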
- -import copy -import fixtures -import mox -import StringIO -import testtools - -from inventoryclient.common import http - - -class BaseTestCase(testtools.TestCase): - - def setUp(self): - super(BaseTestCase, self).setUp() - self.m = mox.Mox() - self.addCleanup(self.m.UnsetStubs) - self.useFixture(fixtures.FakeLogger()) - - -class FakeAPI(object): - def __init__(self, fixtures): - self.fixtures = fixtures - self.calls = [] - - def _request(self, method, url, headers=None, body=None): - call = (method, url, headers or {}, body) - self.calls.append(call) - return self.fixtures[url][method] - - def raw_request(self, *args, **kwargs): - fixture = self._request(*args, **kwargs) - body_iter = http.ResponseBodyIterator(StringIO.StringIO(fixture[1])) - return FakeResponse(fixture[0]), body_iter - - def json_request(self, *args, **kwargs): - fixture = self._request(*args, **kwargs) - return FakeResponse(fixture[0]), fixture[1] - - -class FakeResponse(object): - def __init__(self, headers, body=None, version=None): - """:param headers: dict representing HTTP response headers - :param body: file-like object - """ - self.headers = headers - self.body = body - - def getheaders(self): - return copy.deepcopy(self.headers).items() - - def getheader(self, key, default): - return self.headers.get(key, default) - - def read(self, amt): - return self.body.read(amt) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/__init__.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/client.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/client.py deleted file mode 100644 index 9cd7db20..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/client.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from inventoryclient.common import exceptions as exc -from inventoryclient.common import http -from inventoryclient.common.http import DEFAULT_VERSION -from inventoryclient.common.i18n import _ -from inventoryclient.v1 import cpu -from inventoryclient.v1 import ethernetport -from inventoryclient.v1 import host -from inventoryclient.v1 import lldp_agent -from inventoryclient.v1 import lldp_neighbour -from inventoryclient.v1 import memory -from inventoryclient.v1 import node -from inventoryclient.v1 import pci_device -from inventoryclient.v1 import port - - -class Client(object): - """Client for the INVENTORY v1 API. - - :param string endpoint: A user-supplied endpoint URL for the inventory - service. - :param function token: Provides token for authentication. - :param integer timeout: Allows customization of the timeout for client - http requests. 
(optional) - """ - - def __init__(self, endpoint=None, session=None, **kwargs): - """Initialize a new client for the INVENTORY v1 API.""" - if not session: - if kwargs.get('os_inventory_api_version'): - kwargs['api_version_select_state'] = "user" - else: - if not endpoint: - raise exc.EndpointException( - _("Must provide 'endpoint' " - "if os_inventory_api_version isn't specified")) - - # If the user didn't specify a version, use a default version - kwargs['api_version_select_state'] = "default" - kwargs['os_inventory_api_version'] = DEFAULT_VERSION - - self.http_client = http.get_http_client(endpoint, session, **kwargs) - self.host = host.HostManager(self.http_client) - self.cpu = cpu.CpuManager(self.http_client) - self.ethernetport = ethernetport.EthernetPortManager(self.http_client) - self.lldp_agent = lldp_agent.LldpAgentManager(self.http_client) - self.lldp_neighbour = lldp_neighbour.LldpNeighbourManager( - self.http_client) - self.memory = memory.MemoryManager(self.http_client) - self.node = node.NodeManager(self.http_client) - self.pci_device = pci_device.PciDeviceManager(self.http_client) - self.port = port.PortManager(self.http_client) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/cpu.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/cpu.py deleted file mode 100644 index 60eb47fb..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/cpu.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from inventoryclient.common import base -from inventoryclient.common.i18n import _ -from inventoryclient import exc -import json - - -CREATION_ATTRIBUTES = ['host_uuid', 'node_uuid', 'cpu', 'core', 'thread', - 'cpu_family', 'cpu_model', 'allocated_function', - 'numa_node', 'capabilities', 'function', - 'num_cores_on_processor0', 'num_cores_on_processor1', - 'num_cores_on_processor2', 'num_cores_on_processor3'] - -PLATFORM_CPU_TYPE = "Platform" -VSWITCH_CPU_TYPE = "Vswitch" -SHARED_CPU_TYPE = "Shared" -VMS_CPU_TYPE = "VMs" -NONE_CPU_TYPE = "None" - -CPU_TYPE_LIST = [PLATFORM_CPU_TYPE, VSWITCH_CPU_TYPE, - SHARED_CPU_TYPE, VMS_CPU_TYPE, - NONE_CPU_TYPE] - - -PLATFORM_CPU_TYPE_FORMAT = _("Platform") -VSWITCH_CPU_TYPE_FORMAT = _("vSwitch") -SHARED_CPU_TYPE_FORMAT = _("Shared") -VMS_CPU_TYPE_FORMAT = _("VMs") -NONE_CPU_TYPE_FORMAT = _("None") - -CPU_TYPE_FORMATS = {PLATFORM_CPU_TYPE: PLATFORM_CPU_TYPE_FORMAT, - VSWITCH_CPU_TYPE: VSWITCH_CPU_TYPE_FORMAT, - SHARED_CPU_TYPE: SHARED_CPU_TYPE_FORMAT, - VMS_CPU_TYPE: VMS_CPU_TYPE_FORMAT, - NONE_CPU_TYPE: NONE_CPU_TYPE_FORMAT} - - -def _cpu_function_formatter(allocated_function): - if allocated_function in CPU_TYPE_FORMATS: - return CPU_TYPE_FORMATS[allocated_function] - return "unknown({})".format(allocated_function) - - -def _cpu_function_tuple_formatter(data): - return _cpu_function_formatter(data.allocated_function) - - -class Cpu(base.Resource): - def __repr__(self): - return "" % self._info - - -class CpuManager(base.Manager): - resource_class = Cpu - - def list(self, host_id): - path = '/v1/hosts/%s/cpus' % host_id - return self._list(path, "cpus") - - def get(self, cpu_id): - path = '/v1/cpus/%s' % cpu_id - try: - return self._list(path)[0] - except IndexError: - return None - - def create(self, **kwargs): - path = '/v1/cpus/' - new = {} - for (key, value) in kwargs.items(): - if key in CREATION_ATTRIBUTES: - new[key] = value - else: - raise exc.InvalidAttribute(key) - return 
self._create(path, new) - - def delete(self, cpu_id): - path = '/v1/cpus/%s' % cpu_id - return self._delete(path) - - def update(self, cpu_id, patch): - path = '/v1/cpus/%s' % cpu_id - return self._update(path, - data=(json.dumps(patch))) - - -class CpuFunction(object): - def __init__(self, function): - self.allocated_function = function - self.socket_cores = {} - self.socket_cores_number = {} - - -def check_core_functions(personality, cpus): - platform_cores = 0 - vswitch_cores = 0 - vm_cores = 0 - for cpu in cpus: - allocated_function = cpu.allocated_function - if allocated_function == PLATFORM_CPU_TYPE: - platform_cores += 1 - elif allocated_function == VSWITCH_CPU_TYPE: - vswitch_cores += 1 - elif allocated_function == VMS_CPU_TYPE: - vm_cores += 1 - - error_string = "" - if platform_cores == 0: - error_string = ("There must be at least one core for %s." % - PLATFORM_CPU_TYPE_FORMAT) - elif personality == 'compute' and vswitch_cores == 0: - error_string = ("There must be at least one core for %s." % - VSWITCH_CPU_TYPE_FORMAT) - elif personality == 'compute' and vm_cores == 0: - error_string = ("There must be at least one core for %s." % - VMS_CPU_TYPE_FORMAT) - return error_string - - -def compress_range(c_list): - c_list.append(999) - c_list.sort() - c_sep = "" - c_item = "" - c_str = "" - pn = 0 # pn is not used until second loop anyways - for n in c_list: - if not c_item: - c_item = "%s" % n - else: - if n > (pn + 1): - if int(pn) == int(c_item): - c_str = "%s%s%s" % (c_str, c_sep, c_item) - else: - c_str = "%s%s%s-%s" % (c_str, c_sep, c_item, pn) - c_sep = "," - c_item = "%s" % n - pn = n - return c_str - - -def restructure_host_cpu_data(host): - host.core_assignment = [] - if host.cpus: - host.cpu_model = host.cpus[0].cpu_model - host.sockets = len(host.nodes) - host.hyperthreading = "No" - host.physical_cores = 0 - - core_assignment = {} - number_of_cores = {} - host.node_min_max_cores = {} - - for cpu in host.cpus: - if cpu.numa_node == 0 and cpu.thread == 0: - host.physical_cores += 1 - elif cpu.thread > 0: - host.hyperthreading = "Yes" - - if cpu.numa_node not in host.node_min_max_cores: - host.node_min_max_cores[cpu.numa_node] = \ - {'min': 99999, 'max': 0} - if cpu.cpu < host.node_min_max_cores[cpu.numa_node]['min']: - host.node_min_max_cores[cpu.numa_node]['min'] = cpu.cpu - if cpu.cpu > host.node_min_max_cores[cpu.numa_node]['max']: - host.node_min_max_cores[cpu.numa_node]['max'] = cpu.cpu - - if cpu.allocated_function is None: - cpu.allocated_function = NONE_CPU_TYPE - - if cpu.allocated_function not in core_assignment: - core_assignment[cpu.allocated_function] = {} - number_of_cores[cpu.allocated_function] = {} - if cpu.numa_node not in core_assignment[cpu.allocated_function]: - core_assignment[cpu.allocated_function][cpu.numa_node] = \ - [int(cpu.cpu)] - number_of_cores[cpu.allocated_function][cpu.numa_node] = 1 - else: - core_assignment[ - cpu.allocated_function][cpu.numa_node].append(int(cpu.cpu)) - number_of_cores[cpu.allocated_function][cpu.numa_node] = \ - number_of_cores[cpu.allocated_function][cpu.numa_node] + 1 - - for f in CPU_TYPE_LIST: - cpufunction = CpuFunction(f) - if f in core_assignment: - host.core_assignment.append(cpufunction) - for s, cores in core_assignment[f].items(): - cpufunction.socket_cores[s] = compress_range(cores) - cpufunction.socket_cores_number[s] = number_of_cores[f][s] - else: - if (f == PLATFORM_CPU_TYPE or - (hasattr(host, 'subfunctions') and - 'compute' in host.subfunctions)): - if f != NONE_CPU_TYPE: - 
host.core_assignment.append(cpufunction) - for s in range(0, len(host.nodes)): - cpufunction.socket_cores[s] = "" - cpufunction.socket_cores_number[s] = 0 diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/cpu_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/cpu_shell.py deleted file mode 100644 index 50af61cd..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/cpu_shell.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _print_cpu_show(cpu): - fields = ['cpu', 'numa_node', 'core', 'thread', - 'cpu_model', 'cpu_family', - 'capabilities', - 'uuid', 'host_uuid', 'node_uuid', - 'created_at', 'updated_at'] - labels = ['logical_core', 'processor (numa_node)', 'physical_core', - 'thread', 'processor_model', 'processor_family', - 'capabilities', - 'uuid', 'host_uuid', 'node_uuid', - 'created_at', 'updated_at'] - data = [(f, getattr(cpu, f, '')) for f in fields] - utils.print_tuple_list(data, labels) - - -def _find_cpu(cc, host, cpunameoruuid): - cpus = cc.cpu.list(host.uuid) - - if cpunameoruuid.isdigit(): - cpunameoruuid = int(cpunameoruuid) - - for c in cpus: - if c.uuid == cpunameoruuid or c.cpu == cpunameoruuid: - break - else: - raise exc.CommandError('CPU logical core not found: host %s cpu %s' % - (host.hostname, cpunameoruuid)) - return c - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('cpulcoreoruuid', - metavar='', - help="CPU logical core ID or UUID of cpu") -def do_host_cpu_show(cc, args): - """Show cpu core attributes.""" - host = host_utils._find_host(cc, args.hostnameorid) - cpu = _find_cpu(cc, host, args.cpulcoreoruuid) - _print_cpu_show(cpu) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_cpu_list(cc, args): - """List cpu cores.""" - - host = host_utils._find_host(cc, args.hostnameorid) - - cpus = cc.cpu.list(host.uuid) - - field_labels = ['uuid', 'log_core', 'processor', 'phy_core', 'thread', - 'processor_model'] - fields = ['uuid', 'cpu', 'numa_node', 'core', 'thread', - 'cpu_model'] - - utils.print_list(cpus, fields, field_labels, sortby=1) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/ethernetport.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/ethernetport.py deleted file mode 100644 index 5817bdac..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/ethernetport.py +++ /dev/null @@ -1,65 +0,0 @@ -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. 
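
compress_range() above folds a sorted list of core numbers into the usual range notation; appending 999 before the loop is a sentinel that flushes the final group (it also mutates the caller's list in place and assumes core numbers stay below 999). A worked example:

    compress_range([0, 1, 2, 5, 7, 8])   # -> '0-2,5,7-8'
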
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base -from inventoryclient import exc -import json - - -CREATION_ATTRIBUTES = ['host_uuid', 'name', 'mtu', 'speed', 'bootp', - 'interface_uuid', 'pdevice', 'pclass', 'pciaddr', - 'psdevice', 'link_mode', 'psvendor', 'pvendor'] - - -class EthernetPort(base.Resource): - def __repr__(self): - return "" % self._info - - -class EthernetPortManager(base.Manager): - resource_class = EthernetPort - - def list(self, host_id): - path = '/v1/hosts/%s/ethernet_ports' % host_id - return self._list(path, "ethernet_ports") - - def get(self, port_id): - path = '/v1/ethernet_ports/%s' % port_id - try: - return self._list(path)[0] - except IndexError: - return None - - def create(self, **kwargs): - path = '/v1/ethernet_ports/' - new = {} - for (key, value) in kwargs.items(): - if key in CREATION_ATTRIBUTES: - new[key] = value - else: - raise exc.InvalidAttribute(key) - return self._create(path, new) - - def delete(self, port_id): - path = '/v1/ethernet_ports/%s' % port_id - return self._delete(path) - - def update(self, port_id, patch): - path = '/v1/ethernet_ports/%s' % port_id - return self._update(path, - data=(json.dumps(patch))) - - -def get_port_display_name(p): - if p.name: - return p.name - if p.namedisplay: - return p.namedisplay - else: - return '(' + str(p.uuid)[-8:] + ')' diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/ethernetport_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/ethernetport_shell.py deleted file mode 100644 index 674aa3e1..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/ethernetport_shell.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
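
EthernetPortManager above repeats the thin REST-manager pattern used by every resource in this client (cpu, memory, node, port, ...): list() nests under the owning host, while get/create/update/delete key on the resource itself. Illustrative calls, assuming a constructed client:

    ports = client.ethernetport.list(host_uuid)   # GET /v1/hosts/<uuid>/ethernet_ports
    port = client.ethernetport.get(port_uuid)     # GET /v1/ethernet_ports/<uuid>
    client.ethernetport.update(port_uuid,         # sends a JSON-patch document
                               [{'op': 'replace', 'path': '/mtu', 'value': 9000}])
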
-# - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _bootp_formatter(value): - return bool(value) - - -def _bootp_port_formatter(port): - return _bootp_formatter(port.bootp) - - -def _print_ethernet_port_show(port): - fields = ['name', 'namedisplay', - 'mac', 'pciaddr', - 'numa_node', - 'autoneg', 'bootp', - 'pclass', 'pvendor', 'pdevice', - 'link_mode', 'capabilities', - 'uuid', 'host_uuid', - 'created_at', 'updated_at'] - labels = ['name', 'namedisplay', - 'mac', 'pciaddr', - 'processor', - 'autoneg', 'bootp', - 'pclass', 'pvendor', 'pdevice', - 'link_mode', 'capabilities', - 'uuid', 'host_uuid', - 'created_at', 'updated_at'] - data = [(f, getattr(port, f, '')) for f in fields] - utils.print_tuple_list(data, labels, - formatters={'bootp': _bootp_formatter}) - - -def _find_port(cc, host, portnameoruuid): - ports = cc.ethernetport.list(host.uuid) - for p in ports: - if p.name == portnameoruuid or p.uuid == portnameoruuid: - break - else: - raise exc.CommandError('Ethernet port not found: host %s port %s' % - (host.id, portnameoruuid)) - p.autoneg = 'Yes' - return p - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('pnameoruuid', metavar='', - help="Name or UUID of port") -def do_host_ethernet_port_show(cc, args): - """Show host ethernet port attributes.""" - host = host_utils._find_host(cc, args.hostnameorid) - port = _find_port(cc, host, args.pnameoruuid) - _print_ethernet_port_show(port) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_ethernet_port_list(cc, args): - """List host ethernet ports.""" - host = host_utils._find_host(cc, args.hostnameorid) - - ports = cc.ethernetport.list(host.uuid) - for p in ports: - p.autoneg = 'Yes' # TODO(jkung) Remove when autoneg supported in DB - - field_labels = ['uuid', 'name', 'mac address', 'pci address', 'processor', - 'auto neg', 'device type', 'boot i/f'] - fields = ['uuid', 'name', 'mac', 'pciaddr', 'numa_node', - 'autoneg', 'pdevice', 'bootp'] - - utils.print_list(ports, fields, field_labels, sortby=1, - formatters={'bootp': _bootp_port_formatter}) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/host.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/host.py deleted file mode 100644 index 48adcec2..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/host.py +++ /dev/null @@ -1,116 +0,0 @@ -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base -from inventoryclient.common import utils -from inventoryclient import exc -import json - - -CREATION_ATTRIBUTES = ['hostname', 'personality', 'subfunctions', - 'mgmt_mac', 'mgmt_ip', - 'bm_ip', 'bm_type', 'bm_username', - 'bm_password', 'serialid', 'location', - 'boot_device', 'rootfs_device', 'install_output', - 'console', 'tboot', 'ttys_dcd', - 'administrative', 'operational', 'availability', - 'invprovision'] - - -class Host(base.Resource): - def __repr__(self): - return "" % self._info - - -class HostManager(base.Manager): - resource_class = Host - - @staticmethod - def _path(id=None): - return '/v1/hosts/%s' % id if id else '/v1/hosts' - - def list(self): - return self._list(self._path(), "hosts") - - def list_port(self, host_id): - path = "%s/ports" % host_id - return self._list(self._path(path), "ports") - - def list_ethernet_port(self, host_id): - path = "%s/ethernet_ports" % host_id - return self._list(self._path(path), "ethernet_ports") - - def list_personality(self, personality): - path = self._path() + "?personality=%s" % personality - return self._list(path, "hosts") - - def get(self, host_id): - try: - return self._list(self._path(host_id))[0] - except IndexError: - return None - - def create(self, **kwargs): - new = {} - for (key, value) in kwargs.items(): - if key in CREATION_ATTRIBUTES: - new[key] = value - else: - raise exc.InvalidAttribute() - return self._create(self._path(), new) - - def upgrade(self, hostid, force): - new = {} - new['force'] = force - resp, body = self.api.json_request( - 'POST', self._path(hostid) + "/upgrade", body=new) - return self.resource_class(self, body) - - def downgrade(self, hostid, force): - new = {} - new['force'] = force - resp, body = self.api.json_request( - 'POST', self._path(hostid) + "/downgrade", body=new) - return self.resource_class(self, body) - - def create_many(self, body): - return self._upload(self._path() + "/bulk_add", body) - - def update_install_uuid(self, hostid, install_uuid): - path = self._path(hostid) + "/state/update_install_uuid" - - self.api.json_request('PUT', path, body=install_uuid) - - def delete(self, host_id): - return self._delete(self._path(host_id)) - - def update(self, host_id, patch): - return self._update(self._path(host_id), - data=(json.dumps(patch))) - - def bulk_export(self): - result = self._json_get(self._path('bulk_export')) - return result - - -def _find_host(cc, host): - if host.isdigit() or utils.is_uuid_like(host): - try: - h = cc.host.get(host) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % host) - else: - return h - else: - hostlist = cc.host.list() - for h in hostlist: - if h.hostname == host: - return h - else: - raise exc.CommandError('host not found: %s' % host) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/host_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/host_shell.py deleted file mode 100644 index c37624e4..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/host_shell.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
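
_find_host() above resolves a user-supplied identifier in two steps: a digit string or UUID goes straight to GET /v1/hosts/<id>, and anything else is treated as a hostname and matched against the full host list, so name lookups cost one extra round trip. Schematically:

    host_utils._find_host(cc, '42')             # direct GET by numeric id
    host_utils._find_host(cc, host_uuid)        # direct GET by UUID
    host_utils._find_host(cc, 'controller-0')   # lists all hosts, matches hostname
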
-# - -from collections import OrderedDict -import datetime -import os - -from inventoryclient.common.i18n import _ -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _print_host_show(host): - fields = ['id', 'uuid', 'personality', 'hostname', 'invprovision', - 'administrative', 'operational', 'availability', 'task', - 'action', 'mgmt_mac', 'mgmt_ip', 'serialid', - 'capabilities', 'bm_type', 'bm_username', 'bm_ip', - 'location', 'uptime', 'reserved', 'created_at', 'updated_at', - 'boot_device', 'rootfs_device', 'install_output', 'console', - 'tboot', 'vim_progress_status', 'software_load', 'install_state', - 'install_state_info'] - optional_fields = ['ttys_dcd'] - if host.subfunctions != host.personality: - fields.append('subfunctions') - if 'controller' in host.subfunctions: - fields.append('subfunction_oper') - fields.append('subfunction_avail') - - # Do not display the trailing '+' which indicates the audit iterations - if host.install_state_info: - host.install_state_info = host.install_state_info.rstrip('+') - if host.install_state: - host.install_state = host.install_state.rstrip('+') - - data_list = [(f, getattr(host, f, '')) for f in fields] - data_list += [(f, getattr(host, f, '')) for f in optional_fields - if hasattr(host, f)] - data = dict(data_list) - ordereddata = OrderedDict(sorted(data.items(), key=lambda t: t[0])) - utils.print_dict(ordereddata, wrap=72) - - -@utils.arg('hostnameorid', metavar='', - help="Name or ID of host") -def do_host_show(cc, args): - """Show host attributes.""" - host = host_utils._find_host(cc, args.hostnameorid) - _print_host_show(host) - - -def do_host_list(cc, args): - """List hosts.""" - hosts = cc.host.list() - field_labels = ['id', 'hostname', 'personality', - 'administrative', 'operational', 'availability'] - fields = ['id', 'hostname', 'personality', - 'administrative', 'operational', 'availability'] - utils.print_list(hosts, fields, field_labels, sortby=0) - - -@utils.arg('-n', '--hostname', - metavar='', - help='Hostname of the host') -@utils.arg('-p', '--personality', - metavar='', - choices=['controller', 'compute', 'storage', 'network', 'profile'], - help='Personality or type of host [REQUIRED]') -@utils.arg('-s', '--subfunctions', - metavar='', - choices=['lowlatency'], - help='Performance profile or subfunctions of host.[Optional]') -@utils.arg('-m', '--mgmt_mac', - metavar='', - help='MAC Address of the host mgmt interface [REQUIRED]') -@utils.arg('-i', '--mgmt_ip', - metavar='', - help='IP Address of the host mgmt interface (when using static ' - 'address allocation)') -@utils.arg('-I', '--bm_ip', - metavar='', - help="IP Address of the host board management interface, " - "only necessary if this host's board management controller " - "is not in the primary region") -@utils.arg('-T', '--bm_type', - metavar='', - help='Type of the host board management interface') -@utils.arg('-U', '--bm_username', - metavar='', - help='Username for the host board management interface') -@utils.arg('-P', '--bm_password', - metavar='', - help='Password for the host board management interface') -@utils.arg('-b', '--boot_device', - metavar='', - help='Device for boot partition, relative to /dev. Default: sda') -@utils.arg('-r', '--rootfs_device', - metavar='', - help='Device for rootfs partition, relative to /dev. Default: sda') -@utils.arg('-o', '--install_output', - metavar='', - choices=['text', 'graphical'], - help='Installation output format, text or graphical. 
Default: text') -@utils.arg('-c', '--console', - metavar='', - help='Serial console. Default: ttyS0,115200') -@utils.arg('-l', '--location', - metavar='', - help='Physical location of the host') -@utils.arg('-D', '--ttys_dcd', - metavar='', - help='Enable/disable serial console data carrier detection') -def do_host_add(cc, args): - """Add a new host.""" - field_list = ['hostname', 'personality', 'subfunctions', - 'mgmt_mac', 'mgmt_ip', - 'bm_ip', 'bm_type', 'bm_username', 'bm_password', - 'boot_device', 'rootfs_device', 'install_output', 'console', - 'location', 'ttys_dcd'] - fields = dict((k, v) for (k, v) in vars(args).items() - if k in field_list and not (v is None)) - - # This is the expected format of the location field - if 'location' in fields: - fields['location'] = {"locn": fields['location']} - - host = cc.host.create(**fields) - suuid = getattr(host, 'uuid', '') - - try: - host = cc.host.get(suuid) - except exc.HTTPNotFound: - raise exc.CommandError('Host not found: %s' % suuid) - else: - _print_host_show(host) - - -@utils.arg('hostsfile', - metavar='', - help='File containing the XML descriptions of hosts to be ' - 'provisioned [REQUIRED]') -def do_host_bulk_add(cc, args): - """Add multiple new hosts.""" - field_list = ['hostsfile'] - fields = dict((k, v) for (k, v) in vars(args).items() - if k in field_list and not (v is None)) - - hostsfile = fields['hostsfile'] - if os.path.isdir(hostsfile): - raise exc.CommandError("Error: %s is a directory." % hostsfile) - try: - req = open(hostsfile, 'rb') - except Exception: - raise exc.CommandError("Error: Could not open file %s." % hostsfile) - - response = cc.host.create_many(req) - if not response: - raise exc.CommandError("The request timed out or there was an " - "unknown error") - success = response.get('success') - error = response.get('error') - if success: - print("Success: " + success + "\n") - if error: - print("Error:\n" + error) - - -@utils.arg('hostnameorid', - metavar='', - nargs='+', - help="Name or ID of host") -def do_host_delete(cc, args): - """Delete a host.""" - for n in args.hostnameorid: - try: - cc.host.delete(n) - print('Deleted host %s' % n) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % n) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('attributes', - metavar='', - nargs='+', - action='append', - default=[], - help="Attributes to update ") -def do_host_update(cc, args): - """Update host attributes.""" - patch = utils.args_array_to_patch("replace", args.attributes[0]) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('-f', '--force', - action='store_true', - default=False, - help="Force a lock operation ") -def do_host_lock(cc, args): - """Lock a host.""" - attributes = [] - - if args.force is True: - # Forced lock operation - attributes.append('action=force-lock') - else: - # Normal lock operation - attributes.append('action=lock') - - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") 
-@utils.arg('-f', '--force', - action='store_true', - default=False, - help="Force an unlock operation ") -def do_host_unlock(cc, args): - """Unlock a host.""" - attributes = [] - - if args.force is True: - # Forced unlock operation - attributes.append('action=force-unlock') - else: - # Normal unlock operation - attributes.append('action=unlock') - - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('-f', '--force', - action='store_true', - default=False, - help="Force a host swact operation ") -def do_host_swact(cc, args): - """Switch activity away from this active host.""" - attributes = [] - - if args.force is True: - # Forced swact operation - attributes.append('action=force-swact') - else: - # Normal swact operation - attributes.append('action=swact') - - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_reset(cc, args): - """Reset a host.""" - attributes = [] - attributes.append('action=reset') - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_reboot(cc, args): - """Reboot a host.""" - attributes = [] - attributes.append('action=reboot') - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_reinstall(cc, args): - """Reinstall a host.""" - attributes = [] - attributes.append('action=reinstall') - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_power_on(cc, args): - """Power on a host.""" - attributes = [] - attributes.append('action=power-on') - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_power_off(cc, args): - """Power off a host.""" - attributes = [] - attributes.append('action=power-off') - patch = utils.args_array_to_patch("replace", attributes) - host = host_utils._find_host(cc, args.hostnameorid) - try: - host = 
cc.host.update(host.id, patch) - except exc.HTTPNotFound: - raise exc.CommandError('host not found: %s' % args.hostnameorid) - _print_host_show(host) - - -def _timestamped(dname, fmt='%Y-%m-%d-%H-%M-%S_{dname}'): - return datetime.datetime.now().strftime(fmt).format(dname=dname) - - -@utils.arg('--filename', - help="The full file path to store the host file. Default './hosts.xml'") # noqa -def do_host_bulk_export(cc, args): - """Export host bulk configurations.""" - result = cc.host.bulk_export() - - xml_content = result['content'] - config_filename = './hosts.xml' - if hasattr(args, 'filename') and args.filename: - config_filename = args.filename - try: - with open(config_filename, 'wb') as fw: - fw.write(xml_content) - print(_('Export successfully to %s') % config_filename) - except IOError: - print(_('Cannot write to file: %s') % config_filename) - - return diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_agent.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_agent.py deleted file mode 100644 index c60c57f0..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_agent.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base - - -class LldpAgent(base.Resource): - def __repr__(self): - return "" % self._info - - -class LldpAgentManager(base.Manager): - resource_class = LldpAgent - - def list(self, host_id): - path = '/v1/hosts/%s/lldp_agents' % host_id - agents = self._list(path, "lldp_agents") - return agents - - def get(self, uuid): - path = '/v1/lldp_agents/%s' % uuid - try: - return self._list(path)[0] - except IndexError: - return None - - def get_by_port(self, port_id): - path = '/v1/ports/%s/lldp_agents' % port_id - return self._list(path, "lldp_agents") diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_agent_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_agent_shell.py deleted file mode 100644 index cfcea11f..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_agent_shell.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
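Every host action command above (lock, unlock, swact, reset, reboot, reinstall, power-on, power-off) reduces to a single JSON-patch 'replace' on the host's action field. A hedged sketch of what utils.args_array_to_patch plausibly emits, assuming it behaves like its identically named sysinv cgts-client counterpart and splits each key=value attribute into an op/path/value triple:

    # Assumed behaviour of utils.args_array_to_patch (not shown in this diff):
    # each 'key=value' string becomes one JSON-patch operation.
    def args_array_to_patch(op, attributes):
        patch = []
        for attr in attributes:
            key, _, value = attr.partition('=')
            patch.append({'op': op, 'path': '/' + key, 'value': value})
        return patch

    print(args_array_to_patch('replace', ['action=force-lock']))
    # [{'op': 'replace', 'path': '/action', 'value': 'force-lock'}]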
-# - -from inventoryclient.common import utils -from inventoryclient.v1 import host as host_utils - - -class LldpAgentObj(object): - def __init__(self, dictionary): - for k, v in dictionary.items(): - setattr(self, k, v) - - -def _print_lldp_agent_show(agent): - fields = ['uuid', 'host_uuid', - 'created_at', 'updated_at', - 'uuid', 'port_name', 'chassis_id', 'port_identifier', 'ttl', - 'system_description', 'system_name', 'system_capabilities', - 'management_address', 'port_description', 'dot1_lag', - 'dot1_vlan_names', - 'dot3_mac_status', 'dot3_max_frame' - ] - labels = ['uuid', 'host_uuid', - 'created_at', 'updated_at', - 'uuid', 'local_port', 'chassis_id', 'port_identifier', 'ttl', - 'system_description', 'system_name', 'system_capabilities', - 'management_address', 'port_description', 'dot1_lag', - 'dot1_vlan_names', - 'dot3_mac_status', 'dot3_max_frame' - ] - data = [(f, getattr(agent, f, '')) for f in fields] - utils.print_tuple_list(data, labels) - - -def _lldp_carriage_formatter(value): - chars = ['\n', '\\n', '\r', '\\r'] - for char in chars: - if char in value: - value = value.replace(char, '. ') - return value - - -def _lldp_system_name_formatter(lldp): - system_name = getattr(lldp, 'system_name') - if system_name: - return _lldp_carriage_formatter(system_name) - - -def _lldp_system_description_formatter(lldp): - system_description = getattr(lldp, 'system_description') - if system_description: - return _lldp_carriage_formatter(system_description) - - -def _lldp_port_description_formatter(lldp): - port_description = getattr(lldp, 'port_description') - if port_description: - return _lldp_carriage_formatter(port_description) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_lldp_agent_list(cc, args): - """List host lldp agents.""" - host = host_utils._find_host(cc, args.hostnameorid) - agents = cc.lldp_agent.list(host.uuid) - - field_labels = ['uuid', 'local_port', 'status', 'chassis_id', 'port_id', - 'system_name', 'system_description'] - fields = ['uuid', 'port_name', 'status', 'chassis_id', 'port_identifier', - 'system_name', 'system_description'] - formatters = {'system_name': _lldp_system_name_formatter, - 'system_description': _lldp_system_description_formatter, - 'port_description': _lldp_port_description_formatter} - - utils.print_list(agents, fields, field_labels, sortby=1, - formatters=formatters) - - -@utils.arg('uuid', - metavar='', - help="UUID of the LLDP agent") -def do_lldp_agent_show(cc, args): - """Show LLDP agent attributes.""" - agent = cc.lldp_agent.get(args.uuid) - _print_lldp_agent_show(agent) - return diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_neighbour.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_neighbour.py deleted file mode 100644 index 93ab6283..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_neighbour.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. 
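_lldp_carriage_formatter above exists because LLDP peers routinely embed line breaks (literal or escaped) in their system descriptions, which would wreck the fixed-width listing. The function is self-contained enough to exercise directly:

    # Same logic as _lldp_carriage_formatter above, runnable standalone.
    def lldp_carriage_formatter(value):
        for char in ['\n', '\\n', '\r', '\\r']:
            if char in value:
                value = value.replace(char, '. ')
        return value

    print(lldp_carriage_formatter('Ubuntu 18.04\nkernel 4.15'))
    # Ubuntu 18.04. kernel 4.15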
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base - - -class LldpNeighbour(base.Resource): - def __repr__(self): - return "" % self._info - - -class LldpNeighbourManager(base.Manager): - resource_class = LldpNeighbour - - def list(self, host_id): - path = '/v1/hosts/%s/lldp_neighbours' % host_id - neighbours = self._list(path, "lldp_neighbours") - return neighbours - - def list_by_port(self, port_id): - path = '/v1/ports/%s/lldp_neighbours' % port_id - return self._list(path, "lldp_neighbours") - - def get(self, uuid): - path = '/v1/lldp_neighbours/%s' % uuid - try: - return self._list(path)[0] - except IndexError: - return None diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_neighbour_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_neighbour_shell.py deleted file mode 100644 index 9dbe5d18..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/lldp_neighbour_shell.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -from inventoryclient.common import utils -from inventoryclient.v1 import host as host_utils - - -class LldpNeighbourObj(object): - def __init__(self, dictionary): - for k, v in dictionary.items(): - setattr(self, k, v) - - -def _lldp_carriage_formatter(value): - chars = ['\n', '\\n', '\r', '\\r'] - for char in chars: - if char in value: - value = value.replace(char, '. ') - return value - - -def _lldp_system_name_formatter(lldp): - system_name = getattr(lldp, 'system_name') - if system_name: - return _lldp_carriage_formatter(system_name) - - -def _lldp_system_description_formatter(lldp): - system_description = getattr(lldp, 'system_description') - if system_description: - return _lldp_carriage_formatter(system_description) - - -def _lldp_port_description_formatter(lldp): - port_description = getattr(lldp, 'port_description') - if port_description: - return _lldp_carriage_formatter(port_description) - - -def _print_lldp_neighbour_show(neighbour): - fields = ['uuid', 'host_uuid', - 'created_at', 'updated_at', - 'uuid', 'port_name', 'chassis_id', 'port_identifier', 'ttl', - 'msap', 'system_description', 'system_name', - 'system_capabilities', 'management_address', 'port_description', - 'dot1_lag', 'dot1_port_vid', 'dot1_vlan_names', - 'dot1_proto_vids', 'dot1_proto_ids', 'dot3_mac_status', - 'dot3_max_frame' - ] - - labels = ['uuid', 'host_uuid', - 'created_at', 'updated_at', - 'uuid', 'local_port', 'chassis_id', 'port_identifier', 'ttl', - 'msap', 'system_description', 'system_name', - 'system_capabilities', 'management_address', 'port_description', - 'dot1_lag', 'dot1_port_vid', 'dot1_vlan_names', - 'dot1_proto_vids', 'dot1_proto_ids', 'dot3_mac_status', - 'dot3_max_frame' - ] - data = [(f, getattr(neighbour, f, '')) for f in fields] - utils.print_tuple_list(data, labels) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_lldp_neighbor_list(cc, args): - """List host lldp neighbors.""" - host = host_utils._find_host(cc, args.hostnameorid) - neighbours = cc.lldp_neighbour.list(host.uuid) - - field_labels = ['uuid', 'local_port', 'remote_port', 'chassis_id', - 'system_name', 'system_description', - 'management_address'] - fields = ['uuid', 'port_name', 'port_identifier', 'chassis_id', - 'system_name', 'system_description', - 
'management_address'] - formatters = {'system_name': _lldp_system_name_formatter, - 'system_description': _lldp_system_description_formatter, - 'port_description': _lldp_port_description_formatter} - - utils.print_list(neighbours, fields, field_labels, sortby=1, - formatters=formatters) - - -@utils.arg('uuid', - metavar='', - help="UUID of the LLDP neighbor") -def do_lldp_neighbor_show(cc, args): - """Show LLDP neighbor attributes.""" - neighbour = cc.lldp_neighbour.get(args.uuid) - _print_lldp_neighbour_show(neighbour) - return diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/memory.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/memory.py deleted file mode 100644 index 83f259cc..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/memory.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base -from inventoryclient import exc -import json - -CREATION_ATTRIBUTES = ['host_uuid', 'memtotal_mib', 'memavail_mib', - 'platform_reserved_mib', 'hugepages_configured', - 'vswitch_hugepages_size_mib', 'vswitch_hugepages_reqd', - 'vswitch_hugepages_nr', 'vswitch_hugepages_avail', - 'vm_hugepages_nr_2M_pending', - 'vm_hugepages_nr_1G_pending', - 'vm_hugepages_nr_2M', 'vm_hugepages_avail_2M', - 'vm_hugepages_nr_1G', 'vm_hugepages_avail_1G', - 'vm_hugepages_avail_1G', 'vm_hugepages_use_1G', - 'vm_hugepages_possible_2M', 'vm_hugepages_possible_1G', - 'capabilities', 'numa_node', - 'minimum_platform_reserved_mib'] - - -class Memory(base.Resource): - def __repr__(self): - return "" % self._info - - -class MemoryManager(base.Manager): - resource_class = Memory - - @staticmethod - def _path(id=None): - return '/v1/memorys/%s' % id if id else '/v1/memorys' - - def list(self, host_id): - path = '/v1/hosts/%s/memorys' % host_id - return self._list(path, "memorys") - - def get(self, memory_id): - path = '/v1/memorys/%s' % memory_id - try: - return self._list(path)[0] - except IndexError: - return None - - def update(self, memory_id, patch): - return self._update(self._path(memory_id), - data=(json.dumps(patch))) - - def create(self, **kwargs): - path = '/v1/memorys' - new = {} - for (key, value) in kwargs.items(): - if key in CREATION_ATTRIBUTES: - new[key] = value - else: - raise exc.InvalidAttribute('%s' % key) - return self._create(path, new) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/memory_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/memory_shell.py deleted file mode 100644 index 1d6c4812..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/memory_shell.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
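MemoryManager.create() above, like its Host and Node counterparts, validates keyword arguments against a CREATION_ATTRIBUTES whitelist before POSTing. The shared pattern, sketched with a stand-in exception class (the real one is inventoryclient.exc.InvalidAttribute):

    class InvalidAttribute(Exception):  # stand-in for inventoryclient.exc
        pass

    CREATION_ATTRIBUTES = ['numa_node', 'capabilities', 'host_uuid']

    def filter_creation_kwargs(**kwargs):
        new = {}
        for key, value in kwargs.items():
            if key in CREATION_ATTRIBUTES:
                new[key] = value
            else:
                raise InvalidAttribute(key)
        return new

    print(filter_creation_kwargs(numa_node=0, host_uuid='abc123'))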
-# - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _print_memory_show(memory): - fields = ['memtotal_mib', - 'platform_reserved_mib', - 'memavail_mib', - 'hugepages_configured', - 'vswitch_hugepages_size_mib', - 'vswitch_hugepages_nr', - 'vswitch_hugepages_avail', - 'vm_hugepages_nr_4K', - 'vm_hugepages_nr_2M', - 'vm_hugepages_nr_2M_pending', - 'vm_hugepages_avail_2M', - 'vm_hugepages_nr_1G', - 'vm_hugepages_nr_1G_pending', - 'vm_hugepages_avail_1G', - 'uuid', 'host_uuid', 'node_uuid', - 'created_at', 'updated_at'] - labels = ['Memory: Usable Total (MiB)', - ' Platform (MiB)', - ' Available (MiB)', - 'Huge Pages Configured', - 'vSwitch Huge Pages: Size (MiB)', - ' Total', - ' Available', - 'VM Pages (4K): Total', - 'VM Huge Pages (2M): Total', - ' Total Pending', - ' Available', - 'VM Huge Pages (1G): Total', - ' Total Pending', - ' Available', - 'uuid', 'host_uuid', 'node_uuid', - 'created_at', 'updated_at'] - - data = [(f, getattr(memory, f, '')) for f in fields] - - for d in data: - if d[0] == 'vm_hugepages_nr_2M_pending': - if d[1] is None: - fields.remove(d[0]) - labels.pop(labels.index(' Total Pending')) - - if d[0] == 'vm_hugepages_nr_1G_pending': - if d[1] is None: - fields.remove(d[0]) - labels.pop(len(labels) - labels[::-1].index( - ' Total Pending') - 1) - - data = [(f, getattr(memory, f, '')) for f in fields] - utils.print_tuple_list(data, labels) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('numa_node', - metavar='', - help="processor") -def do_host_memory_show(cc, args): - """Show memory attributes.""" - host = host_utils._find_host(cc, args.hostnameorid) - nodes = cc.node.list(host.uuid) - memorys = cc.memory.list(host.uuid) - for m in memorys: - for n in nodes: - if m.node_uuid == n.uuid: - if int(n.numa_node) == int(args.numa_node): - _print_memory_show(m) - return - else: - raise exc.CommandError('Processor not found: host %s processor %s' % - (host.hostname, args.numa_node)) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_memory_list(cc, args): - """List memory nodes.""" - - host = host_utils._find_host(cc, args.hostnameorid) - - nodes = cc.node.list(host.uuid) - memorys = cc.memory.list(host.uuid) - for m in memorys: - for n in nodes: - if m.node_uuid == n.uuid: - m.numa_node = n.numa_node - break - - fields = ['numa_node', - 'memtotal_mib', - 'platform_reserved_mib', - 'memavail_mib', - 'hugepages_configured', - 'vswitch_hugepages_size_mib', - 'vswitch_hugepages_nr', - 'vswitch_hugepages_avail', - 'vm_hugepages_nr_4K', - 'vm_hugepages_nr_2M', - 'vm_hugepages_avail_2M', - 'vm_hugepages_nr_2M_pending', - 'vm_hugepages_nr_1G', - 'vm_hugepages_avail_1G', - 'vm_hugepages_nr_1G_pending', - 'vm_hugepages_use_1G'] - - field_labels = ['processor', - 'mem_total(MiB)', - 'mem_platform(MiB)', - 'mem_avail(MiB)', - 'hugepages(hp)_configured', - 'vs_hp_size(MiB)', - 'vs_hp_total', - 'vs_hp_avail', - 'vm_total_4K', - 'vm_hp_total_2M', - 'vm_hp_avail_2M', - 'vm_hp_pending_2M', - 'vm_hp_total_1G', - 'vm_hp_avail_1G', - 'vm_hp_pending_1G', - 'vm_hp_use_1G'] - - utils.print_list(memorys, fields, field_labels, sortby=1) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('numa_node', - metavar='', - help="processor") -@utils.arg('-m', '--platform_reserved_mib', - metavar='', - help='The amount of platform memory (MiB) for the numa node') -@utils.arg('-2M', 
'--vm_hugepages_nr_2M_pending', - metavar='<2M hugepages number>', - help='The number of 2M vm huge pages for the numa node') -@utils.arg('-1G', '--vm_hugepages_nr_1G_pending', - metavar='<1G hugepages number>', - help='The number of 1G vm huge pages for the numa node') -def do_host_memory_modify(cc, args): - """Modify platform reserved and/or libvirt vm huge page memory - attributes for compute nodes. - """ - - rwfields = ['platform_reserved_mib', - 'vm_hugepages_nr_2M_pending', - 'vm_hugepages_nr_1G_pending'] - - host = host_utils._find_host(cc, args.hostnameorid) - - user_specified_fields = dict((k, v) for (k, v) in vars(args).items() - if k in rwfields and not (v is None)) - - host = host_utils._find_host(cc, args.hostnameorid) - nodes = cc.node.list(host.uuid) - memorys = cc.memory.list(host.uuid) - mem = None - for m in memorys: - for n in nodes: - if m.node_uuid == n.uuid: - if int(n.numa_node) == int(args.numa_node): - mem = m - break - if mem: - break - - if mem is None: - raise exc.CommandError('Processor not found: host %s processor %s' % - (host.hostname, args.numa_node)) - - patch = [] - for (k, v) in user_specified_fields.items(): - patch.append({'op': 'replace', 'path': '/' + k, 'value': v}) - - if patch: - memory = cc.memory.update(mem.uuid, patch) - _print_memory_show(memory) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/node.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/node.py deleted file mode 100644 index 8a4acd18..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/node.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base -from inventoryclient import exc -import json - - -CREATION_ATTRIBUTES = ['numa_node', 'capabilities', 'host_uuid'] - - -class Node(base.Resource): - def __repr__(self): - return "" % self._info - - -class NodeManager(base.Manager): - resource_class = Node - - def list(self, host_id): - path = '/v1/hosts/%s/nodes' % host_id - return self._list(path, "nodes") - - def get(self, node_id): - path = '/v1/nodes/%s' % node_id - try: - return self._list(path)[0] - except IndexError: - return None - - def create(self, **kwargs): - path = '/v1/nodes' - new = {} - for (key, value) in kwargs.items(): - if key in CREATION_ATTRIBUTES: - new[key] = value - else: - raise exc.InvalidAttribute('%s' % key) - return self._create(path, new) - - def delete(self, node_id): - path = '/v1/nodes/%s' % node_id - return self._delete(path) - - def update(self, node_id, patch): - path = '/v1/nodes/%s' % node_id - return self._update(path, - data=(json.dumps(patch))) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/node_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/node_shell.py deleted file mode 100644 index d5ba51d3..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/node_shell.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# All Rights Reserved. 
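do_host_memory_modify above shows the modify-command recipe used throughout the client: filter the argparse namespace down to the read-write fields the user actually supplied, then emit one 'replace' patch operation per field. Reduced to its essentials:

    import argparse

    # Only fields the user actually set become patch operations.
    rwfields = ['platform_reserved_mib', 'vm_hugepages_nr_2M_pending',
                'vm_hugepages_nr_1G_pending']
    args = argparse.Namespace(platform_reserved_mib='4000',
                              vm_hugepages_nr_2M_pending=None,
                              vm_hugepages_nr_1G_pending=None)
    user_fields = dict((k, v) for k, v in vars(args).items()
                       if k in rwfields and v is not None)
    patch = [{'op': 'replace', 'path': '/' + k, 'value': v}
             for k, v in user_fields.items()]
    print(patch)
    # [{'op': 'replace', 'path': '/platform_reserved_mib', 'value': '4000'}]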
-# - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _print_node_show(node): - fields = ['numa_node', - 'uuid', 'host_uuid', - 'created_at'] - data = [(f, getattr(node, f, '')) for f in fields] - utils.print_tuple_list(data) - - -def _find_node(cc, host, nodeuuid): - nodes = cc.node.list(host.uuid) - for i in nodes: - if i.uuid == nodeuuid: - break - else: - raise exc.CommandError('Inode not found: host %s if %s' % - (host.hostname, nodeuuid)) - return i - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('nodeuuid', - metavar='', - help="Name or UUID of node") -def donot_host_node_show(cc, args): - """Show a node. DEBUG only""" - host = host_utils._find_host(cc, args.hostnameorid) - # API actually doesnt need hostid once it has node uuid - - i = _find_node(cc, host, args.nodeuuid) - - _print_node_show(i) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def donot_host_node_list(cc, args): - """List nodes. DEBUG only""" - host = host_utils._find_host(cc, args.hostnameorid) - - nodes = cc.node.list(host.uuid) - - field_labels = ['uuid', 'numa_node', 'capabilities'] - fields = ['uuid', 'numa_node', 'capabilities'] - utils.print_list(nodes, fields, field_labels, sortby=0) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/pci_device.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/pci_device.py deleted file mode 100755 index edb6d715..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/pci_device.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright (c) 2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base -import json - - -class PciDevice(base.Resource): - def __repr__(self): - return "" % self._info - - -class PciDeviceManager(base.Manager): - resource_class = PciDevice - - def list(self, host_id): - path = '/v1/hosts/%s/pci_devices' % host_id - return self._list(path, "pci_devices") - - def list_all(self): - path = '/v1/pci_devices' - return self._list(path, "pci_devices") - - def get(self, pci_id): - path = '/v1/pci_devices/%s' % pci_id - try: - return self._list(path)[0] - except IndexError: - return None - - def update(self, pci_id, patch): - path = '/v1/pci_devices/%s' % pci_id - return self._update(path, - data=(json.dumps(patch))) - - -def get_pci_device_display_name(p): - if p.name: - return p.name - else: - return '(' + str(p.uuid)[-8:] + ')' diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/pci_device_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/pci_device_shell.py deleted file mode 100644 index f8afdb7b..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/pci_device_shell.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. 
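_find_node above, like _find_port and _find_device around it, leans on Python's for/else: the else clause runs only when the loop finishes without a break, which makes it a compact not-found guard. Demonstrated standalone:

    # for/else lookup idiom used by the _find_* helpers in this diff.
    def find_by_uuid(items, uuid):
        for item in items:
            if item['uuid'] == uuid:
                break
        else:
            raise LookupError('not found: %s' % uuid)
        return item

    print(find_by_uuid([{'uuid': 'a1'}, {'uuid': 'b2'}], 'b2'))
    # {'uuid': 'b2'}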
-# - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _print_device_show(device): - fields = ['name', 'pciaddr', 'pclass_id', 'pvendor_id', 'pdevice_id', - 'pclass', 'pvendor', 'pdevice', 'numa_node', 'enabled', - 'sriov_totalvfs', 'sriov_numvfs', 'sriov_vfs_pci_address', - 'extra_info', 'created_at', 'updated_at'] - - labels = ['name', 'address', 'class id', 'vendor id', 'device id', - 'class name', 'vendor name', 'device name', 'numa_node', - 'enabled', 'sriov_totalvfs', 'sriov_numvfs', - 'sriov_vfs_pci_address', 'extra_info', 'created_at', - 'updated_at'] - - data = [(f, getattr(device, f, '')) for f in fields] - utils.print_tuple_list(data, labels) - - -def _find_device(cc, host, nameorpciaddr): - devices = cc.pci_device.list(host.uuid) - for d in devices: - if d.name == nameorpciaddr or d.pciaddr == nameorpciaddr: - break - else: - raise exc.CommandError('PCI devices not found: host %s device %s' % - (host.id, nameorpciaddr)) - return d - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('nameorpciaddr', - metavar='', - help="Name or PCI address of device") -def do_host_device_show(cc, args): - """Show device attributes.""" - host = host_utils._find_host(cc, args.hostnameorid) - device = _find_device(cc, host, args.nameorpciaddr) - _print_device_show(device) - return - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('-a', '--all', - action='store_true', - help='List all devices, including those that are not enabled') -def do_host_device_list(cc, args): - """List devices.""" - - host = host_utils._find_host(cc, args.hostnameorid) - devices = cc.pci_device.list(host.uuid) - for device in devices[:]: - if not args.all: - if not device.enabled: - devices.remove(device) - - fields = ['name', 'pciaddr', 'pclass_id', 'pvendor_id', 'pdevice_id', - 'pclass', 'pvendor', 'pdevice', 'numa_node', 'enabled'] - - labels = ['name', 'address', 'class id', 'vendor id', 'device id', - 'class name', 'vendor name', 'device name', 'numa_node', - 'enabled'] - - utils.print_list(devices, fields, labels, sortby=1) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('nameorpciaddr', - metavar='', - help="Name or PCI address of device") -@utils.arg('-n', '--name', - metavar='', - help='The new name of the device') -@utils.arg('-e', '--enabled', - metavar='', - help='The enabled status of the device') -def do_host_device_modify(cc, args): - """Modify device availability for compute nodes.""" - - rwfields = ['enabled', - 'name'] - - host = host_utils._find_host(cc, args.hostnameorid) - - user_specified_fields = dict((k, v) for (k, v) in vars(args).items() - if k in rwfields and not (v is None)) - - device = _find_device(cc, host, args.nameorpciaddr) - - fields = device.__dict__ - fields.update(user_specified_fields) - - patch = [] - for (k, v) in user_specified_fields.items(): - patch.append({'op': 'replace', 'path': '/' + k, 'value': v}) - - if patch: - try: - device = cc.pci_device.update(device.uuid, patch) - _print_device_show(device) - except exc.HTTPNotFound: - raise exc.CommandError('Device update failed: host %s ' - 'device %s : update %s' % - (args.hostnameorid, - args.nameorpciaddr, - patch)) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/port.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/port.py deleted file mode 100644 index 4ef27e27..00000000 --- 
a/python-inventoryclient/inventoryclient/inventoryclient/v1/port.py +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright (c) 2013-2014 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -*- encoding: utf-8 -*- -# - -from inventoryclient.common import base - - -class Port(base.Resource): - def __repr__(self): - return "" % self._info - - -class PortManager(base.Manager): - resource_class = Port - - def list(self, host_id): - path = '/v1/hosts/%s/ports' % host_id - return self._list(path, "ports") - - def get(self, port_id): - path = '/v1/ports/%s' % port_id - try: - return self._list(path)[0] - except IndexError: - return None - - -def get_port_display_name(p): - if p.name: - return p.name - if p.namedisplay: - return p.namedisplay - else: - return '(' + str(p.uuid)[-8:] + ')' diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/port_shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/port_shell.py deleted file mode 100644 index d513245b..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/port_shell.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013-2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# All Rights Reserved. -# - -from inventoryclient.common import utils -from inventoryclient import exc -from inventoryclient.v1 import host as host_utils - - -def _print_port_show(port): - fields = ['name', 'namedisplay', - 'type', 'pciaddr', 'dev_id', 'numa_node', - 'sriov_totalvfs', 'sriov_numvfs', - 'sriov_vfs_pci_address', 'driver', - 'pclass', 'pvendor', 'pdevice', - 'capabilities', - 'uuid', 'host_uuid', 'interface_uuid', - 'dpdksupport', - 'created_at', 'updated_at'] - labels = ['name', 'namedisplay', - 'type', 'pciaddr', 'dev_id', 'processor', - 'sriov_totalvfs', 'sriov_numvfs', - 'sriov_vfs_pci_address', 'driver', - 'pclass', 'pvendor', 'pdevice', - 'capabilities', - 'uuid', 'host_uuid', 'interface_uuid', - 'accelerated', - 'created_at', 'updated_at'] - data = [(f, getattr(port, f, '')) for f in fields] - utils.print_tuple_list(data, labels) - - -def _find_port(cc, host, portnameoruuid): - ports = cc.port.list(host.uuid) - for p in ports: - if p.name == portnameoruuid or p.uuid == portnameoruuid: - break - else: - raise exc.CommandError('Port not found: host %s port %s' % - (host.id, portnameoruuid)) - return p - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -@utils.arg('pnameoruuid', metavar='', - help="Name or UUID of port") -def do_host_port_show(cc, args): - """Show host port details.""" - host = host_utils._find_host(cc, args.hostnameorid) - port = _find_port(cc, host, args.pnameoruuid) - _print_port_show(port) - - -@utils.arg('hostnameorid', - metavar='', - help="Name or ID of host") -def do_host_port_list(cc, args): - """List host ports.""" - - from inventoryclient.common import wrapping_formatters - - terminal_width = utils.get_terminal_size()[0] - - host = host_utils._find_host(cc, args.hostnameorid) - - ports = cc.port.list(host.uuid) - - field_labels = ['uuid', 'name', 'type', 'pci address', 'device', - 'processor', 'accelerated', 'device type'] - fields = ['uuid', 'name', 'type', 'pciaddr', 'dev_id', 'numa_node', - 'dpdksupport', 'pdevice'] - - format_spec = \ - wrapping_formatters.build_best_guess_formatters_using_average_widths( - ports, fields, field_labels, no_wrap_fields=['pciaddr']) - # best-guess formatter does not make a good guess for - # proper width of pdevice 
until terminal is > 155 - # We override that width here. - pdevice_width = None - if terminal_width <= 130: - pdevice_width = .1 - elif 131 >= terminal_width <= 150: - pdevice_width = .13 - elif 151 >= terminal_width <= 155: - pdevice_width = .14 - - if pdevice_width and format_spec["pdevice"] > pdevice_width: - format_spec["pdevice"] = pdevice_width - - formatters = wrapping_formatters.build_wrapping_formatters( - ports, fields, field_labels, format_spec) - - utils.print_list( - ports, fields, field_labels, formatters=formatters, sortby=1) diff --git a/python-inventoryclient/inventoryclient/inventoryclient/v1/shell.py b/python-inventoryclient/inventoryclient/inventoryclient/v1/shell.py deleted file mode 100644 index 7f3a6dd0..00000000 --- a/python-inventoryclient/inventoryclient/inventoryclient/v1/shell.py +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from inventoryclient.common import utils -from inventoryclient.v1 import cpu_shell -from inventoryclient.v1 import ethernetport_shell -from inventoryclient.v1 import host_shell -from inventoryclient.v1 import lldp_agent_shell -from inventoryclient.v1 import lldp_neighbour_shell -from inventoryclient.v1 import memory_shell -from inventoryclient.v1 import node_shell -from inventoryclient.v1 import pci_device_shell -from inventoryclient.v1 import port_shell - - -COMMAND_MODULES = [ - cpu_shell, - ethernetport_shell, - host_shell, - lldp_agent_shell, - lldp_neighbour_shell, - memory_shell, - node_shell, - pci_device_shell, - port_shell, -] - - -def enhance_parser(parser, subparsers, cmd_mapper): - '''Take a basic (nonversioned) parser and enhance it with - commands and options specific for this version of API. - - :param parser: top level parser :param subparsers: top level - parser's subparsers collection where subcommands will go - ''' - for command_module in COMMAND_MODULES: - utils.define_commands_from_module(subparsers, command_module, - cmd_mapper) diff --git a/python-inventoryclient/inventoryclient/pylint.rc b/python-inventoryclient/inventoryclient/pylint.rc deleted file mode 100755 index 16a03be5..00000000 --- a/python-inventoryclient/inventoryclient/pylint.rc +++ /dev/null @@ -1,218 +0,0 @@ -[MASTER] -# Specify a configuration file. -rcfile=pylint.rc - -# Python code to execute, usually for sys.path manipulation such as pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not paths. -ignore=tests - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - - -[MESSAGES CONTROL] -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). -# https://pylint.readthedocs.io/en/latest/user_guide/output.html#source-code-analysis-section -# We are disabling (C)onvention -# We are disabling (R)efactor -# We are probably disabling (W)arning -# We are not disabling (F)atal, (E)rror -#disable=C,R,W -disable=C,R,W - - -[REPORTS] -# Set the output format. 
Available formats are text, parseable, colorized, msvs -# (visual studio) and html -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=no - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - - -[SIMILARITIES] -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - - -[FORMAT] -# Maximum number of characters on a single line. -max-line-length=85 - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 tab). -indent-string=' ' - - -[TYPECHECK] -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). -ignored-classes=SQLObject - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E0201 when accessed. Python regular -# expressions are accepted. -generated-members=REQUEST,acl_users,aq_parent - - -[BASIC] -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,apply,input - -# Regular expression which should only match correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match correct module level names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Regular expression which should only match functions or classes name which do -# not require a docstring -no-docstring-rgx=__.*__ - - -[MISCELLANEOUS] -# List of note tags to take in consideration, separated by a comma. 
-notes=FIXME,XXX,TODO - - -[VARIABLES] -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the beginning of the name of dummy variables -# (i.e. not used). -dummy-variables-rgx=_|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - - -[IMPORTS] -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,string,TERMIOS,Bastion,rexec - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[DESIGN] -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branchs=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -[CLASSES] -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - - -[EXCEPTIONS] -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/python-inventoryclient/inventoryclient/setup.cfg b/python-inventoryclient/inventoryclient/setup.cfg deleted file mode 100644 index c522a032..00000000 --- a/python-inventoryclient/inventoryclient/setup.cfg +++ /dev/null @@ -1,33 +0,0 @@ -[metadata] -name = inventoryclient -summary = A python client library for Inventory -author = StarlingX -author-email = starlingx-discuss@lists.starlingx.io -home-page = http://www.starlingx.io/ -classifier = - Environment :: StarlingX - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] -packages = - inventoryclient - -[entry_points] -console_scripts = - inventory = inventoryclient.shell:main - -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - diff --git a/python-inventoryclient/inventoryclient/setup.py b/python-inventoryclient/inventoryclient/setup.py deleted file mode 100644 index ae3950a6..00000000 --- a/python-inventoryclient/inventoryclient/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
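The setup.cfg above is the only thing that ever created an `inventory` executable: pbr turns the console_scripts entry point into a wrapper around inventoryclient.shell:main. For context only, here is how such an entry point resolves on a modern interpreter; the removed package itself targeted Python 2.7 and went through pkg_resources, and entry_points(group=...) is the Python 3.10+ API:

    from importlib.metadata import entry_points

    matches = [ep for ep in entry_points(group='console_scripts')
               if ep.name == 'inventory']
    if matches:
        main = matches[0].load()  # would resolve to inventoryclient.shell:main
    else:
        print('inventory entry point not installed')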
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT - -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/python-inventoryclient/inventoryclient/test-requirements.txt b/python-inventoryclient/inventoryclient/test-requirements.txt deleted file mode 100644 index b7c9cb9c..00000000 --- a/python-inventoryclient/inventoryclient/test-requirements.txt +++ /dev/null @@ -1,22 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -# Hacking already pins down pep8, pyflakes and flake8 -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 -bandit>=1.1.0 # Apache-2.0 -coverage!=4.4,>=4.0 # Apache-2.0 -fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -mox -os-testr>=0.8.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -sphinx>=1.6.2 # BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -testresources>=0.2.4 # Apache-2.0/BSD -tempest>=16.1.0 # Apache-2.0 -httplib2 -python-keystoneclient -pyOpenSSL>=0.14 # Apache-2.0 diff --git a/python-inventoryclient/inventoryclient/tools/inventory.bash_completion b/python-inventoryclient/inventoryclient/tools/inventory.bash_completion deleted file mode 100644 index 3feda711..00000000 --- a/python-inventoryclient/inventoryclient/tools/inventory.bash_completion +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright (c) 2018 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# bash completion for Titanium Cloud inventory commands - -_inventory_opts="" # lazy init -_inventory_flags="" # lazy init -_inventory_opts_exp="" # lazy init -_inventory() -{ - local cur prev kbc - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" - - if [ "x$_inventory_opts" == "x" ] ; then - kbc="`inventory bash-completion | sed -e "s/ -h / /"`" - _inventory_opts="`echo "$kbc" | sed -e "s/--[a-z0-9_-]*//g" -e "s/[ ][ ]*/ /g"`" - _inventory_flags="`echo " $kbc" | sed -e "s/ [^-][^-][a-z0-9_-]*//g" -e "s/[ ][ ]*/ /g"`" - _inventory_opts_exp="`echo $_inventory_opts | sed -e "s/[ ]/|/g"`" - fi - - if [[ " ${COMP_WORDS[@]} " =~ " "($_inventory_opts_exp)" " && "$prev" != "help" ]] ; then - COMPREPLY=($(compgen -W "${_inventory_flags}" -- ${cur})) - else - COMPREPLY=($(compgen -W "${_inventory_opts}" -- ${cur})) - fi - return 0 -} -complete -F _inventory inventory diff --git a/python-inventoryclient/inventoryclient/tox.ini b/python-inventoryclient/inventoryclient/tox.ini deleted file mode 100644 index 07b5c285..00000000 --- a/python-inventoryclient/inventoryclient/tox.ini +++ /dev/null @@ -1,67 +0,0 @@ -[tox] -envlist = py27,pep8,cover,pylint -minversion = 1.6 - -# tox does not work if the path to the workdir is too long, so move it to /tmp -toxworkdir = /tmp/{env:USER}_inventoryclienttox -cgcsdir = {toxinidir}/../../../.. -distshare={toxworkdir}/.tox/distshare - -[testenv] -setenv = VIRTUAL_ENV={envdir} - PYTHONWARNINGS=default::DeprecationWarning - OS_TEST_PATH=inventoryclient/tests - TESTR_START_DIR=inventoryclient/tests -basepython = python2.7 -usedevelop = True - -install_command = pip install \ - -U \ - -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/stein/upper-constraints.txt} \ - {opts} {packages} -deps = -r{toxinidir}/test-requirements.txt - -commands = - find {toxinidir} -not -path '{toxinidir}/.tox/*' -name '*.py[c|o]' -delete - python setup.py testr --slowest --testr-args='{posargs}' - -whitelist_externals = - bash - find - rm -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY - -[testenv:pep8] -commands = - flake8 inventoryclient - -[testenv:venv] -commands = {posargs} - -[testenv:cover] -basepython = python2.7 -commands = - find . 
-type f -name ".coverage\.*" -delete - rm -f .coverage - rm -Rf cover - rm -f coverage.xml - python setup.py testr --coverage --testr-args='{posargs}' - coverage xml - -[flake8] -show-source = true -exclude=.*,dist,*lib/python*,*egg,build -max-complexity=25 -# H102 Apache 2.0 license header not found -# H233 Python 3.x incompatible use of print operator -# H404 multi line docstring should start without a leading new line -# H405 multi line docstring summary not separated with an empty line -ignore = H102,H233,H404,H405 - -[testenv:pylint] -basepython = python2.7 - -deps = {[testenv]deps} - pylint - -commands = pylint {posargs} inventoryclient --rcfile=./pylint.rc --extension-pkg-whitelist=lxml.etree,greenlet --ignored-classes=LookupDict diff --git a/python-inventoryclient/opensuse/python-inventoryclient.changes b/python-inventoryclient/opensuse/python-inventoryclient.changes deleted file mode 100644 index c3b8b7d6..00000000 --- a/python-inventoryclient/opensuse/python-inventoryclient.changes +++ /dev/null @@ -1,15 +0,0 @@ -------------------------------------------------------------------- -Wed Jul 31 17:02:27 UTC 2019 - Marcela Rosales - -- Remove tarball from OBS and use _service XML to get the source code. - -------------------------------------------------------------------- -Thu Jun 20 16:23:45 UTC 2019 - Marcela Rosales - -- Modify Requires packages to use openSUSE's packages - -------------------------------------------------------------------- -Wed Jun 12 15:16:07 UTC 2019 - mario_90ago@live.com.mx - -- Initial commit - diff --git a/python-inventoryclient/opensuse/python-inventoryclient.rpmlintrc b/python-inventoryclient/opensuse/python-inventoryclient.rpmlintrc deleted file mode 100644 index adc59bf3..00000000 --- a/python-inventoryclient/opensuse/python-inventoryclient.rpmlintrc +++ /dev/null @@ -1 +0,0 @@ -setBadness('script-without-shebang', 2) diff --git a/python-inventoryclient/opensuse/python-inventoryclient.spec b/python-inventoryclient/opensuse/python-inventoryclient.spec deleted file mode 100644 index 4439507e..00000000 --- a/python-inventoryclient/opensuse/python-inventoryclient.spec +++ /dev/null @@ -1,84 +0,0 @@ -%global pypi_name inventoryclient -Name: python-inventoryclient -Version: 1.0.0 -Release: 1 -Summary: A python client library for Inventory -License: Apache-2.0 -Group: base -URL: https://opendev.org/starlingx/metal -Source0: %{name}-%{version}.tar.gz - -BuildRequires: git -BuildRequires: python-pbr >= 2.0.0 -BuildRequires: python-setuptools -Requires: bash-completion -Requires: python-keystoneauth1 >= 3.1.0 -Requires: python2-oslo.i18n >= 2.1.0 -Requires: python2-oslo.utils >= 3.20.0 -Requires: python2-pbr >= 2.0.0 -Requires: python2-requests -Requires: python2-six >= 1.9.0 -%if 0%{?suse_version} -BuildRequires: python-pip -%else -BuildRequires: python2-pip -%endif - -%description -This package is a python client library for Inventory - -%define local_bindir %{_bindir}/ -%define local_etc_bash_completiond %{_sysconfdir}/bash_completion.d/ -%define pythonroot %{_libdir}/python2.7/site-packages -%define debug_package %{nil} - -%package sdk -Summary: SDK files for %{name} - -%description sdk -This package contains SDK files for %{name} package. 
-
-%prep
-%autosetup -n %{name}-%{version}/inventoryclient
-
-# Remove bundled egg-info
-rm -rf *.egg-info
-
-%build
-echo "Start build"
-export PBR_VERSION=%{version}
-python setup.py build
-
-%install
-echo "Start install"
-export PBR_VERSION=%{version}
-python setup.py install --root=%{buildroot} \
-    --install-lib=%{pythonroot} \
-    --prefix=%{_prefix} \
-    --install-data=%{_datadir} \
-    --single-version-externally-managed
-
-install -d -m 755 %{buildroot}%{local_etc_bash_completiond}
-install -p -D -m 664 tools/inventory.bash_completion %{buildroot}%{local_etc_bash_completiond}/inventory.bash_completion
-
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/pci_device_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/ethernetport_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/node_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/lldp_agent_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/common/options.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/cpu_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/memory_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/lldp_neighbour_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/port_shell.py
-chmod a+x %{buildroot}/%{pythonroot}/inventoryclient/v1/host_shell.py
-
-%files
-%defattr(-,root,root,-)
-%license LICENSE
-%{local_bindir}/*
-%config %{local_etc_bash_completiond}/*
-%{pythonroot}/%{pypi_name}/*
-%{pythonroot}/%{pypi_name}-%{version}*.egg-info
-%dir %{pythonroot}/inventoryclient
-
-%changelog