Remove unused inventory and python-inventoryclient

Neither of these components was maintained or used, so they are
being removed.

 - inventory was an old fork of the sysinv code
 - python-inventoryclient was an old fork of the cgts-client code

The devstack commands, although currently disabled, have also
been updated to drop these components.
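
For reference, the devstack plugin gated these components behind
service flags. A hypothetical local.conf fragment that would have
activated them (assuming devstack's standard enable_service
mechanism; the flag names are taken from the is_service_enabled
checks removed below) would look like:

  # hypothetical local.conf fragment, not part of this change
  enable_service inventory-api
  enable_service inventory-conductor
  enable_service inventory-agent
  enable_service inventory-client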

Change-Id: If6a109edbc70eb1bd92012f4261dec4a2c58fbd1
Story: 2004515
Task: 37538
Depends-On: https://review.opendev.org/701591
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Al Bailey 2020-01-07 08:41:36 -06:00
parent e68db45a2e
commit d59ba5fdc2
254 changed files with 0 additions and 40243 deletions

View File

@ -23,7 +23,3 @@ pxe-network-installer
# platform-kickstarts
platform-kickstarts
# inventory
inventory
python-inventoryclient

View File

@ -5,6 +5,4 @@ mtce-control
mtce-storage
installer/pxe-network-installer
kickstart
inventory
python-inventoryclient
tools/rvmc

View File

@ -103,22 +103,6 @@ function build_mtce_common {
popd
}
function build_inventory {
pushd ${STX_METAL_DIR}/inventory/inventory
python setup.py build
popd
}
function build_inventory_client {
pushd ${STX_METAL_DIR}/python-inventoryclient/inventoryclient
python setup.py build
popd
}
function install_metal {
install_mtce_common
# components could be separately installed if
@ -134,13 +118,6 @@ function install_metal {
if is_service_enabled mtce-storage; then
install_mtce_storage
fi
if is_service_enabled inventory-api || is_service_enabled inventory-conductor || is_service_enabled inventory-agent; then
install_inventory
fi
if is_service_enabled inventory-client; then
install_inventory_client
fi
}
function install_mtce_common {
@ -255,64 +232,6 @@ function install_mtce_control {
popd
}
function install_inventory {
local lib_dir=${PREFIX}/lib
local unit_dir=${PREFIX}/lib/systemd/system
local lib64_dir=${PREFIX}/lib64
local pythonroot=${lib64_dir}/python2.7/site-packages
local sysconf_dir=${SYSCONFDIR}
local local_etc_goenabledd=${SYSCONFDIR}/goenabled.d
local local_etc_inventory=${SYSCONFDIR}/inventory
local local_etc_motdd=${SYSCONFDIR}/motd.d
build_inventory
pushd ${STX_METAL_DIR}/inventory/inventory
sudo python setup.py install \
--root=/ \
--install-lib=$PYTHON_SITE_DIR \
--prefix=/usr \
--install-data=/usr/share \
--single-version-externally-managed
sudo install -d -m 755 ${local_etc_goenabledd}
sudo install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh ${local_etc_goenabledd}/inventory_goenabled_check.sh
sudo install -d -m 755 ${local_etc_inventory}
sudo install -p -D -m 755 etc/inventory/policy.json ${local_etc_inventory}/policy.json
sudo install -d -m 755 ${local_etc_motdd}
sudo install -p -D -m 755 etc/inventory/motd-system ${local_etc_motdd}/10-system-config
sudo install -m 755 -p -D scripts/inventory-api ${lib_dir}/ocf/resource.d/platform/inventory-api
sudo install -m 755 -p -D scripts/inventory-conductor ${lib_dir}/ocf/resource.d/platform/inventory-conductor
sudo install -m 644 -p -D scripts/inventory-api.service ${unit_dir}/inventory-api.service
sudo install -m 644 -p -D scripts/inventory-conductor.service ${unit_dir}/inventory-conductor.service
popd
}
function install_inventory_client {
pushd ${STX_METAL_DIR}/python-inventoryclient/inventoryclient
build_inventory_client
sudo python setup.py install \
--root=/ \
--install-lib=$PYTHON_SITE_DIR \
--prefix=/usr \
--install-data=/usr/share \
--single-version-externally-managed
sudo install -d -m 755 /etc/bash_completion.d/
sudo install -p -D -m 664 tools/inventory.bash_completion /etc/bash_completion.d/inventory.bash_completion
popd
}
function install_mtce_storage {
local sysconf_dir=${SYSCONFDIR}
local unit_dir=${SYSCONFDIR}/systemd/system
@ -972,40 +891,6 @@ function cleanup_metal {
sudo rm -rf ${sysconf_dir}/init.d/goenabledStorage
fi
if is_service_enabled inventory-api || is_service_enabled inventory-conductor || is_service_enabled inventory-agent; then
cleanup_inventory
fi
if is_service_enabled inventory-client; then
cleanup_inventory_client
fi
}
function cleanup_inventory {
local lib_dir=${PREFIX}/lib
local unit_dir=${PREFIX}/lib/systemd/system
local lib64_dir=${PREFIX}/lib64
local pythonroot=${lib64_dir}/python2.7/site-packages
local sysconf_dir=${SYSCONFDIR}
local local_etc_goenabledd=${SYSCONFDIR}/goenabled.d
local local_etc_inventory=${SYSCONFDIR}/inventory
local local_etc_motdd=${SYSCONFDIR}/motd.d
sudo pip uninstall -y inventory
sudo rm -rf ${local_etc_goenabledd}/inventory_goenabled_check.sh
sudo rm -rf ${local_etc_inventory}/policy.json
sudo rm -rf ${local_etc_motdd}/10-system-config
sudo rm -rf ${lib_dir}/ocf/resource.d/platform/inventory-api
sudo rm -rf ${lib_dir}/ocf/resource.d/platform/inventory-conductor
sudo rm -rf ${unit_dir}/inventory-api.service
sudo rm -rf ${unit_dir}/inventory-conductor.service
}
function cleanup_inventory_client {
sudo pip uninstall -y inventoryclient
sudo rm -rf /etc/bash_completion.d/inventory.bash_completion
}
function uninstall_files {

View File

@ -1,13 +0,0 @@
Metadata-Version: 1.1
Name: inventory
Version: 1.0
Summary: Inventory
Home-page: https://wiki.openstack.org/wiki/StarlingX
Author: StarlingX
Author-email: starlingx-discuss@lists.starlingx.io
License: Apache-2.0
Description: Inventory Service
Platform: UNKNOWN

View File

@ -1,2 +0,0 @@
SRC_DIR="inventory"
TIS_PATCH_VER=3

View File

@ -1,195 +0,0 @@
Summary: Inventory
Name: inventory
Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist}
License: Apache-2.0
Group: base
Packager: Wind River <info@windriver.com>
URL: unknown
Source0: %{name}-%{version}.tar.gz
BuildRequires: cgts-client
BuildRequires: python-setuptools
BuildRequires: python-jsonpatch
BuildRequires: python-keystoneauth1
BuildRequires: python-keystonemiddleware
BuildRequires: python-mock
BuildRequires: python-neutronclient
BuildRequires: python-oslo-concurrency
BuildRequires: python-oslo-config
BuildRequires: python-oslo-context
BuildRequires: python-oslo-db
BuildRequires: python-oslo-db-tests
BuildRequires: python-oslo-i18n
BuildRequires: python-oslo-log
BuildRequires: python-oslo-messaging
BuildRequires: python-oslo-middleware
BuildRequires: python-oslo-policy
BuildRequires: python-oslo-rootwrap
BuildRequires: python-oslo-serialization
BuildRequires: python-oslo-service
BuildRequires: python-oslo-utils
BuildRequires: python-oslo-versionedobjects
BuildRequires: python-oslotest
BuildRequires: python-osprofiler
BuildRequires: python-os-testr
BuildRequires: python-pbr
BuildRequires: python-pecan
BuildRequires: python-psutil
BuildRequires: python-requests
BuildRequires: python-retrying
BuildRequires: python-six
BuildRequires: python-sqlalchemy
BuildRequires: python-stevedore
BuildRequires: python-webob
BuildRequires: python-wsme
BuildRequires: systemd
BuildRequires: systemd-devel
Requires: python-pyudev
Requires: pyparted
Requires: python-ipaddr
Requires: python-paste
Requires: python-eventlet
Requires: python-futurist >= 0.11.0
Requires: python-jsonpatch
Requires: python-keystoneauth1 >= 3.1.0
Requires: python-keystonemiddleware >= 4.12.0
Requires: python-neutronclient >= 6.3.0
Requires: python-oslo-concurrency >= 3.8.0
Requires: python-oslo-config >= 2:4.0.0
Requires: python-oslo-context >= 2.14.0
Requires: python-oslo-db >= 4.24.0
Requires: python-oslo-i18n >= 2.1.0
Requires: python-oslo-log >= 3.22.0
Requires: python-oslo-messaging >= 5.24.2
Requires: python-oslo-middleware >= 3.27.0
Requires: python-oslo-policy >= 1.23.0
Requires: python-oslo-rootwrap >= 5.0.0
Requires: python-oslo-serialization >= 1.10.0
Requires: python-oslo-service >= 1.10.0
Requires: python-oslo-utils >= 3.20.0
Requires: python-oslo-versionedobjects >= 1.17.0
Requires: python-osprofiler >= 1.4.0
Requires: python-pbr
Requires: python-pecan
Requires: python-psutil
Requires: python-requests
Requires: python-retrying
Requires: python-six
Requires: python-sqlalchemy
Requires: python-stevedore >= 1.20.0
Requires: python-webob >= 1.7.1
Requires: python-wsme
%description
Inventory Service
%define local_bindir /usr/bin/
%define local_etc_goenabledd /etc/goenabled.d/
%define local_etc_inventory /etc/inventory/
%define local_etc_motdd /etc/motd.d/
%define pythonroot /usr/lib64/python2.7/site-packages
%define ocf_resourced /usr/lib/ocf/resource.d
%define local_etc_initd /etc/init.d/
%define local_etc_pmond /etc/pmon.d/
%define debug_package %{nil}
%prep
%setup
# Remove bundled egg-info
rm -rf *.egg-info
%build
echo "Start inventory build"
export PBR_VERSION=%{version}
%{__python} setup.py build
PYTHONPATH=. oslo-config-generator --config-file=inventory/config-generator.conf
%install
echo "Start inventory install"
export PBR_VERSION=%{version}
%{__python} setup.py install --root=%{buildroot} \
--install-lib=%{pythonroot} \
--prefix=/usr \
--install-data=/usr/share \
--single-version-externally-managed
install -d -m 755 %{buildroot}%{local_etc_goenabledd}
install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh %{buildroot}%{local_etc_goenabledd}/inventory_goenabled_check.sh
install -d -m 755 %{buildroot}%{local_etc_inventory}
install -p -D -m 755 etc/inventory/policy.json %{buildroot}%{local_etc_inventory}/policy.json
install -d -m 755 %{buildroot}%{local_etc_motdd}
install -p -D -m 755 etc/inventory/motd-system %{buildroot}%{local_etc_motdd}/10-system-config
install -m 755 -p -D scripts/inventory-api %{buildroot}/usr/lib/ocf/resource.d/platform/inventory-api
install -m 755 -p -D scripts/inventory-conductor %{buildroot}/usr/lib/ocf/resource.d/platform/inventory-conductor
install -m 644 -p -D scripts/inventory-api.service %{buildroot}%{_unitdir}/inventory-api.service
install -m 644 -p -D scripts/inventory-conductor.service %{buildroot}%{_unitdir}/inventory-conductor.service
# TODO(jkung) activate inventory-agent with puppet integration
# install -d -m 755 %{buildroot}%{local_etc_initd}
# install -p -D -m 755 scripts/inventory-agent-initd %{buildroot}%{local_etc_initd}/inventory-agent
# install -d -m 755 %{buildroot}%{local_etc_pmond}
# install -p -D -m 644 etc/inventory/inventory-agent-pmond.conf %{buildroot}%{local_etc_pmond}/inventory-agent-pmond.conf
# install -p -D -m 644 scripts/inventory-agent.service %{buildroot}%{_unitdir}/inventory-agent.service
# Install sql migration
install -m 644 inventory/db/sqlalchemy/migrate_repo/migrate.cfg %{buildroot}%{pythonroot}/inventory/db/sqlalchemy/migrate_repo/migrate.cfg
# install default config files
cd %{_builddir}/%{name}-%{version} && oslo-config-generator --config-file inventory/config-generator.conf --output-file %{_builddir}/%{name}-%{version}/inventory.conf.sample
# install -p -D -m 644 %{_builddir}/%{name}-%{version}/inventory.conf.sample %{buildroot}%{_sysconfdir}/inventory/inventory.conf
# TODO(jkung) activate inventory-agent
# %post
# /usr/bin/systemctl enable inventory-agent.service >/dev/null 2>&1
%clean
echo "CLEAN CALLED"
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root,-)
%doc LICENSE
%{local_bindir}/*
%{pythonroot}/%{name}
%{pythonroot}/%{name}-%{version}*.egg-info
%{local_etc_goenabledd}/*
%{local_etc_inventory}/*
%{local_etc_motdd}/*
# SM OCF Start/Stop/Monitor Scripts
%{ocf_resourced}/platform/inventory-api
%{ocf_resourced}/platform/inventory-conductor
# systemctl service files
%{_unitdir}/inventory-api.service
%{_unitdir}/inventory-conductor.service
# %{_bindir}/inventory-agent
%{_bindir}/inventory-api
%{_bindir}/inventory-conductor
%{_bindir}/inventory-dbsync
%{_bindir}/inventory-dnsmasq-lease-update
# inventory-agent files
# %{local_etc_initd}/inventory-agent
# %{local_etc_pmond}/inventory-agent-pmond.conf
# %{_unitdir}/inventory-agent.service

View File

@ -1,6 +0,0 @@
[run]
branch = True
source = inventory
[report]
ignore_errors = True

View File

@ -1,59 +0,0 @@
*.py[cod]
# C extensions
*.so
# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.stestr
.venv
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?
# Files created by releasenotes build
releasenotes/build

View File

@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

View File

@ -1,3 +0,0 @@
[DEFAULT]
test_path=./inventory/tests
top_dir=./

View File

@ -1,19 +0,0 @@
If you would like to contribute to the development of StarlingX, you must
follow the steps in this page:
https://wiki.openstack.org/wiki/StarlingX/Contribution_Guidelines
If you already have a good understanding of how the system works and your
StarlingX accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to StarlingX should be
submitted for review via the Gerrit tool:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad:
https://bugs.launchpad.net/starlingx
Storyboard:
https://storyboard.openstack.org/#!/story/2002950

View File

@ -1,4 +0,0 @@
inventory Style Commandments
============================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@ -1,3 +0,0 @@
Placeholder to allow setup.py to work.
Removing this requires modifying the
setup.py manifest.

View File

@ -1,2 +0,0 @@
[python: **.py]

View File

@ -1,4 +0,0 @@
sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
openstackdocstheme>=1.18.1 # Apache-2.0
# releasenotes
reno>=2.5.0 # Apache-2.0

View File

@ -1,5 +0,0 @@
====================
Administrators guide
====================
Administrators guide of inventory.

View File

@ -1,5 +0,0 @@
================================
Command line interface reference
================================
CLI reference of inventory.

View File

@ -1,82 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'openstackdocstheme',
#'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'inventory'
copyright = u'2018, StarlingX'
# openstackdocstheme options
repository_name = 'stx-metal'
bug_project = '22952'
bug_tag = ''
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'starlingxdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Developers', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -1,5 +0,0 @@
=============
Configuration
=============
Configuration of inventory.

View File

@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst

View File

@ -1,9 +0,0 @@
=========================
Contributor Documentation
=========================
.. toctree::
:maxdepth: 2
contributing

View File

@ -1,30 +0,0 @@
.. inventory documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
=========================================
Welcome to the documentation of inventory
=========================================
Contents:
.. toctree::
:maxdepth: 2
readme
install/index
library/index
contributor/index
configuration/index
cli/index
user/index
admin/index
reference/index
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -1,10 +0,0 @@
2. Edit the ``/etc/inventory/inventory.conf`` file and complete the following
actions:
* In the ``[database]`` section, configure database access:
.. code-block:: ini
[database]
...
connection = mysql+pymysql://inventory:INVENTORY_DBPASS@controller/inventory

View File

@ -1,75 +0,0 @@
Prerequisites
-------------
Before you install and configure the inventory service,
you must create a database, service credentials, and API endpoints.
#. To create the database, complete these steps:
* Use the database access client to connect to the database
server as the ``root`` user:
.. code-block:: console
$ mysql -u root -p
* Create the ``inventory`` database:
.. code-block:: none
CREATE DATABASE inventory;
* Grant proper access to the ``inventory`` database:
.. code-block:: none
GRANT ALL PRIVILEGES ON inventory.* TO 'inventory'@'localhost' \
IDENTIFIED BY 'INVENTORY_DBPASS';
GRANT ALL PRIVILEGES ON inventory.* TO 'inventory'@'%' \
IDENTIFIED BY 'INVENTORY_DBPASS';
Replace ``INVENTORY_DBPASS`` with a suitable password.
* Exit the database access client.
.. code-block:: none
exit;
#. Source the ``admin`` credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. To create the service credentials, complete these steps:
* Create the ``inventory`` user:
.. code-block:: console
$ openstack user create --domain default --password-prompt inventory
* Add the ``admin`` role to the ``inventory`` user:
.. code-block:: console
$ openstack role add --project service --user inventory admin
* Create the inventory service entities:
.. code-block:: console
$ openstack service create --name inventory --description "inventory" inventory
#. Create the inventory service API endpoints:
.. code-block:: console
$ openstack endpoint create --region RegionOne \
inventory public http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
inventory internal http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
inventory admin http://controller:XXXX/vY/%\(tenant_id\)s

View File

@ -1,9 +0,0 @@
==========================
inventory service overview
==========================
The inventory service provides host inventory of resources on the host.
The inventory service consists of the following components:
``inventory-api`` service
Accepts and responds to end user API calls...

View File

@ -1,17 +0,0 @@
====================================
inventory service installation guide
====================================
.. toctree::
:maxdepth: 2
get_started.rst
install.rst
verify.rst
next-steps.rst
The inventory service (inventory) provides...
This chapter assumes a working setup of StarlingX following the
`StarlingX Installation Guide
<https://docs.starlingx.io/installation_guide/index.html>`_.

View File

@ -1,34 +0,0 @@
.. _install-obs:
Install and configure for openSUSE and SUSE Linux Enterprise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the inventory service
for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1.
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# zypper --quiet --non-interactive install
.. include:: common_configure.rst
Finalize installation
---------------------
Start the inventory services and configure them to start when
the system boots:
.. code-block:: console
# systemctl enable openstack-inventory-api.service
# systemctl start openstack-inventory-api.service

View File

@ -1,33 +0,0 @@
.. _install-rdo:
Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the inventory service
for Red Hat Enterprise Linux 7 and CentOS 7.
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# yum install
.. include:: common_configure.rst
Finalize installation
---------------------
Start the inventory services and configure them to start when
the system boots:
.. code-block:: console
# systemctl enable openstack-inventory-api.service
# systemctl start openstack-inventory-api.service

View File

@ -1,31 +0,0 @@
.. _install-ubuntu:
Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the inventory
service for Ubuntu 14.04 (LTS).
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# apt-get update
# apt-get install
.. include:: common_configure.rst
Finalize installation
---------------------
Restart the inventory services:
.. code-block:: console
# service openstack-inventory-api restart

View File

@ -1,20 +0,0 @@
.. _install:
Install and configure
~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the
inventory service, code-named inventory, on the controller node.
This section assumes that you already have a working OpenStack
environment with at least the following components installed:
.. (add the appropriate services here and further notes)
Note that installation and configuration vary by distribution.
.. toctree::
:maxdepth: 2
install-obs.rst
install-rdo.rst
install-ubuntu.rst

View File

@ -1,9 +0,0 @@
.. _next-steps:
Next steps
~~~~~~~~~~
Your OpenStack environment now includes the inventory service.
To add additional services, see
https://docs.openstack.org/project-install-guide/ocata/.

View File

@ -1,24 +0,0 @@
.. _verify:
Verify operation
~~~~~~~~~~~~~~~~
Verify operation of the inventory service.
.. note::
Perform these commands on the controller node.
#. Source the ``admin`` project credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. List service components to verify successful launch and registration
of each process:
.. code-block:: console
$ openstack inventory service list

View File

@ -1,7 +0,0 @@
=====
Usage
=====
To use inventory in a project:
import inventory

View File

@ -1 +0,0 @@
.. include:: ../../README.rst

View File

@ -1,5 +0,0 @@
==========
References
==========
References of inventory.

View File

@ -1,5 +0,0 @@
===========
Users guide
===========
Users guide of inventory.

View File

@ -1,20 +0,0 @@
#!/bin/bash
# Copyright (c) 2015-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script removes a load from a controller.
# The load version is passed in as the first variable.
: ${1?"Usage $0 VERSION"}
VERSION=$1
FEED_DIR=/www/pages/feed/rel-$VERSION
rm -f /pxeboot/pxelinux.cfg.files/*-$VERSION
rm -rf /pxeboot/rel-$VERSION
rm -f /usr/sbin/pxeboot-update-$VERSION.sh
rm -rf $FEED_DIR

View File

@ -1,9 +0,0 @@
[process]
process = inventory-agent
pidfile = /var/run/inventory-agent.pid
script = /etc/init.d/inventory-agent
style = lsb ; ocf or lsb
severity = major ; minor, major, critical
restarts = 3 ; restarts before error assertion
interval = 5 ; number of seconds to wait between restarts
debounce = 20 ; number of seconds to wait before degrade clear

View File

@ -1,36 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Inventory "goenabled" check.
# Wait for inventory information to be posted prior to allowing goenabled.
NAME=$(basename $0)
INVENTORY_READY_FLAG=/var/run/.inventory_ready
# logfile=/var/log/platform.log
function LOG {
logger "$NAME: $*"
# echo "`date "+%FT%T"`: $NAME: $*" >> $logfile
}
count=0
while [ $count -le 45 ]; do
if [ -f $INVENTORY_READY_FLAG ]; then
LOG "Inventory is ready. Passing goenabled check."
echo "Inventory goenabled iterations PASS $count"
LOG "Inventory goenabled iterations PASS $count"
exit 0
fi
sleep 1
count=$(($count+1))
done
echo "Inventory goenabled iterations FAIL $count"
LOG "Inventory is not ready. Continue."
exit 0

View File

@ -1,10 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# update inventory MOTD if motd.system content present
[ -f /etc/inventory/motd.system ] && cat /etc/inventory/motd.system || true

View File

@ -1,5 +0,0 @@
{
"admin": "role:admin or role:administrator",
"admin_api": "is_admin:True",
"default": "rule:admin_api"
}

View File

@ -1,11 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pbr.version
__version__ = pbr.version.VersionInfo(
'inventory').version_string()

View File

@ -1,114 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Base agent manager functionality."""
import futurist
from futurist import periodics
from futurist import rejection
import inspect
from inventory.common import exception
from inventory.common.i18n import _
from oslo_config import cfg
from oslo_log import log
LOG = log.getLogger(__name__)
class BaseAgentManager(object):
def __init__(self, host, topic):
super(BaseAgentManager, self).__init__()
if not host:
host = cfg.CONF.host
self.host = host
self.topic = topic
self._started = False
def init_host(self, admin_context=None):
"""Initialize the agent host.
:param admin_context: the admin context to pass to periodic tasks.
:raises: RuntimeError when agent is already running.
"""
if self._started:
raise RuntimeError(_('Attempt to start an already running '
'agent manager'))
rejection_func = rejection.reject_when_reached(64)
# CONF.conductor.workers_pool_size)
self._executor = futurist.GreenThreadPoolExecutor(
64, check_and_reject=rejection_func)
# JK max_workers=CONF.conductor.workers_pool_size,
"""Executor for performing tasks async."""
# Collect driver-specific periodic tasks.
# Conductor periodic tasks accept context argument,
LOG.info('Collecting periodic tasks')
self._periodic_task_callables = []
self._collect_periodic_tasks(self, (admin_context,))
self._periodic_tasks = periodics.PeriodicWorker(
self._periodic_task_callables,
executor_factory=periodics.ExistingExecutor(self._executor))
# Start periodic tasks
self._periodic_tasks_worker = self._executor.submit(
self._periodic_tasks.start, allow_empty=True)
self._periodic_tasks_worker.add_done_callback(
self._on_periodic_tasks_stop)
self._started = True
def del_host(self, deregister=True):
# Conductor deregistration fails if called on non-initialized
# agent (e.g. when rpc server is unreachable).
if not hasattr(self, 'agent'):
return
self._periodic_tasks.stop()
self._periodic_tasks.wait()
self._executor.shutdown(wait=True)
self._started = False
def _collect_periodic_tasks(self, obj, args):
"""Collect periodic tasks from a given object.
Populates self._periodic_task_callables with tuples
(callable, args, kwargs).
:param obj: object containing periodic tasks as methods
:param args: tuple with arguments to pass to every task
"""
for name, member in inspect.getmembers(obj):
if periodics.is_periodic(member):
LOG.debug('Found periodic task %(owner)s.%(member)s',
{'owner': obj.__class__.__name__,
'member': name})
self._periodic_task_callables.append((member, args, {}))
def _on_periodic_tasks_stop(self, fut):
try:
fut.result()
except Exception as exc:
LOG.critical('Periodic tasks worker has failed: %s', exc)
else:
LOG.info('Successfully shut down periodic tasks')
def _spawn_worker(self, func, *args, **kwargs):
"""Create a greenthread to run func(*args, **kwargs).
Spawns a greenthread if there are free slots in pool, otherwise raises
exception. Execution control returns immediately to the caller.
:returns: Future object.
:raises: NoFreeConductorWorker if worker pool is currently full.
"""
try:
return self._executor.submit(func, *args, **kwargs)
except futurist.RejectedSubmission:
raise exception.NoFreeConductorWorker()

View File

@ -1,369 +0,0 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
""" inventory idisk Utilities and helper functions."""
import os
import pyudev
import re
import subprocess
import sys
from inventory.common import constants
from inventory.common import context
from inventory.common import utils
from inventory.conductor import rpcapi as conductor_rpcapi
from oslo_log import log
LOG = log.getLogger(__name__)
class DiskOperator(object):
'''Class to encapsulate Disk operations for System Inventory'''
def __init__(self):
self.num_cpus = 0
self.num_nodes = 0
self.float_cpuset = 0
self.default_hugepage_size_kB = 0
self.total_memory_MiB = 0
self.free_memory_MiB = 0
self.total_memory_nodes_MiB = []
self.free_memory_nodes_MiB = []
self.topology = {}
def convert_range_string_to_list(self, s):
olist = []
s = s.strip()
if s:
for part in s.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
olist.extend(range(a, b + 1))
else:
a = int(part)
olist.append(a)
olist.sort()
return olist
def get_rootfs_node(self):
cmdline_file = '/proc/cmdline'
device = None
with open(cmdline_file, 'r') as f:
for line in f:
for param in line.split():
params = param.split("=", 1)
if params[0] == "root":
if "UUID=" in params[1]:
key, uuid = params[1].split("=")
symlink = "/dev/disk/by-uuid/%s" % uuid
device = os.path.basename(os.readlink(symlink))
else:
device = os.path.basename(params[1])
if device is not None:
if constants.DEVICE_NAME_NVME in device:
re_line = re.compile(r'^(nvme[0-9]*n[0-9]*)')
else:
re_line = re.compile(r'^(\D*)')
match = re_line.search(device)
if match:
return os.path.join("/dev", match.group(1))
return
@utils.skip_udev_partition_probe
def get_disk_available_mib(self, device_node):
# Check that partition table format is GPT.
# Return 0 if not.
if not utils.disk_is_gpt(device_node=device_node):
LOG.debug("Format of disk node %s is not GPT." % device_node)
return 0
pvs_command = '{} {}'.format('pvs | grep -w ', device_node)
pvs_process = subprocess.Popen(pvs_command, stdout=subprocess.PIPE,
shell=True)
pvs_output = pvs_process.stdout.read()
if pvs_output:
LOG.debug("Disk %s is completely used by a PV => 0 available mib."
% device_node)
return 0
# Get sector size command.
sector_size_bytes_cmd = '{} {}'.format('blockdev --getss', device_node)
# Get total free space in sectors command.
avail_space_sectors_cmd = '{} {} {}'.format(
'sgdisk -p', device_node, "| grep \"Total free space\"")
# Get the sector size.
sector_size_bytes_process = subprocess.Popen(
sector_size_bytes_cmd, stdout=subprocess.PIPE, shell=True)
sector_size_bytes = sector_size_bytes_process.stdout.read().rstrip()
# Get the free space.
avail_space_sectors_process = subprocess.Popen(
avail_space_sectors_cmd, stdout=subprocess.PIPE, shell=True)
avail_space_sectors_output = avail_space_sectors_process.stdout.read()
avail_space_sectors = re.findall(
'\d+', avail_space_sectors_output)[0].rstrip()
# Free space in MiB.
avail_space_mib = (int(sector_size_bytes) * int(avail_space_sectors) /
(1024 ** 2))
# Keep 2 MiB for partition table.
if avail_space_mib >= 2:
avail_space_mib = avail_space_mib - 2
else:
avail_space_mib = 0
return avail_space_mib
def disk_format_gpt(self, host_uuid, idisk_dict, is_cinder_device):
disk_node = idisk_dict.get('device_path')
utils.disk_wipe(disk_node)
utils.execute('parted', disk_node, 'mklabel', 'gpt')
if is_cinder_device:
LOG.debug("Removing .node_cinder_lvm_config_complete_file")
try:
os.remove(constants.NODE_CINDER_LVM_CONFIG_COMPLETE_FILE)
except OSError:
LOG.error(".node_cinder_lvm_config_complete_file not present.")
pass
# On SX ensure wipe succeeds before DB is updated.
# Flag file is used to mark wiping in progress.
try:
os.remove(constants.DISK_WIPE_IN_PROGRESS_FLAG)
except OSError:
# it's ok if file is not present.
pass
# We need to send the updated info about the host disks back to
# the conductor.
idisk_update = self.idisk_get()
ctxt = context.get_admin_context()
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.idisk_update_by_ihost(ctxt,
host_uuid,
idisk_update)
def handle_exception(self, e):
traceback = sys.exc_info()[-1]
LOG.error("%s @ %s:%s" % (
e, traceback.tb_frame.f_code.co_filename, traceback.tb_lineno))
def is_rotational(self, device_name):
"""Find out if a certain disk is rotational or not. Mostly used for
determining if disk is HDD or SSD.
"""
# Obtain the path to the rotational file for the current device.
device = device_name['DEVNAME'].split('/')[-1]
rotational_path = "/sys/block/{device}/queue/rotational"\
.format(device=device)
rotational = None
# Read file and remove trailing whitespaces.
if os.path.isfile(rotational_path):
with open(rotational_path, 'r') as rot_file:
rotational = rot_file.read()
rotational = rotational.rstrip()
return rotational
def get_device_id_wwn(self, device):
"""Determine the ID and WWN of a disk from the value of the DEVLINKS
attribute.
Note: This data is not currently being used for anything. We are
gathering this information so the conductor can store it for future use.
"""
# The ID and WWN default to None.
device_id = None
device_wwn = None
# If there is no DEVLINKS attribute, return None.
if 'DEVLINKS' not in device:
return device_id, device_wwn
# Extract the ID and the WWN.
LOG.debug("[DiskEnum] get_device_id_wwn: devlinks= %s" %
device['DEVLINKS'])
devlinks = device['DEVLINKS'].split()
for devlink in devlinks:
if "by-id" in devlink:
if "wwn" not in devlink:
device_id = devlink.split('/')[-1]
LOG.debug("[DiskEnum] by-id: %s id: %s" % (devlink,
device_id))
else:
device_wwn = devlink.split('/')[-1]
LOG.debug("[DiskEnum] by-wwn: %s wwn: %s" % (devlink,
device_wwn))
return device_id, device_wwn
def idisk_get(self):
"""Enumerate disk topology based on:
:param self
:returns list of disk and attributes
"""
idisk = []
context = pyudev.Context()
for device in context.list_devices(DEVTYPE='disk'):
if not utils.is_system_usable_block_device(device):
continue
if device['MAJOR'] in constants.VALID_MAJOR_LIST:
if 'ID_PATH' in device:
device_path = "/dev/disk/by-path/" + device['ID_PATH']
LOG.debug("[DiskEnum] device_path: %s ", device_path)
else:
# We should always have a udev supplied /dev/disk/by-path
# value as a matter of normal operation. We do not expect
# this to occur, thus the error.
#
# The kickstart files for the host install require the
# by-path value also to be present or the host install will
# fail. Since the installer and the runtime share the same
# kernel/udev we should not see this message on an
# installed system.
device_path = None
LOG.error("Device %s does not have an ID_PATH value "
"provided by udev" % device.device_node)
size_mib = 0
available_mib = 0
model_num = ''
serial_id = ''
# Can merge all try/except in one block but this allows
# at least attributes with no exception to be filled
try:
size_mib = utils.get_disk_capacity_mib(device.device_node)
except Exception as e:
self.handle_exception("Could not retrieve disk size - %s "
% e)
try:
available_mib = self.get_disk_available_mib(
device_node=device.device_node)
except Exception as e:
self.handle_exception(
"Could not retrieve disk %s free space" % e)
try:
# ID_MODEL received from udev is not correct for disks that
# are used entirely for LVM. LVM replaced the model ID with
# its own identifier that starts with "LVM PV". For this
# reason we will attempt to retrieve the correct model ID
# by using 2 different commands: hdparm and lsblk. If one
# of them fails, the other can attempt to retrieve the
# information; otherwise we fall back to udev.
# try hdparm command first
hdparm_command = 'hdparm -I %s |grep Model' % (
device.get('DEVNAME'))
hdparm_process = subprocess.Popen(
hdparm_command,
stdout=subprocess.PIPE,
shell=True)
hdparm_output = hdparm_process.communicate()[0]
if hdparm_process.returncode == 0:
second_half = hdparm_output.split(':')[1]
model_num = second_half.strip()
else:
# try lsblk command
lsblk_command = 'lsblk -dn --output MODEL %s' % (
device.get('DEVNAME'))
lsblk_process = subprocess.Popen(
lsblk_command,
stdout=subprocess.PIPE,
shell=True)
lsblk_output = lsblk_process.communicate()[0]
if lsblk_process.returncode == 0:
model_num = lsblk_output.strip()
else:
# both hdparm and lsblk commands failed, try udev
model_num = device.get('ID_MODEL')
if not model_num:
model_num = constants.DEVICE_MODEL_UNKNOWN
except Exception as e:
self.handle_exception("Could not retrieve disk model "
"for disk %s. Exception: %s" %
(device.get('DEVNAME'), e))
try:
if 'ID_SCSI_SERIAL' in device:
serial_id = device['ID_SCSI_SERIAL']
else:
serial_id = device['ID_SERIAL_SHORT']
except Exception as e:
self.handle_exception("Could not retrieve disk "
"serial ID - %s " % e)
capabilities = dict()
if model_num:
capabilities.update({'model_num': model_num})
if self.get_rootfs_node() == device.device_node:
capabilities.update({'stor_function': 'rootfs'})
rotational = self.is_rotational(device)
device_type = device.device_type
rotation_rate = constants.DEVICE_TYPE_UNDETERMINED
if rotational == '1':
device_type = constants.DEVICE_TYPE_HDD
if 'ID_ATA_ROTATION_RATE_RPM' in device:
rotation_rate = device['ID_ATA_ROTATION_RATE_RPM']
elif rotational == '0':
if constants.DEVICE_NAME_NVME in device.device_node:
device_type = constants.DEVICE_TYPE_NVME
else:
device_type = constants.DEVICE_TYPE_SSD
rotation_rate = constants.DEVICE_TYPE_NA
# TODO(sc) else: what are other possible stor_function value?
# or do we just use pair { 'is_rootfs': True } instead?
# Obtain device ID and WWN.
device_id, device_wwn = self.get_device_id_wwn(device)
attr = {
'device_node': device.device_node,
'device_num': device.device_number,
'device_type': device_type,
'device_path': device_path,
'device_id': device_id,
'device_wwn': device_wwn,
'size_mib': size_mib,
'available_mib': available_mib,
'serial_id': serial_id,
'capabilities': capabilities,
'rpm': rotation_rate,
}
idisk.append(attr)
LOG.debug("idisk= %s" % idisk)
return idisk

View File

@ -1,23 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from oslo_config import cfg
from oslo_utils._i18n import _
INVENTORY_LLDP_OPTS = [
cfg.ListOpt('drivers',
default=['lldpd'],
help=_("An ordered list of inventory LLDP driver "
"entrypoints to be loaded from the "
"inventory.agent namespace.")),
]
cfg.CONF.register_opts(INVENTORY_LLDP_OPTS, group="lldp")

View File

@ -1,47 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class InventoryLldpDriverBase(object):
"""Inventory LLDP Driver Base Class."""
@abc.abstractmethod
def lldp_has_neighbour(self, name):
pass
@abc.abstractmethod
def lldp_update(self):
pass
@abc.abstractmethod
def lldp_agents_list(self):
pass
@abc.abstractmethod
def lldp_neighbours_list(self):
pass
@abc.abstractmethod
def lldp_agents_clear(self):
pass
@abc.abstractmethod
def lldp_neighbours_clear(self):
pass
@abc.abstractmethod
def lldp_update_systemname(self, systemname):
pass

View File

@ -1,321 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from oslo_log import log as logging
import simplejson as json
import subprocess
from inventory.agent.lldp.drivers import base
from inventory.agent.lldp import plugin
from inventory.common import k_lldp
LOG = logging.getLogger(__name__)
class InventoryLldpdAgentDriver(base.InventoryLldpDriverBase):
def __init__(self, **kwargs):
self.client = ""
self.agents = []
self.neighbours = []
self.current_neighbours = []
self.previous_neighbours = []
self.current_agents = []
self.previous_agents = []
self.agent_audit_count = 0
self.neighbour_audit_count = 0
def initialize(self):
self.__init__()
@staticmethod
def _lldpd_get_agent_status():
json_obj = json
p = subprocess.Popen(["lldpcli", "-f", "json", "show",
"configuration"],
stdout=subprocess.PIPE)
data = json_obj.loads(p.communicate()[0])
configuration = data['configuration'][0]
config = configuration['config'][0]
rx_only = config['rx-only'][0]
if rx_only.get("value") == "no":
return "rx=enabled,tx=enabled"
else:
return "rx=enabled,tx=disabled"
@staticmethod
def _lldpd_get_attrs(iface):
name_or_uuid = None
chassis_id = None
system_name = None
system_desc = None
capability = None
management_address = None
port_desc = None
dot1_lag = None
dot1_port_vid = None
dot1_vid_digest = None
dot1_mgmt_vid = None
dot1_vlan_names = None
dot1_proto_vids = None
dot1_proto_ids = None
dot3_mac_status = None
dot3_max_frame = None
dot3_power_mdi = None
ttl = None
attrs = {}
# Note: dot1_vid_digest, dot1_mgmt_vid are not currently supported
# by the lldpd daemon
name_or_uuid = iface.get("name")
chassis = iface.get("chassis")[0]
port = iface.get("port")[0]
if not chassis.get('id'):
return attrs
chassis_id = chassis['id'][0].get("value")
if not port.get('id'):
return attrs
port_id = port["id"][0].get("value")
if not port.get('ttl'):
return attrs
ttl = port['ttl'][0].get("value")
if chassis.get("name"):
system_name = chassis['name'][0].get("value")
if chassis.get("descr"):
system_desc = chassis['descr'][0].get("value")
if chassis.get("capability"):
capability = ""
for cap in chassis["capability"]:
if cap.get("enabled"):
if capability:
capability += ", "
capability += cap.get("type").lower()
if chassis.get("mgmt-ip"):
management_address = ""
for addr in chassis["mgmt-ip"]:
if management_address:
management_address += ", "
management_address += addr.get("value").lower()
if port.get("descr"):
port_desc = port["descr"][0].get("value")
if port.get("link-aggregation"):
dot1_lag_supported = port["link-aggregation"][0].get("supported")
dot1_lag_enabled = port["link-aggregation"][0].get("enabled")
dot1_lag = "capable="
if dot1_lag_supported:
dot1_lag += "y,"
else:
dot1_lag += "n,"
dot1_lag += "enabled="
if dot1_lag_enabled:
dot1_lag += "y"
else:
dot1_lag += "n"
if port.get("auto-negotiation"):
port_auto_neg_support = port["auto-negotiation"][0].get(
"supported")
port_auto_neg_enabled = port["auto-negotiation"][0].get("enabled")
dot3_mac_status = "auto-negotiation-capable="
if port_auto_neg_support:
dot3_mac_status += "y,"
else:
dot3_mac_status += "n,"
dot3_mac_status += "auto-negotiation-enabled="
if port_auto_neg_enabled:
dot3_mac_status += "y,"
else:
dot3_mac_status += "n,"
advertised = ""
if port.get("auto-negotiation")[0].get("advertised"):
for adv in port["auto-negotiation"][0].get("advertised"):
if advertised:
advertised += ", "
type = adv.get("type").lower()
if adv.get("hd") and not adv.get("fd"):
type += "hd"
elif adv.get("fd"):
type += "fd"
advertised += type
dot3_mac_status += advertised
if port.get("mfs"):
dot3_max_frame = port["mfs"][0].get("value")
if port.get("power"):
power_mdi_support = port["power"][0].get("supported")
power_mdi_enabled = port["power"][0].get("enabled")
power_mdi_devicetype = port["power"][0].get("device-type")[0].get(
"value")
power_mdi_pairs = port["power"][0].get("pairs")[0].get("value")
power_mdi_class = port["power"][0].get("class")[0].get("value")
dot3_power_mdi = "power-mdi-supported="
if power_mdi_support:
dot3_power_mdi += "y,"
else:
dot3_power_mdi += "n,"
dot3_power_mdi += "power-mdi-enabled="
if power_mdi_enabled:
dot3_power_mdi += "y,"
else:
dot3_power_mdi += "n,"
if power_mdi_support and power_mdi_enabled:
dot3_power_mdi += "device-type=" + power_mdi_devicetype
dot3_power_mdi += ",pairs=" + power_mdi_pairs
dot3_power_mdi += ",class=" + power_mdi_class
vlans = None
if iface.get("vlan"):
vlans = iface.get("vlan")
if vlans:
dot1_vlan_names = ""
for vlan in vlans:
if vlan.get("pvid"):
dot1_port_vid = vlan.get("vlan-id")
continue
if dot1_vlan_names:
dot1_vlan_names += ", "
dot1_vlan_names += vlan.get("value")
ppvids = None
if iface.get("ppvids"):
ppvids = iface.get("ppvid")
if ppvids:
dot1_proto_vids = ""
for ppvid in ppvids:
if dot1_proto_vids:
dot1_proto_vids += ", "
dot1_proto_vids += ppvid.get("value")
pids = None
if iface.get("pi"):
pids = iface.get('pi')
dot1_proto_ids = ""
for id in pids:
if dot1_proto_ids:
dot1_proto_ids += ", "
dot1_proto_ids += id.get("value")
msap = chassis_id + "," + port_id
attrs = {"name_or_uuid": name_or_uuid,
k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: chassis_id,
k_lldp.LLDP_TLV_TYPE_PORT_ID: port_id,
k_lldp.LLDP_TLV_TYPE_TTL: ttl,
"msap": msap,
k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: system_name,
k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: system_desc,
k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: capability,
k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: management_address,
k_lldp.LLDP_TLV_TYPE_PORT_DESC: port_desc,
k_lldp.LLDP_TLV_TYPE_DOT1_LAG: dot1_lag,
k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID: dot1_port_vid,
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST: dot1_vid_digest,
k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID: dot1_mgmt_vid,
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: dot1_vlan_names,
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS: dot1_proto_vids,
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS: dot1_proto_ids,
k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS: dot3_mac_status,
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: dot3_max_frame,
k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI: dot3_power_mdi}
return attrs
def lldp_has_neighbour(self, name):
p = subprocess.check_output(["lldpcli", "-f", "keyvalue", "show",
"neighbors", "summary", "ports", name])
return len(p) > 0
def lldp_update(self):
subprocess.call(['lldpcli', 'update'])
def lldp_agents_list(self):
json_obj = json
lldp_agents = []
p = subprocess.Popen(["lldpcli", "-f", "json", "show", "interface",
"detail"], stdout=subprocess.PIPE)
data = json_obj.loads(p.communicate()[0])
lldp = data['lldp'][0]
if not lldp.get('interface'):
return lldp_agents
for iface in lldp['interface']:
agent_attrs = self._lldpd_get_attrs(iface)
status = self._lldpd_get_agent_status()
agent_attrs.update({"status": status})
agent = plugin.Agent(**agent_attrs)
lldp_agents.append(agent)
return lldp_agents
def lldp_agents_clear(self):
self.current_agents = []
self.previous_agents = []
def lldp_neighbours_list(self):
json_obj = json
lldp_neighbours = []
p = subprocess.Popen(["lldpcli", "-f", "json", "show", "neighbor",
"detail"], stdout=subprocess.PIPE)
data = json_obj.loads(p.communicate()[0])
lldp = data['lldp'][0]
if not lldp.get('interface'):
return lldp_neighbours
for iface in lldp['interface']:
neighbour_attrs = self._lldpd_get_attrs(iface)
neighbour = plugin.Neighbour(**neighbour_attrs)
lldp_neighbours.append(neighbour)
return lldp_neighbours
def lldp_neighbours_clear(self):
self.current_neighbours = []
self.previous_neighbours = []
def lldp_update_systemname(self, systemname):
p = subprocess.Popen(["lldpcli", "-f", "json", "show", "chassis"],
stdout=subprocess.PIPE)
data = json.loads(p.communicate()[0])
local_chassis = data['local-chassis'][0]
chassis = local_chassis['chassis'][0]
name = chassis.get('name', None)
if name is None or not name[0].get("value"):
return
name = name[0]
hostname = name.get("value").partition(':')[0]
newname = hostname + ":" + systemname
p = subprocess.Popen(["lldpcli", "configure", "system", "hostname",
newname], stdout=subprocess.PIPE)
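For reference, a minimal sketch of the lldpcli JSON shape that the parsing above consumes; the structure is inferred from the accesses in this driver, and the values are illustrative only:

sample = {
    "lldp": [{
        "interface": [{
            "name": "ens785f0",
            "vlan": [
                {"vlan-id": "100", "pvid": True, "value": "v100"},
                {"vlan-id": "200", "value": "v200"},
            ],
            "ppvid": [{"value": "0"}],
            "pi": [{"value": "88cc"}],
            # the "chassis" and "port" sub-trees carry the chassis and
            # port TLVs, e.g. port[0]["mfs"][0]["value"] is the max frame
        }]
    }]
}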


@ -1,167 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
import simplejson as json
import subprocess
from oslo_log import log as logging
from inventory.agent.lldp.drivers.lldpd import driver as lldpd_driver
from inventory.common import k_lldp
LOG = logging.getLogger(__name__)
class InventoryOVSAgentDriver(lldpd_driver.InventoryLldpdAgentDriver):
def run_cmd(self, cmd):
p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
output, error = p.communicate()
if p.returncode != 0:
LOG.error("Failed to run command %s: error: %s", cmd, error)
return None
return output
def lldp_ovs_get_interface_port_map(self):
interface_port_map = {}
cmd = "ovs-vsctl --timeout 10 --format json "\
"--columns name,_uuid,interfaces list Port"
output = self.run_cmd(cmd)
if not output:
return
ports = json.loads(output)
ports = ports['data']
for port in ports:
port_uuid = port[1][1]
interfaces = port[2][1]
if isinstance(interfaces, list):
for interface in interfaces:
interface_uuid = interface[1]
interface_port_map[interface_uuid] = port_uuid
else:
interface_uuid = interfaces
interface_port_map[interface_uuid] = port_uuid
return interface_port_map
def lldp_ovs_get_port_bridge_map(self):
port_bridge_map = {}
cmd = "ovs-vsctl --timeout 10 --format json "\
"--columns name,ports list Bridge"
output = self.run_cmd(cmd)
if not output:
return
bridges = json.loads(output)
bridges = bridges['data']
for bridge in bridges:
bridge_name = bridge[0]
port_set = bridge[1][1]
for port in port_set:
value = port[1]
port_bridge_map[value] = bridge_name
return port_bridge_map
def lldp_ovs_lldp_flow_exists(self, brname, in_port):
cmd = "ovs-ofctl dump-flows {} in_port={},dl_dst={},dl_type={}".format(
brname, in_port, k_lldp.LLDP_MULTICAST_ADDRESS,
k_lldp.LLDP_ETHER_TYPE)
output = self.run_cmd(cmd)
if not output:
return None
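# dump-flows always prints a reply header line, so more than one line
# of output means at least one matching flow exists.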
return (output.count("\n") > 1)
def lldp_ovs_add_flows(self, brname, in_port, out_port):
cmd = ("ovs-ofctl add-flow {} in_port={},dl_dst={},dl_type={},"
"actions=output:{}".format(
brname, in_port, k_lldp.LLDP_MULTICAST_ADDRESS,
k_lldp.LLDP_ETHER_TYPE, out_port))
output = self.run_cmd(cmd)
if not output:
return
cmd = ("ovs-ofctl add-flow {} in_port={},dl_dst={},dl_type={},"
"actions=output:{}".format(
brname, out_port, k_lldp.LLDP_MULTICAST_ADDRESS,
k_lldp.LLDP_ETHER_TYPE, in_port))
output = self.run_cmd(cmd)
if not output:
return
def lldp_ovs_update_flows(self):
port_bridge_map = self.lldp_ovs_get_port_bridge_map()
if not port_bridge_map:
return
interface_port_map = self.lldp_ovs_get_interface_port_map()
if not interface_port_map:
return
cmd = "ovs-vsctl --timeout 10 --format json "\
"--columns name,_uuid,type,other_config list Interface"
output = self.run_cmd(cmd)
if not output:
return
data = json.loads(output)
data = data['data']
for interface in data:
name = interface[0]
uuid = interface[1][1]
type = interface[2]
other_config = interface[3]
if type != 'internal':
continue
config_map = other_config[1]
for config in config_map:
key = config[0]
value = config[1]
if key != 'lldp_phy_peer':
continue
phy_peer = value
brname = port_bridge_map[interface_port_map[uuid]]
if not self.lldp_ovs_lldp_flow_exists(brname, name):
LOG.info("Adding missing LLDP flow from %s to %s",
name, phy_peer)
self.lldp_ovs_add_flows(brname, name, phy_peer)
if not self.lldp_ovs_lldp_flow_exists(brname, value):
LOG.info("Adding missing LLDP flow from %s to %s",
phy_peer, name)
self.lldp_ovs_add_flows(brname, phy_peer, name)
def lldp_agents_list(self):
self.lldp_ovs_update_flows()
return lldpd_driver.InventoryLldpdAgentDriver.lldp_agents_list(self)
def lldp_neighbours_list(self):
self.lldp_ovs_update_flows()
return lldpd_driver.InventoryLldpdAgentDriver.lldp_neighbours_list(
self)
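For illustration, assuming the k_lldp constants hold the standard LLDP multicast address (01:80:c2:00:00:0e) and EtherType (0x88cc), and taking hypothetical names brname="br-phy0", in_port="lldp0" and out_port="eth0", lldp_ovs_add_flows above effectively issues:

ovs-ofctl add-flow br-phy0 in_port=lldp0,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc,actions=output:eth0
ovs-ofctl add-flow br-phy0 in_port=eth0,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc,actions=output:lldp0

i.e. LLDP frames are relayed in both directions between the internal port and its physical peer, so that lldpd can see the far-end device.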


@ -1,176 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from inventory.common import exception
from oslo_config import cfg
from oslo_log import log
from stevedore.named import NamedExtensionManager
LOG = log.getLogger(__name__)
cfg.CONF.import_opt('drivers',
'inventory.agent.lldp.config',
group='lldp')
class InventoryLldpDriverManager(NamedExtensionManager):
"""Implementation of Inventory LLDP drivers."""
def __init__(self, namespace='inventory.agent.lldp.drivers'):
# Registered inventory lldp agent drivers, keyed by name.
self.drivers = {}
# Ordered list of inventory lldp agent drivers, defining
# the order in which the drivers are called.
self.ordered_drivers = []
names = cfg.CONF.lldp.drivers
LOG.info("Configured inventory LLDP agent drivers: %s", names)
super(InventoryLldpDriverManager, self).__init__(
namespace,
names,
invoke_on_load=True,
name_order=True)
LOG.info("Loaded inventory LLDP agent drivers: %s", self.names())
self._register_drivers()
def _register_drivers(self):
"""Register all inventory LLDP agent drivers.
This method should only be called once in the
InventoryLldpDriverManager constructor.
"""
for ext in self:
self.drivers[ext.name] = ext
self.ordered_drivers.append(ext)
LOG.info("Registered inventory LLDP agent drivers: %s",
[driver.name for driver in self.ordered_drivers])
def _call_drivers_and_return_array(self, method_name, attr=None,
raise_orig_exc=False):
"""Helper method for calling a method across all drivers.
:param method_name: name of the method to call
:param attr: an optional attribute to provide to the drivers
:param raise_orig_exc: whether or not to raise the original
driver exception, or use a general one
"""
ret = []
for driver in self.ordered_drivers:
try:
method = getattr(driver.obj, method_name)
if attr:
ret = ret + method(attr)
else:
ret = ret + method()
except Exception as e:
LOG.exception(e)
LOG.error(
"Inventory LLDP agent driver '%(name)s' "
"failed in %(method)s",
{'name': driver.name, 'method': method_name}
)
if raise_orig_exc:
raise
else:
raise exception.LLDPDriverError(
method=method_name
)
return list(set(ret))
def _call_drivers(self, method_name, attr=None, raise_orig_exc=False):
"""Helper method for calling a method across all drivers.
:param method_name: name of the method to call
:param attr: an optional attribute to provide to the drivers
:param raise_orig_exc: whether or not to raise the original
driver exception, or use a general one
"""
for driver in self.ordered_drivers:
try:
method = getattr(driver.obj, method_name)
if attr:
method(attr)
else:
method()
except Exception as e:
LOG.exception(e)
LOG.error(
"Inventory LLDP agent driver '%(name)s' "
"failed in %(method)s",
{'name': driver.name, 'method': method_name}
)
if raise_orig_exc:
raise
else:
raise exception.LLDPDriverError(
method=method_name
)
def lldp_has_neighbour(self, name):
try:
return self._call_drivers("lldp_has_neighbour",
attr=name,
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return []
def lldp_update(self):
try:
return self._call_drivers("lldp_update",
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return []
def lldp_agents_list(self):
try:
return self._call_drivers_and_return_array("lldp_agents_list",
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return []
def lldp_neighbours_list(self):
try:
return self._call_drivers_and_return_array("lldp_neighbours_list",
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return []
def lldp_agents_clear(self):
try:
return self._call_drivers("lldp_agents_clear",
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return
def lldp_neighbours_clear(self):
try:
return self._call_drivers("lldp_neighbours_clear",
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return
def lldp_update_systemname(self, systemname):
try:
return self._call_drivers("lldp_update_systemname",
attr=systemname,
raise_orig_exc=True)
except Exception as e:
LOG.exception(e)
return
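The manager above is a thin wrapper over stevedore. A minimal sketch of the discovery pattern it relies on, assuming a driver named 'lldpd' is registered under the 'inventory.agent.lldp.drivers' entry-point namespace (the real names come from the [lldp]/drivers option):

from stevedore.named import NamedExtensionManager

mgr = NamedExtensionManager(
    namespace='inventory.agent.lldp.drivers',
    names=['lldpd'],       # assumed driver name from configuration
    invoke_on_load=True,   # instantiate each driver class on load
    name_order=True)       # call drivers in the configured order
for ext in mgr:            # each extension wraps one loaded driver
    print(ext.name, ext.obj)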


@ -1,246 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from oslo_log import log
from oslo_utils import excutils
from inventory.agent.lldp import manager
from inventory.common import exception
from inventory.common import k_lldp
from inventory.common.utils import compare as cmp
LOG = log.getLogger(__name__)
class Key(object):
def __init__(self, chassisid, portid, portname):
self.chassisid = chassisid
self.portid = portid
self.portname = portname
def __hash__(self):
return hash((self.chassisid, self.portid, self.portname))
def __cmp__(self, rhs):
return (cmp(self.chassisid, rhs.chassisid) or
cmp(self.portid, rhs.portid) or
cmp(self.portname, rhs.portname))
def __eq__(self, rhs):
return (self.chassisid == rhs.chassisid and
self.portid == rhs.portid and
self.portname == rhs.portname)
def __ne__(self, rhs):
return (self.chassisid != rhs.chassisid or
self.portid != rhs.portid or
self.portname != rhs.portname)
def __str__(self):
return "%s [%s] [%s]" % (self.portname, self.chassisid, self.portid)
def __repr__(self):
return "<Key '%s'>" % str(self)
class Agent(object):
'''Class to encapsulate LLDP agent data for System Inventory'''
def __init__(self, **kwargs):
'''Construct an Agent object with the given values.'''
self.key = Key(kwargs.get(k_lldp.LLDP_TLV_TYPE_CHASSIS_ID),
kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_ID),
kwargs.get("name_or_uuid"))
self.status = kwargs.get('status')
self.ttl = kwargs.get(k_lldp.LLDP_TLV_TYPE_TTL)
self.system_name = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME)
self.system_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC)
self.port_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_DESC)
self.capabilities = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP)
self.mgmt_addr = kwargs.get(k_lldp.LLDP_TLV_TYPE_MGMT_ADDR)
self.dot1_lag = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_LAG)
self.dot1_vlan_names = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES)
self.dot3_max_frame = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME)
self.state = None
def __hash__(self):
return self.key.__hash__()
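# Note: __eq__ and __ne__ are deliberately not symmetric negations:
# equality matches on the key alone (so set operations collapse entries
# for the same port), while inequality also reports any changed
# attribute, which supports change detection between samples.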
def __eq__(self, rhs):
return (self.key == rhs.key)
def __ne__(self, rhs):
return (self.key != rhs.key or
self.status != rhs.status or
self.ttl != rhs.ttl or
self.system_name != rhs.system_name or
self.system_desc != rhs.system_desc or
self.port_desc != rhs.port_desc or
self.capabilities != rhs.capabilities or
self.mgmt_addr != rhs.mgmt_addr or
self.dot1_lag != rhs.dot1_lag or
self.dot1_vlan_names != rhs.dot1_vlan_names or
self.dot3_max_frame != rhs.dot3_max_frame or
self.state != rhs.state)
def __str__(self):
return "%s: [%s] [%s] [%s], [%s], [%s], [%s], [%s], [%s]" % (
self.key, self.status, self.system_name, self.system_desc,
self.port_desc, self.capabilities,
self.mgmt_addr, self.dot1_lag,
self.dot3_max_frame)
def __repr__(self):
return "<Agent '%s'>" % str(self)
class Neighbour(object):
'''Class to encapsulate LLDP neighbour data for System Inventory'''
def __init__(self, **kwargs):
'''Construct a Neighbour object with the given values.'''
self.key = Key(kwargs.get(k_lldp.LLDP_TLV_TYPE_CHASSIS_ID),
kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_ID),
kwargs.get("name_or_uuid"))
self.msap = kwargs.get('msap')
self.ttl = kwargs.get(k_lldp.LLDP_TLV_TYPE_TTL)
self.system_name = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME)
self.system_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC)
self.port_desc = kwargs.get(k_lldp.LLDP_TLV_TYPE_PORT_DESC)
self.capabilities = kwargs.get(k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP)
self.mgmt_addr = kwargs.get(k_lldp.LLDP_TLV_TYPE_MGMT_ADDR)
self.dot1_port_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID)
self.dot1_vid_digest = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST)
self.dot1_mgmt_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID)
self.dot1_vid_digest = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST)
self.dot1_mgmt_vid = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID)
self.dot1_lag = kwargs.get(k_lldp.LLDP_TLV_TYPE_DOT1_LAG)
self.dot1_vlan_names = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES)
self.dot1_proto_vids = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS)
self.dot1_proto_ids = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS)
self.dot3_mac_status = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS)
self.dot3_max_frame = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME)
self.dot3_power_mdi = kwargs.get(
k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI)
self.state = None
def __hash__(self):
return self.key.__hash__()
def __eq__(self, rhs):
return (self.key == rhs.key)
def __ne__(self, rhs):
return (self.key != rhs.key or
self.msap != rhs.msap or
self.system_name != rhs.system_name or
self.system_desc != rhs.system_desc or
self.port_desc != rhs.port_desc or
self.capabilities != rhs.capabilities or
self.mgmt_addr != rhs.mgmt_addr or
self.dot1_port_vid != rhs.dot1_port_vid or
self.dot1_vid_digest != rhs.dot1_vid_digest or
self.dot1_mgmt_vid != rhs.dot1_mgmt_vid or
self.dot1_vid_digest != rhs.dot1_vid_digest or
self.dot1_mgmt_vid != rhs.dot1_mgmt_vid or
self.dot1_lag != rhs.dot1_lag or
self.dot1_vlan_names != rhs.dot1_vlan_names or
self.dot1_proto_vids != rhs.dot1_proto_vids or
self.dot1_proto_ids != rhs.dot1_proto_ids or
self.dot3_mac_status != rhs.dot3_mac_status or
self.dot3_max_frame != rhs.dot3_max_frame or
self.dot3_power_mdi != rhs.dot3_power_mdi)
def __str__(self):
return "%s [%s] [%s] [%s], [%s]" % (
self.key, self.system_name, self.system_desc,
self.port_desc, self.capabilities)
def __repr__(self):
return "<Neighbour '%s'>" % str(self)
class InventoryLldpPlugin(object):
"""Implementation of the Plugin."""
def __init__(self):
self.manager = manager.InventoryLldpDriverManager()
def lldp_has_neighbour(self, name):
try:
return self.manager.lldp_has_neighbour(name)
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP has neighbour failed")
def lldp_update(self):
try:
self.manager.lldp_update()
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP update failed")
def lldp_agents_list(self):
try:
agents = self.manager.lldp_agents_list()
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP agents list failed")
return agents
def lldp_agents_clear(self):
try:
self.manager.lldp_agents_clear()
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP agents clear failed")
def lldp_neighbours_list(self):
try:
neighbours = self.manager.lldp_neighbours_list()
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP neighbours list failed")
return neighbours
def lldp_neighbours_clear(self):
try:
self.manager.lldp_neighbours_clear()
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP neighbours clear failed")
def lldp_update_systemname(self, systemname):
try:
self.manager.lldp_update_systemname(systemname)
except exception.LLDPDriverError as e:
LOG.exception(e)
with excutils.save_and_reraise_exception():
LOG.error("LLDP update systemname failed")


@ -1,973 +0,0 @@
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
""" Perform activity related to local inventory.
A single instance of :py:class:`inventory.agent.manager.AgentManager` is
created within the *inventory-agent* process, and is responsible for
performing all actions for this host managed by inventory .
On start, collect and post inventory.
Commands (from conductors) are received via RPC calls.
"""
import errno
import fcntl
import os
import oslo_messaging as messaging
import socket
import subprocess
import time
from futurist import periodics
from oslo_config import cfg
from oslo_log import log
# from inventory.agent import partition
from inventory.agent import base_manager
from inventory.agent.lldp import plugin as lldp_plugin
from inventory.agent import node
from inventory.agent import pci
from inventory.common import constants
from inventory.common import context as mycontext
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_host
from inventory.common import k_lldp
from inventory.common import utils
from inventory.conductor import rpcapi as conductor_rpcapi
import tsconfig.tsconfig as tsc
MANAGER_TOPIC = 'inventory.agent_manager'
LOG = log.getLogger(__name__)
agent_opts = [
cfg.StrOpt('api_url',
           default=None,
           help=('URL of the Inventory API service. If not set, '
                 'Inventory can get the current value from the '
                 'Keystone service catalog.')),
cfg.IntOpt('audit_interval',
           default=60,
           help='Maximum time since the last check-in of an agent'),
]
CONF = cfg.CONF
CONF.register_opts(agent_opts, 'agent')
MAXSLEEP = 300 # 5 minutes
INVENTORY_READY_FLAG = os.path.join(tsc.VOLATILE_PATH, ".inventory_ready")
FIRST_BOOT_FLAG = os.path.join(
tsc.PLATFORM_CONF_PATH, ".first_boot")
class AgentManager(base_manager.BaseAgentManager):
"""Inventory Agent service main class."""
# Must be in sync with rpcapi.AgentAPI's
RPC_API_VERSION = '1.0'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, host, topic):
super(AgentManager, self).__init__(host, topic)
self._report_to_conductor = False
self._report_to_conductor_iplatform_avail_flag = False
self._ipci_operator = pci.PCIOperator()
self._inode_operator = node.NodeOperator()
self._lldp_operator = lldp_plugin.InventoryLldpPlugin()
self._ihost_personality = None
self._ihost_uuid = ""
self._agent_throttle = 0
self._subfunctions = None
self._subfunctions_configured = False
self._notify_subfunctions_alarm_clear = False
self._notify_subfunctions_alarm_raise = False
self._first_grub_update = False
@property
def report_to_conductor_required(self):
return self._report_to_conductor
@report_to_conductor_required.setter
def report_to_conductor_required(self, val):
if not isinstance(val, bool):
raise ValueError("report_to_conductor_required not bool %s" %
val)
self._report_to_conductor = val
def start(self):
# Do not collect inventory and report to conductor at startup in
# order to eliminate two inventory reports
# (one from here and one from audit) being sent to the conductor
super(AgentManager, self).start()
if os.path.isfile('/etc/inventory/inventory.conf'):
LOG.info("inventory-agent started, "
"inventory to be reported by audit")
else:
LOG.info("No config file for inventory-agent found.")
if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX:
utils.touch(INVENTORY_READY_FLAG)
def init_host(self, admin_context=None):
super(AgentManager, self).init_host(admin_context)
if os.path.isfile('/etc/inventory/inventory.conf'):
LOG.info(_("inventory-agent started, "
"system config to be reported by audit"))
else:
LOG.info(_("No config file for inventory-agent found."))
if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX:
utils.touch(INVENTORY_READY_FLAG)
def del_host(self, deregister=True):
return
def periodic_tasks(self, context, raise_on_error=False):
"""Periodic tasks are run at pre-specified intervals. """
return self.run_periodic_tasks(context,
raise_on_error=raise_on_error)
def _report_to_conductor_iplatform_avail(self):
utils.touch(INVENTORY_READY_FLAG)
time.sleep(1) # give time for conductor to process
self._report_to_conductor_iplatform_avail_flag = True
def _update_ttys_dcd_status(self, context, host_id):
# Retrieve the serial line carrier detect flag
ttys_dcd = None
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
try:
ttys_dcd = rpcapi.get_host_ttys_dcd(context, host_id)
except exception.InventoryException:
LOG.exception("Inventory Agent exception getting host ttys_dcd.")
pass
if ttys_dcd is not None:
self._config_ttys_login(ttys_dcd)
else:
LOG.debug("ttys_dcd is not configured")
@staticmethod
def _get_active_device():
# the list of currently configured console devices,
# like 'tty1 ttyS0' or just 'ttyS0'
# The last entry in the file is the active device connected
# to /dev/console.
active_device = 'ttyS0'
try:
cmd = 'cat /sys/class/tty/console/active | grep ttyS'
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output = proc.stdout.read().strip()
proc.communicate()[0]
if proc.returncode != 0:
LOG.info("Cannot find the current configured serial device, "
"return default %s" % active_device)
return active_device
# if more than one device is found, take the last entry
if ' ' in output:
devs = output.split(' ')
active_device = devs[len(devs) - 1]
else:
active_device = output
except subprocess.CalledProcessError as e:
LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode)
except OSError as e:
LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno)
return active_device
@staticmethod
def _is_local_flag_disabled(device):
"""
:param device:
:return: boolean: True if the local flag is disabled 'i.e. -clocal is
set'. This means the serial data carrier detect
signal is significant
"""
try:
# uses -o for only-matching and -e for a pattern beginning with a
# hyphen (-), the following command returns 0 if the local flag
# is disabled
cmd = 'stty -a -F /dev/%s | grep -o -e -clocal' % device
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
proc.communicate()[0]
return proc.returncode == 0
except subprocess.CalledProcessError as e:
LOG.error("Failed to execute (%s) (%d)", cmd, e.returncode)
return False
except OSError as e:
LOG.error("Failed to execute (%s) OS error (%d)", cmd, e.errno)
return False
def _config_ttys_login(self, ttys_dcd):
# agetty is now enabled by systemd
# we only need to disable the local flag to enable carrier detection
# and enable the local flag when the feature is turned off
toggle_flag = None
active_device = self._get_active_device()
local_flag_disabled = self._is_local_flag_disabled(active_device)
if str(ttys_dcd) in ['True', 'true']:
LOG.info("ttys_dcd is enabled")
# check if the local flag is disabled
if not local_flag_disabled:
LOG.info("Disable (%s) local line" % active_device)
toggle_flag = 'stty -clocal -F /dev/%s' % active_device
else:
if local_flag_disabled:
# enable local flag to ignore the carrier detection
LOG.info("Enable local flag for device :%s" % active_device)
toggle_flag = 'stty clocal -F /dev/%s' % active_device
if toggle_flag:
try:
subprocess.Popen(toggle_flag, stdout=subprocess.PIPE,
shell=True)
# restart serial-getty
restart_cmd = ('systemctl restart serial-getty@%s.service'
% active_device)
subprocess.check_call(restart_cmd, shell=True)
except subprocess.CalledProcessError as e:
LOG.error("subprocess error: (%d)", e.returncode)
def _force_grub_update(self):
"""Force update the grub on the first AIO controller after the initial
config is completed
"""
if (not self._first_grub_update and
os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG)):
self._first_grub_update = True
return True
return False
def host_lldp_get_and_report(self, context, rpcapi, host_uuid):
neighbour_dict_array = []
agent_dict_array = []
neighbours = []
agents = []
try:
neighbours = self._lldp_operator.lldp_neighbours_list()
except Exception as e:
LOG.error("Failed to get LLDP neighbours: %s", str(e))
for neighbour in neighbours:
neighbour_dict = {
'name_or_uuid': neighbour.key.portname,
'msap': neighbour.msap,
'state': neighbour.state,
k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: neighbour.key.chassisid,
k_lldp.LLDP_TLV_TYPE_PORT_ID: neighbour.key.portid,
k_lldp.LLDP_TLV_TYPE_TTL: neighbour.ttl,
k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: neighbour.system_name,
k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: neighbour.system_desc,
k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: neighbour.capabilities,
k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: neighbour.mgmt_addr,
k_lldp.LLDP_TLV_TYPE_PORT_DESC: neighbour.port_desc,
k_lldp.LLDP_TLV_TYPE_DOT1_LAG: neighbour.dot1_lag,
k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID: neighbour.dot1_port_vid,
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST:
neighbour.dot1_vid_digest,
k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID: neighbour.dot1_mgmt_vid,
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS:
neighbour.dot1_proto_vids,
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS:
neighbour.dot1_proto_ids,
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES:
neighbour.dot1_vlan_names,
k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS:
neighbour.dot3_mac_status,
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME:
neighbour.dot3_max_frame,
k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI:
neighbour.dot3_power_mdi,
}
neighbour_dict_array.append(neighbour_dict)
if neighbour_dict_array:
try:
rpcapi.lldp_neighbour_update_by_host(context,
host_uuid,
neighbour_dict_array)
except exception.InventoryException:
LOG.exception("Inventory Agent exception updating "
"lldp neighbours.")
self._lldp_operator.lldp_neighbours_clear()
pass
try:
agents = self._lldp_operator.lldp_agents_list()
except Exception as e:
LOG.error("Failed to get LLDP agents: %s", str(e))
for agent in agents:
agent_dict = {
'name_or_uuid': agent.key.portname,
'state': agent.state,
'status': agent.status,
k_lldp.LLDP_TLV_TYPE_CHASSIS_ID: agent.key.chassisid,
k_lldp.LLDP_TLV_TYPE_PORT_ID: agent.key.portid,
k_lldp.LLDP_TLV_TYPE_TTL: agent.ttl,
k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME: agent.system_name,
k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC: agent.system_desc,
k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP: agent.capabilities,
k_lldp.LLDP_TLV_TYPE_MGMT_ADDR: agent.mgmt_addr,
k_lldp.LLDP_TLV_TYPE_PORT_DESC: agent.port_desc,
k_lldp.LLDP_TLV_TYPE_DOT1_LAG: agent.dot1_lag,
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES: agent.dot1_vlan_names,
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME: agent.dot3_max_frame,
}
agent_dict_array.append(agent_dict)
if agent_dict_array:
try:
rpcapi.lldp_agent_update_by_host(context,
host_uuid,
agent_dict_array)
except exception.InventoryException:
LOG.exception("Inventory Agent exception updating "
"lldp agents.")
self._lldp_operator.lldp_agents_clear()
pass
def synchronized_network_config(func):
"""Synchronization decorator to acquire and release
network_config_lock.
"""
def wrap(self, *args, **kwargs):
try:
# Get lock to avoid conflict with apply_network_config.sh
lockfd = self._acquire_network_config_lock()
return func(self, *args, **kwargs)
finally:
self._release_network_config_lock(lockfd)
return wrap
@synchronized_network_config
def _lldp_enable_and_report(self, context, rpcapi, host_uuid):
"""Temporarily enable interfaces and get lldp neighbor information.
This method should only be called before
INITIAL_CONFIG_COMPLETE_FLAG is set.
"""
links_down = []
try:
# Turn on interfaces, so that lldpd can show all neighbors
for interface in self._ipci_operator.pci_get_net_names():
flag = self._ipci_operator.pci_get_net_flags(interface)
# If administrative state is down, bring it up momentarily
if not (flag & pci.IFF_UP):
subprocess.call(['ip', 'link', 'set', interface, 'up'])
links_down.append(interface)
LOG.info('interface %s enabled to receive LLDP PDUs' %
interface)
self._lldp_operator.lldp_update()
# delay maximum 30 seconds for lldpd to receive LLDP PDU
timeout = 0
link_wait_for_lldp = True
while timeout < 30 and link_wait_for_lldp and links_down:
time.sleep(5)
timeout = timeout + 5
link_wait_for_lldp = False
for link in links_down:
if not self._lldp_operator.lldp_has_neighbour(link):
link_wait_for_lldp = True
break
self.host_lldp_get_and_report(context, rpcapi, host_uuid)
except Exception as e:
LOG.exception(e)
pass
finally:
# restore interface administrative state
for interface in links_down:
subprocess.call(['ip', 'link', 'set', interface, 'down'])
LOG.info('interface %s disabled after querying LLDP neighbors'
% interface)
def platform_update_by_host(self, rpcapi, context, host_uuid, msg_dict):
"""Update host platform information.
If this is the first boot (kickstart), then also update the Host
Action State to reinstalled, and remove the flag.
"""
if os.path.exists(FIRST_BOOT_FLAG):
msg_dict.update({k_host.HOST_ACTION_STATE:
k_host.HAS_REINSTALLED})
try:
rpcapi.platform_update_by_host(context,
host_uuid,
msg_dict)
if os.path.exists(FIRST_BOOT_FLAG):
os.remove(FIRST_BOOT_FLAG)
LOG.info("Removed %s" % FIRST_BOOT_FLAG)
except exception.InventoryException:
LOG.warn("platform_update_by_host exception "
"host_uuid=%s msg_dict=%s." %
(host_uuid, msg_dict))
pass
LOG.info("Inventory Agent platform update by host: %s" % msg_dict)
def _acquire_network_config_lock(self):
"""Synchronization with apply_network_config.sh
This method is to acquire the lock to avoid
conflict with execution of apply_network_config.sh
during puppet manifest application.
:returns: fd of the lock, if successful. 0 on error.
"""
lock_file_fd = os.open(
constants.NETWORK_CONFIG_LOCK_FILE, os.O_CREAT | os.O_RDONLY)
count = 1
delay = 5
max_count = 5
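# LOCK_NB with a bounded retry loop is used instead of a blocking flock
# so the agent cannot stall indefinitely behind apply_network_config.sh;
# callers treat a return of 0 as failure.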
while count <= max_count:
try:
fcntl.flock(lock_file_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return lock_file_fd
except IOError as e:
# raise on unrelated IOErrors
if e.errno != errno.EAGAIN:
raise
else:
LOG.info("Could not acquire lock({}): {} ({}/{}), "
"will retry".format(lock_file_fd, str(e),
count, max_count))
time.sleep(delay)
count += 1
LOG.error("Failed to acquire lock (fd={})".format(lock_file_fd))
return 0
def _release_network_config_lock(self, lockfd):
"""Release the lock guarding apply_network_config.sh """
if lockfd:
fcntl.flock(lockfd, fcntl.LOCK_UN)
os.close(lockfd)
def ihost_inv_get_and_report(self, icontext):
"""Collect data for an ihost.
This method allows an ihost data to be collected.
:param: icontext: an admin context
:returns: updated ihost object, including all fields.
"""
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
ihost = None
# find list of network related inics for this ihost
inics = self._ipci_operator.inics_get()
# create an array of ports for each net entry of the NIC device
iports = []
for inic in inics:
lockfd = 0
try:
# Get lock to avoid conflict with apply_network_config.sh
lockfd = self._acquire_network_config_lock()
pci_net_array = \
self._ipci_operator.pci_get_net_attrs(inic.pciaddr)
finally:
self._release_network_config_lock(lockfd)
for net in pci_net_array:
iports.append(pci.Port(inic, **net))
# find list of pci devices for this host
pci_devices = self._ipci_operator.pci_devices_get()
# create an array of pci_devs for each net entry of the device
pci_devs = []
for pci_dev in pci_devices:
pci_dev_array = \
self._ipci_operator.pci_get_device_attrs(pci_dev.pciaddr)
for dev in pci_dev_array:
pci_devs.append(pci.PCIDevice(pci_dev, **dev))
# create a list of MAC addresses that will be used to identify the
# inventoried host (one of the MACs should be the management MAC)
host_macs = [port.mac for port in iports if port.mac]
# get my ihost record which should be avail since booted
LOG.debug('Inventory Agent iports={}, host_macs={}'.format(
iports, host_macs))
slept = 0
while slept < MAXSLEEP:
# wait for the controller to come up first; this may be a
# DOR (dead office recovery)
try:
ihost = rpcapi.get_host_by_macs(icontext, host_macs)
except messaging.MessagingTimeout:
LOG.info("get_host_by_macs Messaging Timeout.")
except Exception as ex:
LOG.warn("Conductor RPC get_host_by_macs exception "
"response %s" % ex)
if not ihost:
hostname = socket.gethostname()
if hostname != k_host.LOCALHOST_HOSTNAME:
try:
ihost = rpcapi.get_host_by_hostname(icontext,
hostname)
except messaging.MessagingTimeout:
LOG.info("get_host_by_hostname Messaging Timeout.")
return # wait for next audit cycle
except Exception as ex:
LOG.warn("Conductor RPC get_host_by_hostname "
"exception response %s" % ex)
if ihost and ihost.get('personality'):
self.report_to_conductor_required = True
self._ihost_uuid = ihost['uuid']
self._ihost_personality = ihost['personality']
if os.path.isfile(tsc.PLATFORM_CONF_FILE):
# read the platform config file and check for UUID
found = False
with open(tsc.PLATFORM_CONF_FILE, "r") as fd:
for line in fd:
if line.find("UUID=") == 0:
found = True
if not found:
# the UUID is not found, append it
with open(tsc.PLATFORM_CONF_FILE, "a") as fd:
fd.write("UUID=" + self._ihost_uuid + "\n")
# Report host install status
msg_dict = {}
self.platform_update_by_host(rpcapi,
icontext,
self._ihost_uuid,
msg_dict)
LOG.info("Agent found matching ihost: %s" % ihost['uuid'])
break
time.sleep(30)
slept += 30
if not self.report_to_conductor_required:
# let the audit take care of it instead
LOG.info("Inventory no matching ihost found... await Audit")
return
subfunctions = self.subfunctions_get()
try:
rpcapi.subfunctions_update_by_host(icontext,
ihost['uuid'],
subfunctions)
except exception.InventoryException:
LOG.exception("Inventory Agent exception updating "
"subfunctions conductor.")
pass
# post to inventory db by ihost['uuid']
iport_dict_array = []
for port in iports:
inic_dict = {'pciaddr': port.ipci.pciaddr,
'pclass': port.ipci.pclass,
'pvendor': port.ipci.pvendor,
'pdevice': port.ipci.pdevice,
'prevision': port.ipci.prevision,
'psvendor': port.ipci.psvendor,
'psdevice': port.ipci.psdevice,
'pname': port.name,
'numa_node': port.numa_node,
'sriov_totalvfs': port.sriov_totalvfs,
'sriov_numvfs': port.sriov_numvfs,
'sriov_vfs_pci_address': port.sriov_vfs_pci_address,
'driver': port.driver,
'mac': port.mac,
'mtu': port.mtu,
'speed': port.speed,
'link_mode': port.link_mode,
'dev_id': port.dev_id,
'dpdksupport': port.dpdksupport}
LOG.debug('Inventory Agent inic {}'.format(inic_dict))
iport_dict_array.append(inic_dict)
try:
# may get duplicate key if already sent on earlier init
rpcapi.port_update_by_host(icontext,
ihost['uuid'],
iport_dict_array)
except messaging.MessagingTimeout:
LOG.info("pci_device_update_by_host Messaging Timeout.")
self.report_to_conductor_required = False
return # wait for next audit cycle
# post to inventory db by ihost['uuid']
pci_device_dict_array = []
for dev in pci_devs:
pci_dev_dict = {'name': dev.name,
'pciaddr': dev.pci.pciaddr,
'pclass_id': dev.pclass_id,
'pvendor_id': dev.pvendor_id,
'pdevice_id': dev.pdevice_id,
'pclass': dev.pci.pclass,
'pvendor': dev.pci.pvendor,
'pdevice': dev.pci.pdevice,
'prevision': dev.pci.prevision,
'psvendor': dev.pci.psvendor,
'psdevice': dev.pci.psdevice,
'numa_node': dev.numa_node,
'sriov_totalvfs': dev.sriov_totalvfs,
'sriov_numvfs': dev.sriov_numvfs,
'sriov_vfs_pci_address': dev.sriov_vfs_pci_address,
'driver': dev.driver,
'enabled': dev.enabled,
'extra_info': dev.extra_info}
LOG.debug('Inventory Agent dev {}'.format(pci_dev_dict))
pci_device_dict_array.append(pci_dev_dict)
try:
# may get duplicate key if already sent on earlier init
rpcapi.pci_device_update_by_host(icontext,
ihost['uuid'],
pci_device_dict_array)
except messaging.MessagingTimeout:
LOG.info("pci_device_update_by_host Messaging Timeout.")
self.report_to_conductor_required = True
# Find list of numa_nodes and cpus for this ihost
inumas, icpus = self._inode_operator.inodes_get_inumas_icpus()
try:
# may get duplicate key if already sent on earlier init
rpcapi.numas_update_by_host(icontext,
ihost['uuid'],
inumas)
except messaging.RemoteError as e:
LOG.error("numas_update_by_host RemoteError exc_type=%s" %
e.exc_type)
except messaging.MessagingTimeout:
LOG.info("pci_device_update_by_host Messaging Timeout.")
self.report_to_conductor_required = True
except Exception as e:
LOG.exception("Inventory Agent exception updating inuma e=%s." % e)
pass
force_grub_update = self._force_grub_update()
try:
# may get duplicate key if already sent on earlier init
rpcapi.cpus_update_by_host(icontext,
ihost['uuid'],
icpus,
force_grub_update)
except messaging.RemoteError as e:
LOG.error("cpus_update_by_host RemoteError exc_type=%s" %
e.exc_type)
except messaging.MessagingTimeout:
LOG.info("cpus_update_by_host Messaging Timeout.")
self.report_to_conductor_required = True
except exception.InventoryException:
    LOG.exception("Inventory exception updating cpus conductor.")
except Exception as e:
    LOG.exception("Inventory exception updating cpus e=%s." % e)
    self.report_to_conductor_required = True
imemory = self._inode_operator.inodes_get_imemory()
if imemory:
try:
# may get duplicate key if already sent on earlier init
rpcapi.memory_update_by_host(icontext,
ihost['uuid'],
imemory)
except messaging.MessagingTimeout:
LOG.info("memory_update_by_host Messaging Timeout.")
except messaging.RemoteError as e:
LOG.error("memory_update_by_host RemoteError exc_type=%s" %
e.exc_type)
except exception.InventoryException:
LOG.exception("Inventory Agent exception updating imemory "
"conductor.")
if self._ihost_uuid and \
os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
if not self._report_to_conductor_iplatform_avail_flag:
# and not self._wait_for_nova_lvg()
imsg_dict = {'availability': k_host.AVAILABILITY_AVAILABLE}
iscsi_initiator_name = self.get_host_iscsi_initiator_name()
if iscsi_initiator_name is not None:
imsg_dict.update({'iscsi_initiator_name':
iscsi_initiator_name})
# Before setting the host to AVAILABILITY_AVAILABLE make
# sure that nova_local aggregates are correctly set
self.platform_update_by_host(rpcapi,
icontext,
self._ihost_uuid,
imsg_dict)
self._report_to_conductor_iplatform_avail()
def subfunctions_get(self):
"""returns subfunctions on this host.
"""
self._subfunctions = ','.join(tsc.subfunctions)
return self._subfunctions
@staticmethod
def subfunctions_list_get():
"""returns list of subfunctions on this host.
"""
subfunctions = ','.join(tsc.subfunctions)
subfunctions_list = subfunctions.split(',')
return subfunctions_list
def subfunctions_configured(self, subfunctions_list):
"""Determines whether subfunctions configuration is completed.
return: Bool whether subfunctions configuration is completed.
"""
if (k_host.CONTROLLER in subfunctions_list and
k_host.COMPUTE in subfunctions_list):
if not os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
self._subfunctions_configured = False
return False
self._subfunctions_configured = True
return True
@staticmethod
def _wait_for_nova_lvg(icontext, rpcapi, ihost_uuid, nova_lvgs=None):
    """See if we wait for a provisioned nova-local volume group
    This method queries the conductor to see if we are provisioning
    a nova-local volume group on this boot cycle. This check is used
    to delay sending the platform availability to the conductor.
    :param: icontext: an admin context
    :param: rpcapi: conductor rpc api
    :param: ihost_uuid: the host uuid
    :returns: True if we are provisioning, False otherwise
    """
    # TODO: query systemconfig for nova-local provisioning status;
    # stubbed to True until that call is available
    return True
def _is_config_complete(self):
"""Check if this node has completed config
This method queries node's config flag file to see if it has
complete config.
:return: True if the complete flag file exists false otherwise
"""
if not os.path.isfile(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
return False
subfunctions = self.subfunctions_list_get()
if k_host.CONTROLLER in subfunctions:
if not os.path.isfile(tsc.INITIAL_CONTROLLER_CONFIG_COMPLETE):
return False
if k_host.COMPUTE in subfunctions:
if not os.path.isfile(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE):
return False
if k_host.STORAGE in subfunctions:
if not os.path.isfile(tsc.INITIAL_STORAGE_CONFIG_COMPLETE):
return False
return True
@periodics.periodic(spacing=CONF.agent.audit_interval,
run_immediately=True)
def _agent_audit(self, context):
# periodically, perform inventory audit
self.agent_audit(context, host_uuid=self._ihost_uuid,
force_updates=None)
def agent_audit(self, context,
host_uuid, force_updates, cinder_device=None):
# perform inventory audit
if self._ihost_uuid != host_uuid:
# The function call is not for this host agent
return
icontext = mycontext.get_admin_context()
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
if not self.report_to_conductor_required:
LOG.info("Inventory Agent audit running inv_get_and_report.")
self.ihost_inv_get_and_report(icontext)
if self._ihost_uuid and os.path.isfile(
tsc.INITIAL_CONFIG_COMPLETE_FLAG):
if (not self._report_to_conductor_iplatform_avail_flag and
not self._wait_for_nova_lvg(
icontext, rpcapi, self._ihost_uuid)):
imsg_dict = {'availability': k_host.AVAILABILITY_AVAILABLE}
iscsi_initiator_name = self.get_host_iscsi_initiator_name()
if iscsi_initiator_name is not None:
imsg_dict.update({'iscsi_initiator_name':
iscsi_initiator_name})
# Before setting the host to AVAILABILITY_AVAILABLE make
# sure that nova_local aggregates are correctly set
self.platform_update_by_host(rpcapi,
icontext,
self._ihost_uuid,
imsg_dict)
self._report_to_conductor_iplatform_avail()
if (self._ihost_personality == k_host.CONTROLLER and
not self._notify_subfunctions_alarm_clear):
subfunctions_list = self.subfunctions_list_get()
if ((k_host.CONTROLLER in subfunctions_list) and
(k_host.COMPUTE in subfunctions_list)):
if self.subfunctions_configured(subfunctions_list) and \
not self._wait_for_nova_lvg(
icontext, rpcapi, self._ihost_uuid):
ihost_notify_dict = {'subfunctions_configured': True}
rpcapi.notify_subfunctions_config(icontext,
self._ihost_uuid,
ihost_notify_dict)
self._notify_subfunctions_alarm_clear = True
else:
if not self._notify_subfunctions_alarm_raise:
ihost_notify_dict = {'subfunctions_configured':
False}
rpcapi.notify_subfunctions_config(
icontext, self._ihost_uuid, ihost_notify_dict)
self._notify_subfunctions_alarm_raise = True
else:
self._notify_subfunctions_alarm_clear = True
if self._ihost_uuid:
LOG.debug("Inventory Agent Audit running.")
if force_updates:
LOG.debug("Inventory Agent Audit force updates: (%s)" %
(', '.join(force_updates)))
self._update_ttys_dcd_status(icontext, self._ihost_uuid)
if self._agent_throttle > 5:
# throttle updates
self._agent_throttle = 0
imemory = self._inode_operator.inodes_get_imemory()
rpcapi.memory_update_by_host(icontext,
self._ihost_uuid,
imemory)
if self._is_config_complete():
self.host_lldp_get_and_report(
icontext, rpcapi, self._ihost_uuid)
else:
self._lldp_enable_and_report(
icontext, rpcapi, self._ihost_uuid)
self._agent_throttle += 1
if os.path.isfile(tsc.PLATFORM_CONF_FILE):
# read the platform config file and check for UUID
if 'UUID' not in open(tsc.PLATFORM_CONF_FILE).read():
    # the UUID was not found, append it
    with open(tsc.PLATFORM_CONF_FILE, "a") as fd:
        fd.write("UUID=" + self._ihost_uuid + "\n")
def configure_lldp_systemname(self, context, systemname):
"""Configure the systemname into the lldp agent with the supplied data.
:param context: an admin context.
:param systemname: the systemname
"""
# TODO(sc): This becomes an inventory-api call from
# via systemconfig: configure_isystemname
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
# Update the lldp agent
self._lldp_operator.lldp_update_systemname(systemname)
# Trigger an audit to ensure the db is up to date
self.host_lldp_get_and_report(context, rpcapi, self._ihost_uuid)
def configure_ttys_dcd(self, context, uuid, ttys_dcd):
"""Configure the getty on the serial device.
:param context: an admin context.
:param uuid: the host uuid
:param ttys_dcd: the flag to enable/disable dcd
"""
LOG.debug("AgentManager.configure_ttys_dcd: %s %s" % (uuid, ttys_dcd))
if self._ihost_uuid and self._ihost_uuid == uuid:
LOG.debug("AgentManager configure getty on serial console")
self._config_ttys_login(ttys_dcd)
return
def execute_command(self, context, host_uuid, command):
"""Execute a command on behalf of inventory-conductor
:param context: request context
:param host_uuid: the host uuid
:param command: the command to execute
"""
LOG.debug("AgentManager.execute_command: (%s)" % command)
if self._ihost_uuid and self._ihost_uuid == host_uuid:
LOG.info("AgentManager execute_command: (%s)" % command)
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call(command, stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError as e:
LOG.error("Failed to execute (%s) (%d)",
command, e.returncode)
except OSError as e:
LOG.error("Failed to execute (%s), OS error:(%d)",
command, e.errno)
LOG.info("(%s) executed.", command)
def get_host_iscsi_initiator_name(self):
iscsi_initiator_name = None
try:
stdout, __ = utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
run_as_root=True)
if stdout:
stdout = stdout.strip()
iscsi_initiator_name = stdout.split('=')[-1]
LOG.info("iscsi initiator name = %s" % iscsi_initiator_name)
except Exception:
LOG.error("Failed retrieving iscsi initiator name")
return iscsi_initiator_name
def update_host_memory(self, context, host_uuid):
"""update the host memory
:param context: an admin context
:param host_uuid: ihost uuid unique id
:return: None
"""
if self._ihost_uuid and self._ihost_uuid == host_uuid:
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
memory = self._inode_operator.inodes_get_imemory()
rpcapi.memory_update_by_host(context,
self._ihost_uuid,
memory,
force_update=True)
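For context, the _agent_audit callback above is scheduled by futurist periodics. A minimal, self-contained sketch of that mechanism (the worker bootstrap is illustrative; the real service wires the audit into its RPC server startup):

from futurist import periodics


class Auditor(object):
    @periodics.periodic(spacing=60, run_immediately=True)
    def audit(self):
        print("audit tick")  # stands in for the inventory audit


worker = periodics.PeriodicWorker.create([Auditor()])
worker.start()  # blocks; a real service runs this in its own thread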


@ -1,608 +0,0 @@
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
""" inventory numa node Utilities and helper functions."""
import os
from os import listdir
from os.path import isfile
from os.path import join
from oslo_log import log
import re
import subprocess
import tsconfig.tsconfig as tsc
LOG = log.getLogger(__name__)
# Defines per-socket vswitch memory requirements (in MB)
VSWITCH_MEMORY_MB = 1024
# Defines the size of one kilobyte
SIZE_KB = 1024
# Defines the size of 2 megabytes in kilobyte units
SIZE_2M_KB = 2048
# Defines the size of 1 gigabyte in kilobyte units
SIZE_1G_KB = 1048576
# Defines the size of 2 megabytes in megabyte units
SIZE_2M_MB = int(SIZE_2M_KB / SIZE_KB)
# Defines the size of 1 gigabyte in megabyte units
SIZE_1G_MB = int(SIZE_1G_KB / SIZE_KB)
# Defines the minimum size of memory for a controller node in megabyte units
CONTROLLER_MIN_MB = 6000
# Defines the minimum size of memory for a compute node in megabyte units
COMPUTE_MIN_MB = 1600
# Defines the minimum size of memory for a secondary compute node in megabyte
# units
COMPUTE_MIN_NON_0_MB = 500
class CPU(object):
'''Class to encapsulate CPU data for System Inventory'''
def __init__(self, cpu, numa_node, core, thread,
cpu_family=None, cpu_model=None, revision=None):
'''Construct a cpu object with the given values.'''
self.cpu = cpu
self.numa_node = numa_node
self.core = core
self.thread = thread
self.cpu_family = cpu_family
self.cpu_model = cpu_model
self.revision = revision
# self.allocated_functions = mgmt (usu. 0), vswitch
def __eq__(self, rhs):
return (self.cpu == rhs.cpu and
self.numa_node == rhs.numa_node and
self.core == rhs.core and
self.thread == rhs.thread)
def __ne__(self, rhs):
return (self.cpu != rhs.cpu or
self.numa_node != rhs.numa_node or
self.core != rhs.core or
self.thread != rhs.thread)
def __str__(self):
return "%s [%s] [%s] [%s]" % (self.cpu, self.numa_node,
self.core, self.thread)
def __repr__(self):
return "<CPU '%s'>" % str(self)
class NodeOperator(object):
'''Class to encapsulate CPU operations for System Inventory'''
def __init__(self):
self.num_cpus = 0
self.num_nodes = 0
self.float_cpuset = 0
self.total_memory_mb = 0
self.free_memory_mb = 0
self.total_memory_nodes_mb = []
self.free_memory_nodes_mb = []
self.topology = {}
# self._get_cpu_topology()
# self._get_total_memory_mb()
# self._get_total_memory_nodes_mb()
# self._get_free_memory_mb()
# self._get_free_memory_nodes_mb()
def _is_strict(self):
with open(os.devnull, "w") as fnull:
try:
output = subprocess.check_output(
["cat", "/proc/sys/vm/overcommit_memory"],
stderr=fnull)
if int(output) == 2:
return True
except subprocess.CalledProcessError as e:
LOG.info("Failed to check for overcommit, error (%s)",
e.output)
return False
def convert_range_string_to_list(self, s):
olist = []
s = s.strip()
if s:
for part in s.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
olist.extend(range(a, b + 1))
else:
a = int(part)
olist.append(a)
olist.sort()
return olist
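# Example: convert_range_string_to_list("0-3,8,10-11")
#          returns [0, 1, 2, 3, 8, 10, 11]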
def inodes_get_inumas_icpus(self):
'''Enumerate logical cpu topology based on parsing /proc/cpuinfo
as function of socket_id, core_id, and thread_id. This updates
topology.
:param self
:updates self.num_cpus - number of logical cpus
:updates self.num_nodes - number of sockets; maps to the number of numa nodes
:updates self.topology[socket_id][core_id][thread_id] = cpu
:returns None
'''
self.num_cpus = 0
self.num_nodes = 0
self.topology = {}
thread_cnt = {}
cpu = socket_id = core_id = thread_id = -1
re_processor = re.compile(r'^[Pp]rocessor\s+:\s+(\d+)')
re_socket = re.compile(r'^physical id\s+:\s+(\d+)')
re_core = re.compile(r'^core id\s+:\s+(\d+)')
re_cpu_family = re.compile(r'^cpu family\s+:\s+(\d+)')
re_cpu_model = re.compile(r'^model name\s+:\s+(\w+)')
inumas = []
icpus = []
sockets = []
with open('/proc/cpuinfo', 'r') as infile:
icpu_attrs = {}
for line in infile:
match = re_processor.search(line)
if match:
cpu = int(match.group(1))
socket_id = -1
core_id = -1
thread_id = -1
self.num_cpus += 1
continue
match = re_cpu_family.search(line)
if match:
name_value = [s.strip() for s in line.split(':', 1)]
name, value = name_value
icpu_attrs.update({'cpu_family': value})
continue
match = re_cpu_model.search(line)
if match:
name_value = [s.strip() for s in line.split(':', 1)]
name, value = name_value
icpu_attrs.update({'cpu_model': value})
continue
match = re_socket.search(line)
if match:
socket_id = int(match.group(1))
if socket_id not in sockets:
sockets.append(socket_id)
attrs = {
'numa_node': socket_id,
'capabilities': {},
}
inumas.append(attrs)
continue
match = re_core.search(line)
if match:
core_id = int(match.group(1))
if socket_id not in thread_cnt:
thread_cnt[socket_id] = {}
if core_id not in thread_cnt[socket_id]:
thread_cnt[socket_id][core_id] = 0
else:
thread_cnt[socket_id][core_id] += 1
thread_id = thread_cnt[socket_id][core_id]
if socket_id not in self.topology:
self.topology[socket_id] = {}
if core_id not in self.topology[socket_id]:
self.topology[socket_id][core_id] = {}
self.topology[socket_id][core_id][thread_id] = cpu
attrs = {
'cpu': cpu,
'numa_node': socket_id,
'core': core_id,
'thread': thread_id,
'capabilities': {},
}
icpu_attrs.update(attrs)
icpus.append(icpu_attrs)
icpu_attrs = {}
continue
self.num_nodes = len(self.topology.keys())
# In case the topology was not detected, hard-code the structures
if self.num_nodes == 0:
n_sockets, n_cores, n_threads = (1, int(self.num_cpus), 1)
self.topology = {}
for socket_id in range(n_sockets):
self.topology[socket_id] = {}
if socket_id not in sockets:
sockets.append(socket_id)
attrs = {
'numa_node': socket_id,
'capabilities': {},
}
inumas.append(attrs)
for core_id in range(n_cores):
self.topology[socket_id][core_id] = {}
for thread_id in range(n_threads):
self.topology[socket_id][core_id][thread_id] = 0
attrs = {
'cpu': cpu,
'numa_node': socket_id,
'core': core_id,
'thread': thread_id,
'capabilities': {},
}
icpus.append(attrs)
# Define Thread-Socket-Core order for logical cpu enumeration
cpu = 0
for thread_id in range(n_threads):
for core_id in range(n_cores):
for socket_id in range(n_sockets):
if socket_id not in sockets:
sockets.append(socket_id)
attrs = {
'numa_node': socket_id,
'capabilities': {},
}
inumas.append(attrs)
self.topology[socket_id][core_id][thread_id] = cpu
attrs = {
'cpu': cpu,
'numa_node': socket_id,
'core': core_id,
'thread': thread_id,
'capabilities': {},
}
icpus.append(attrs)
cpu += 1
self.num_nodes = len(self.topology.keys())
LOG.debug("inumas= %s, cpus = %s" % (inumas, icpus))
return inumas, icpus
def _get_immediate_subdirs(self, dir):
return [name for name in listdir(dir)
if os.path.isdir(join(dir, name))]
def _inode_get_memory_hugepages(self):
"""Collect hugepage info, including vswitch, and vm.
Collect platform reserved if config.
:param self
:returns list of memory nodes and attributes
"""
imemory = []
initial_compute_config_completed = \
os.path.exists(tsc.INITIAL_COMPUTE_CONFIG_COMPLETE)
# check whether this is the initial report, i.e. before the huge
# pages are allocated
initial_report = not initial_compute_config_completed
# do not send a report if the initial compute config is completed
# but the compute config has not finished, i.e. during a subsequent
# reboot before the manifest allocates the huge pages
compute_config_completed = \
os.path.exists(tsc.VOLATILE_COMPUTE_CONFIG_COMPLETE)
if (initial_compute_config_completed and
not compute_config_completed):
return imemory
for node in range(self.num_nodes):
attr = {}
total_hp_mb = 0 # Total memory (MB) currently configured in HPs
free_hp_mb = 0
# Check vswitch and libvirt memory
# Loop through configured hugepage sizes of this node and record
# total number and number free
hugepages = "/sys/devices/system/node/node%d/hugepages" % node
try:
subdirs = self._get_immediate_subdirs(hugepages)
for subdir in subdirs:
hp_attr = {}
sizesplit = subdir.split('-')
if sizesplit[1].startswith("1048576kB"):
size = SIZE_1G_MB
else:
size = SIZE_2M_MB
nr_hugepages = 0
free_hugepages = 0
mydir = hugepages + '/' + subdir
files = [f for f in listdir(mydir)
if isfile(join(mydir, f))]
if files:
for file in files:
with open(mydir + '/' + file, 'r') as f:
if file.startswith("nr_hugepages"):
nr_hugepages = int(f.readline())
if file.startswith("free_hugepages"):
free_hugepages = int(f.readline())
total_hp_mb = total_hp_mb + int(nr_hugepages * size)
free_hp_mb = free_hp_mb + int(free_hugepages * size)
# Libvirt hugepages can be 1G and 2M
if size == SIZE_1G_MB:
vswitch_hugepages_nr = VSWITCH_MEMORY_MB / size
hp_attr = {
'vswitch_hugepages_size_mib': size,
'vswitch_hugepages_nr': vswitch_hugepages_nr,
'vswitch_hugepages_avail': 0,
'vm_hugepages_nr_1G':
(nr_hugepages - vswitch_hugepages_nr),
'vm_hugepages_avail_1G': free_hugepages,
'vm_hugepages_use_1G': 'True'
}
else:
if len(subdirs) == 1:
# No 1G hugepage support.
vswitch_hugepages_nr = VSWITCH_MEMORY_MB / size
hp_attr = {
'vswitch_hugepages_size_mib': size,
'vswitch_hugepages_nr': vswitch_hugepages_nr,
'vswitch_hugepages_avail': 0,
}
hp_attr.update({'vm_hugepages_use_1G': 'False'})
else:
# vswitch will use 1G hugepages
vswitch_hugepages_nr = 0
hp_attr.update({
'vm_hugepages_avail_2M': free_hugepages,
'vm_hugepages_nr_2M':
(nr_hugepages - vswitch_hugepages_nr)
})
attr.update(hp_attr)
except IOError:
# silently ignore IO errors (e.g. file missing)
pass
# Get the free and total memory from meminfo for this node
re_node_memtotal = re.compile(r'^Node\s+\d+\s+\MemTotal:\s+(\d+)')
re_node_memfree = re.compile(r'^Node\s+\d+\s+\MemFree:\s+(\d+)')
re_node_filepages = \
re.compile(r'^Node\s+\d+\s+\FilePages:\s+(\d+)')
re_node_sreclaim = \
re.compile(r'^Node\s+\d+\s+\SReclaimable:\s+(\d+)')
re_node_commitlimit = \
re.compile(r'^Node\s+\d+\s+\CommitLimit:\s+(\d+)')
re_node_committed_as = \
re.compile(r'^Node\s+\d+\s+\'Committed_AS:\s+(\d+)')
free_kb = 0 # Free Memory (KB) available
total_kb = 0 # Total Memory (KB)
limit = 0 # only used in strict accounting
committed = 0 # only used in strict accounting
meminfo = "/sys/devices/system/node/node%d/meminfo" % node
try:
with open(meminfo, 'r') as infile:
for line in infile:
match = re_node_memtotal.search(line)
if match:
total_kb += int(match.group(1))
continue
match = re_node_memfree.search(line)
if match:
free_kb += int(match.group(1))
continue
match = re_node_filepages.search(line)
if match:
free_kb += int(match.group(1))
continue
match = re_node_sreclaim.search(line)
if match:
free_kb += int(match.group(1))
continue
match = re_node_commitlimit.search(line)
if match:
limit = int(match.group(1))
continue
match = re_node_committed_as.search(line)
if match:
committed = int(match.group(1))
continue
if self._is_strict():
free_kb = limit - committed
except IOError:
# silently ignore IO errors (e.g. file missing)
pass
# Calculate PSS
pss_mb = 0
if node == 0:
cmd = 'cat /proc/*/smaps 2>/dev/null | awk \'/^Pss:/ ' \
'{a += $2;} END {printf "%d\\n", a/1024.0;}\''
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
shell=True)
result = proc.stdout.read().strip()
pss_mb = int(result)
except subprocess.CalledProcessError as e:
LOG.error("Cannot calculate PSS (%s) (%d)", cmd,
e.returncode)
except OSError as e:
LOG.error("Failed to execute (%s) OS error (%d)", cmd,
e.errno)
# convert the MB tallies to KB to match compute_huge accounting
node_total_kb = total_hp_mb * SIZE_KB + free_kb + pss_mb * SIZE_KB
# Read base memory from compute_reserved.conf
base_mem_mb = 0
with open('/etc/nova/compute_reserved.conf', 'r') as infile:
for line in infile:
if "COMPUTE_BASE_RESERVED" in line:
val = line.split("=")
base_reserves = val[1].strip('\n')[1:-1]
for reserve in base_reserves.split():
reserve = reserve.split(":")
if reserve[0].strip('"') == "node%d" % node:
base_mem_mb = int(reserve[1].strip('MB'))
# On small systems, clip memory overhead to more reasonable minimal
# settings
if (total_kb / SIZE_KB - base_mem_mb) < 1000:
if node == 0:
base_mem_mb = COMPUTE_MIN_MB
if tsc.nodetype == 'controller':
base_mem_mb += CONTROLLER_MIN_MB
else:
base_mem_mb = COMPUTE_MIN_NON_0_MB
eng_kb = node_total_kb - base_mem_mb * SIZE_KB
vswitch_mem_kb = (attr.get('vswitch_hugepages_size_mib', 0) *
attr.get('vswitch_hugepages_nr', 0) * SIZE_KB)
vm_kb = (eng_kb - vswitch_mem_kb)
max_vm_pages_2mb = vm_kb / SIZE_2M_KB
max_vm_pages_1gb = vm_kb / SIZE_1G_KB
attr.update({
'vm_hugepages_possible_2M': max_vm_pages_2mb,
'vm_hugepages_possible_1G': max_vm_pages_1gb,
})
# calculate 90% 2M pages if it is initial report and the huge
# pages have not been allocated
if initial_report:
max_vm_pages_2mb = max_vm_pages_2mb * 0.9
total_hp_mb += int(max_vm_pages_2mb * (SIZE_2M_KB / SIZE_KB))
free_hp_mb = total_hp_mb
attr.update({
'vm_hugepages_nr_2M': max_vm_pages_2mb,
'vm_hugepages_avail_2M': max_vm_pages_2mb,
'vm_hugepages_nr_1G': 0
})
attr.update({
'numa_node': node,
'memtotal_mib': total_hp_mb,
'memavail_mib': free_hp_mb,
'hugepages_configured': 'True',
'node_memtotal_mib': node_total_kb / 1024,
})
imemory.append(attr)
return imemory
def _inode_get_memory_nonhugepages(self):
'''Collect non-hugepage memory info, including platform reserved if configured.
:param self
:returns list of memory nodes and attributes
'''
imemory = []
self.total_memory_mb = 0
re_node_memtotal = re.compile(r'^Node\s+\d+\s+MemTotal:\s+(\d+)')
re_node_memfree = re.compile(r'^Node\s+\d+\s+MemFree:\s+(\d+)')
re_node_filepages = re.compile(r'^Node\s+\d+\s+FilePages:\s+(\d+)')
re_node_sreclaim = re.compile(r'^Node\s+\d+\s+SReclaimable:\s+(\d+)')
for node in range(self.num_nodes):
attr = {}
total_mb = 0
free_mb = 0
meminfo = "/sys/devices/system/node/node%d/meminfo" % node
try:
with open(meminfo, 'r') as infile:
for line in infile:
match = re_node_memtotal.search(line)
if match:
total_mb += int(match.group(1))
continue
match = re_node_memfree.search(line)
if match:
free_mb += int(match.group(1))
continue
match = re_node_filepages.search(line)
if match:
free_mb += int(match.group(1))
continue
match = re_node_sreclaim.search(line)
if match:
free_mb += int(match.group(1))
continue
except IOError:
# silently ignore IO errors (e.g. file missing)
pass
total_mb /= 1024
free_mb /= 1024
self.total_memory_nodes_mb.append(total_mb)
attr = {
'numa_node': node,
'memtotal_mib': total_mb,
'memavail_mib': free_mb,
'hugepages_configured': 'False',
}
imemory.append(attr)
return imemory
def inodes_get_imemory(self):
'''Enumerate logical memory topology based on:
if CONF.compute_hugepages:
self._inode_get_memory_hugepages()
else:
self._inode_get_memory_nonhugepages()
:param self
:returns list of memory nodes and attributes
'''
imemory = []
# if CONF.compute_hugepages:
if os.path.isfile("/etc/nova/compute_reserved.conf"):
imemory = self._inode_get_memory_hugepages()
else:
imemory = self._inode_get_memory_nonhugepages()
LOG.debug("imemory= %s" % imemory)
return imemory
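For reference, the hugepage accounting above reduces to walking /sys/kernel/mm/hugepages and scaling page counts by the page size parsed from each directory name. A minimal standalone sketch of that arithmetic (standard Linux sysfs layout assumed; everything else is illustrative):

import os

SIZE_KB = 1024

def hugepage_totals(root='/sys/kernel/mm/hugepages'):
    # Sum (total_mb, free_mb) across all configured hugepage sizes.
    total_mb = free_mb = 0
    for subdir in os.listdir(root):           # e.g. 'hugepages-2048kB'
        size_mb = int(subdir.split('-')[1].rstrip('kB')) // SIZE_KB
        with open(os.path.join(root, subdir, 'nr_hugepages')) as f:
            nr_pages = int(f.readline())
        with open(os.path.join(root, subdir, 'free_hugepages')) as f:
            free_pages = int(f.readline())
        total_mb += nr_pages * size_mb
        free_mb += free_pages * size_mb
    return total_mb, free_mb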

View File

@ -1,621 +0,0 @@
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
""" inventory pci Utilities and helper functions."""
import glob
import os
import shlex
import subprocess
from inventory.common import k_pci
from inventory.common import utils
from oslo_log import log
LOG = log.getLogger(__name__)
# Look for PCI class 0x0200 and 0x0280 so that we get generic ethernet
# controllers and those that may report as "other" network controllers.
ETHERNET_PCI_CLASSES = ['ethernet controller', 'network controller']
# Look for other devices we may want to inventory.
KNOWN_PCI_DEVICES = [
{"vendor_id": k_pci.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
"device_id": k_pci.NOVA_PCI_ALIAS_QAT_DH895XCC_PF_DEVICE,
"class_id": k_pci.NOVA_PCI_ALIAS_QAT_CLASS},
{"vendor_id": k_pci.NOVA_PCI_ALIAS_QAT_PF_VENDOR,
"device_id": k_pci.NOVA_PCI_ALIAS_QAT_C62X_PF_DEVICE,
"class_id": k_pci.NOVA_PCI_ALIAS_QAT_CLASS},
{"class_id": k_pci.NOVA_PCI_ALIAS_GPU_CLASS}]
# PCI-SIG 0x06 bridge devices to not inventory.
IGNORE_BRIDGE_PCI_CLASSES = ['bridge', 'isa bridge', 'host bridge']
# PCI-SIG 0x08 generic peripheral devices to not inventory.
IGNORE_PERIPHERAL_PCI_CLASSES = ['system peripheral', 'pic', 'dma controller',
'iommu', 'rtc']
# PCI-SIG 0x11 signal processing devices to not inventory.
IGNORE_SIGNAL_PROCESSING_PCI_CLASSES = ['performance counters']
# Blacklist of devices we do not want to inventory, because they are dealt
# with separately (i.e. Ethernet devices), or do not make sense to expose
# to a guest.
IGNORE_PCI_CLASSES = ETHERNET_PCI_CLASSES + IGNORE_BRIDGE_PCI_CLASSES + \
IGNORE_PERIPHERAL_PCI_CLASSES + IGNORE_SIGNAL_PROCESSING_PCI_CLASSES
pciaddr = 0
pclass = 1
pvendor = 2
pdevice = 3
prevision = 4
psvendor = 5
psdevice = 6
VALID_PORT_SPEED = ['10', '100', '1000', '10000', '40000', '100000']
# Network device flags (from include/uapi/linux/if.h)
IFF_UP = 1 << 0
IFF_BROADCAST = 1 << 1
IFF_DEBUG = 1 << 2
IFF_LOOPBACK = 1 << 3
IFF_POINTOPOINT = 1 << 4
IFF_NOTRAILERS = 1 << 5
IFF_RUNNING = 1 << 6
IFF_NOARP = 1 << 7
IFF_PROMISC = 1 << 8
IFF_ALLMULTI = 1 << 9
IFF_MASTER = 1 << 10
IFF_SLAVE = 1 << 11
IFF_MULTICAST = 1 << 12
IFF_PORTSEL = 1 << 13
IFF_AUTOMEDIA = 1 << 14
IFF_DYNAMIC = 1 << 15
class PCI(object):
'''Class to encapsulate PCI data for System Inventory'''
def __init__(self, pciaddr, pclass, pvendor, pdevice, prevision,
psvendor, psdevice):
'''Construct a pci object with the given values.'''
self.pciaddr = pciaddr
self.pclass = pclass
self.pvendor = pvendor
self.pdevice = pdevice
self.prevision = prevision
self.psvendor = psvendor
self.psdevice = psdevice
def __eq__(self, rhs):
return (self.pvendor == rhs.pvendor and
self.pdevice == rhs.pdevice)
def __ne__(self, rhs):
return (self.pvendor != rhs.pvendor or
self.pdevice != rhs.pdevice)
def __str__(self):
return "%s [%s] [%s]" % (self.pciaddr, self.pvendor, self.pdevice)
def __repr__(self):
return "<PCI '%s'>" % str(self)
class Port(object):
'''Class to encapsulate PCI data for System Inventory'''
def __init__(self, ipci, **kwargs):
'''Construct a port object with the given values.'''
self.ipci = ipci
self.name = kwargs.get('name')
self.mac = kwargs.get('mac')
self.mtu = kwargs.get('mtu')
self.speed = kwargs.get('speed')
self.link_mode = kwargs.get('link_mode')
self.numa_node = kwargs.get('numa_node')
self.dev_id = kwargs.get('dev_id')
self.sriov_totalvfs = kwargs.get('sriov_totalvfs')
self.sriov_numvfs = kwargs.get('sriov_numvfs')
self.sriov_vfs_pci_address = kwargs.get('sriov_vfs_pci_address')
self.driver = kwargs.get('driver')
self.dpdksupport = kwargs.get('dpdksupport')
def __str__(self):
return "%s %s: [%s] [%s] [%s], [%s], [%s], [%s], [%s]" % (
self.ipci, self.name, self.mac, self.mtu, self.speed,
self.link_mode, self.numa_node, self.dev_id, self.dpdksupport)
def __repr__(self):
return "<Port '%s'>" % str(self)
class PCIDevice(object):
'''Class to encapsulate extended PCI data for System Inventory'''
def __init__(self, pci, **kwargs):
'''Construct a PciDevice object with the given values.'''
self.pci = pci
self.name = kwargs.get('name')
self.pclass_id = kwargs.get('pclass_id')
self.pvendor_id = kwargs.get('pvendor_id')
self.pdevice_id = kwargs.get('pdevice_id')
self.numa_node = kwargs.get('numa_node')
self.sriov_totalvfs = kwargs.get('sriov_totalvfs')
self.sriov_numvfs = kwargs.get('sriov_numvfs')
self.sriov_vfs_pci_address = kwargs.get('sriov_vfs_pci_address')
self.driver = kwargs.get('driver')
self.enabled = kwargs.get('enabled')
self.extra_info = kwargs.get('extra_info')
def __str__(self):
return "%s %s: [%s]" % (
self.pci, self.numa_node, self.driver)
def __repr__(self):
return "<PCIDevice '%s'>" % str(self)
class PCIOperator(object):
'''Class to encapsulate PCI operations for System Inventory'''
def format_lspci_output(self, device):
# hack for now
if device[prevision].strip() == device[pvendor].strip():
# no revision info
device.append(device[psvendor])
device[psvendor] = device[prevision]
device[prevision] = "0"
elif len(device) <= 6: # one less entry, no revision
LOG.debug("update psdevice length=%s" % len(device))
device.append(device[psvendor])
return device
def get_pci_numa_node(self, pciaddr):
fnuma_node = '/sys/bus/pci/devices/' + pciaddr + '/numa_node'
try:
with open(fnuma_node, 'r') as f:
numa_node = f.readline().strip()
LOG.debug("ATTR numa_node: %s " % numa_node)
except Exception:
LOG.debug("ATTR numa_node unknown for: %s " % pciaddr)
numa_node = None
return numa_node
def get_pci_sriov_totalvfs(self, pciaddr):
fsriov_totalvfs = '/sys/bus/pci/devices/' + pciaddr + '/sriov_totalvfs'
try:
with open(fsriov_totalvfs, 'r') as f:
sriov_totalvfs = f.readline()
LOG.debug("ATTR sriov_totalvfs: %s " % sriov_totalvfs)
except Exception:
LOG.debug("ATTR sriov_totalvfs unknown for: %s " % pciaddr)
sriov_totalvfs = None
return sriov_totalvfs
def get_pci_sriov_numvfs(self, pciaddr):
fsriov_numvfs = '/sys/bus/pci/devices/' + pciaddr + '/sriov_numvfs'
try:
with open(fsriov_numvfs, 'r') as f:
sriov_numvfs = f.readline()
LOG.debug("ATTR sriov_numvfs: %s " % sriov_numvfs)
except Exception:
LOG.debug("ATTR sriov_numvfs unknown for: %s " % pciaddr)
sriov_numvfs = 0
LOG.debug("sriov_numvfs: %s" % sriov_numvfs)
return sriov_numvfs
def get_pci_sriov_vfs_pci_address(self, pciaddr, sriov_numvfs):
dirpcidev = '/sys/bus/pci/devices/' + pciaddr
sriov_vfs_pci_address = []
i = 0
while i < int(sriov_numvfs):
lvf = dirpcidev + '/virtfn' + str(i)
try:
sriov_vfs_pci_address.append(
os.path.basename(os.readlink(lvf)))
except Exception:
LOG.warning("virtfn link %s non-existent (sriov_numvfs=%s)"
% (lvf, sriov_numvfs))
i += 1
LOG.debug("sriov_vfs_pci_address: %s" % sriov_vfs_pci_address)
return sriov_vfs_pci_address
def get_pci_driver_name(self, pciaddr):
ddriver = '/sys/bus/pci/devices/' + pciaddr + '/driver/module/drivers'
try:
drivers = [
os.path.basename(os.readlink(ddriver + '/' + d))
for d in os.listdir(ddriver)]
driver = str(','.join(str(d) for d in drivers))
except Exception:
LOG.debug("ATTR driver unknown for: %s " % pciaddr)
driver = None
LOG.debug("driver: %s" % driver)
return driver
def pci_devices_get(self):
p = subprocess.Popen(["lspci", "-Dm"], stdout=subprocess.PIPE)
pci_devices = []
for line in p.stdout:
pci_device = shlex.split(line.strip())
pci_device = self.format_lspci_output(pci_device)
if any(x in pci_device[pclass].lower() for x in
IGNORE_PCI_CLASSES):
continue
dirpcidev = '/sys/bus/pci/devices/'
physfn = dirpcidev + pci_device[pciaddr] + '/physfn'
if not os.path.isdir(physfn):
# Do not report VFs
pci_devices.append(PCI(pci_device[pciaddr],
pci_device[pclass],
pci_device[pvendor],
pci_device[pdevice],
pci_device[prevision],
pci_device[psvendor],
pci_device[psdevice]))
p.wait()
return pci_devices
def inics_get(self):
p = subprocess.Popen(["lspci", "-Dm"], stdout=subprocess.PIPE)
pci_inics = []
for line in p.stdout:
inic = shlex.split(line.strip())
if any(x in inic[pclass].lower() for x in ETHERNET_PCI_CLASSES):
# hack for now
if inic[prevision].strip() == inic[pvendor].strip():
# no revision info
inic.append(inic[psvendor])
inic[psvendor] = inic[prevision]
inic[prevision] = "0"
elif len(inic) <= 6: # one less entry, no revision
LOG.debug("update psdevice length=%s" % len(inic))
inic.append(inic[psvendor])
dirpcidev = '/sys/bus/pci/devices/'
physfn = dirpcidev + inic[pciaddr] + '/physfn'
if os.path.isdir(physfn):
# Do not report VFs
continue
pci_inics.append(PCI(inic[pciaddr], inic[pclass],
inic[pvendor], inic[pdevice],
inic[prevision], inic[psvendor],
inic[psdevice]))
p.wait()
return pci_inics
def pci_get_enabled_attr(self, class_id, vendor_id, product_id):
for known_device in KNOWN_PCI_DEVICES:
if (class_id == known_device.get("class_id", None) or
(vendor_id == known_device.get("vendor_id", None) and
product_id == known_device.get("device_id", None))):
return True
return False
def pci_get_device_attrs(self, pciaddr):
"""For this pciaddr, build a list of device attributes """
pci_attrs_array = []
dirpcidev = '/sys/bus/pci/devices/'
pciaddrs = os.listdir(dirpcidev)
for a in pciaddrs:
if ((a == pciaddr) or (a == ("0000:" + pciaddr))):
LOG.debug("Found device pci bus: %s " % a)
dirpcideva = dirpcidev + a
numa_node = self.get_pci_numa_node(a)
sriov_totalvfs = self.get_pci_sriov_totalvfs(a)
sriov_numvfs = self.get_pci_sriov_numvfs(a)
sriov_vfs_pci_address = \
self.get_pci_sriov_vfs_pci_address(a, sriov_numvfs)
driver = self.get_pci_driver_name(a)
fclass = dirpcideva + '/class'
fvendor = dirpcideva + '/vendor'
fdevice = dirpcideva + '/device'
try:
with open(fvendor, 'r') as f:
pvendor_id = f.readline().strip('0x').strip()
except Exception:
LOG.debug("ATTR vendor unknown for: %s " % a)
pvendor_id = None
try:
with open(fdevice, 'r') as f:
pdevice_id = f.readline().replace('0x', '').strip()
except Exception:
LOG.debug("ATTR device unknown for: %s " % a)
pdevice_id = None
try:
with open(fclass, 'r') as f:
pclass_id = f.readline().replace('0x', '').strip()
except Exception:
LOG.debug("ATTR class unknown for: %s " % a)
pclass_id = None
name = "pci_" + a.replace(':', '_').replace('.', '_')
attrs = {
"name": name,
"pci_address": a,
"pclass_id": pclass_id,
"pvendor_id": pvendor_id,
"pdevice_id": pdevice_id,
"numa_node": numa_node,
"sriov_totalvfs": sriov_totalvfs,
"sriov_numvfs": sriov_numvfs,
"sriov_vfs_pci_address":
','.join(str(x) for x in sriov_vfs_pci_address),
"driver": driver,
"enabled": self.pci_get_enabled_attr(
pclass_id, pvendor_id, pdevice_id),
}
pci_attrs_array.append(attrs)
return pci_attrs_array
def get_pci_net_directory(self, pciaddr):
device_directory = '/sys/bus/pci/devices/' + pciaddr
# Look for the standard device 'net' directory
net_directory = device_directory + '/net/'
if os.path.exists(net_directory):
return net_directory
# Otherwise check whether this is a virtio based device
net_pattern = device_directory + '/virtio*/net/'
results = glob.glob(net_pattern)
if not results:
return None
if len(results) > 1:
LOG.warning("PCI device {} has multiple virtio "
"sub-directories".format(pciaddr))
return results[0]
def _read_flags(self, fflags):
try:
with open(fflags, 'r') as f:
hex_str = f.readline().rstrip()
flags = int(hex_str, 16)
except Exception:
flags = None
return flags
def _get_netdev_flags(self, dirpcinet, pci):
fflags = dirpcinet + pci + '/flags'
return self._read_flags(fflags)
def pci_get_net_flags(self, name):
fflags = '/sys/class/net/' + name + '/flags'
return self._read_flags(fflags)
def pci_get_net_names(self):
'''build a list of network device names.'''
names = []
for name in os.listdir('/sys/class/net/'):
if os.path.isdir('/sys/class/net/' + name):
names.append(name)
return names
def pci_get_net_attrs(self, pciaddr):
"""For this pciaddr, build a list of network attributes per port"""
pci_attrs_array = []
dirpcidev = '/sys/bus/pci/devices/'
pciaddrs = os.listdir(dirpcidev)
for a in pciaddrs:
if ((a == pciaddr) or (a == ("0000:" + pciaddr))):
# Look inside net, expecting to find address, speed, mtu, etc.
# There may be more than 1 net device for this NIC.
LOG.debug("Found NIC pci bus: %s " % a)
dirpcideva = dirpcidev + a
numa_node = self.get_pci_numa_node(a)
sriov_totalvfs = self.get_pci_sriov_totalvfs(a)
sriov_numvfs = self.get_pci_sriov_numvfs(a)
sriov_vfs_pci_address = \
self.get_pci_sriov_vfs_pci_address(a, sriov_numvfs)
driver = self.get_pci_driver_name(a)
# Determine DPDK support
dpdksupport = False
fvendor = dirpcideva + '/vendor'
fdevice = dirpcideva + '/device'
try:
with open(fvendor, 'r') as f:
vendor = f.readline().strip()
except Exception:
LOG.debug("ATTR vendor unknown for: %s " % a)
vendor = None
try:
with open(fdevice, 'r') as f:
device = f.readline().strip()
except Exception:
LOG.debug("ATTR device unknown for: %s " % a)
device = None
try:
with open(os.devnull, "w") as fnull:
subprocess.check_call(
["query_pci_id", "-v " + str(vendor),
"-d " + str(device)],
stdout=fnull, stderr=fnull)
dpdksupport = True
LOG.debug("DPDK does support NIC "
"(vendor: %s device: %s)",
vendor, device)
except subprocess.CalledProcessError as e:
dpdksupport = False
if e.returncode == 1:
# NIC is not supported
LOG.debug("DPDK does not support NIC "
"(vendor: %s device: %s)",
vendor, device)
else:
# command failed; default DPDK support to False
LOG.info("Could not determine DPDK support for "
"NIC (vendor %s device: %s), defaulting "
"to False", vendor, device)
# determine the net directory for this device
dirpcinet = self.get_pci_net_directory(a)
if dirpcinet is None:
LOG.warning("no /net for PCI device: %s " % a)
continue # go to next PCI device
# determine which netdevs are associated to this device
netdevs = os.listdir(dirpcinet)
for n in netdevs:
mac = None
fmac = dirpcinet + n + '/' + "address"
fmaster = dirpcinet + n + '/' + "master"
# if a port is a member of a bond the port MAC address
# must be retrieved from /proc/net/bonding/<bond_name>
if os.path.exists(fmaster):
dirmaster = os.path.realpath(fmaster)
master_name = os.path.basename(dirmaster)
procnetbonding = '/proc/net/bonding/' + master_name
found_interface = False
try:
with open(procnetbonding, 'r') as f:
for line in f:
if 'Slave Interface: ' + n in line:
found_interface = True
if (found_interface and
'Permanent HW addr:' in line):
mac = line.split(': ')[1].rstrip()
mac = utils.validate_and_normalize_mac(
mac)
break
if not mac:
LOG.info("ATTR mac could not be determined"
" for slave interface %s" % n)
except Exception:
LOG.info("ATTR mac could not be determined, "
"could not open %s" % procnetbonding)
else:
try:
with open(fmac, 'r') as f:
mac = f.readline().rstrip()
mac = utils.validate_and_normalize_mac(mac)
except Exception:
LOG.info("ATTR mac unknown for: %s " % n)
fmtu = dirpcinet + n + '/' + "mtu"
try:
with open(fmtu, 'r') as f:
mtu = f.readline().rstrip()
except Exception:
LOG.debug("ATTR mtu unknown for: %s " % n)
mtu = None
# Check the administrative state before reading the speed
flags = self._get_netdev_flags(dirpcinet, n)
# If administrative state is down, bring it up momentarily
if not(flags & IFF_UP):
LOG.warning("Enabling device %s to query link speed" %
n)
cmd = 'ip link set dev %s up' % n
subprocess.Popen(cmd, stdout=subprocess.PIPE,
shell=True)
# Read the speed
fspeed = dirpcinet + n + '/' + "speed"
try:
with open(fspeed, 'r') as f:
speed = f.readline().rstrip()
if speed not in VALID_PORT_SPEED:
LOG.error("Invalid port speed = %s for %s " %
(speed, n))
speed = None
except Exception:
LOG.warning("ATTR speed unknown for: %s "
"(flags: %s)" % (n, hex(flags)))
speed = None
# If the administrative state was down, take it back down
if not(flags & IFF_UP):
LOG.warning("Disabling device %s after querying "
"link speed" % n)
cmd = 'ip link set dev %s down' % n
subprocess.Popen(cmd, stdout=subprocess.PIPE,
shell=True)
flink_mode = dirpcinet + n + '/' + "link_mode"
try:
with open(flink_mode, 'r') as f:
link_mode = f.readline().rstrip()
except Exception:
LOG.debug("ATTR link_mode unknown for: %s " % n)
link_mode = None
fdevport = dirpcinet + n + '/' + "dev_port"
try:
with open(fdevport, 'r') as f:
dev_port = int(f.readline().rstrip(), 0)
except Exception:
LOG.debug("ATTR dev_port unknown for: %s " % n)
# Kernel versions older than 3.15 used dev_id
# (incorrectly) to identify the network devices,
# therefore support the fallback if dev_port is not
# available
try:
fdevid = dirpcinet + n + '/' + "dev_id"
with open(fdevid, 'r') as f:
dev_port = int(f.readline().rstrip(), 0)
except Exception:
LOG.debug("ATTR dev_id unknown for: %s " % n)
dev_port = 0
attrs = {
"name": n,
"numa_node": numa_node,
"sriov_totalvfs": sriov_totalvfs,
"sriov_numvfs": sriov_numvfs,
"sriov_vfs_pci_address":
','.join(str(x) for x in sriov_vfs_pci_address),
"driver": driver,
"pci_address": a,
"mac": mac,
"mtu": mtu,
"speed": speed,
"link_mode": link_mode,
"dev_id": dev_port,
"dpdksupport": dpdksupport
}
pci_attrs_array.append(attrs)
return pci_attrs_array
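Every per-device attribute read above follows the same pattern: open one small file under /sys/bus/pci/devices/<addr>/ and take its first line. A condensed sketch of that pattern (the example address is a placeholder):

import os

def read_sysfs_attr(pciaddr, attr, default=None):
    # e.g. read_sysfs_attr('0000:02:00.0', 'numa_node') -> '0'
    path = os.path.join('/sys/bus/pci/devices', pciaddr, attr)
    try:
        with open(path) as f:
            return f.readline().strip()
    except IOError:
        return default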

View File

@ -1,161 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Client side of the agent RPC API.
"""
from oslo_log import log
import oslo_messaging as messaging
from inventory.common import rpc
from inventory.objects import base as objects_base
LOG = log.getLogger(__name__)
MANAGER_TOPIC = 'inventory.agent_manager'
class AgentAPI(object):
"""Client side of the agent RPC API.
API version history:
1.0 - Initial version.
"""
RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(AgentAPI, self).__init__()
self.topic = topic
if self.topic is None:
self.topic = MANAGER_TOPIC
target = messaging.Target(topic=self.topic,
version='1.0')
serializer = objects_base.InventoryObjectSerializer()
version_cap = self.RPC_API_VERSION
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def host_inventory(self, context, values, topic=None):
"""Synchronously, have a agent collect inventory for this host.
Collect ihost inventory and report to conductor.
:param context: request context.
:param values: dictionary with initial values for new host object
:returns: created ihost object, including all fields.
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.0')
return cctxt.call(context,
'host_inventory',
values=values)
def configure_ttys_dcd(self, context, uuid, ttys_dcd, topic=None):
"""Asynchronously, have the agent configure the getty on the serial
console.
:param context: request context.
:param uuid: the host uuid
:param ttys_dcd: the flag to enable/disable dcd
:returns: none ... uses asynchronous cast().
"""
# fanout / broadcast message to all inventory agents
LOG.debug("AgentApi.configure_ttys_dcd: fanout_cast: sending "
"dcd update to agent: (%s) (%s" % (uuid, ttys_dcd))
cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
fanout=True)
retval = cctxt.cast(context,
'configure_ttys_dcd',
uuid=uuid,
ttys_dcd=ttys_dcd)
return retval
def execute_command(self, context, host_uuid, command, topic=None):
"""Asynchronously, have the agent execute a command
:param context: request context.
:param host_uuid: the host uuid
:param command: the command to execute
:returns: none ... uses asynchronous cast().
"""
# fanout / broadcast message to all inventory agents
LOG.debug("AgentApi.update_cpu_config: fanout_cast: sending "
"host uuid: (%s) " % host_uuid)
cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
fanout=True)
retval = cctxt.cast(context,
'execute_command',
host_uuid=host_uuid,
command=command)
return retval
def agent_update(self, context, host_uuid, force_updates,
cinder_device=None,
topic=None):
"""
Asynchronously, have the agent update partitions, ipv and ilvg state
:param context: request context
:param host_uuid: the host uuid
:param force_updates: list of inventory objects to update
:param cinder_device: device by path of cinder volumes
:return: none ... uses asynchronous cast().
"""
# fanout / broadcast message to all inventory agents
LOG.info("AgentApi.agent_update: fanout_cast: sending "
"update request to agent for: (%s)" %
(', '.join(force_updates)))
cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
fanout=True)
retval = cctxt.cast(context,
'agent_audit',
host_uuid=host_uuid,
force_updates=force_updates,
cinder_device=cinder_device)
return retval
def disk_format_gpt(self, context, host_uuid, idisk_dict,
is_cinder_device, topic=None):
"""Asynchronously, GPT format a disk.
:param context: an admin context
:param host_uuid: ihost uuid unique id
:param idisk_dict: values for disk object
:param is_cinder_device: bool value tells if the idisk is for cinder
:returns: none ... uses asynchronous cast().
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.0',
fanout=True)
return cctxt.cast(context,
'disk_format_gpt',
host_uuid=host_uuid,
idisk_dict=idisk_dict,
is_cinder_device=is_cinder_device)
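For context, a hypothetical caller of this client would look as follows. It assumes an oslo.messaging transport is already configured and that 'context' is an admin RequestContext supplied by the service framework (neither is shown here); the UUID is a placeholder:

from inventory.agent import rpcapi   # removed by this commit

agent_api = rpcapi.AgentAPI()
# Synchronous call(): blocks until the agent reports host inventory.
ihost = agent_api.host_inventory(context, values={'hostname': 'compute-0'})
# Asynchronous fanout cast(): returns immediately; all agents receive it.
HOST_UUID = '00000000-0000-0000-0000-000000000000'   # placeholder
agent_api.configure_ttys_dcd(context, uuid=HOST_UUID, ttys_dcd=True)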

View File

@ -1,90 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_config import cfg
from oslo_log import log
from oslo_service import service
from oslo_service import wsgi
import pecan
from inventory.api import config
from inventory.api import middleware
from inventory.common.i18n import _
from inventory.common import policy
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_launcher = None
_launcher_pxe = None
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(config=None):
policy.init_enforcer()
if not config:
config = get_pecan_config()
pecan.configuration.set_config(dict(config), overwrite=True)
app_conf = dict(config.app)
app = pecan.make_app(
app_conf.pop('root'),
debug=CONF.debug,
logging=getattr(config, 'logging', {}),
force_canonical=getattr(config.app, 'force_canonical', True),
guess_content_type_from_ext=False,
wrap_app=middleware.ParsableErrorMiddleware,
**app_conf
)
return app
def load_paste_app(app_name=None):
"""Loads a WSGI app from a paste config file."""
if app_name is None:
app_name = cfg.CONF.prog
loader = wsgi.Loader(cfg.CONF)
app = loader.load_app(app_name)
return app
def app_factory(global_config, **local_conf):
return setup_app()
def serve(api_service, conf, workers=1):
global _launcher
if _launcher:
raise RuntimeError(_('serve() _launcher can only be called once'))
_launcher = service.launch(conf, api_service, workers=workers)
def serve_pxe(api_service, conf, workers=1):
global _launcher_pxe
if _launcher_pxe:
raise RuntimeError(_('serve_pxe() _launcher_pxe can only be called once'))
_launcher_pxe = service.launch(conf, api_service, workers=workers)
def wait():
_launcher.wait()
def wait_pxe():
_launcher_pxe.wait()
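A sketch of how a console script could drive these helpers; the service name, port, and worker count are illustrative assumptions (the real wiring lived in the removed packaging files):

from oslo_config import cfg
from oslo_service import wsgi as oslo_wsgi

from inventory.api import app   # removed by this commit

# Load the paste-deployed WSGI app and run it under oslo.service.
server = oslo_wsgi.Server(cfg.CONF, 'inventory-api',
                          app.load_paste_app('inventory-api'),
                          host='0.0.0.0', port=6380)
app.serve(server, cfg.CONF, workers=2)
app.wait()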

View File

@ -1,73 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from inventory.api import hooks
from inventory.common import config
from inventory import objects
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
import pbr.version
import sys
LOG = logging.getLogger(__name__)
sysinv_group = cfg.OptGroup(
'sysinv',
title='Sysinv Options',
help="Configuration options for the platform service")
sysinv_opts = [
cfg.StrOpt('catalog_info',
default='platform:sysinv:internalURL',
help="Service catalog Look up info."),
cfg.StrOpt('os_region_name',
default='RegionOne',
help="Region name of this node. It is used for catalog lookup"),
]
version_info = pbr.version.VersionInfo('inventory')
# Pecan Application Configurations
app = {
'root': 'inventory.api.controllers.root.RootController',
'modules': ['inventory.api'],
'hooks': [
hooks.DBHook(),
hooks.ContextHook(),
hooks.RPCHook(),
hooks.SystemConfigHook(),
],
'acl_public_routes': [
'/',
'/v1',
],
}
def init(args, **kwargs):
cfg.CONF.register_group(sysinv_group)
cfg.CONF.register_opts(sysinv_opts, group=sysinv_group)
ks_loading.register_session_conf_options(cfg.CONF,
sysinv_group.name)
logging.register_options(cfg.CONF)
cfg.CONF(args=args, project='inventory',
version='%%(prog)s %s' % version_info.release_string(),
**kwargs)
objects.register_all()
config.parse_args(args)
def setup_logging():
"""Sets up the logging options for a log with supplied name."""
logging.setup(cfg.CONF, "inventory")
LOG.debug("Logging enabled!")
LOG.debug("%(prog)s version %(version)s",
{'prog': sys.argv[0],
'version': version_info.release_string()})
LOG.debug("command line: %s", " ".join(sys.argv))

View File

@ -1,115 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from inventory.api.controllers import v1
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import link
ID_VERSION = 'v1'
def expose(*args, **kwargs):
"""Ensure that only JSON, and not XML, is supported."""
if 'rest_content_types' not in kwargs:
kwargs['rest_content_types'] = ('json',)
return wsme_pecan.wsexpose(*args, **kwargs)
class Version(base.APIBase):
"""An API version representation.
This class represents an API version, including the minimum and
maximum minor versions that are supported within the major version.
"""
id = wtypes.text
"""The ID of the (major) version, also acts as the release number"""
links = [link.Link]
"""A Link that point to a specific version of the API"""
@classmethod
def convert(cls, vid):
version = Version()
version.id = vid
version.links = [link.Link.make_link('self', pecan.request.host_url,
vid, '', bookmark=True)]
return version
class Root(base.APIBase):
name = wtypes.text
"""The name of the API"""
description = wtypes.text
"""Some information about this API"""
versions = [Version]
"""Links to all the versions available in this API"""
default_version = Version
"""A link to the default version of the API"""
@staticmethod
def convert():
root = Root()
root.name = "Inventory API"
root.description = ("Inventory is an OpenStack project which "
"provides REST API services for "
"system configuration.")
root.default_version = Version.convert(ID_VERSION)
root.versions = [root.default_version]
return root
class RootController(rest.RestController):
_versions = [ID_VERSION]
"""All supported API versions"""
_default_version = ID_VERSION
"""The default API version"""
v1 = v1.Controller()
@expose(Root)
def get(self):
# NOTE: The reason why convert() is being called for every
# request is because we need to get the host url from
# the request object to make the links.
return Root.convert()
@pecan.expose()
def _route(self, args, request=None):
"""Overrides the default routing behavior.
It redirects the request to the default version of the Inventory API
if the version number is not specified in the url.
"""
if args[0] and args[0] not in self._versions:
args = [self._default_version] + args
return super(RootController, self)._route(args, request)
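The redirect in _route() is easy to miss; here is a standalone mirror of its logic, with no pecan machinery:

def prepend_default_version(args, versions=('v1',), default='v1'):
    # Requests that do not name a version are rerouted to the default,
    # so GET /hosts behaves like GET /v1/hosts.
    if args and args[0] and args[0] not in versions:
        return [default] + list(args)
    return list(args)

assert prepend_default_version(['hosts']) == ['v1', 'hosts']
assert prepend_default_version(['v1', 'hosts']) == ['v1', 'hosts']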

View File

@ -1,198 +0,0 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import cpu
from inventory.api.controllers.v1 import ethernet_port
from inventory.api.controllers.v1 import host
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_agent
from inventory.api.controllers.v1 import lldp_neighbour
from inventory.api.controllers.v1 import memory
from inventory.api.controllers.v1 import node
from inventory.api.controllers.v1 import pci_device
from inventory.api.controllers.v1 import port
from inventory.api.controllers.v1 import sensor
from inventory.api.controllers.v1 import sensorgroup
from inventory.api.controllers.v1 import system
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
class MediaType(base.APIBase):
"""A media type representation."""
base = wtypes.text
type = wtypes.text
def __init__(self, base, type):
self.base = base
self.type = type
class V1(base.APIBase):
"""The representation of the version 1 of the API."""
id = wtypes.text
"The ID of the version, also acts as the release number"
media_types = [MediaType]
"An array of supported media types for this version"
links = [link.Link]
"Links that point to a specific URL for this version and documentation"
systems = [link.Link]
"Links to the system resource"
hosts = [link.Link]
"Links to the host resource"
lldp_agents = [link.Link]
"Links to the lldp agents resource"
lldp_neighbours = [link.Link]
"Links to the lldp neighbours resource"
@classmethod
def convert(cls):
v1 = V1()
v1.id = "v1"
v1.links = [link.Link.make_link('self', pecan.request.host_url,
'v1', '', bookmark=True),
link.Link.make_link('describedby',
'http://www.starlingx.io/',
'developer/inventory/dev',
'api-spec-v1.html',
bookmark=True, type='text/html')
]
v1.media_types = [MediaType('application/json',
'application/vnd.openstack.inventory.v1+json')]
v1.systems = [link.Link.make_link('self', pecan.request.host_url,
'systems', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'systems', '',
bookmark=True)
]
v1.hosts = [link.Link.make_link('self', pecan.request.host_url,
'hosts', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'hosts', '',
bookmark=True)
]
v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
'nodes', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'nodes', '',
bookmark=True)
]
v1.cpus = [link.Link.make_link('self', pecan.request.host_url,
'cpus', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'cpus', '',
bookmark=True)
]
v1.memory = [link.Link.make_link('self', pecan.request.host_url,
'memory', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'memory', '',
bookmark=True)
]
v1.ports = [link.Link.make_link('self',
pecan.request.host_url,
'ports', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ports', '',
bookmark=True)
]
v1.ethernet_ports = [link.Link.make_link('self',
pecan.request.host_url,
'ethernet_ports', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ethernet_ports', '',
bookmark=True)
]
v1.lldp_agents = [link.Link.make_link('self',
pecan.request.host_url,
'lldp_agents', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_agents', '',
bookmark=True)
]
v1.lldp_neighbours = [link.Link.make_link('self',
pecan.request.host_url,
'lldp_neighbours', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_neighbours', '',
bookmark=True)
]
v1.sensors = [link.Link.make_link('self',
pecan.request.host_url,
'sensors', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'sensors', '',
bookmark=True)
]
v1.sensorgroups = [link.Link.make_link('self',
pecan.request.host_url,
'sensorgroups', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'sensorgroups', '',
bookmark=True)
]
return v1
class Controller(rest.RestController):
"""Version 1 API controller root."""
systems = system.SystemController()
hosts = host.HostController()
nodes = node.NodeController()
cpus = cpu.CPUController()
memorys = memory.MemoryController()
ports = port.PortController()
ethernet_ports = ethernet_port.EthernetPortController()
lldp_agents = lldp_agent.LLDPAgentController()
lldp_neighbours = lldp_neighbour.LLDPNeighbourController()
pci_devices = pci_device.PCIDeviceController()
sensors = sensor.SensorController()
sensorgroups = sensorgroup.SensorGroupController()
@wsme_pecan.wsexpose(V1)
def get(self):
return V1.convert()
__all__ = ('Controller',)
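Abridged shape of the document returned by GET /v1, reconstructed from the link-building above (the host URL is a placeholder):

v1_document = {
    'id': 'v1',
    'media_types': [{'base': 'application/json',
                     'type': 'application/vnd.openstack.inventory.v1+json'}],
    'hosts': [{'rel': 'self', 'href': 'http://<host>/hosts'},
              {'rel': 'bookmark', 'href': 'http://<host>/hosts'}],
    # ...one self/bookmark pair per resource wired up above...
}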

View File

@ -1,130 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import functools
from oslo_utils._i18n import _
from webob import exc
import wsme
from wsme import types as wtypes
class APIBase(wtypes.Base):
created_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is created"""
updated_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is updated"""
def as_dict(self):
"""Render this object as a dict of its fields."""
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k) and
getattr(self, k) != wsme.Unset)
def unset_fields_except(self, except_list=None):
"""Unset fields so they don't appear in the message body.
:param except_list: A list of fields that won't be touched.
"""
if except_list is None:
except_list = []
for k in self.as_dict():
if k not in except_list:
setattr(self, k, wsme.Unset)
@classmethod
def from_rpc_object(cls, m, fields=None):
"""Convert a RPC object to an API object."""
obj_dict = m.as_dict()
# Unset non-required fields so they do not appear
# in the message body
obj_dict.update(dict((k, wsme.Unset)
for k in obj_dict.keys()
if fields and k not in fields))
return cls(**obj_dict)
@functools.total_ordering
class Version(object):
"""API Version object."""
string = 'X-OpenStack-Inventory-API-Version'
"""HTTP Header string carrying the requested version"""
min_string = 'X-OpenStack-Inventory-API-Minimum-Version'
"""HTTP response header"""
max_string = 'X-OpenStack-Inventory-API-Maximum-Version'
"""HTTP response header"""
def __init__(self, headers, default_version, latest_version):
"""Create an API Version object from the supplied headers.
:param headers: webob headers
:param default_version: version to use if not specified in headers
:param latest_version: version to use if latest is requested
:raises: webob.HTTPNotAcceptable
"""
(self.major, self.minor) = Version.parse_headers(
headers, default_version, latest_version)
def __repr__(self):
return '%s.%s' % (self.major, self.minor)
@staticmethod
def parse_headers(headers, default_version, latest_version):
"""Determine the API version requested based on the headers supplied.
:param headers: webob headers
:param default_version: version to use if not specified in headers
:param latest_version: version to use if latest is requested
:returns: a tuple of (major, minor) version numbers
:raises: webob.HTTPNotAcceptable
"""
version_str = headers.get(Version.string, default_version)
if version_str.lower() == 'latest':
parse_str = latest_version
else:
parse_str = version_str
try:
version = tuple(int(i) for i in parse_str.split('.'))
except ValueError:
version = ()
if len(version) != 2:
raise exc.HTTPNotAcceptable(_(
"Invalid value for %s header") % Version.string)
return version
def __gt__(self, other):
return (self.major, self.minor) > (other.major, other.minor)
def __eq__(self, other):
return (self.major, self.minor) == (other.major, other.minor)
def __ne__(self, other):
return not self.__eq__(other)
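A standalone mirror of parse_headers() showing the accepted forms; the 'latest' bound is a placeholder (the real maximum came from elsewhere in the removed tree):

def parse_version(value, latest='1.0'):
    # 'latest' resolves to the newest supported version; anything
    # else must be of the form '<major>.<minor>'.
    parse_str = latest if value.lower() == 'latest' else value
    try:
        version = tuple(int(i) for i in parse_str.split('.'))
    except ValueError:
        version = ()
    if len(version) != 2:
        raise ValueError('Invalid value for version header')
    return version

assert parse_version('1.0') == (1, 0)
assert parse_version('latest') == (1, 0)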

View File

@ -1,57 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from wsme import types as wtypes
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import link
class Collection(base.APIBase):
next = wtypes.text
"A link to retrieve the next subset of the collection"
@property
def collection(self):
return getattr(self, self._type)
def has_next(self, limit):
"""Return whether collection has more items."""
return len(self.collection) and len(self.collection) == limit
def get_next(self, limit, url=None, **kwargs):
"""Return a link to the next subset of the collection."""
if not self.has_next(limit):
return wtypes.Unset
resource_url = url or self._type
q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
'args': q_args, 'limit': limit,
'marker': self.collection[-1].uuid}
return link.Link.make_link('next', pecan.request.host_url,
resource_url, next_args).href
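The pagination contract above in one standalone function (host URL and data are placeholders):

def next_link(host_url, resource, items, limit, **filters):
    # Mirrors Collection.get_next(): emit a link only when the current
    # page is full, using the last item's uuid as the marker.
    if not (items and len(items) == limit):
        return None
    q_args = ''.join('%s=%s&' % kv for kv in sorted(filters.items()))
    return '%s/%s?%slimit=%d&marker=%s' % (
        host_url, resource, q_args, limit, items[-1]['uuid'])

print(next_link('http://10.0.0.1:6380', 'cpus',
                [{'uuid': 'aaa'}, {'uuid': 'bbb'}], 2, sort_dir='asc'))
# -> http://10.0.0.1:6380/cpus?sort_dir=asc&limit=2&marker=bbb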

View File

@ -1,303 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import six
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory import objects
from oslo_log import log
LOG = log.getLogger(__name__)
class CPUPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class CPU(base.APIBase):
"""API representation of a host CPU.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a cpu.
"""
uuid = types.uuid
"Unique UUID for this cpu"
cpu = int
"Represent the cpu id of the cpu"
core = int
"Represent the core id of the cpu"
thread = int
"Represent the thread id of the cpu"
cpu_family = wtypes.text
"Represent the cpu family of the cpu"
cpu_model = wtypes.text
"Represent the cpu model of the cpu"
function = wtypes.text
"Represent the function of the cpu"
num_cores_on_processor0 = wtypes.text
"The number of cores on processor 0"
num_cores_on_processor1 = wtypes.text
"The number of cores on processor 1"
num_cores_on_processor2 = wtypes.text
"The number of cores on processor 2"
num_cores_on_processor3 = wtypes.text
"The number of cores on processor 3"
numa_node = int
"The numa node or zone the cpu. API only attribute"
capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
six.integer_types)}
"This cpu's meta data"
host_id = int
"The id of the host this cpu belongs to"
node_id = int
"The id of the node this cpu belongs to"
host_uuid = types.uuid
"The UUID of the host this cpu belongs to"
node_uuid = types.uuid
"The UUID of the node this cpu belongs to"
links = [link.Link]
"A list containing a self link and associated cpu links"
def __init__(self, **kwargs):
self.fields = objects.CPU.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
# API only attributes
self.fields.append('function')
setattr(self, 'function', kwargs.get('function', None))
self.fields.append('num_cores_on_processor0')
setattr(self, 'num_cores_on_processor0',
kwargs.get('num_cores_on_processor0', None))
self.fields.append('num_cores_on_processor1')
setattr(self, 'num_cores_on_processor1',
kwargs.get('num_cores_on_processor1', None))
self.fields.append('num_cores_on_processor2')
setattr(self, 'num_cores_on_processor2',
kwargs.get('num_cores_on_processor2', None))
self.fields.append('num_cores_on_processor3')
setattr(self, 'num_cores_on_processor3',
kwargs.get('num_cores_on_processor3', None))
@classmethod
def convert_with_links(cls, rpc_port, expand=True):
cpu = CPU(**rpc_port.as_dict())
if not expand:
cpu.unset_fields_except(
['uuid', 'cpu', 'core', 'thread',
'cpu_family', 'cpu_model',
'numa_node', 'host_uuid', 'node_uuid',
'host_id', 'node_id',
'capabilities',
'created_at', 'updated_at'])
# never expose the id attribute
cpu.host_id = wtypes.Unset
cpu.node_id = wtypes.Unset
cpu.links = [link.Link.make_link('self', pecan.request.host_url,
'cpus', cpu.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'cpus', cpu.uuid,
bookmark=True)
]
return cpu
class CPUCollection(collection.Collection):
"""API representation of a collection of cpus."""
cpus = [CPU]
"A list containing cpu objects"
def __init__(self, **kwargs):
self._type = 'cpus'
@classmethod
def convert_with_links(cls, rpc_ports, limit, url=None,
expand=False, **kwargs):
collection = CPUCollection()
collection.cpus = [
CPU.convert_with_links(p, expand) for p in rpc_ports]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
class CPUController(rest.RestController):
"""REST controller for cpus."""
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_hosts=False, from_node=False):
self._from_hosts = from_hosts
self._from_node = from_node
def _get_cpus_collection(self, i_uuid, node_uuid, marker,
limit, sort_key, sort_dir,
expand=False, resource_url=None):
if self._from_hosts and not i_uuid:
raise exception.InvalidParameterValue(_(
"Host id not specified."))
if self._from_node and not i_uuid:
raise exception.InvalidParameterValue(_(
"Node id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.CPU.get_by_uuid(pecan.request.context,
marker)
if self._from_hosts:
# cpus = pecan.request.dbapi.cpu_get_by_host(
cpus = objects.CPU.get_by_host(
pecan.request.context,
i_uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
elif self._from_node:
# cpus = pecan.request.dbapi.cpu_get_by_node(
cpus = objects.CPU.get_by_node(
pecan.request.context,
i_uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
if i_uuid and not node_uuid:
# cpus = pecan.request.dbapi.cpu_get_by_host(
cpus = objects.CPU.get_by_host(
pecan.request.context,
i_uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
elif i_uuid and node_uuid:
# cpus = pecan.request.dbapi.cpu_get_by_host_node(
cpus = objects.CPU.get_by_host_node(
pecan.request.context,
i_uuid,
node_uuid,
limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
elif node_uuid:
# cpus = pecan.request.dbapi.cpu_get_by_host_node(
cpus = objects.CPU.get_by_node(
pecan.request.context,
i_uuid,
node_uuid,
limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
# cpus = pecan.request.dbapi.icpu_get_list(
cpus = objects.CPU.list(
pecan.request.context,
limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return CPUCollection.convert_with_links(cpus, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(CPUCollection, types.uuid, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, host_uuid=None, node_uuid=None,
marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of cpus."""
return self._get_cpus_collection(host_uuid, node_uuid,
marker, limit,
sort_key, sort_dir)
@wsme_pecan.wsexpose(CPUCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, host_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of cpus with detail."""
# NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "cpus":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['cpus', 'detail'])
return self._get_cpus_collection(host_uuid, None, marker, limit,
sort_key, sort_dir, expand, resource_url)
@wsme_pecan.wsexpose(CPU, types.uuid)
def get_one(self, cpu_uuid):
"""Retrieve information about the given cpu."""
if self._from_hosts:
raise exception.OperationNotPermitted
rpc_port = objects.CPU.get_by_uuid(pecan.request.context, cpu_uuid)
return CPU.convert_with_links(rpc_port)
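An illustrative client-side query against this controller, assuming the python-requests package and a reachable endpoint (URL, port and UUID are placeholders):

import requests

BASE = 'http://localhost:6380/v1'          # placeholder endpoint
HOST_UUID = '00000000-0000-0000-0000-000000000000'

# First page, host-scoped, sorted by logical cpu id.
page = requests.get(BASE + '/cpus',
                    params={'host_uuid': HOST_UUID,
                            'limit': 50, 'sort_key': 'cpu'}).json()
for cpu in page['cpus']:
    print(cpu['cpu'], cpu['numa_node'], cpu['thread'])
# Collection.get_next() populates 'next' only when the page is full.
next_url = page.get('next')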

View File

@ -1,330 +0,0 @@
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from inventory.common import constants
from inventory.common import k_host
from oslo_log import log
LOG = log.getLogger(__name__)
CORE_FUNCTIONS = [
constants.PLATFORM_FUNCTION,
constants.VSWITCH_FUNCTION,
constants.SHARED_FUNCTION,
constants.VM_FUNCTION,
constants.NO_FUNCTION
]
VSWITCH_MIN_CORES = 1
VSWITCH_MAX_CORES = 8
class CpuProfile(object):
class CpuConfigure(object):
def __init__(self):
self.platform = 0
self.vswitch = 0
self.shared = 0
self.vms = 0
self.numa_node = 0
# cpus is a list of cpus sorted by numa_node, core and thread
# if not, provide a node list sorted by numa_node
# (id might not be reliable)
def __init__(self, cpus, nodes=None):
if nodes is not None:
cpus = CpuProfile.sort_cpu_by_numa_node(cpus, nodes)
cores = []
self.number_of_cpu = 0
self.cores_per_cpu = 0
self.hyper_thread = False
self.processors = []
cur_processor = None
for cpu in cpus:
key = '{0}-{1}'.format(cpu.numa_node, cpu.core)
if key not in cores:
cores.append(key)
else:
self.hyper_thread = True
continue
if (cur_processor is None or
cur_processor.numa_node != cpu.numa_node):
cur_processor = CpuProfile.CpuConfigure()
cur_processor.numa_node = cpu.numa_node
self.processors.append(cur_processor)
if cpu.allocated_function == constants.PLATFORM_FUNCTION:
cur_processor.platform += 1
elif cpu.allocated_function == constants.VSWITCH_FUNCTION:
cur_processor.vswitch += 1
elif cpu.allocated_function == constants.SHARED_FUNCTION:
cur_processor.shared += 1
elif cpu.allocated_function == constants.VM_FUNCTION:
cur_processor.vms += 1
self.number_of_cpu = len(self.processors)
self.cores_per_cpu = len(cores) / self.number_of_cpu
@staticmethod
def sort_cpu_by_numa_node(cpus, nodes):
newlist = []
for node in nodes:
for cpu in cpus:
if cpu.node_id == node.id:
cpu.numa_node = node.numa_node
newlist.append(cpu)
return newlist
class HostCpuProfile(CpuProfile):
def __init__(self, subfunctions, cpus, nodes=None):
super(HostCpuProfile, self).__init__(cpus, nodes)
self.subfunctions = subfunctions
# see if a cpu profile is applicable to this host
def profile_applicable(self, profile):
if self.number_of_cpu == profile.number_of_cpu and \
self.cores_per_cpu == profile.cores_per_cpu:
return self.check_profile_core_functions(profile)
return False # Profile is not applicable to host
def check_profile_core_functions(self, profile):
platform_cores = 0
vswitch_cores = 0
shared_cores = 0
vm_cores = 0
for cpu in profile.processors:
platform_cores += cpu.platform
vswitch_cores += cpu.vswitch
shared_cores += cpu.shared
vm_cores += cpu.vms
error_string = ""
if platform_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.PLATFORM_FUNCTION
elif k_host.COMPUTE in self.subfunctions and vswitch_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.VSWITCH_FUNCTION
elif k_host.COMPUTE in self.subfunctions and vm_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.VM_FUNCTION
return error_string
def lookup_function(s):
for f in CORE_FUNCTIONS:
if s.lower() == f.lower():
return f
return s
def check_profile_core_functions(personality, profile):
platform_cores = 0
vswitch_cores = 0
shared_cores = 0
vm_cores = 0
for cpu in profile.processors:
platform_cores += cpu.platform
vswitch_cores += cpu.vswitch
shared_cores += cpu.shared
vm_cores += cpu.vms
error_string = ""
if platform_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.PLATFORM_FUNCTION
elif k_host.COMPUTE in personality and vswitch_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.VSWITCH_FUNCTION
elif k_host.COMPUTE in personality and vm_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.VM_FUNCTION
return error_string
def check_core_functions(personality, icpus):
platform_cores = 0
vswitch_cores = 0
shared_cores = 0
vm_cores = 0
for cpu in icpus:
allocated_function = cpu.allocated_function
if allocated_function == constants.PLATFORM_FUNCTION:
platform_cores += 1
elif allocated_function == constants.VSWITCH_FUNCTION:
vswitch_cores += 1
elif allocated_function == constants.SHARED_FUNCTION:
shared_cores += 1
elif allocated_function == constants.VM_FUNCTION:
vm_cores += 1
error_string = ""
if platform_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.PLATFORM_FUNCTION
elif k_host.COMPUTE in personality and vswitch_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.VSWITCH_FUNCTION
elif k_host.COMPUTE in personality and vm_cores == 0:
error_string = "There must be at least one core for %s." % \
constants.VM_FUNCTION
return error_string
def get_default_function(host):
"""Return the default function to be assigned to cpus on this host"""
if k_host.COMPUTE in host.subfunctions:
return constants.VM_FUNCTION
return constants.PLATFORM_FUNCTION
def get_cpu_function(host, cpu):
"""Return the function that is assigned to the specified cpu"""
for s in range(0, len(host.nodes)):
functions = host.cpu_functions[s]
for f in CORE_FUNCTIONS:
if cpu.cpu in functions[f]:
return f
return constants.NO_FUNCTION
def get_cpu_counts(host):
"""Return the CPU counts for this host by socket and function."""
counts = {}
for s in range(0, len(host.nodes)):
counts[s] = {}
for f in CORE_FUNCTIONS:
counts[s][f] = len(host.cpu_functions[s][f])
return counts
def init_cpu_counts(host):
"""Create empty data structures to track CPU assignments by socket and
function.
"""
host.cpu_functions = {}
host.cpu_lists = {}
for s in range(0, len(host.nodes)):
host.cpu_functions[s] = {}
for f in CORE_FUNCTIONS:
host.cpu_functions[s][f] = []
host.cpu_lists[s] = []
def _sort_by_coreid(cpu):
"""Sort a list of cpu database objects such that threads of the same core
are adjacent in the list with the lowest thread number appearing first.
"""
return (int(cpu.core), int(cpu.thread))
def restructure_host_cpu_data(host):
"""Reorganize the cpu list by socket and function so that it can more
easily be consumed by other utilities.
"""
init_cpu_counts(host)
host.sockets = len(host.nodes or [])
host.hyperthreading = False
host.physical_cores = 0
if not host.cpus:
return
host.cpu_model = host.cpus[0].cpu_model
cpu_list = sorted(host.cpus, key=_sort_by_coreid)
for cpu in cpu_list:
inode = pecan.request.dbapi.inode_get(inode_id=cpu.node_id)
cpu.numa_node = inode.numa_node
if cpu.thread == 0:
host.physical_cores += 1
elif cpu.thread > 0:
host.hyperthreading = True
function = cpu.allocated_function or get_default_function(host)
host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))
host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))
def check_core_allocations(host, cpu_counts, func):
"""Check that minimum and maximum core values are respected."""
total_platform_cores = 0
total_vswitch_cores = 0
total_shared_cores = 0
for s in range(0, len(host.nodes)):
available_cores = len(host.cpu_lists[s])
platform_cores = cpu_counts[s][constants.PLATFORM_FUNCTION]
vswitch_cores = cpu_counts[s][constants.VSWITCH_FUNCTION]
shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]
requested_cores = platform_cores + vswitch_cores + shared_cores
if requested_cores > available_cores:
return ("More total logical cores requested than present on "
"'Processor %s' (%s cores)." % (s, available_cores))
total_platform_cores += platform_cores
total_vswitch_cores += vswitch_cores
total_shared_cores += shared_cores
if func.lower() == constants.PLATFORM_FUNCTION.lower():
if ((k_host.CONTROLLER in host.subfunctions) and
(k_host.COMPUTE in host.subfunctions)):
if total_platform_cores < 2:
return "%s must have at least two cores." % \
constants.PLATFORM_FUNCTION
elif total_platform_cores == 0:
return "%s must have at least one core." % \
constants.PLATFORM_FUNCTION
if k_host.COMPUTE in (host.subfunctions or host.personality):
if func.lower() == constants.VSWITCH_FUNCTION.lower():
if host.hyperthreading:
                total_physical_cores = total_vswitch_cores // 2
else:
total_physical_cores = total_vswitch_cores
if total_physical_cores < VSWITCH_MIN_CORES:
return ("The %s function must have at least %s core(s)." %
(constants.VSWITCH_FUNCTION.lower(),
VSWITCH_MIN_CORES))
elif total_physical_cores > VSWITCH_MAX_CORES:
return ("The %s function can only be assigned up to %s cores."
% (constants.VSWITCH_FUNCTION.lower(),
VSWITCH_MAX_CORES))
reserved_for_vms = \
len(host.cpus) - total_platform_cores - total_vswitch_cores
if reserved_for_vms <= 0:
return "There must be at least one unused core for %s." % \
                constants.VM_FUNCTION
else:
if total_platform_cores != len(host.cpus):
return "All logical cores must be reserved for platform use"
return ""
def update_core_allocations(host, cpu_counts):
"""Update the per socket/function cpu list based on the newly requested
counts.
"""
# Remove any previous assignments
for s in range(0, len(host.nodes)):
for f in CORE_FUNCTIONS:
host.cpu_functions[s][f] = []
# Set new assignments
for s in range(0, len(host.nodes)):
cpu_list = host.cpu_lists[s] if s in host.cpu_lists else []
# Reserve for the platform first
for i in range(0, cpu_counts[s][constants.PLATFORM_FUNCTION]):
host.cpu_functions[s][constants.PLATFORM_FUNCTION].append(
cpu_list.pop(0))
# Reserve for the vswitch next
for i in range(0, cpu_counts[s][constants.VSWITCH_FUNCTION]):
host.cpu_functions[s][constants.VSWITCH_FUNCTION].append(
cpu_list.pop(0))
# Reserve for the shared next
for i in range(0, cpu_counts[s][constants.SHARED_FUNCTION]):
host.cpu_functions[s][constants.SHARED_FUNCTION].append(
cpu_list.pop(0))
# Assign the remaining cpus to the default function for this host
host.cpu_functions[s][get_default_function(host)] += cpu_list
return
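
Taken together, these helpers are meant to run in a fixed order: rebuild the per-socket structures from the database, validate the requested counts, then rewrite the assignments. A hedged sketch of that flow (the driver function is hypothetical and not part of the module; restructure_host_cpu_data still needs a live pecan request context for its db lookups):

def reallocate_cores(host, cpu_counts, func):
    # Hypothetical wrapper around the helpers above.
    restructure_host_cpu_data(host)        # populate cpu_functions/cpu_lists
    error = check_core_allocations(host, cpu_counts, func)
    if error:
        raise ValueError(error)            # reject an invalid request up front
    update_core_allocations(host, cpu_counts)
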


@@ -1,310 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import six
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory import objects
from oslo_log import log
LOG = log.getLogger(__name__)
class EthernetPortPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class EthernetPort(base.APIBase):
"""API representation of an Ethernet port
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
Ethernet port.
"""
uuid = types.uuid
"Unique UUID for this port"
type = wtypes.text
"Represent the type of port"
name = wtypes.text
"Represent the name of the port. Unique per host"
namedisplay = wtypes.text
"Represent the display name of the port. Unique per host"
pciaddr = wtypes.text
"Represent the pci address of the port"
dev_id = int
"The unique identifier of PCI device"
pclass = wtypes.text
"Represent the pci class of the port"
pvendor = wtypes.text
"Represent the pci vendor of the port"
pdevice = wtypes.text
"Represent the pci device of the port"
psvendor = wtypes.text
"Represent the pci svendor of the port"
psdevice = wtypes.text
"Represent the pci sdevice of the port"
numa_node = int
"Represent the numa node or zone sdevice of the port"
sriov_totalvfs = int
"The total number of available SR-IOV VFs"
sriov_numvfs = int
"The number of configured SR-IOV VFs"
sriov_vfs_pci_address = wtypes.text
"The PCI Addresses of the VFs"
driver = wtypes.text
"The kernel driver for this device"
mac = wsme.wsattr(types.macaddress, mandatory=False)
"Represent the MAC Address of the port"
mtu = int
"Represent the MTU size (bytes) of the port"
speed = int
"Represent the speed (MBytes/sec) of the port"
link_mode = int
"Represent the link mode of the port"
duplex = wtypes.text
"Represent the duplex mode of the port"
autoneg = wtypes.text
"Represent the auto-negotiation mode of the port"
bootp = wtypes.text
"Represent the bootp port of the host"
capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
six.integer_types)}
"Represent meta data of the port"
host_id = int
"Represent the host_id the port belongs to"
bootif = wtypes.text
"Represent whether the port is a boot port"
dpdksupport = bool
"Represent whether or not the port supports DPDK acceleration"
host_uuid = types.uuid
"Represent the UUID of the host the port belongs to"
node_uuid = types.uuid
"Represent the UUID of the node the port belongs to"
links = [link.Link]
"Represent a list containing a self link and associated port links"
def __init__(self, **kwargs):
self.fields = objects.EthernetPort.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_port, expand=True):
port = EthernetPort(**rpc_port.as_dict())
if not expand:
port.unset_fields_except(['uuid', 'host_id', 'node_id',
'type', 'name',
'namedisplay', 'pciaddr', 'dev_id',
'pclass', 'pvendor', 'pdevice',
'psvendor', 'psdevice', 'numa_node',
'mac', 'sriov_totalvfs', 'sriov_numvfs',
'sriov_vfs_pci_address', 'driver',
'mtu', 'speed', 'link_mode',
'duplex', 'autoneg', 'bootp',
'capabilities',
'host_uuid',
'node_uuid', 'dpdksupport',
'created_at', 'updated_at'])
# never expose the id attribute
port.host_id = wtypes.Unset
port.node_id = wtypes.Unset
port.links = [link.Link.make_link('self', pecan.request.host_url,
'ethernet_ports', port.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ethernet_ports', port.uuid,
bookmark=True)
]
return port
class EthernetPortCollection(collection.Collection):
"""API representation of a collection of EthernetPort objects."""
ethernet_ports = [EthernetPort]
"A list containing EthernetPort objects"
def __init__(self, **kwargs):
self._type = 'ethernet_ports'
@classmethod
def convert_with_links(cls, rpc_ports, limit, url=None,
expand=False, **kwargs):
collection = EthernetPortCollection()
collection.ethernet_ports = [EthernetPort.convert_with_links(p, expand)
for p in rpc_ports]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'EthernetPortController'
class EthernetPortController(rest.RestController):
"""REST controller for EthernetPorts."""
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_hosts=False, from_node=False):
self._from_hosts = from_hosts
self._from_node = from_node
def _get_ports_collection(self, uuid, node_uuid,
marker, limit, sort_key, sort_dir,
expand=False, resource_url=None):
if self._from_hosts and not uuid:
raise exception.InvalidParameterValue(_(
"Host id not specified."))
if self._from_node and not uuid:
raise exception.InvalidParameterValue(_(
"node id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.EthernetPort.get_by_uuid(
pecan.request.context,
marker)
if self._from_hosts:
ports = objects.EthernetPort.get_by_host(
pecan.request.context,
uuid, limit,
marker=marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
elif self._from_node:
ports = objects.EthernetPort.get_by_numa_node(
pecan.request.context,
uuid, limit,
marker=marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
if uuid:
ports = objects.EthernetPort.get_by_host(
pecan.request.context,
uuid, limit,
marker=marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
ports = objects.EthernetPort.list(
pecan.request.context,
limit, marker=marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return EthernetPortCollection.convert_with_links(
ports, limit, url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(EthernetPortCollection, types.uuid, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, uuid=None, node_uuid=None,
marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of ports."""
return self._get_ports_collection(uuid,
node_uuid,
marker, limit, sort_key, sort_dir)
@wsme_pecan.wsexpose(EthernetPortCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ports with detail."""
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "ethernet_ports":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['ethernet_ports', 'detail'])
        return self._get_ports_collection(uuid, None, marker, limit,
                                          sort_key, sort_dir, expand,
                                          resource_url)
@wsme_pecan.wsexpose(EthernetPort, types.uuid)
def get_one(self, port_uuid):
"""Retrieve information about the given port."""
if self._from_hosts:
raise exception.OperationNotPermitted
rpc_port = objects.EthernetPort.get_by_uuid(
pecan.request.context, port_uuid)
return EthernetPort.convert_with_links(rpc_port)
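
The collection endpoints above implement the usual marker/limit pagination contract, with the next link carrying the marker for the following page. A hedged client-side sketch (the base URL is borrowed from the Link.sample below and may not match a real deployment; the shape of the 'next' field is inferred from collection.get_next):

import requests

# Hypothetical pagination walk; endpoint host and port are assumptions.
url = 'http://localhost:18002/v1/ethernet_ports'
params = {'limit': 50, 'sort_key': 'id', 'sort_dir': 'asc'}
while url:
    page = requests.get(url, params=params, timeout=10).json()
    for port in page['ethernet_ports']:
        print('%s %s' % (port['uuid'], port['name']))
    url = page.get('next')  # absolute URL with marker= included; absent on the last page
    params = None           # subsequent URLs already carry their query string
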

File diff suppressed because it is too large


@@ -1,58 +0,0 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from wsme import types as wtypes
from inventory.api.controllers.v1 import base
def build_url(resource, resource_args, bookmark=False, base_url=None):
if base_url is None:
base_url = pecan.request.public_url
template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
# FIXME(lucasagomes): I'm getting a 404 when doing a GET on
# a nested resource that the URL ends with a '/'.
# https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs
template += '%(args)s' if resource_args.startswith('?') else '/%(args)s'
return template % {'url': base_url, 'res': resource, 'args': resource_args}
class Link(base.APIBase):
"""A link representation."""
href = wtypes.text
"""The url of a link."""
rel = wtypes.text
"""The name of a link."""
type = wtypes.text
"""Indicates the type of document/link."""
@staticmethod
def make_link(rel_name, url, resource, resource_args,
bookmark=False, type=wtypes.Unset):
href = build_url(resource, resource_args,
bookmark=bookmark, base_url=url)
return Link(href=href, rel=rel_name, type=type)
@classmethod
def sample(cls):
        sample = cls(href="http://localhost:18002"
                          "/eeaca217-e7d8-47b4-bb41-3f99f20ead81",
rel="bookmark")
return sample
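
The two URL shapes produced by build_url above differ only in the version prefix. A small runnable illustration (host and uuid are invented; it mirrors the template logic rather than calling the function):

base = 'http://localhost:18002'
uuid = 'eeaca217-e7d8-47b4-bb41-3f99f20ead81'
self_href = '%s/v1/%s/%s' % (base, 'ethernet_ports', uuid)   # bookmark=False
bookmark_href = '%s/%s/%s' % (base, 'ethernet_ports', uuid)  # bookmark=True
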


@@ -1,366 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_tlv
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_lldp
from inventory.common import utils as cutils
from inventory import objects
from oslo_log import log
LOG = log.getLogger(__name__)
class LLDPAgentPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class LLDPAgent(base.APIBase):
"""API representation of an LLDP Agent
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
LLDP agent.
"""
uuid = types.uuid
"Unique UUID for this port"
status = wtypes.text
"Represent the status of the lldp agent"
host_id = int
"Represent the host_id the lldp agent belongs to"
port_id = int
"Represent the port_id the lldp agent belongs to"
host_uuid = types.uuid
"Represent the UUID of the host the lldp agent belongs to"
port_uuid = types.uuid
"Represent the UUID of the port the lldp agent belongs to"
port_name = wtypes.text
"Represent the name of the port the lldp neighbour belongs to"
port_namedisplay = wtypes.text
"Represent the display name of the port. Unique per host"
links = [link.Link]
"Represent a list containing a self link and associated lldp agent links"
    tlvs = [link.Link]
    "Links to the collection of LldpTlvs on this lldp agent"
    chassis_id = wtypes.text
    "Represent the chassis id of the lldp agent"
port_identifier = wtypes.text
"Represent the LLDP port id of the lldp agent"
port_description = wtypes.text
"Represent the port description of the lldp agent"
    system_description = wtypes.text
    "Represent the system description of the lldp agent"
    system_name = wtypes.text
    "Represent the system name of the lldp agent"
    system_capabilities = wtypes.text
    "Represent the system capabilities of the lldp agent"
    management_address = wtypes.text
    "Represent the management address of the lldp agent"
ttl = wtypes.text
"Represent the time-to-live of the lldp agent"
dot1_lag = wtypes.text
"Represent the 802.1 link aggregation status of the lldp agent"
dot1_vlan_names = wtypes.text
"Represent the 802.1 vlan names of the lldp agent"
dot3_mac_status = wtypes.text
"Represent the 802.3 MAC/PHY status of the lldp agent"
dot3_max_frame = wtypes.text
"Represent the 802.3 maximum frame size of the lldp agent"
def __init__(self, **kwargs):
self.fields = objects.LLDPAgent.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_lldp_agent, expand=True):
lldp_agent = LLDPAgent(**rpc_lldp_agent.as_dict())
if not expand:
lldp_agent.unset_fields_except([
'uuid', 'host_id', 'port_id', 'status', 'host_uuid',
'port_uuid', 'port_name', 'port_namedisplay',
'created_at', 'updated_at',
k_lldp.LLDP_TLV_TYPE_CHASSIS_ID,
k_lldp.LLDP_TLV_TYPE_PORT_ID,
k_lldp.LLDP_TLV_TYPE_TTL,
k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME,
k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC,
k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP,
k_lldp.LLDP_TLV_TYPE_MGMT_ADDR,
k_lldp.LLDP_TLV_TYPE_PORT_DESC,
k_lldp.LLDP_TLV_TYPE_DOT1_LAG,
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES,
k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS,
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME])
# never expose the id attribute
lldp_agent.host_id = wtypes.Unset
lldp_agent.port_id = wtypes.Unset
lldp_agent.links = [
link.Link.make_link('self', pecan.request.host_url,
'lldp_agents', lldp_agent.uuid),
link.Link.make_link('bookmark', pecan.request.host_url,
'lldp_agents', lldp_agent.uuid,
bookmark=True)]
if expand:
lldp_agent.tlvs = [
link.Link.make_link('self',
pecan.request.host_url,
'lldp_agents',
lldp_agent.uuid + "/tlvs"),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_agents',
lldp_agent.uuid + "/tlvs",
bookmark=True)]
return lldp_agent
class LLDPAgentCollection(collection.Collection):
"""API representation of a collection of LldpAgent objects."""
lldp_agents = [LLDPAgent]
"A list containing LldpAgent objects"
def __init__(self, **kwargs):
self._type = 'lldp_agents'
@classmethod
def convert_with_links(cls, rpc_lldp_agents, limit, url=None,
expand=False, **kwargs):
collection = LLDPAgentCollection()
collection.lldp_agents = [LLDPAgent.convert_with_links(a, expand)
for a in rpc_lldp_agents]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'LLDPAgentController'
class LLDPAgentController(rest.RestController):
"""REST controller for LldpAgents."""
tlvs = lldp_tlv.LLDPTLVController(
from_lldp_agents=True)
"Expose tlvs as a sub-element of LldpAgents"
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_hosts=False, from_ports=False):
self._from_hosts = from_hosts
self._from_ports = from_ports
def _get_lldp_agents_collection(self, uuid,
marker, limit, sort_key, sort_dir,
expand=False, resource_url=None):
if self._from_hosts and not uuid:
raise exception.InvalidParameterValue(_("Host id not specified."))
if self._from_ports and not uuid:
raise exception.InvalidParameterValue(_("Port id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.LLDPAgent.get_by_uuid(pecan.request.context,
marker)
if self._from_hosts:
agents = objects.LLDPAgent.get_by_host(
pecan.request.context,
uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
elif self._from_ports:
agents = []
agent = objects.LLDPAgent.get_by_port(pecan.request.context, uuid)
agents.append(agent)
else:
agents = objects.LLDPAgent.list(
pecan.request.context,
limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
return LLDPAgentCollection.convert_with_links(agents, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(LLDPAgentCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, uuid=None,
marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of lldp agents."""
return self._get_lldp_agents_collection(uuid, marker, limit, sort_key,
sort_dir)
@wsme_pecan.wsexpose(LLDPAgentCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of lldp_agents with detail."""
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "lldp_agents":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['lldp_agents', 'detail'])
return self._get_lldp_agents_collection(uuid, marker, limit, sort_key,
sort_dir, expand, resource_url)
@wsme_pecan.wsexpose(LLDPAgent, types.uuid)
def get_one(self, port_uuid):
"""Retrieve information about the given lldp agent."""
if self._from_hosts:
raise exception.OperationNotPermitted
rpc_lldp_agent = objects.LLDPAgent.get_by_uuid(
pecan.request.context, port_uuid)
return LLDPAgent.convert_with_links(rpc_lldp_agent)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(LLDPAgent, body=LLDPAgent)
def post(self, agent):
"""Create a new lldp agent."""
if self._from_hosts:
raise exception.OperationNotPermitted
try:
host_uuid = agent.host_uuid
port_uuid = agent.port_uuid
new_agent = objects.LLDPAgent.create(
pecan.request.context,
port_uuid,
host_uuid,
agent.as_dict())
except exception.InventoryException as e:
LOG.exception(e)
raise wsme.exc.ClientSideError(_("Invalid data"))
return agent.convert_with_links(new_agent)
@cutils.synchronized(LOCK_NAME)
@wsme.validate(types.uuid, [LLDPAgentPatchType])
@wsme_pecan.wsexpose(LLDPAgent, types.uuid,
body=[LLDPAgentPatchType])
def patch(self, uuid, patch):
"""Update an existing lldp agent."""
if self._from_hosts:
raise exception.OperationNotPermitted
if self._from_ports:
raise exception.OperationNotPermitted
rpc_agent = objects.LLDPAgent.get_by_uuid(
pecan.request.context, uuid)
        # replace host_uuid and port_uuid with the corresponding internal ids
patch_obj = jsonpatch.JsonPatch(patch)
for p in patch_obj:
if p['path'] == '/host_uuid':
p['path'] = '/host_id'
host = objects.Host.get_by_uuid(pecan.request.context,
p['value'])
p['value'] = host.id
if p['path'] == '/port_uuid':
p['path'] = '/port_id'
try:
port = objects.Port.get_by_uuid(
pecan.request.context, p['value'])
p['value'] = port.id
except exception.InventoryException as e:
LOG.exception(e)
p['value'] = None
try:
agent = LLDPAgent(**jsonpatch.apply_patch(rpc_agent.as_dict(),
patch_obj))
except utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.LLDPAgent.fields:
if rpc_agent[field] != getattr(agent, field):
rpc_agent[field] = getattr(agent, field)
rpc_agent.save()
return LLDPAgent.convert_with_links(rpc_agent)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, uuid):
"""Delete an lldp agent."""
if self._from_hosts:
raise exception.OperationNotPermitted
if self._from_ports:
raise exception.OperationNotPermitted
pecan.request.dbapi.lldp_agent_destroy(uuid)
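
The patch handler above rewrites the user-facing uuid paths into internal id paths before applying the JSON patch. A minimal standalone sketch of that translation (the document, ids, and uuid are invented; the db lookup is replaced by a constant):

import jsonpatch

agent = {'host_id': 1, 'status': 'enabled'}
patch = [{'op': 'replace', 'path': '/host_uuid', 'value': 'abc-123'}]
patch_obj = jsonpatch.JsonPatch(patch)
for p in patch_obj:
    if p['path'] == '/host_uuid':
        p['path'] = '/host_id'
        p['value'] = 2  # stand-in for objects.Host.get_by_uuid(...).id
updated = jsonpatch.apply_patch(agent, patch_obj)
assert updated == {'host_id': 2, 'status': 'enabled'}
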


@@ -1,390 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import collection
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_tlv
from inventory.api.controllers.v1 import types
from inventory.api.controllers.v1 import utils
from inventory.common import exception
from inventory.common.i18n import _
from inventory.common import k_lldp
from inventory.common import utils as cutils
from inventory import objects
from oslo_log import log
LOG = log.getLogger(__name__)
class LLDPNeighbourPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class LLDPNeighbour(base.APIBase):
"""API representation of an LLDP Neighbour
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
LLDP neighbour.
"""
uuid = types.uuid
"Unique UUID for this port"
msap = wtypes.text
"Represent the MAC service access point of the lldp neighbour"
host_id = int
"Represent the host_id the lldp neighbour belongs to"
port_id = int
"Represent the port_id the lldp neighbour belongs to"
host_uuid = types.uuid
"Represent the UUID of the host the lldp neighbour belongs to"
port_uuid = types.uuid
"Represent the UUID of the port the lldp neighbour belongs to"
port_name = wtypes.text
"Represent the name of the port the lldp neighbour belongs to"
port_namedisplay = wtypes.text
"Represent the display name of the port. Unique per host"
links = [link.Link]
"Represent a list containing a self link and associated lldp neighbour"
"links"
tlvs = [link.Link]
"Links to the collection of LldpNeighbours on this ihost"
    chassis_id = wtypes.text
    "Represent the chassis id of the lldp neighbour"
    system_description = wtypes.text
    "Represent the system description of the lldp neighbour"
    system_name = wtypes.text
    "Represent the system name of the lldp neighbour"
    system_capabilities = wtypes.text
    "Represent the system capabilities of the lldp neighbour"
    management_address = wtypes.text
    "Represent the management address of the lldp neighbour"
port_identifier = wtypes.text
"Represent the port identifier of the lldp neighbour"
port_description = wtypes.text
"Represent the port description of the lldp neighbour"
dot1_lag = wtypes.text
"Represent the 802.1 link aggregation status of the lldp neighbour"
dot1_port_vid = wtypes.text
"Represent the 802.1 port vlan id of the lldp neighbour"
dot1_vid_digest = wtypes.text
"Represent the 802.1 vlan id digest of the lldp neighbour"
dot1_management_vid = wtypes.text
"Represent the 802.1 management vlan id of the lldp neighbour"
dot1_vlan_names = wtypes.text
"Represent the 802.1 vlan names of the lldp neighbour"
dot1_proto_vids = wtypes.text
"Represent the 802.1 protocol vlan ids of the lldp neighbour"
dot1_proto_ids = wtypes.text
"Represent the 802.1 protocol ids of the lldp neighbour"
dot3_mac_status = wtypes.text
"Represent the 802.3 MAC/PHY status of the lldp neighbour"
dot3_max_frame = wtypes.text
"Represent the 802.3 maximum frame size of the lldp neighbour"
dot3_power_mdi = wtypes.text
"Represent the 802.3 power mdi status of the lldp neighbour"
ttl = wtypes.text
"Represent the neighbour time-to-live"
def __init__(self, **kwargs):
self.fields = objects.LLDPNeighbour.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_lldp_neighbour, expand=True):
lldp_neighbour = LLDPNeighbour(**rpc_lldp_neighbour.as_dict())
if not expand:
lldp_neighbour.unset_fields_except([
'uuid', 'host_id', 'port_id', 'msap', 'host_uuid', 'port_uuid',
'port_name', 'port_namedisplay', 'created_at', 'updated_at',
k_lldp.LLDP_TLV_TYPE_CHASSIS_ID,
k_lldp.LLDP_TLV_TYPE_PORT_ID,
k_lldp.LLDP_TLV_TYPE_TTL,
k_lldp.LLDP_TLV_TYPE_SYSTEM_NAME,
k_lldp.LLDP_TLV_TYPE_SYSTEM_DESC,
k_lldp.LLDP_TLV_TYPE_SYSTEM_CAP,
k_lldp.LLDP_TLV_TYPE_MGMT_ADDR,
k_lldp.LLDP_TLV_TYPE_PORT_DESC,
k_lldp.LLDP_TLV_TYPE_DOT1_LAG,
k_lldp.LLDP_TLV_TYPE_DOT1_PORT_VID,
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST,
k_lldp.LLDP_TLV_TYPE_DOT1_MGMT_VID,
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_VIDS,
k_lldp.LLDP_TLV_TYPE_DOT1_PROTO_IDS,
k_lldp.LLDP_TLV_TYPE_DOT1_VLAN_NAMES,
k_lldp.LLDP_TLV_TYPE_DOT1_VID_DIGEST,
k_lldp.LLDP_TLV_TYPE_DOT3_MAC_STATUS,
k_lldp.LLDP_TLV_TYPE_DOT3_MAX_FRAME,
k_lldp.LLDP_TLV_TYPE_DOT3_POWER_MDI])
# never expose the id attribute
lldp_neighbour.host_id = wtypes.Unset
lldp_neighbour.port_id = wtypes.Unset
lldp_neighbour.links = [
link.Link.make_link('self', pecan.request.host_url,
'lldp_neighbours', lldp_neighbour.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_neighbours', lldp_neighbour.uuid,
bookmark=True)]
if expand:
lldp_neighbour.tlvs = [
link.Link.make_link('self',
pecan.request.host_url,
'lldp_neighbours',
lldp_neighbour.uuid + "/tlvs"),
link.Link.make_link('bookmark',
pecan.request.host_url,
'lldp_neighbours',
lldp_neighbour.uuid + "/tlvs",
bookmark=True)]
return lldp_neighbour
class LLDPNeighbourCollection(collection.Collection):
"""API representation of a collection of LldpNeighbour objects."""
lldp_neighbours = [LLDPNeighbour]
"A list containing LldpNeighbour objects"
def __init__(self, **kwargs):
self._type = 'lldp_neighbours'
@classmethod
def convert_with_links(cls, rpc_lldp_neighbours, limit, url=None,
expand=False, **kwargs):
collection = LLDPNeighbourCollection()
collection.lldp_neighbours = [LLDPNeighbour.convert_with_links(a,
expand)
for a in rpc_lldp_neighbours]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'LLDPNeighbourController'
class LLDPNeighbourController(rest.RestController):
"""REST controller for LldpNeighbours."""
tlvs = lldp_tlv.LLDPTLVController(
from_lldp_neighbours=True)
"Expose tlvs as a sub-element of LldpNeighbours"
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_hosts=False, from_ports=False):
self._from_hosts = from_hosts
self._from_ports = from_ports
def _get_lldp_neighbours_collection(self, uuid, marker, limit, sort_key,
sort_dir, expand=False,
resource_url=None):
if self._from_hosts and not uuid:
raise exception.InvalidParameterValue(_("Host id not specified."))
if self._from_ports and not uuid:
raise exception.InvalidParameterValue(_("Port id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.LLDPNeighbour.get_by_uuid(
pecan.request.context, marker)
if self._from_hosts:
neighbours = pecan.request.dbapi.lldp_neighbour_get_by_host(
uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
elif self._from_ports:
neighbours = pecan.request.dbapi.lldp_neighbour_get_by_port(
uuid, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
else:
neighbours = pecan.request.dbapi.lldp_neighbour_get_list(
limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)
return LLDPNeighbourCollection.convert_with_links(neighbours, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(LLDPNeighbourCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, uuid=None,
marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of lldp neighbours."""
return self._get_lldp_neighbours_collection(uuid, marker, limit,
sort_key, sort_dir)
@wsme_pecan.wsexpose(LLDPNeighbourCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of lldp_neighbours with detail."""
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "lldp_neighbours":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['lldp_neighbours', 'detail'])
return self._get_lldp_neighbours_collection(uuid, marker, limit,
sort_key, sort_dir, expand,
resource_url)
@wsme_pecan.wsexpose(LLDPNeighbour, types.uuid)
def get_one(self, port_uuid):
"""Retrieve information about the given lldp neighbour."""
if self._from_hosts:
raise exception.OperationNotPermitted