Add test plan to the project

This imports the existing test plan that was previously stored in a
separate document, with some updates to cover the testing of the LMA
Infrastructure Alerting plugin.

Change-Id: I97a7aff9c4a48babcf063ce09167745dff6d4dfa
(cherry picked from commit ab3ee072e6)
Simon Pasquier, 2015-11-03 14:42:33 +01:00
parent 152f8eab4d
commit ff7af7fa46

28 changed files with 1388 additions and 1 deletion

doc/qa/Makefile (new file)

@@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/LMAcollector.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/LMAcollector.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/LMAcollector"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/LMAcollector"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

doc/qa/source/appendix.rst (new file)

@@ -0,0 +1,13 @@
.. _appendix:
Appendix
========
* The `LMA Collector documentation <http://fuel-plugin-lma-collector.readthedocs.org/>`_.
* The `LMA Infrastructure Alerting documentation <http://fuel-plugin-lma-infrastructure-alerting.readthedocs.org/>`_.
* The `Elasticsearch-Kibana documentation <http://fuel-plugin-elasticsearch-kibana.readthedocs.org/>`_.
* The `InfluxDB-Grafana documentation <http://fuel-plugin-influxdb-grafana.readthedocs.org/>`_.
* The official `Kibana documentation <https://www.elastic.co/guide/en/kibana/3.0/index.html>`_.
* The official `Grafana documentation <http://docs.grafana.org/>`_.
* The official `Nagios documentation <https://www.nagios.org/documentation/>`_.

doc/qa/source/conf.py (new file)

@@ -0,0 +1,41 @@
import sys
import os
RTD_NEW_THEME = True
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'The LMA Collector Plugin for Fuel'
copyright = u'2015, Mirantis Inc.'
version = '0.8'
release = '0.8.0'
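# The test case snippets under tests/ are pulled into testing.rst with
# '.. include::' directives, so they are excluded from standalone processing here.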
exclude_patterns = [
'tests/*.rst'
]
pygments_style = 'sphinx'
html_theme = 'classic'
html_static_path = ['_static']
htmlhelp_basename = 'LMAcollectortestplandoc'
latex_elements = {
}
latex_documents = [
('index', 'LMAcollector.tex', u'The LMA Collector Plugin for Fuel Documentation for QA',
u'Mirantis Inc.', 'manual'),
]
man_pages = [
('index', 'lmacollector', u'The LMA Collector Plugin for Fuel Documentation for QA',
[u'Mirantis Inc.'], 1)
]
texinfo_documents = [
('index', 'LMAcollector', u'The LMA Collector Plugin for Fuel Documentation for QA',
u'Mirantis Inc.', 'LMAcollector', 'One line description of project.',
'Miscellaneous'),
]

doc/qa/source/index.rst (new file)

@@ -0,0 +1,18 @@
==============================================================
Welcome to the Mirantis OpenStack LMA Collector Documentation!
==============================================================
QA documentation
================
.. toctree::
   :maxdepth: 2

   strategy
   testing
   appendix
Indices and Tables
==================
* :ref:`search`

doc/qa/source/strategy.rst (new file)

@@ -0,0 +1,56 @@
Test Strategy
=============
The test plan covers system, functional and non-functional tests. These
tests will be automated, except for the user interface tests, which have to
be performed manually.
Acceptance Criteria
-------------------
#. The plugins can be installed and enabled on the Fuel master node.
#. The LMA Collector service is deployed on all the nodes of the environment
including nodes with the 'base-os' role and custom roles (influxdb_grafana,
elasticsearch_kibana, infrastructure_alerting).
#. The Elasticsearch server and the Kibana UI are deployed on one node with the elasticsearch_kibana role.
#. The InfluxDB server and the Grafana UI are deployed on one node with the influxdb_grafana role.
#. The Nagios server and dashboard are deployed on one node with the infrastructure_alerting role.
#. The Kibana UI can be used to view and search both log messages and notifications.
#. The Grafana dashboards display detailed metrics for the main OpenStack services.
#. The Nagios UI displays the status of all nodes and OpenStack services.
#. The plugins can be uninstalled when no environment uses them.
Test environment, infrastructure and tools
------------------------------------------
The 4 LMA plugins are installed on the Fuel master node.
For the controller nodes, it is recommended to use hosts with at least 2
CPUs and 4 GB of RAM.
Product compatibility matrix
----------------------------
+------------------------------------+-----------------+
| Product | Version/Comment |
+====================================+=================+
| Mirantis OpenStack | 7.0 |
+------------------------------------+-----------------+
| LMA collector plugin | 0.8.0 |
+------------------------------------+-----------------+
| Elasticsearch-Kibana plugin | 0.8.0 |
+------------------------------------+-----------------+
| InfluxDB-Grafana plugin | 0.8.0 |
+------------------------------------+-----------------+
| LMA Infrastructure Alerting plugin | 0.8.0 |
+------------------------------------+-----------------+

doc/qa/source/testing.rst (new file)

@@ -0,0 +1,154 @@
.. _system_testing:
System testing
==============
.. _install_lma_plugins:
Install the plugins
-------------------
.. include:: tests/install.rst
.. _deploy_lma_plugins:
Deploy an environment with the plugins
--------------------------------------
.. include:: tests/deploy.rst
.. _add_remove_controller:
Add/remove controller nodes in existing environment
---------------------------------------------------
.. include:: tests/scale_controller.rst
.. _add_remove_compute:
Add/remove compute nodes in existing environment
------------------------------------------------
.. include:: tests/scale_compute.rst
.. _uninstall_plugins_with_env:
Uninstall the plugins with deployed environment
-----------------------------------------------
.. include:: tests/uninstall_plugins_with_env.rst
.. _uninstall_plugins:
Uninstall the plugins
---------------------
.. include:: tests/uninstall_plugins.rst
.. _functional_testing:
Functional testing
==================
.. _query_logs_in_kibana_ui:
Display and query logs in the Kibana UI
---------------------------------------
.. include:: tests/query_logs_in_kibana_ui.rst
.. _query_nova_notifications_in_kibana_ui:
Display and query Nova notifications in the Kibana UI
-----------------------------------------------------
.. include:: tests/query_nova_notifications_in_kibana_ui.rst
.. _query_glance_notifications_in_kibana_ui:
Display and query Glance notifications in the Kibana UI
-------------------------------------------------------
.. include:: tests/query_glance_notifications_in_kibana_ui.rst
.. _query_cinder_notifications_in_kibana_ui:
Display and query Cinder notifications in the Kibana UI
-------------------------------------------------------
.. include:: tests/query_cinder_notifications_in_kibana_ui.rst
.. _query_heat_notifications_in_kibana_ui:
Display and query Heat notifications in the Kibana UI
-----------------------------------------------------
.. include:: tests/query_heat_notifications_in_kibana_ui.rst
.. _query_neutron_notifications_in_kibana_ui:
Display and query Neutron notifications in the Kibana UI
--------------------------------------------------------
.. include:: tests/query_neutron_notifications_in_kibana_ui.rst
.. _query_keystone_notifications_in_kibana_ui:
Display and query Keystone notifications in the Kibana UI
---------------------------------------------------------
.. include:: tests/query_keystone_notifications_in_kibana_ui.rst
.. _display_dashboards_in_grafana_ui:
Display the dashboards in the Grafana UI
----------------------------------------
.. include:: tests/display_dashboards_in_grafana_ui.rst
.. _display_nova_metrics_in_grafana_ui:
Display the Nova metrics in the Grafana UI
------------------------------------------
.. include:: tests/display_nova_metrics_in_grafana_ui.rst
.. _report_service_alerts_with_warning_severity:
Report service alerts with warning severity
-------------------------------------------
.. include:: tests/report_service_alerts_with_warning_severity.rst
.. _report_service_alerts_with_critical_severity:
Report service alerts with critical severity
--------------------------------------------
.. include:: tests/report_service_alerts_with_critical_severity.rst
.. _report_node_alerts_with_warning_severity:
Report node alerts with warning severity
----------------------------------------
.. include:: tests/report_node_alerts_with_warning_severity.rst
.. _report_node_alerts_with_critical_severity:
Report node alerts with critical severity
-----------------------------------------
.. include:: tests/report_node_alerts_with_critical_severity.rst
.. _non_functional_testing:
Non-functional testing
======================
.. _network_failure_on_analytics_node:
Simulate network failure on the analytics node
----------------------------------------------
.. include:: tests/network_failure_on_analytics_node.rst

doc/qa/source/tests/deploy.rst (new file)

@@ -0,0 +1,87 @@
+---------------+---------------------------------------------------------------------------------+
| Test Case ID | deploy_lma_plugins |
+---------------+---------------------------------------------------------------------------------+
| Description | Verify that the plugins can be deployed. |
+---------------+---------------------------------------------------------------------------------+
| Prerequisites | Plugins are installed on the Fuel master node (see :ref:`install_lma_plugins`). |
+---------------+---------------------------------------------------------------------------------+
Steps
:::::
#. Connect to the Fuel web UI.
#. Create a new environment with the Fuel UI wizard with the default settings.
#. Click on the Settings tab of the Fuel web UI.
#. Select the LMA collector plugin tab and fill in the following fields:
a. Enable the plugin.
#. Select 'Local node' for "Event analytics".
#. Select 'Local node' for "Metric analytics".
#. Select 'Alerts sent to a local node running the LMA Infrastructure Alerting plugin' for "Alerting".
#. Select the Elasticsearch-Kibana plugin tab and enable it.
#. Select the InfluxDB-Grafana plugin and fill in the required fields:
a. Enable the plugin.
#. Enter 'lmapass' as the root, user and grafana user passwords.
#. Select the LMA Infrastructure Alerting plugin and fill in the required fields:
a. Enable the plugin.
#. Enter 'root\@localhost' as the recipient.
#. Enter 'nagios\@localhost' as the sender.
#. Enter '127.0.0.1' as the SMTP server address.
#. Choose "None" for SMTP authentication (default).
#. Click on the Nodes tab of the Fuel web UI.
#. Assign roles to nodes:
a. 1 node with these 3 roles (this node is referenced later as the 'lma' node):
i. influxdb_grafana
#. elasticsearch_kibana
#. infrastructure_alerting
#. 3 nodes with the 'controller' role
#. 1 node with the 'compute' and 'cinder' roles
#. Click 'Deploy changes'.
#. Once the deployment has finished, connect to each node of the environment using ssh and run the following checks (a command sketch is given after this test case):
a. Check that hekad and collectd processes are up and running on all the nodes as described in the `LMA Collector documentation <http://fuel-plugin-lma-collector.readthedocs.org/en/stable/user/configuration.html#plugin-verification>`_.
#. Look for errors in /var/log/lma_collector.log.
#. Check that the node can connect to the Elasticsearch server (:samp:`http://<{IP address of the 'lma' node}>:9200/`)
#. Check that the node can connect to the InfluxDB server (:samp:`http://<{IP address of the 'lma' node}>:8086/`)
#. Check that the dashboards are running
a. Check that you can connect to the Kibana UI (:samp:`http://<{IP address of the 'lma' node}>:80/`)
#. Check that you can connect to the Grafana UI (:samp:`http://<{IP address of the 'lma' node}>:8000/`) with user='lma', password='lmapass'
#. Check that you can connect to the Nagios UI (:samp:`http://<{IP address of the 'lma' node}>:8001/`) with user='nagiosadmin', password='r00tme'
Expected Result
:::::::::::::::
The environment is deployed successfully.
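The per-node checks of the last step can be scripted; a minimal sketch, assuming the default ports listed above and substituting the real address of the 'lma' node::

    pidof hekad && pidof collectd                # collector processes are running
    grep -i error /var/log/lma_collector.log
    nc -zv <IP address of the 'lma' node> 9200   # Elasticsearch is reachable
    nc -zv <IP address of the 'lma' node> 8086   # InfluxDB is reachable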

doc/qa/source/tests/display_dashboards_in_grafana_ui.rst (new file)

@@ -0,0 +1,53 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | display_dashboards_in_grafana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the dashboards show up in the Grafana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Open the Grafana URL at :samp:`http://<{IP address of the 'lma' node}>:8000/`
#. Sign-in using the credentials provided during the configuration of the environment.
#. Go to the Main dashboard and verify that everything is ok.
#. Repeat the previous step for the following dashboards:
a. Cinder
#. Glance
#. Heat
#. Keystone
#. Nova
#. Neutron
#. HAProxy
#. RabbitMQ
#. MySQL
#. Apache
#. Memcached
#. System
#. LMA Self-monitoring
Expected Result
:::::::::::::::
The Grafana UI shows the overall status of the OpenStack services and detailed
statistics about the selected controller.

doc/qa/source/tests/display_nova_metrics_in_grafana_ui.rst (new file)

@@ -0,0 +1,27 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | display_nova_metrics_in_grafana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the Nova metrics show up in the Grafana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Open the Grafana URL at :samp:`http://<{IP address of the 'lma' node}>:8000/`
#. Sign-in using the credentials provided during the configuration of the environment.
#. Go to the Nova dashboard.
#. Connect to the Fuel web UI, launch the full suite of OSTF tests and wait for their completion.
#. Check that the 'instance creation time' graph in the Nova dashboard reports values.
Expected Result
:::::::::::::::
The Grafana UI shows the instance creation time over time.

doc/qa/source/tests/install.rst (new file)

@@ -0,0 +1,29 @@
+---------------+-------------------------------------------+
| Test Case ID | install_lma_plugins |
+---------------+-------------------------------------------+
| Description | Verify that the plugins can be installed. |
+---------------+-------------------------------------------+
| Prerequisites | N/A |
+---------------+-------------------------------------------+
Steps
:::::
#. Copy the 4 plugins to the Fuel master node using scp.
#. Connect to the Fuel master node using ssh.
#. Install the plugins using the fuel CLI (see the command sketch after this test case).
#. Connect to the Fuel web UI.
#. Create a new environment using the Fuel UI Wizard.
#. Click on the Plugins tab.
Expected Result
:::::::::::::::
The 4 plugins are present in the Fuel UI.
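The CLI installation step referenced above boils down to one command per plugin; a sketch, with the RPM file name left as a placeholder::

    fuel plugins --install /tmp/<plugin-name>-<version>.noarch.rpm

Run it once for each of the 4 plugin RPMs copied to the Fuel master node.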

doc/qa/source/tests/network_failure_on_analytics_node.rst (new file)

@@ -0,0 +1,33 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | network_failure_on_analytics_node |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the backends and dashboards recover after a network failure. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Copy this script to the analytics node::

      #!/bin/sh
      /sbin/iptables -I INPUT -j DROP
      sleep 30
      /sbin/iptables -D INPUT -j DROP
#. Login to the analytics node using SSH
#. Run the script and wait for it to complete (see the note after this test case about running it detached).
#. Check that the Kibana, Grafana and Nagios dashboards are available.
#. Check that data continues to be pushed by the various nodes once the network failure has ended.
Expected Result
:::::::::::::::
The collectors recover from the network outage of the analytics node.
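Note that the DROP rule also freezes the SSH session that launches the script, so it is safer to start it detached; a sketch, with the script name being a placeholder::

    chmod +x ./network_failure.sh
    nohup ./network_failure.sh &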

doc/qa/source/tests/query_cinder_notifications_in_kibana_ui.rst (new file)

@@ -0,0 +1,27 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_cinder_notifications_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the cinder notifications show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Create and update a volume in the OpenStack environment (using the Horizon
dashboard, for example, or the CLI sketch after this test case) and write down the volume id.
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Open the Notifications dashboard using the 'Load' icon.
#. Enter 'volume_id:<uuid>' in the Query box where <uuid> is the id of the created volume.
Expected Result
:::::::::::::::
All `event types for Cinder <https://docs.google.com/a/mirantis.com/spreadsheets/d/1ES_hWWLpn_eAur2N1FPNyqQAs5U36fQOcuCxRZjHESY/edit?usp=sharing>`_
are listed.
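If Horizon is not convenient, the volume can also be created and updated from a controller node; a sketch, assuming the admin credentials file shipped on Mirantis OpenStack controllers and arbitrary names::

    . /root/openrc
    cinder create --display-name test-volume 1
    cinder rename test-volume test-volume-2      # any update operation works
    cinder list | grep test-volume               # note the volume id for the Kibana query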

doc/qa/source/tests/query_glance_notifications_in_kibana_ui.rst (new file)

@@ -0,0 +1,26 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_glance_notifications_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the Glance notifications show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Run the OSTF platform test "Check create, update and delete image actions using Glance v2".
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Open the Notifications dashboard using the 'Load' icon.
#. Enter 'glance' in the Query box.
Expected Result
:::::::::::::::
All `event types for Glance <https://docs.google.com/a/mirantis.com/spreadsheets/d/1ES_hWWLpn_eAur2N1FPNyqQAs5U36fQOcuCxRZjHESY/edit?usp=sharing>`_
are listed.

doc/qa/source/tests/query_heat_notifications_in_kibana_ui.rst (new file)

@@ -0,0 +1,26 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_heat_notifications_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the heat notifications show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Run all OSTF Heat platform tests.
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Open the Notifications dashboard using the 'Load' icon.
#. Enter 'heat' in the Query box.
Expected Result
:::::::::::::::
All `event types for Heat <https://docs.google.com/a/mirantis.com/spreadsheets/d/1ES_hWWLpn_eAur2N1FPNyqQAs5U36fQOcuCxRZjHESY/edit?usp=sharing>`_
are listed.

doc/qa/source/tests/query_keystone_notifications_in_kibana_ui.rst (new file)

@@ -0,0 +1,26 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_keystone_notifications_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the Keystone notifications show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Run OSTF platform test: 'Create user and authenticate with it to Horizon'.
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Open the Notifications dashboard using the 'Load' icon.
#. Enter 'keystone' in the Query box.
Expected Result
:::::::::::::::
All `event types for Keystone <https://docs.google.com/a/mirantis.com/spreadsheets/d/1ES_hWWLpn_eAur2N1FPNyqQAs5U36fQOcuCxRZjHESY/edit?usp=sharing>`_
are listed.

doc/qa/source/tests/query_logs_in_kibana_ui.rst (new file)

@@ -0,0 +1,24 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_logs_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the logs show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Enter 'programname:nova*' in the Query box.
#. Check that Nova logs are displayed.
Expected Result
:::::::::::::::
The Kibana UI displays entries for all the controller and compute nodes
deployed in the environment.

doc/qa/source/tests/query_neutron_notifications_in_kibana_ui.rst (new file)

@@ -0,0 +1,27 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_neutron_notifications_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the Neutron notifications show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Run OSTF functional tests: 'Create security group' and 'Check network
connectivity from instance via floating IP'.
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Open the Notifications dashboard using the 'Load' icon.
#. Enter 'neutron' in the Query box.
Expected Result
:::::::::::::::
All `event types for Neutron <https://docs.google.com/a/mirantis.com/spreadsheets/d/1ES_hWWLpn_eAur2N1FPNyqQAs5U36fQOcuCxRZjHESY/edit?usp=sharing>`_
are listed.

doc/qa/source/tests/query_nova_notifications_in_kibana_ui.rst (new file)

@@ -0,0 +1,28 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | query_nova_notifications_in_kibana_ui |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the Nova notifications show up in the Kibana UI. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Launch, update, rebuild, resize, power-off, power-on, snapshot, suspend,
shutdown, and delete an instance in the OpenStack environment (using the
Horizon dashboard, for example, or the CLI sketch after this test case) and write down the instance's id.
#. Open the Kibana URL at :samp:`http://<{IP address of the 'lma' node}>/`
#. Open the Notifications dashboard using the 'Load' icon.
#. Enter 'instance_id:<uuid>' in the Query box where <uuid> is the id of the launched instance.
Expected Result
:::::::::::::::
All `event types for Nova <https://docs.google.com/a/mirantis.com/spreadsheets/d/1ES_hWWLpn_eAur2N1FPNyqQAs5U36fQOcuCxRZjHESY/edit?usp=sharing>`_
are listed except compute.instance.create.error and compute.instance.resize.revert.{start|end}.
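The instance lifecycle can also be driven from a controller node with the nova CLI; a partial sketch, reusing the /root/openrc assumption from the Cinder test case above (the image and flavor names are also assumptions, adjust them to the environment)::

    . /root/openrc
    nova boot --image TestVM --flavor m1.micro test-instance
    nova show test-instance | grep ' id '        # note the instance id for the Kibana query
    nova suspend test-instance && nova resume test-instance
    nova delete test-instance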

doc/qa/source/tests/report_node_alerts_with_critical_severity.rst (new file)

@@ -0,0 +1,83 @@
+---------------+---------------------------------------------------------------------------------+
| Test Case ID | report_node_alerts_with_critical_severity |
+---------------+---------------------------------------------------------------------------------+
| Description | Verify that the critical alerts for nodes show up in the Grafana and Nagios UI. |
+---------------+---------------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+---------------------------------------------------------------------------------+
Steps
:::::
#. Open the Grafana URL at :samp:`http://<{IP address of the 'lma' node}>:8000/`
and load the MySQL dashboard.
#. Open the Nagios URL at :samp:`http://<{IP address of the 'lma' node}>:8001/`
in another tab and click the 'Services' menu item.
#. Connect to one of the controller nodes using ssh and run the following command (its arithmetic is explained after this test case)::

      fallocate -l $(df | grep /dev/mapper/mysql-root | awk '{ printf("%.0f\n", 1024 * ((($3 + $4) * 98 / 100) - $3))}') /var/lib/mysql/test
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. On Nagios, check the following items:
a. the 'mysql' service is in 'OK' state,
#. the 'mysql-nodes.mysql-fs' service is in 'CRITICAL' state for the node.
#. Connect to a second controller node using ssh and run::

      fallocate -l $(df | grep /dev/mapper/mysql-root | awk '{ printf("%.0f\n", 1024 * ((($3 + $4) * 98 / 100) - $3))}') /var/lib/mysql/test
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'CRIT' with a red background,
#. an annotation indicating that the service went from 'OKAY' to 'CRIT' is displayed.
#. On Nagios, check the following items:
a. the 'mysql' service is in 'CRITICAL' state,
#. the 'mysql-nodes.mysql-fs' service is in 'CRITICAL' state for the 2 nodes,
#. the local user root on the lma node has received an email about the service being in critical state.
#. Run the following command on both controller nodes::

      rm /var/lib/mysql/test
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. an annotation indicating that the service went from 'CRIT' to 'OKAY' is displayed.
#. On Nagios, check the following items:
a. the 'mysql' service is in 'OK' state,
#. the 'mysql-nodes.mysql-fs' service is in 'OK' state for the 2 nodes,
#. the local user root on the lma node has received an email about the recovery of the service.
Expected Result
:::::::::::::::
The Grafana UI shows that the global 'mysql' status goes from ok to critical and
back to ok. It also reports detailed information about the problem in the annotations.
The Nagios UI shows that the service status goes from ok to critical and back to
ok. Alerts are sent by email to the configured recipient.
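The fallocate command used above simply fills the MySQL file system up to roughly 98% usage, which is expected to trip the critical alarm; broken down::

    # df reports sizes in 1K blocks: $3 is the used space, $4 the available space
    # target = 98% of (used + available) minus what is already used, converted to bytes
    SIZE=$(df | grep /dev/mapper/mysql-root | awk '{ printf("%.0f\n", 1024 * ((($3 + $4) * 98 / 100) - $3)) }')
    fallocate -l $SIZE /var/lib/mysql/test

The warning-severity variant of this test uses 96 instead of 98 so that only the warning threshold is crossed.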

doc/qa/source/tests/report_node_alerts_with_warning_severity.rst (new file)

@@ -0,0 +1,83 @@
+---------------+--------------------------------------------------------------------------------+
| Test Case ID | report_node_alerts_with_warning_severity |
+---------------+--------------------------------------------------------------------------------+
| Description | Verify that the warning alerts for nodes show up in the Grafana and Nagios UI. |
+---------------+--------------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------------+
Steps
:::::
#. Open the Grafana URL at :samp:`http://<{IP address of the 'lma' node}>:8000/`
and load the MySQL dashboard.
#. Open the Nagios URL at :samp:`http://<{IP address of the 'lma' node}>:8001/`
in another tab and click the 'Services' menu item.
#. Connect to one of the controller nodes using ssh and run::

      fallocate -l $(df | grep /dev/mapper/mysql-root | awk '{ printf("%.0f\n", 1024 * ((($3 + $4) * 96 / 100) - $3))}') /var/lib/mysql/test
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. On Nagios, check the following items:
a. the 'mysql' service is in 'OK' state,
#. the 'mysql-nodes.mysql-fs' service is in 'WARNING' state for the node.
#. Connect to a second controller node using ssh and run::

      fallocate -l $(df | grep /dev/mapper/mysql-root | awk '{ printf("%.0f\n", 1024 * ((($3 + $4) * 96 / 100) - $3))}') /var/lib/mysql/test
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'WARN' with an orange background,
#. an annotation indicating that the service went from 'OKAY' to 'WARN' is displayed.
#. On Nagios, check the following items:
a. the 'mysql' service is in 'WARNING' state,
#. the 'mysql-nodes.mysql-fs' service is in 'WARNING' state for the 2 nodes,
#. the local user root on the lma node has received an email about the service being in warning state.
#. Run the following command on both controller nodes::

      rm /var/lib/mysql/test
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. an annotation indicating that the service went from 'WARN' to 'OKAY' is displayed.
#. On Nagios, check the following items:
a. the 'mysql' service is in 'OK' state,
#. the 'mysql-nodes.mysql-fs' service is in 'OK' state for the 2 nodes,
#. the local user root on the lma node has received an email about the recovery of the service.
Expected Result
:::::::::::::::
The Grafana UI shows that the global 'mysql' status goes from ok to warning and
back to ok. It also reports detailed information about the problem in the annotations.
The Nagios UI shows that the service status goes from ok to warning and back to
ok. Alerts are sent by email to the configured recipient.

doc/qa/source/tests/report_service_alerts_with_critical_severity.rst (new file)

@@ -0,0 +1,109 @@
+---------------+------------------------------------------------------------------------------------+
| Test Case ID | report_service_alerts_with_critical_severity |
+---------------+------------------------------------------------------------------------------------+
| Description | Verify that the critical alerts for services show up in the Grafana and Nagios UI. |
+---------------+------------------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+------------------------------------------------------------------------------------+
Steps
:::::
#. Open the Grafana URL at :samp:`http://<{IP address of the 'lma' node}>:8000/`
and load the Nova dashboard.
#. Open the Nagios URL at :samp:`http://<{IP address of the 'lma' node}>:8001/`
in another tab and click the 'Services' menu item.
#. Connect to one of the controller nodes using ssh and stop the nova-api service (a command sketch is given after this test case).
#. Connect to a second controller node using ssh and stop the nova-api service.
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'CRIT' with a red background,
#. the API panels report 2 entities as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'CRITICAL' state,
#. the local user root on the lma node has received an email about the service being in critical state.
#. Restart the nova-api service on both nodes.
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. the API panels report 0 entities as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'OK' state,
#. the local user root on the lma node has received an email about the recovery of the service.
#. Connect to one of the controller nodes using ssh and stop the nova-scheduler service.
#. Connect to a second controller node using ssh and stop the nova-scheduler service.
#. Wait for at least 3 minutes.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'CRIT' with a red background,
#. the scheduler panel reports 2 entities as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'CRITICAL' state,
#. the local user root on the lma node has received an email about the service being in critical state.
#. Restart the nova-scheduler service on both nodes.
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. the scheduler panel reports 0 entities as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'OK' state,
#. the local user root on the lma node has received an email about the recovery of the service.
#. Repeat steps 2 to 21 for the following services:
a. Cinder (stopping and starting the cinder-api and cinder-scheduler services respectively).
#. Neutron (stopping and starting the neutron-server and neutron-openvswitch-agent services respectively).
#. Repeat steps 2 to 11 for the following services:
a. Glance (stopping and starting the glance-api service).
#. Heat (stopping and starting the heat-api service).
#. Keystone (stopping and starting the Apache service).
Expected Result
:::::::::::::::
The Grafana UI shows that the global service status goes from ok to critical and
back to ok. It also reports detailed information about which entity is missing.
The Nagios UI shows that the service status goes from ok to critical and back to
ok. Alerts are sent by email to the configured recipient.
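On the Ubuntu-based controller nodes, stopping and restarting the services mentioned in the steps can be done with the service wrapper; a sketch, assuming the default service names::

    service nova-api stop          # trigger the alert
    service nova-api start         # recover
    service nova-scheduler stop
    service nova-scheduler start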

doc/qa/source/tests/report_service_alerts_with_warning_severity.rst (new file)

@@ -0,0 +1,105 @@
+---------------+-----------------------------------------------------------------------------------+
| Test Case ID | report_service_alerts_with_warning_severity |
+---------------+-----------------------------------------------------------------------------------+
| Description | Verify that the warning alerts for services show up in the Grafana and Nagios UI. |
+---------------+-----------------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+-----------------------------------------------------------------------------------+
Steps
:::::
#. Open the Grafana URL at :samp:`http://<{IP address of the 'lma' node}>:8000/`
and load the Nova dashboard.
#. Open the Nagios URL at :samp:`http://<{IP address of the 'lma' node}>:8001/`
in another tab and click the 'Services' menu item.
#. Connect to one of the controller nodes using ssh and stop the nova-api service.
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'WARN' with an orange background,
#. the API panels report 1 entity as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'WARNING' state,
#. the local user root on the lma node has received an email about the service being in warning state.
#. Restart the nova-api service.
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. the API panels report 0 entities as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'OK' state,
#. the local user root on the lma node has received an email about the recovery of the service.
#. Stop the nova-scheduler service.
#. Wait for at least 3 minutes.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'WARN' with an orange background,
#. the scheduler panel reports 1 entity as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'WARNING' state,
#. the local user root on the lma node has received an email about the service being in warning state.
#. Restart the nova-scheduler service.
#. Wait for at least 1 minute.
#. On Grafana, check the following items:
a. the box in the upper left corner of the dashboard displays 'OKAY' with a green background,
#. the scheduler panel reports 0 entities as down.
#. On Nagios, check the following items:
a. the 'nova' service is in 'OK' state,
#. the local user root on the lma node has received an email about the recovery of the service.
#. Repeat steps 2 to 18 for the following services:
a. Cinder (stopping and starting the cinder-api and cinder-scheduler services respectively).
#. Neutron (stopping and starting the neutron-server and neutron-openvswitch-agent services respectively).
#. Repeat steps 2 to 10 for the following services:
a. Glance (stopping and starting the glance-api service).
#. Heat (stopping and starting the heat-api service).
#. Keystone (stopping and starting the Apache service).
Expected Result
:::::::::::::::
The Grafana UI shows that the global service status goes from ok to warning and
back to ok. It also reports detailed information about which entity is missing.
The Nagios UI shows that the service status goes from ok to warning and back to
ok. Alerts are sent by email to the configured recipient.

doc/qa/source/tests/scale_compute.rst (new file)

@@ -0,0 +1,43 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | modify_env_with_plugin_remove_add_compute |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the number of computes can scale up and down. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (See :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Remove 1 node with the compute role.
#. Re-deploy the cluster.
#. Check the plugin services using the CLI.
#. Check in the Nagios UI that the removed node is no longer monitored.
#. Run the health checks (OSTF).
#. Add 1 new node with the compute role.
#. Re-deploy the cluster.
#. Check the plugin services using the CLI.
#. Check in the Nagios UI that the new node is monitored.
#. Run the health checks (OSTF).
Expected Result
:::::::::::::::
The OSTF tests pass successfully.
All the plugin services are running and work as expected after each
modification of the environment.
The Nagios service has been reconfigured to take care of the node removal and
addition.

doc/qa/source/tests/scale_controller.rst (new file)

@@ -0,0 +1,43 @@
+---------------+--------------------------------------------------------------------------+
| Test Case ID | modify_env_with_plugin_remove_add_controller |
+---------------+--------------------------------------------------------------------------+
| Description | Verify that the number of controllers can scale up and down. |
+---------------+--------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (See :ref:`deploy_lma_plugins`). |
+---------------+--------------------------------------------------------------------------+
Steps
:::::
#. Remove 1 node with the controller role.
#. Re-deploy the cluster.
#. Check the plugin services using the CLI.
#. Check in the Nagios UI that the removed node is no longer monitored.
#. Run the health checks (OSTF).
#. Add 1 new node with the controller role.
#. Re-deploy the cluster.
#. Check the plugin services using the CLI.
#. Check in the Nagios UI that the new node is monitored.
#. Run the health checks (OSTF).
Expected Result
:::::::::::::::
The OSTF tests pass successfully.
All the plugin services are running and work as expected after each
modification of the environment.
The Nagios service has been reconfigured to take care of the node removal and
addition.

doc/qa/source/tests/uninstall_plugins.rst (new file)

@@ -0,0 +1,18 @@
+---------------+--------------------------------------------------------------------------------+
| Test Case ID | uninstall_plugin |
+---------------+--------------------------------------------------------------------------------+
| Description | Verify that the plugins can be uninstalled. |
+---------------+--------------------------------------------------------------------------------+
| Prerequisites | The 4 plugins are installed on the Fuel node (see :ref:`install_lma_plugins`). |
+---------------+--------------------------------------------------------------------------------+
Steps
:::::
#. Remove the plugins (see the command sketch after this test case).
Expected Result
:::::::::::::::
The plugins are removed.
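The removal referenced in the step above is done with the Fuel CLI; a sketch, with the plugin name and version left as placeholders::

    fuel plugins --remove <plugin-name>==<plugin-version>

Run it for each of the 4 plugins; the 'fuel plugins' command lists the installed names and versions.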

doc/qa/source/tests/uninstall_plugins_with_env.rst (new file)

@@ -0,0 +1,25 @@
+---------------+---------------------------------------------------------------------------------------+
| Test Case ID | uninstall_plugin_with_deployed_env |
+---------------+---------------------------------------------------------------------------------------+
| Description | Verify that the plugins can be uninstalled after the deployed environment is removed. |
+---------------+---------------------------------------------------------------------------------------+
| Prerequisites | Environment deployed with the 4 plugins (see :ref:`deploy_lma_plugins`). |
+---------------+---------------------------------------------------------------------------------------+
Steps
:::::
#. Try to remove the plugins using the Fuel CLI and ensure that the command
fails with "Can't delete plugin which is enabled for some environment".
#. Remove the environment.
#. Remove the plugins.
Expected Result
:::::::::::::::
An error is raised when trying to delete plugins that are attached to an active environment.
After the environment is removed, the plugins are removed successfully too.

tox.ini

@@ -1,5 +1,5 @@
[tox]
envlist = manifests,heka,lma_collector,docs,build_plugin
envlist = manifests,heka,lma_collector,docs,qa_docs,build_plugin
skipsdist = True
[testenv]
@@ -45,6 +45,12 @@ whitelist_externals = make
commands =
make clean html SPHINXOPTS=-W
[testenv:qa_docs]
changedir = {toxinidir}/doc/qa
whitelist_externals = make
commands =
make clean html SPHINXOPTS=-W
[testenv:build_plugin]
changedir = {toxinidir}
whitelist_externals =
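With the qa_docs environment added to the envlist, the test plan can also be built through tox::

    tox -e qa_docs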