Merge "Separate user and testing documentation"

Jenkins 2015-12-01 08:37:41 +00:00 committed by Gerrit Code Review
commit 89ae02de35
26 changed files with 2371 additions and 26 deletions


@ -11,8 +11,8 @@ generator, easiest way to do this is to use doc/requirements.txt.
$ pip install -r doc/requirements.txt
$ cd doc/source
$ cd doc/user
$ make html
After that you can start exploring documentation in doc/source/_build/html/ directory.
After that you can start exploring documentation in doc/user/source/_build/html/ directory.
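For reference, the full build sequence after this change could look like the following (a sketch; setting up a virtualenv first is optional and assumed)::

  pip install -r doc/requirements.txt
  cd doc/user
  make html
  # open the generated pages in the build output directory mentioned above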


@ -1,3 +1,2 @@
docutils==0.9.1
oslosphinx
sphinx>=1.1.2,!=1.2.0,<1.3

doc/test/conf.py Normal file

@ -0,0 +1,253 @@
# -*- coding: utf-8 -*-
#
# Fuel NSXv plugin documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 14 12:14:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel NSXv plugin'
copyright = u'2015, Mirantis Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FuelNSXvplugindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = { 'classoptions': ',openany,oneside', 'babel': '\\usepackage[english]{babel}'
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FuelNSXvplugin.tex', u'Fuel NSXv plugin testing documentation',
u'Mirantis Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fuelnsxvplugin', u'Fuel NSXv plugin testing documentation',
[u'Mirantis Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FuelNSXvplugin', u'Fuel NSXv plugin testing documentation',
u'Mirantis Inc.', 'FuelNSXvplugin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Insert footnotes where they are defined instead of at the end.
pdf_inline_footnotes = True

doc/test/index.rst Normal file

@ -0,0 +1,11 @@
Fuel NSXv plugin's testing documentation
========================================
Testing documents
-----------------
.. toctree::
:glob:
:maxdepth: 1
source/*


@ -0,0 +1,216 @@
==================================
Test Plan for NSXv plugin v.1.1.0
==================================
.. contents:: Table of contents
:depth: 3
************
Introduction
************
Purpose
=======
The main purpose of this document is to describe the Quality Assurance
activities required to ensure that the Fuel plugin for the VMware NSXv driver
is ready for production. The project will offer VMware NSXv
integration functionality with MOS. The scope of this plan defines the
following objectives:
* Identify testing activities;
* Outline the testing approach, test types, and test cycle that will be used;
* List metrics and deliverables;
* List items in and out of testing scope;
* Define exit criteria for testing;
* Describe the test environment.
Scope
=====
The Fuel NSXv plugin includes the NSX plugin for Neutron, which is developed by
a third party. This test plan covers the full functionality of the Fuel NSXv plugin,
including basic scenarios related to the NSXv Neutron plugin.
The following test types should be provided:
* Smoke/BVT tests
* Integration tests
* System tests
* Destructive tests
* GUI tests
Performance testing will be executed on the scale lab, and a custom set of
Rally scenarios must be run against an NSXv environment. The configuration, environment
and scenarios for performance/scale testing should be determined separately.
Intended Audience
=================
This document is intended for project team staff (QA and Dev engineers and
managers) and all other persons who are interested in testing results.
Limitation
==========
The plugin (or its components) has the following limitations:
* The VMware NSXv plugin can be enabled only with Neutron tunnel segmentation.
* An environment with the VMware NSXv plugin enabled cannot contain compute nodes.
* Only VMware NSX Manager Virtual Appliance 6.1.4 or later is supported.
Product compatibility matrix
============================
.. list-table:: product compatibility matrix
:widths: 15 10 30
:header-rows: 1
* - Requirement
- Version
- Comment
* - MOS
- 7.0 with Kilo
-
* - Operating System
- Ubuntu 14.04
- Only Ubuntu is supported in MOS 7.0
* - vSphere
- 5.5 and 6.0
-
* - NSXv
- 6.1.4 and 6.2.0
-
**************************************
Evaluation Mission and Test Motivation
**************************************
The project's main goal is to build a MOS plugin that integrates the Neutron VMware
NSX plugin. This will allow Neutron to be used for networking in VMware-related
environments. The plugin must be compatible with version 7.0 of Mirantis
OpenStack and should be tested with the software/hardware described in the
`product compatibility matrix`_.
See the VMware NSX Plugin specification for more details.
Evaluation mission
==================
* Find important problems with the integration of the Neutron VMware NSX plugin.
* Verify the specification.
* Provide tests for maintenance update.
* Lab environment deployment.
* Deploy MOS with developed plugin installed.
* Create and run specific tests for plugin/deployment.
* Documentation.
*************
Test approach
*************
The project test approach consists of the Smoke, Integration, System, Regression,
Failover and Acceptance test levels.
**Smoke testing**
The goal of smoke testing is to ensure that the most critical features of the Fuel
VMware NSXv plugin work after a new build delivery. Smoke tests will be used by
QA to accept software builds from the Development team.
**Integration and System testing**
The goal of integration and system testing is to ensure that new or modified
components of Fuel and MOS work effectively with Fuel VMware NSXv plugin
without gaps in dataflow.
**Regression testing**
The goal of regression testing is to verify that key features of the Fuel VMware
NSXv plugin are not affected by any changes performed during preparation for
release (including defect fixes, introduction of new features, and possible
updates).
**Failover testing**
Failover and recovery testing ensures that the target-of-test can successfully
fail over and recover from a variety of hardware, software, or network
malfunctions without undue loss of data or data integrity.
**Acceptance testing**
The goal of acceptance testing is to ensure that Fuel VMware NSXv plugin has
reached a level of stability that meets requirements and acceptance criteria.
***********************
Entry and exit criteria
***********************
Criteria for test process starting
==================================
Before the test process can start, some preparation is required: the important
preconditions listed below must be met. The following steps must be completed
successfully before the test phase starts:
* all project requirements are reviewed and confirmed;
* implementation of the features under test has finished (a new build is ready for testing);
* implementation code is stored in Git;
* the test environment is prepared with the correct configuration and all needed software and hardware installed;
* the test environment contains the latest delivered build for testing;
* the test plan is ready and confirmed internally;
* implementation of manual tests and autotests (if any) has finished.
Feature exit criteria
=====================
Testing of a feature can be finished when:
* All planned tests (prepared before) for the feature are executed; no defects are found during this run;
* All planned tests for the feature are executed; defects found during this run are verified or confirmed to be acceptable (known issues);
* The time for testing of that feature according to the project plan has run out and Project Manager confirms that no changes to the schedule are possible.
Suspension and resumption criteria
==================================
Testing of a particular feature is suspended if there is a blocking issue
which prevents test execution. A blocking issue can be one of the following:
* Testing environment for the feature is not ready
* Testing environment is unavailable due to failure
* Feature has a blocking defect, which prevents further usage of this feature and there is no workaround available
* CI tests fail
************
Deliverables
************
List of deliverables
====================
Project testing activities result in the following reporting documents:
* Test plan
* Test report
* Automated test cases
Acceptance criteria
===================
* All acceptance criteria for user stories are met.
* All test cases are executed; BVT tests pass.
* Critical and high issues are fixed.
* All required documents are delivered.
* Release notes include a report on the known errors of that release.
**********
Test cases
**********
.. include:: test_suite_smoke.rst
.. include:: test_suite_integration.rst
.. include:: test_suite_system.rst
.. include:: test_suite_destructive.rst
.. include:: test_suite_gui.rst


@ -0,0 +1,490 @@
Destructive
===========
TC-101: Check abilities to bind port on NSXv to VM, disable and enable this port.
----------------------------------------------------------------------------------
**ID**
nsxv_ability_to_bind_port
**Description**
::
Verifies that the system can manipulate the port.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Navigate to Project -> Compute -> Instances
Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny.
Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny.
Verify that the VMs can communicate with each other. Send ICMP ping from VM_1 to VM_2 and vice versa.
Disable NSXv_port of VM_1.
Verify that the VMs cannot communicate with each other. Send ICMP ping from VM_2 to VM_1 and vice versa.
Enable NSXv_port of VM_1.
Verify that the VMs can communicate with each other again. Send ICMP ping from VM_1 to VM_2 and vice versa.
**Expected result**
Pings should get a response
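The port disable/enable steps above could also be scripted with the standard Nova/Neutron CLIs; a hedged sketch (VM names and port IDs are placeholders)::

  # list the ports attached to VM_1 and note the port ID
  nova interface-list VM_1
  # disable the port, verify the ping fails, then re-enable it
  neutron port-update <port-id> --admin-state-up False
  neutron port-update <port-id> --admin-state-up True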
TC-102: Verify that vmclusters migrate after a controller shutdown.
------------------------------------------------------------------------
**ID**
nsxv_shutdown_controller
**Description**
::
Verify that vmclusters migrate after a controller is shut down.
**Complexity**
core
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard:
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
Add nodes:
3 controllers
Setup Fuel interfaces on slaves:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Click button 'save settings'
Click button 'verify networks'
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Deploy Cluster
Run OSTF
Shut down the controller with vmclusters.
Check that the vmclusters migrate to another controller.
**Expected result**
VMclusters should migrate to another controller.
TC-103: Deploy cluster with plugin, addition and deletion of nodes.
-------------------------------------------------------------------
**ID**
nsxv_add_delete_nodes
**Description**
::
Verify that system functionality is ok after redeploy.
**Complexity**
advanced
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard:
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
select Vmware vcenter esxi datastore for images (glance)
Add nodes:
3 controllers
2 compute-vmwares
1 cinder-vmdk
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Run OSTF
Remove node with cinder-vmdk role.
Add node with cinder role
Redeploy cluster.
Run OSTF
Remove node with compute-vmware role
Add node with cinder-vmware role
Redeploy cluster.
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.
TC-104: Deploy cluster with plugin and deletion of one node with controller role.
-----------------------------------------------------------------------------------
**ID**
nsxv_add_delete_controller
**Description**
::
Verifies that system functionality is ok when controller has been removed.
**Complexity**
advanced
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard:
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
select Vmware vcenter esxi datastore for images (glance)
Add nodes:
4 controller
1 compute-vmware
1 cinder-vmdk
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Run OSTF
Remove node with controller role.
Redeploy cluster
Run OSTF
Add controller
Redeploy cluster
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.
TC-105: Verify that it is not possible to uninstall the Fuel NSXv plugin with a deployed environment.
--------------------------------------------------------------------------------------------------------
**ID**
nsxv_plugin
**Description**
::
It is not possible to remove plugin while at least one environment exists.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Copy the plugin to the Fuel master node using scp.
Install plugin
fuel plugins --install plugin-name-1.0-0.0.1-0.noarch.rpm
Ensure that the plugin is installed successfully using the CLI: run the command 'fuel plugins'.
Connect to the Fuel web UI.
Create a new environment using the Fuel UI Wizard:
add name of env and select release version with OS
as hypervisor type: select vcenter check box and Qemu radio button
network setup : Neutron with tunnel segmentation
storage backends: default
additional services: all by default
Click on the Settings tab.
In Settings tab:
enable NSXv plugin
Add nodes:
1 controller
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
Deploy cluster
Run OSTF
Try to delete the plugin via CLI. Remove the plugin from the master node:
fuel plugins --remove plugin-name==1.0.0
**Expected result**
Alert: "400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)" should be displayed.
TC-106: Check cluster functionality after reboot vcenter.
---------------------------------------------------------
**ID**
nsxv_plugin
**Description**
::
Verifies that system functionality is ok when vcenter has been rebooted.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Create a new environment using the Fuel UI Wizard:
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
select Vmware vcenter esxi datastore for images (glance)
Add nodes:
3 controller
1 compute
1 cinder-vmware
1 cinder
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Run OSTF
Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny.
Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny.
Check connection between VMs, send ping from VM_1 to VM_2 and vice versa.
Reboot vcenter
vmrun -T ws-shared -h https://localhost:443/sdk -u vmware -p VMware01 reset "[standard] vcenter/vcenter.vmx"
Check that the controller lost connection with vCenter.
Wait for vCenter to come back.
Ensure that all instances from vCenter are displayed in the dashboard.
Ensure connectivity between vcenter1's and vcenter2's VMs.
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed. Ping should get response.


@ -0,0 +1,39 @@
GUI Testing
===========
TC-131: Verify that all elements of the NSXv plugin section meet the GUI requirements.
------------------------------------------------------------------------------------------
**ID**
nsxv_plugin
**Description**
::
Verify that all elements of the NSXv plugin section meet the requirements.
**Complexity**
smoke
**Require to automate**
Yes
**Steps**
::
Login to the Fuel web UI.
Click on the Settings tab.
Verify that the NSXv plugin section is present on the Settings tab.
Verify that the NSXv plugin check box is disabled by default.
Verify that the user can enable it: enable the NSXv plugin by clicking the NSXv plugin check box.
Verify that all labels of the NSXv plugin section have the same font style and color.
Verify that all elements of the NSXv plugin section are vertically aligned.
**Expected result**
All elements of the NSXv plugin section meet the requirements.


@ -0,0 +1,297 @@
Integration
===========
TC-031: Deploy HA cluster with Fuel NSXv plugin.
-------------------------------------------------
**ID**
nsxv_ha_mode
**Description**
::
Installation in HA mode with 3 controllers.
**Complexity**
core
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard.
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
Add nodes:
3 controller
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Deploy cluster
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.
TC-032: Deploy cluster with Fuel NSXv plugin and Ceph for Glance and Cinder.
-----------------------------------------------------------------------------
**ID**
nsxv_ceph_no_vcenter
**Description**
::
Verifies installation of plugin with Glance and Cinder.
**Complexity**
core
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard.
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
select 'Ceph RBD for volumes' (Cinder) and 'Ceph RBD for images(Glance)'
Add nodes:
1 controller
1 controller + ceph-osd
1 controller + cinder-vmware + ceph-osd
1 cinder-vmware + ceph-osd
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 3 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Deploy cluster
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.
TC-034: Deploy cluster with Fuel VMware NSXv plugin and ceilometer.
--------------------------------------------------------------------
**ID**
nsxv_ceilometer
**Description**
::
Installation of plugin with ceilometer.
**Complexity**
core
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard.
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: install ceilometer
In Settings tab:
enable NSXv plugin
Add nodes:
3 controller + mongo
1 compute-vmware
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 1 vSphere Cluster:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
Deploy cluster
Run OSTF.
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.
TC-035: Deploy cluster with Fuel VMware NSXv plugin, Ceph for Cinder and VMware datastore backend for Glance.
-------------------------------------------------------------------------------------------------------------
**ID**
nsxv_ceph
**Description**
::
Verifies installation of plugin for vcenter with Glance and Cinder.
**Complexity**
core
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard.
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: default
In Settings tab:
enable NSXv plugin
select 'Ceph RBD for volumes' (Cinder) and 'Vmware Datastore for images(Glance)'
Add nodes:
3 controller + ceph-osd
2 cinder-vmware
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Deploy cluster
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.


@ -0,0 +1,148 @@
Smoke
=====
TC-001: Verify that Fuel VMware NSXv plugin is installed.
----------------------------------------------------------
**ID**
nsxv_plugin
**Description**
::
Test case verifies plugin installation.
**Complexity**
smoke
**Require to automate**
Yes
**Steps**
::
Connect to fuel node via ssh.
Upload plugin.
Install plugin.
**Expected result**
Ensure that the plugin is installed successfully using the CLI: run the command 'fuel plugins'. Check the name, version, and package version of the plugin.
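A possible CLI sequence for this smoke check, reusing the placeholder RPM name from the destructive suite (the actual file name depends on the build)::

  scp plugin-name-1.0-0.0.1-0.noarch.rpm root@<fuel-master>:/tmp/
  fuel plugins --install /tmp/plugin-name-1.0-0.0.1-0.noarch.rpm
  fuel plugins   # the plugin should be listed with its name, version and package version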
TC-002: Verify that Fuel VMware NSXv plugin is uninstalled.
-------------------------------------------------------------
**ID**
nsxv_plugin
**Description**
::
Test verifies that plugin could be uninstalled.
**Complexity**
smoke
**Require to automate**
Yes
**Steps**
::
Connect to fuel node with preinstalled plugin via ssh.
Remove plugin from master node
Connect to the Fuel web UI.
Create a new environment.
Click on the Settings tab and check that section of NSXv plugin is not displayed.
**Expected result**
Verify that plugin is removed, run command 'fuel plugins'. Section of NSXv plugin is not displayed.
TC-003: Deploy cluster with plugin and vmware datastore backend.
----------------------------------------------------------------
**ID**
nsxv_smoke
**Description**
::
Test verifies installation with base configuration.
**Complexity**
smoke
**Require to automate**
No
**Steps**
::
Create a new environment using the Fuel UI Wizard.
add name of env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
select Vmware vcenter esxi datastore for images (glance)
Add nodes:
1 controller
1 compute-vmware
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Fill vcenter credentials:
Availability zone: vcenter
vCenter host: '172.16.0.254'
vCenter username: <login>
vCenter password: <password>
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Fill Glance credentials:
vCenter host: 172.16.0.254
vCenter username: <login>
vCenter password: <password>
Datacenter name: Datacenter
Datastore name: nfs
Deploy cluster
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.


@ -0,0 +1,718 @@
System
======
Setup for system tests
----------------------
**ID**
nsxv_setup_system
**Description**
::
It is a config for all system tests.
**Complexity**
advanced
**Require to automate**
Yes
**Steps**
::
Install NSXv plugin on master node.
Launch instances from the tcl.vmdk image, which is included in the plugin package and is available in Horizon.
Create a new environment using the Fuel UI Wizard.
add name of an env and select release version with OS
as hypervisor type: select vcenter check box and QEMU/KVM radio button
network setup : Neutron with tunnel segmentation.
storage backends: default
additional services: all by default
In Settings tab:
enable NSXv plugin
Add nodes:
3 controller
1 compute-vmware
Interfaces on slaves should be setup this way in Fuel interface:
eth0 - admin(PXE)
eth1 - public
eth2 - management
eth3 - VM(Fixed) ID:103
eth4 storage
Networks tab:
Public network: start '172.16.0.2' end '172.16.0.126'
CIDR '172.16.0.0/24'
Gateway 172.16.0.1
Floating ip range start '172.16.0.130' end '172.16.0.254'
Storage: CIDR '192.168.1.0/24'
Vlan tag is not set
Management: CIDR '192.168.0.0/24'
Vlan tag is not set
Neutron L2 configuration by default
Neutron L3 configuration by default
Verify networks.
Add 2 vSphere Clusters:
vSphere Cluster: Cluster1
Service name: vmcluster1
Datastore regex:.*
vSphere Cluster: Cluster2
Service name: vmcluster2
Datastore regex: .*
Deploy cluster
Run OSTF
**Expected result**
Cluster should be deployed and all OSTF test cases should be passed.
TC-061: Check abilities to create and terminate networks on NSX.
----------------------------------------------------------------
**ID**
nsxv_create_terminate_networks
**Description**
::
Verifies that network creation is propagated to vCenter.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Add private networks net_01 and net_02.
Check that networks are present in the vSphere.
Remove private network net_01.
Check that network net_01 is not present in the vSphere.
Add private network net_01.
Check that the network is present in the vSphere.
**Expected result**
Networks net_01 and net_02 should be added.
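The same steps could be driven from the Neutron CLI instead of Horizon; a sketch with the network names from the steps::

  neutron net-create net_01
  neutron net-create net_02
  # check both networks in vSphere, then delete and re-create net_01
  neutron net-delete net_01
  neutron net-create net_01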
TC-062: Check abilities to assign multiple vNIC to a single VM.
---------------------------------------------------------------
**ID**
nsxv_assign_multiple_vnic
**Description**
::
It is possible to assign multiple vNICs.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Add two private networks (net01, and net02).
Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01, 192.168.102.0/24) to each network.
Launch instance VM_1 with image TestVM-TCL and flavor m1.tiny in vcenter1 az.
Launch instance VM_2 with image TestVM-TCL and flavor m1.tiny vcenter2 az.
Check the ability to assign multiple vNICs (net01 and net02) to VM_1.
Check the ability to assign multiple vNICs (net01 and net02) to VM_2.
Send ICMP ping from VM_1 to VM_2 and vice versa. VM_1 and VM_2 should be attached to multiple vNICs (net01 and net02).
**Expected result**
Pings should get a response.
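A sketch of creating the networks and attaching an extra vNIC from the CLI (IDs and VM names are placeholders)::

  neutron net-create net01
  neutron subnet-create net01 192.168.101.0/24 --name net01_subnet01
  neutron net-create net02
  neutron subnet-create net02 192.168.102.0/24 --name net02_subnet01
  # attach a second interface, on net02, to a running instance
  nova interface-attach --net-id <net02-id> VM_1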
TC-063: Check connection between VMs in one tenant.
---------------------------------------------------
**ID**
nsxv_connectivity_in_one_tenant
**Description**
::
Checks connections between VMs inside a tenant.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Navigate to Project -> Compute -> Instances
Launch instance VM_1 with image TestVM-TCL and flavor m1.tiny in vcenter1 az.
Launch instance VM_2 with image TestVM-TCL and flavor m1.tiny in vcenter2 az.
Verify that VMs in the same tenant can communicate with each other. Send ICMP ping from VM_1 to VM_2 and vice versa.
**Expected result**
Pings should get a response
TC-064: Check connectivity between VMs attached to different networks with a router between them.
-------------------------------------------------------------------------------------------------
**ID**
nsxv_connectivity_between_different_networks
**Description**
::
Verifies that there is a connection between networks connected through the router.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Add two private networks (net01, and net02).
Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01, 192.168.102.0/24) to each network.
Navigate to Project -> Compute -> Instances
Launch instances VM_1 and VM_2 in the network 192.168.101.0/24 with image TestVM-TCL and flavor m1.tiny in vcenter1 az.
Launch instances VM_3 and VM_4 in the network 192.168.102.0/24 with image TestVM-TCL and flavor m1.tiny in vcenter2 az.
Verify that VMs of the same network can communicate
with each other. Send ICMP ping from VM_1 to VM_2, VM_3 to VM_4 and vice versa.
Verify that VMs of different networks cannot communicate
with each other. Send ICMP ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
Create Router_01, set the gateway and add an interface to the external network.
Attach the private networks to the router.
Verify that VMs of different networks can now communicate with each other. Send ICMP ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
Add a new Router_02, set the gateway and add an interface to the external network.
Detach net_02 from Router_01 and attach it to Router_02.
Verify that VMs of different networks can still communicate with each other. Send ICMP ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
**Expected result**
Pings should get a response.
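The router manipulation above maps to the following Neutron CLI calls; a sketch with placeholder names::

  neutron router-create Router_01
  neutron router-gateway-set Router_01 <external-net-id>
  neutron router-interface-add Router_01 net01_subnet01
  neutron router-interface-add Router_01 net02_subnet01
  # later: move net_02 behind a second router
  neutron router-interface-delete Router_01 net02_subnet01
  neutron router-create Router_02
  neutron router-gateway-set Router_02 <external-net-id>
  neutron router-interface-add Router_02 net02_subnet01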
TC-065: Check connectivity between VMs attached on the same provider network with shared router.
------------------------------------------------------------------------------------------------
**ID**
nsxv_connectivity_via_shared_router
**Description**
::
Checks that it is possible to connect via shared router type.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Add provider network via cli.
Log in to Horizon Dashboard.
Create shared router(default type) and use it for routing between instances.
Navigate to Project -> compute -> Instances
Launch instance VM_1 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
Launch instance VM_2 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
Verify that VMs on the same provider network can communicate
with each other. Send ICMP ping from VM_1 to VM_2 and vice versa.
**Expected result**
Pings should get a response.
TC-066: Check connectivity between VMs attached on the same provider network with distributed router.
-----------------------------------------------------------------------------------------------------
**ID**
nsxv_connectivity_via_distributed_router
**Description**
::
Verifies that it is possible to connect via the distributed router type.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Add provider network via cli.
Log in to Horizon Dashboard.
Create distributed router and use it for routing between instances. Only available via CLI:
neutron router-create rdistributed --distributed True
Navigate to Project -> compute -> Instances
Launch instance VM_1 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
Launch instance VM_2 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
Verify that VMs on the same provider network can communicate
with each other. Send ICMP ping from VM_1 to VM_2 and vice versa.
**Expected result**
Pings should get a response.
TC-067: Check connectivity between VMs attached on the same provider network with exclusive router.
---------------------------------------------------------------------------------------------------
**ID**
nsxv_connectivity_via_exclusive_router
**Description**
::
Verifies that it is possible to connect via the exclusive router type.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Add provider network via cli.
Log in to Horizon Dashboard.
Create exclusive router and use it for routing between instances. Only available via CLI:
neutron router-create rexclusive --router_type exclusive
Navigate to Project -> compute -> Instances
Launch instance VM_1 in the provider network with image TestVMDK-TCL and flavor m1.tiny in the vcenter1 az.
Launch instance VM_2 in the provider network with image TestVMDK-TCL and flavor m1.tiny in the vcenter2 az.
Verify that VMs on the same provider network can communicate
with each other. Send ICMP ping from VM_1 to VM_2 and vice versa.
**Expected result**
Pings should get a response.
TC-068: Check isolation between VMs in different tenants.
---------------------------------------------------------
**ID**
nsxv_different_tenants
**Description**
::
Verifies isolation in different tenants.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Create non-admin tenant test_tenant.
Navigate to Identity -> Projects.
Click on Create Project.
Type name test_tenant.
On the Project Members tab, add admin with the admin and member roles.
Navigate to Project -> Network -> Networks
Create a network with 2 subnets
Navigate to Project -> compute -> Instances
Launch instance VM_1
Navigate to test_tenant
Navigate to Project -> Network -> Networks
Create network with subnet.
Create Router, set gateway and add interface
Navigate to Project -> compute -> Instances
Launch instance VM_2
Verify that VMs in different tenants cannot communicate
with each other. Send ICMP ping from VM_1 of the admin tenant to VM_2 of test_tenant and vice versa.
**Expected result**
Pings should not get a response.
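Tenant creation could also be done from the CLI; a sketch assuming the Kilo-era keystone client (role names per the steps, "_member_" being the default member role)::

  keystone tenant-create --name test_tenant
  keystone user-role-add --user admin --role admin --tenant test_tenant
  keystone user-role-add --user admin --role _member_ --tenant test_tenant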
TC-069: Check connectivity between VMs with same ip in different tenants.
-------------------------------------------------------------------------
**ID**
nsxv_same_ip_different_tenants
**Description**
::
Verifies connectivity with same IP in different tenants.
**Complexity**
advanced
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Create 2 non-admin tenants test_1 and test_2.
Navigate to Identity -> Projects.
Click on Create Project.
Type name test_1 of tenant.
Click on Create Project.
Type name test_2 of tenant.
On the Project Members tab, add admin with the admin and member roles.
In tenant test_1 create net1 and subnet1 with CIDR 10.0.0.0/24
In tenant test_1 create security group SG_1 and add rule that allows ingress icmp traffic
In tenant test_2 create net2 and subnet2 with CIDR 10.0.0.0/24
In tenant test_2 create security group SG_2
In tenant test_1 add VM_1 of vcenter1 in net1 with ip 10.0.0.4 and SG_1 as security group.
In tenant test_1 add VM_2 of vcenter2 in net1 with ip 10.0.0.5 and SG_1 as security group.
In tenant test_2 create net1 and subnet1 with CIDR 10.0.0.0/24
In tenant test_2 create security group SG_1 and add rule that allows ingress icmp traffic
In tenant test_2 add VM_3 of vcenter1 in net1 with ip 10.0.0.4 and SG_1 as security group.
In tenant test_2 add VM_4 of vcenter2 in net1 with ip 10.0.0.5 and SG_1 as security group.
Verify that VMs with the same IP in different tenants can communicate
with each other. Send ICMP ping from VM_1 to VM_3, VM_2 to VM_4 and vice versa.
**Expected result**
Pings should get a response.
TC-070: Check connectivity of VMs to the public network.
-----------------------------------------------------------
**ID**
nsxv_public_network_availability
**Description**
::
Verifies that public network is available.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
Launch instance VM_1 of vcenter1 AZ with image TestVM-TCL and flavor m1.tiny in the net_04.
Launch instance VM_2 of vcenter2 AZ with image TestVM-TCL and flavor m1.tiny in the net_01.
Send ping from instances VM_1 and VM_2 to 8.8.8.8 or other outside ip.
**Expected result**
Pings should get a response
TC-071: Check connectivity of VMs to the public network with a floating IP.
------------------------------------------------------------------------------
**ID**
nsxv_floating_ip_to_public
**Description**
::
Verifies that public network is available via floating ip.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard
Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
Launch instance VM_1 of vcenter1 AZ with image TestVM-TCL and flavor m1.tiny in the net_04. Associate a floating IP.
Launch instance VM_2 of vcenter2 AZ with image TestVM-TCL and flavor m1.tiny in the net_01. Associate a floating IP.
Send ping from instances VM_1 and VM_2 to 8.8.8.8 or other outside ip.
**Expected result**
Pings should get a response
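Floating IP association could be scripted as follows (a sketch; network and server names are placeholders)::

  # allocate a floating IP from the external network and attach it to VM_1
  neutron floatingip-create <external-net>
  nova floating-ip-associate VM_1 <allocated-floating-ip>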
TC-072: Check abilities to create and delete security group.
------------------------------------------------------------
**ID**
nsxv_create_and_delete_secgroups
**Description**
::
Verifies that creation and deletion of security groups work correctly.
**Complexity**
advanced
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Launch instance VM_1 in the tenant network net_02 with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
Launch instance VM_2 in the tenant net_02 with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
Create security group SG_1 to allow ICMP traffic.
Add an ingress rule for the ICMP protocol to SG_1.
Attach SG_1 to the VMs.
Check ping between VM_1 and VM_2 and vice versa.
Create security group SG_2 to allow TCP traffic on port 80.
Add an ingress rule for the TCP protocol to SG_2.
Attach SG_2 to the VMs.
ssh from VM_1 to VM_2 and vice versa.
Delete all rules from SG_1 and SG_2.
Check that ping and ssh aren't available from VM_1 to VM_2 and vice versa.
Add an ingress rule for the ICMP protocol to SG_1.
Add an ingress rule for the TCP protocol to SG_2.
Check ping between VM_1 and VM_2 and vice versa.
Check ssh from VM_1 to VM_2 and vice versa.
Delete the security groups.
Attach the VMs to the default security group.
Check ping between VM_1 and VM_2 and vice versa.
Check SSH from VM_1 to VM_2 and vice versa.
**Expected result**
We should have the ability to send ICMP and TCP traffic between VMs in different tenants.
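A sketch of the security-group steps via the Neutron/Nova CLIs (group names as in the steps; the rules shown are illustrative)::

  neutron security-group-create SG_1
  neutron security-group-rule-create --direction ingress --protocol icmp SG_1
  neutron security-group-create SG_2
  neutron security-group-rule-create --direction ingress --protocol tcp --port-range-min 80 --port-range-max 80 SG_2
  # attach/detach a group on a running instance
  nova add-secgroup VM_1 SG_1
  nova remove-secgroup VM_1 SG_1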
TC-073: Verify that only the associated MAC and IP addresses can communicate on the logical port.
-------------------------------------------------------------------------------------------------
**ID**
nsxv_associated_addresses_communication_on_port
**Description**
::
Verifies that only associated addresses can communicate on the logical port.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Log in to Horizon Dashboard.
Launch 2 instances.
Verify that traffic can be successfully sent from and received on the MAC and IP address associated with the logical port.
Configure a new IP address on the instance associated with the logical port.
Confirm that the instance cannot communicate with that IP address.
Configure a new MAC address on the instance associated with the logical port.
Confirm that the instance cannot communicate with that MAC address and the original IP address.
**Expected result**
The instance should not be able to communicate using the new IP and MAC addresses, but it should still communicate using the old IP.
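The "new address" steps are typically performed inside the guest; a minimal sketch (interface name and addresses are placeholders)::

  # add an extra, non-associated IP address on the instance's interface
  sudo ip addr add 10.0.0.250/24 dev eth0
  # change the MAC address (the link goes down briefly)
  sudo ip link set dev eth0 down
  sudo ip link set dev eth0 address fa:16:3e:00:00:01
  sudo ip link set dev eth0 up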
TC-075: Check simultaneous creation of instances in one group.
----------------------------------------------------------------
**ID**
nsxv_create_and_delete_vms
**Description**
::
Verifies that the system can create and delete several instances simultaneously.
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Navigate to Project -> Compute -> Instances
Launch 5 instances of VM_1 simultaneously with image TestVM-TCL and flavor m1.micro in the vcenter1 az in the default net_04.
All instances should be created without any errors.
Launch 5 instances of VM_2 simultaneously with image TestVM-TCL and flavor m1.micro in the vcenter2 az in the default net_04.
All instances should be created without any errors.
Check the connection between VMs (ping, ssh).
Delete all VMs from Horizon simultaneously.
**Expected result**
All instances should be created without any errors.
TC-076: Check that environment support assigning public network to all nodes
----------------------------------------------------------------------------
**ID**
nsxv_public_network_to_all_nodes
**Description**
::
Verifies that checkbox "Assign public network to all nodes" works as designed.
Assuming default installation has been done with unchecked option "Assign public network to all nodes".
**Complexity**
core
**Require to automate**
Yes
**Steps**
::
Connect through ssh to Controller node.
Run 'ifconfig'. There is an interface with ip from public network IP Range (Networks tab).
Connect through ssh to compute-vmware node.
Run 'ifconfig'. There is no interface with ip from public network IP Range.
Redeploy the environment with the option Public network assignment -> Assign public network to all nodes checked. The option is checked after deploy.
Connect through ssh to Controller node.
Run 'ifconfig'. There is an interface with ip from public network IP Range.
Connect through ssh to compute-vmware node.
Run 'ifconfig'. There is an interface with ip from public network IP Range also.
**Expected result**
"Assign public network to all nodes" works as designed.

doc/test_report.pdf Normal file

Binary file not shown.

doc/user/Makefile Normal file

@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FuelNSXvplugin.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FuelNSXvplugin.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/FuelNSXvplugin"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FuelNSXvplugin"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

View File

@@ -28,7 +28,7 @@ import os
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ 'oslosphinx']
extensions = [ ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -181,23 +181,15 @@ htmlhelp_basename = 'FuelNSXvplugindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
latex_elements = { 'classoptions': ',openany,oneside', 'babel': '\\usepackage[english]{babel}'
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FuelNSXvplugin.tex', u'Fuel NSXv plugin Documentation',
u'Igor Zinovik', 'manual'),
('index', 'FuelNSXvplugin.tex', u'Fuel NSXv plugin documentation',
u'Mirantis Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -226,8 +218,8 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fuelnsxvplugin', u'Fuel NSXv plugin Documentation',
[u'Igor Zinovik'], 1)
('index', 'fuelnsxvplugin', u'Fuel NSXv plugin documentation',
[u'Mirantis Inc.'], 1)
]
# If true, show URL addresses after external links.
@@ -240,8 +232,8 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FuelNSXvplugin', u'Fuel NSXv plugin Documentation',
u'Igor Zinovik', 'FuelNSXvplugin', 'One line description of project.',
('index', 'FuelNSXvplugin', u'Fuel NSXv plugin documentation',
u'Mirantis Inc.', 'FuelNSXvplugin', 'One line description of project.',
'Miscellaneous'),
]
@@ -256,3 +248,6 @@ texinfo_documents = [
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Insert footnotes where they are defined instead of at the end.
pdf_inline_footnotes = True
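
To sanity-check the LaTeX settings above, the PDF can be rebuilt through the corresponding Makefile target. This is a sketch; it assumes the modified conf.py belongs to the user documentation under doc/user and that a TeX toolchain (pdflatex) is installed:

$ cd doc/user
$ make latexpdf
$ ls _build/latex/FuelNSXvplugin.pdf   # base name follows the latex_documents entry above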

View File

(image file changed; 5.6 KiB before and after)

View File

(image file changed; 29 KiB before and after)

View File

(image file changed; 105 KiB before and after)

Binary file not shown.

(image file added; 12 KiB)

View File

(image file changed; 38 KiB before and after)

View File

(image file changed; 72 KiB before and after)

View File

@@ -9,7 +9,7 @@ pre-existing vSphere infrastructure with NSX network virtualization platform.
The plugin installs the Neutron NSX core plugin and allows logical network equipment
(routers, networks) to be created as NSX entities.
Plugin version 1.0.0 is compatible with Fuel 7.0.
Plugin version 1.x.x series is compatible with Fuel 7.0.
The plugin can work with VMware NSX 6.1.3 and 6.1.4.
@@ -17,16 +17,18 @@ Through documentation we use term "NSX" and "NSXv" interchangeably, both of
these terms refer to the `VMware NSX virtualized network platform
<https://www.vmware.com/products/nsx>`_.
Documentation contents:
Documentation contents
======================
.. toctree::
:maxdepth: 2
build
installation
environment
configuration
usage
source/build
source/installation
source/environment
source/configuration
source/usage
A pre-built package of the plugin can be found in the
`Fuel Plugin Catalog <https://www.mirantis.com/products/openstack-drivers-and-plugins/fuel-plugins>`_.
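
With the toctree entries above now pointing into a source/ subdirectory, the documentation sources are expected to be laid out roughly as follows. This is a sketch: the doc/user prefix and the .rst suffix are assumptions based on the other files in this change.

$ ls doc/user/source/
build.rst  configuration.rst  environment.rst  installation.rst  usage.rst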