From 52fd6923fadf1e65079f5f8f7e9fe14c8f29857e Mon Sep 17 00:00:00 2001 From: Ilya Bumarskov Date: Tue, 1 Dec 2015 20:49:26 +0300 Subject: [PATCH] Test Plan for NSXv Plugin v.2.0.0 Change-Id: I22abc946a3d09105c246b97d450cda89c7162f11 --- doc/test/conf.py | 4 +- doc/test/index.rst | 7 +- doc/test/source/nsx-v_test_plan.rst | 33 +- doc/test/source/test_suite_destructive.rst | 490 --------- doc/test/source/test_suite_failover.rst | 124 +++ doc/test/source/test_suite_gui.rst | 39 - doc/test/source/test_suite_integration.rst | 309 +----- doc/test/source/test_suite_scale.rst | 117 ++ doc/test/source/test_suite_smoke.rst | 314 ++++-- doc/test/source/test_suite_system.rst | 1130 ++++++++++++-------- 10 files changed, 1172 insertions(+), 1395 deletions(-) delete mode 100644 doc/test/source/test_suite_destructive.rst create mode 100644 doc/test/source/test_suite_failover.rst delete mode 100644 doc/test/source/test_suite_gui.rst create mode 100644 doc/test/source/test_suite_scale.rst diff --git a/doc/test/conf.py b/doc/test/conf.py index 773f988..7384c62 100644 --- a/doc/test/conf.py +++ b/doc/test/conf.py @@ -51,9 +51,9 @@ copyright = u'2015, Mirantis Inc.' # built documents. # # The short X.Y version. -version = '1.0.0' +version = '2.0.0' # The full version, including alpha/beta/rc tags. -release = '1.0.0' +release = '2.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/test/index.rst b/doc/test/index.rst index 87f0097..51b896e 100644 --- a/doc/test/index.rst +++ b/doc/test/index.rst @@ -8,4 +8,9 @@ Testing documents :glob: :maxdepth: 1 - source/* + source/nsx-v_test_plan + source/test_suite_failover + source/test_suite_integration + source/test_suite_scale + source/test_suite_smoke + source/test_suite_system diff --git a/doc/test/source/nsx-v_test_plan.rst b/doc/test/source/nsx-v_test_plan.rst index 61ee1f0..23c3354 100644 --- a/doc/test/source/nsx-v_test_plan.rst +++ b/doc/test/source/nsx-v_test_plan.rst @@ -1,6 +1,6 @@ -================================== -Test Plan for NSXv plugin v.1.1.0 -================================== +================================ +Test Plan for NSXv plugin v2.0.0 +================================ .. contents:: Table of contents :depth: 3 @@ -41,7 +41,7 @@ Following test types should be provided: * GUI tests Performance testing will be executed on the scale lab and a custom set of -rally scenarios must be run with NSXv environment. Configuration, enviroment +rally scenarios must be run with NSXv environment. Configuration, environment and scenarios for performance/scale testing should be determine separately. Intended Audience @@ -56,7 +56,7 @@ Limitation Plugin (or its components) has the following limitations: * VMware NSXv plugin can be enabled only with Neutron tunnel segmentation. -* Enviroment with enabled VMware NSXv plugin can't contains compute nodes. +* Environment with enabled VMware NSXv plugin can't contains compute nodes. * Only VMware NSX Manager Virtual Appliance 6.1.4 or later is supported. Product compatibility matrix @@ -70,11 +70,11 @@ Product compatibility matrix - Version - Comment * - MOS - - 7.0 with Kilo + - 8.0 + - + * - OpenStack release + - Kilo with Ubuntu 14.04 - - * - Operatin System - - Ubuntu 14.0.4 - - Only Ubuntu is supported in MOS 7.0 * - vSphere - 5.5 and 6.0 - @@ -88,8 +88,8 @@ Evaluation Mission and Test Motivation Project main goal is to build a MOS plugin that integrates a Neutron VMware NSX plugin. 
This will allow to use Neutron for networking in vmware-related -environments. The plugin must be compatible with the version 7.0 of Mirantis -OpenStack and should be tested with sofware/hardware described in +environments. The plugin must be compatible with the version 8.0 of Mirantis +OpenStack and should be tested with software/hardware described in `product compatibility matrix`_. See the VMware NSX Plugin specification for more details. @@ -122,7 +122,7 @@ QA to accept software builds from Development team. The goal of integration and system testing is to ensure that new or modified components of Fuel and MOS work effectively with Fuel VMware NSXv plugin -without gaps in dataflow. +without gaps in data flow. **Regression testing** @@ -205,12 +205,3 @@ Acceptance criteria * All required documents are delivered * Release notes including a report on the known errors of that release -********** -Test cases -********** - -.. include:: test_suite_smoke.rst -.. include:: test_suite_integration.rst -.. include:: test_suite_system.rst -.. include:: test_suite_destructive.rst -.. include:: test_suite_gui.rst diff --git a/doc/test/source/test_suite_destructive.rst b/doc/test/source/test_suite_destructive.rst deleted file mode 100644 index 6885520..0000000 --- a/doc/test/source/test_suite_destructive.rst +++ /dev/null @@ -1,490 +0,0 @@ -Destructive -=========== - -TC-101: Check abilities to bind port on NSXv to VM, disable and enable this port. ----------------------------------------------------------------------------------- - -**ID** - -nsxv_ability_to_bind_port - -**Description** -:: - - Verifies that system could manipulate with port. - -**Complexity** - -core - -**Requre to automate** - -Yes - -**Steps** -:: - - Log in to Horizon Dashboard. - Navigate to Project -> Compute -> Instances - - Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny. - - Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny. - - Verify that VMs should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - Disable NSXv_port of VM_1. - Verify that VMs should not communicate between each other. Send icmp ping from VM _2 to VM_1 and vice versa. - - Enable NSXv_port of VM_1. - - Verify that VMs should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - -**Expected result** - -Pings should get a response - -TC-102: Verify that vmclusters should migrate after shutdown controller. ------------------------------------------------------------------------- - -**ID** - -nsxv_shutdown_controller - -**Description** -:: - - Verify that vmclusters should migrate after shutdown controller. - -**Complexity** - -core - -**Requre to automate** - -No - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard: - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. 
- storage backends: default - additional services: all by default - - In Settings tab: - enable NSXv plugin - - Add nodes: - 3 controllers - - Setup Fuel interfaces on slaves: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set - Managment: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - Click button 'save settings' - Click button 'verify networks' - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Deploy Cluster - - Run OSTF - - Shutdown controller with vmclusters. - - Check that vmclusters should migrate to another controller. - -**Expected result** - -VMclusters should migrate to another controller. - -TC-103: Deploy cluster with plugin, addition and deletion of nodes. -------------------------------------------------------------------- - -**ID** - -nsxv_add_delete_nodes - -**Description** -:: - - Verify that system functionality is ok after redeploy. - -**Complexity** - -advanced - -**Requre to automate** - -No - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard: - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default - - In Settings tab: - enable NSXv plugin - select Vmware vcenter esxi datastore for images (glance) - - Add nodes: - 3 controllers - 2 compute-vmwares - 1 cinder-vmdk - - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set - Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks - - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Run OSTF - - Remove node with cinder-vmdk role. - - Add node with cinder role - - Redeploy cluster. - - Run OSTF - - Remove node with compute-vmware role - Add node with cinder-vmware role - - Redeploy cluster. - - Run OSTF - -**Expected result** - -Cluster should be deployed and all OSTF test cases should be passed. - -TC-104: Deploy cluster with plugin and deletion one node with controller role. ------------------------------------------------------------------------------- - -**ID** - -nsxv_add_delete_controller - -**Description** -:: - - Verifies that system functionality is ok when controller has been removed. 
- -**Complexity** - -advanced - -**Requre to automate** - -No - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard: - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default - - In Settings tab: - enable NSXv plugin - select Vmware vcenter esxi datastore for images (glance) - - Add nodes: - 4 controller - 1 compute-vmware - 1 cinder-vmdk - - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set - Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Run OSTF - Remove node with controller role. - - Redeploy cluster - - Run OSTF - Add controller - Redeploy cluster - - Run OSTF - -**Expected result** - -Cluster should be deployed and all OSTF test cases should be passed. - -TC-105: Verify that it is not possible to uninstall of Fuel NSXv plugin with deployed environment. ---------------------------------------------------------------------------------------------------- - -**ID** - -nsxv_plugin - -**Description** -:: - - It is not possible to remove plugin while at least one environment exists. - -**Complexity** - -core - -**Requre to automate** - -Yes - -**Steps** -:: - - Copy plugin to to the Fuel master node using scp. - Install plugin - fuel plugins --install plugin-name-1.0-0.0.1-0.noarch.rpm - - Ensure that plugin is installed successfully using cli, run command 'fuel plugins'. - Connect to the Fuel web UI. - - Create a new environment using the Fuel UI Wizard: - add name of env and select release version with OS - as hypervisor type: select vcenter check box and Qemu radio button - network setup : Neutron with tunnel segmentation - storage backends: default - additional services: all by default - - Click on the Settings tab. - - In Settings tab: - enable NSXv plugin - - Add nodes: - 1 controller - - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks. 
- - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - - Deploy cluster - Run OSTF - Try to delete plugin via cli Remove plugin from master node fuel plugins --remove plugin-name==1.0.0 - -**Expected result** - -Alert: "400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)" should be displayed. - -TC-106: Check cluster functionality after reboot vcenter. ---------------------------------------------------------- - -**ID** - -nsxv_plugin - -**Description** -:: - - Verifies that system functionality is ok when vcenter has been rebooted. - -**Complexity** - -core - -**Requre to automate** - -Yes - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard: - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default - In Settings tab: - enable NSXv plugin - select Vmware vcenter esxi datastore for images (glance) - - Add nodes: - 3 controller - 1 computer - 1 cinder-vmware - 1 cinder - - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set - Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks - - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Run OSTF - - Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny. - - Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny. - - Check connection between VMs, send ping from VM_1 to VM_2 and vice verse. - Reboot vcenter - vmrun -T ws-shared -h https://localhost:443/sdk -u vmware -p VMware01 reset "[standard] vcenter/vcenter.vmx" - - Check that controller lost connection with vCenter - - Wait for vCenter - - Ensure that all instances from vCenter displayed in dashboard. - - Ensure connectivity between vcenter1's and vcenter2's VM. - Run OSTF - -**Expected result** - -Cluster should be deployed and all OSTF test cases should be passed. Ping should get response. diff --git a/doc/test/source/test_suite_failover.rst b/doc/test/source/test_suite_failover.rst new file mode 100644 index 0000000..3853674 --- /dev/null +++ b/doc/test/source/test_suite_failover.rst @@ -0,0 +1,124 @@ +Failover +======== + + +Verify that it is not possible to uninstall of Fuel NSX-v plugin with deployed environment. +------------------------------------------------------------------------------------------- + + +ID +## + +nsxv_uninstall_negative + + +Description +########### + +It is not possible to remove plugin while at least one environment exists. + + +Complexity +########## + +smoke + + +Steps +##### + + 1. Install NSXv plugin on master node. + 2. 
Create a new environment with enabled plugin. + 3. Try to delete plugin via CLI Remove plugin from master node. + + +Expected result +############### + +Alert: "400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)" should be displayed. + + +Shutdown primary controller and check plugin functionality. +----------------------------------------------------------- + + +ID +## + +nsxv_shutdown_controller + + +Description +########### + +Check plugin functionality after shutdown primary controller. + + +Complexity +########## + +core + + +Steps +##### + + 1. Log in to Fuel with preinstalled plugin and deployed environment with 3 controllers. + 2. Log in to Horizon. + 3. Create VM and check connectivity to outside world from VM. + 4. Shutdown primary controller. + 5. Ensure that VIPs are moved to other controller. + 6. Ensure connectivity to outside world from created VM. + 7. Create a new network and attach it to router. + 8. Create a VM with new network and check network connectivity. + + +Expected result +############### + +Networking is working correct after failure of primary controller. + + +Check cluster functionality after reboot vcenter. +------------------------------------------------- + + +ID +## + +nsxv_reboot_vcenter + + +Description +########### + +Verifies that system functionality is ok when vcenter has been rebooted. + + +Complexity +########## + +core + + +Steps +##### + + 1. Log in to Fuel with preinstalled plugin and deployed environment. + 2. Log in to Horizon. + 3. Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny. + 4. Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny. + 5. Check connection between VMs, send ping from VM_1 to VM_2 and vice verse. + 6. Reboot vcenter: 'vmrun -T ws-shared -h https://localhost:443/sdk -u vmware -p VMware01 reset "[standard] vcenter/vcenter.vmx"' + 7. Check that controller lost connection with vCenter. + 8. Wait for vCenter. + 9. Ensure that all instances from vCenter displayed in dashboard. + 10. Ensure connectivity between vcenter1's and vcenter2's VM. + 11. Run OSTF. + + +Expected result +############### + +Cluster should be deployed and all OSTF test cases should be passed. ping should get response. + diff --git a/doc/test/source/test_suite_gui.rst b/doc/test/source/test_suite_gui.rst deleted file mode 100644 index 558cc89..0000000 --- a/doc/test/source/test_suite_gui.rst +++ /dev/null @@ -1,39 +0,0 @@ -GUI Testing -=========== - -TC-131: Verify that all elements of NSXv plugin section require GUI regiments. -------------------------------------------------------------------------------- - -**ID** - -nsxv_plugin - -**Description** -:: - - Verify that all elements of NSXv plugin section meets the requirements. - -**Complexity** - -smoke - -**Requre to automate** - -Yes - -**Steps** -:: - - Login to the Fuel web UI. - Click on the Settings tab. - - Verify that section of NSXv plugin is present on the Settings tab. - Verify that check box ‘NSXv plugin’ is disabled by default. - - Verify that user can enabled. Enable NSXv plugin by click on check box ‘NSXv plugin’. - Verify that all labels of NSXv plugin section have same font style and color. - Verify that all elements of NSXv plugin section are vertical aligned. - -**Expected result** - -All elements of NSXv plugin section meets the requirements. 
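The negative uninstall check in the Failover suite above can be scripted against the Fuel CLI. A minimal sketch in Python, assuming the plugin is referenced as ``nsxv==2.0.0`` (placeholder name/version; use the string reported by ``fuel plugins``)::

    import subprocess

    # Try to remove the plugin while an environment still has it enabled.
    process = subprocess.Popen(['fuel', 'plugins', '--remove', 'nsxv==2.0.0'],
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = process.communicate()[0].decode()

    # Removal must be rejected because the plugin is enabled for an environment.
    assert '400 Client Error' in output, output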
diff --git a/doc/test/source/test_suite_integration.rst b/doc/test/source/test_suite_integration.rst index a2760aa..2f4b074 100644 --- a/doc/test/source/test_suite_integration.rst +++ b/doc/test/source/test_suite_integration.rst @@ -1,297 +1,54 @@ Integration =========== -TC-031: Deploy HA cluster with Fuel NSXv plugin. -------------------------------------------------- -**ID** - -nsxv_ha_mode - -**Description** -:: - - Installation in HA mode with 3 controllers. - -**Complexity** - -core - -**Requre to automate** - -No - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard. - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default - In Settings tab: - enable NSXv plugin - Add nodes: - 3 controller - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Managment: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - Verify networks. - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - Deploy cluster - Run OSTF - -**Expected result** - -Cluster should be deployed and all OSTF test cases should be passed. +Deploy cluster with NSX-v plugin and ceilometer. +------------------------------------------------ -TC-032: Deploy cluster with Fuel NSXv plugin and Ceph for Glance and Cinder. ------------------------------------------------------------------------------ - -**ID** - -nsxv_ceph_no_vcenter - -**Description** -:: - - Verifies installation of plugin with Glance and Cinder. - -**Complexity** - -core - -**Requre to automate** - -No - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard. - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default - In Settings tab: - enable NSXv plugin - select 'Ceph RBD for volumes' (Cinder) and 'Ceph RBD for images(Glance)' - Add nodes: - 1 controller - 1 controller + ceph-osd - 1 controller + cinder-vmware + ceph-osd - 1 cinder-vmware + ceph-osd - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - Verify networks. 
- Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 3 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Deploy cluster - Run OSTF - -**Expected result** - -Cluster should be deployed and all OSTF test cases should be passed. - -TC-034: Deploy cluster with Fuel VMware NSXv plugin and ceilometer. --------------------------------------------------------------------- - -**ID** +ID +## nsxv_ceilometer -**Description** -:: - Installation of plugin with ceilometer. +Description +########### -**Complexity** +Check deployment with Fuel NSXv plugin and Ceilometer. + + +Complexity +########## core -**Requre to automate** -No +Steps +##### -**Steps** -:: + 1. Log into Fuel with preinstalled plugin. + 2. Create a new environment with following parameters: + * Compute: KVM/QEMU with vCenter + * Networking: Neutron with tunnel segmentation + * Storage: default + * Additional services: ceilometer + 3. Add nodes with following roles: + * Controller + Mongo + * Controller + Mongo + * Controller + Mongo + * ComputeVMware + 4. Configure interfaces on nodes. + 5. Configure network settings. + 6. Enable and configure NSXv plugin. + 7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware. + 8. Verify networks. + 9. Deploy cluster. + 10. Run OSTF. - Create a new environment using the Fuel UI Wizard. - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: install ceilometer - In Settings tab: - enable NSXv plugin - Add nodes: - 3 controller + mongo - 1 compute-vmware - - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks. - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 1 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - - Deploy cluster - Run OSTF. - -**Expected result** +Expected result +############### Cluster should be deployed and all OSTF test cases should be passed. -TC-035: Deploy cluster with Fuel VMware NSXv plugin, Ceph for Cinder and VMware datastore backend for Glance. -------------------------------------------------------------------------------------------------------------- - -**ID** - -nsxv_ceph - -**Description** -:: - - Verifies installation of plugin for vcenter with Glance and Cinder. - -**Complexity** - -core - -**Requre to automate** - -No - -**Steps** -:: - - Create a new environment using the Fuel UI Wizard. - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. 
- storage backends: default - additional services: default - - In Settings tab: - enable NSXv plugin - select 'Ceph RBD for volumes' (Cinder) and 'Vmware Datastore for images(Glance)' - - Add nodes: - 3 controller + ceph-osd - 2 cinder-vmware - - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks. - - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - Deploy cluster - Run OSTF - -**Expected result** - -Cluster should be deployed and all OSTF test cases should be passed. diff --git a/doc/test/source/test_suite_scale.rst b/doc/test/source/test_suite_scale.rst new file mode 100644 index 0000000..f78e406 --- /dev/null +++ b/doc/test/source/test_suite_scale.rst @@ -0,0 +1,117 @@ +Scale +===== + + +Deploy cluster with plugin and deletion one node with controller role. +---------------------------------------------------------------------- + + +ID +## + +nsxv_add_delete_controller + + +Description +########### + +Verifies that system functionality is ok when controller has been removed. + + +Complexity +########## + +core + + +Steps +##### + + 1. Log into Fuel with preinstalled plugin. + 2. Create a new environment with following parameters: + * Compute: KVM/QEMU with vCenter + * Networking: Neutron with tunnel segmentation + * Storage: default + * Additional services: ceilometer + 3. Add nodes with following roles: + * Controller + * Controller + * Controller + * Controller + * CinderVMware + * ComputeVMware + 4. Configure interfaces on nodes. + 5. Configure network settings. + 6. Enable and configure NSXv plugin. + 7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware. + 8. Deploy cluster. + 9. Run OSTF. + 10. Remove node with controller role. + 11. Redeploy cluster. + 12. Run OSTF. + 13. Add controller. + 14. Redeploy cluster. + 15. Run OSTF. + + +Expected result +############### + +Cluster should be deployed and all OSTF test cases should be passed. + + +Deployment with 3 Controllers, ComputeVMware, CinderVMware and check adding/deleting of nodes +--------------------------------------------------------------------------------------------- + + +ID +## + +nsxv_add_delete_nodes + + +Description +########### + +Verify that system functionality is ok after redeploy. + + +Complexity +########## + +core + + +Steps +##### + + 1. Connect to a Fuel web UI with preinstalled plugin. + 2. Create a new environment with following parameters: + * Compute: KVM/QEMU with vCenter + * Networking: Neutron with VLAN segmentation + * Storage: Ceph + * Additional services: default + 3. Add nodes with following roles: + * Controller + * Controller + * Controller + * ComputeVMware + 4. Configure interfaces on nodes. + 5. Configure network settings. + 6. Enable and configure NSXv plugin. + 7. 
Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware. + 8. Deploy cluster. + 9. Run OSTF. + 10. Add node with CinderVMware role. + 11. Redeploy cluster. + 12. Run OSTF + 13. Remove node with CinderVMware role. + 14. Redeploy cluster. + 15. Run OSTF. + + +Expected result +############### + +Changing of cluster configuration was successful. Cluster should be deployed and all OSTF test cases should be passed. + diff --git a/doc/test/source/test_suite_smoke.rst b/doc/test/source/test_suite_smoke.rst index 2f3c5e2..dcf861c 100644 --- a/doc/test/source/test_suite_smoke.rst +++ b/doc/test/source/test_suite_smoke.rst @@ -1,148 +1,260 @@ Smoke ===== -TC-001: Verify that Fuel VMware NSXv plugin is installed. ----------------------------------------------------------- -**ID** +Install Fuel VMware NSX-v plugin. +--------------------------------- -nsxv_plugin -**Description** -:: +ID +## - Test case verifies plugin installation. +nsxv_install -**Complexity** + +Description +########### + +Check that plugin can be installed. + + +Complexity +########## smoke -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Connect to fuel node via ssh. + 2. Upload plugin. + 3. Install plugin. - Connect to fuel node via ssh. - Upload plugin. - Install plugin. -**Expected result** +Expected result +############### -Ensure that plugin is installed successfully using cli, run command 'fuel plugins'. Check name, version and package version of plugin. +Ensure that plugin is installed successfully using CLI, run command 'fuel plugins'. Check name, version and package version of plugin. -TC-002: Verify that Fuel VMware NSXv plugin is uninstalled. -------------------------------------------------------------- -**ID** +Uninstall Fuel VMware NSX-v plugin. +----------------------------------- -nsxv_plugin -**Description** -:: +ID +## - Test verifies that plugin could be uninstalled. +nsxv_uninstall -**Complexity** + +Description +########### + +Check that plugin can be removed. + + +Complexity +########## smoke -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Connect to fuel node with preinstalled plugin via ssh. + 2. Remove plugin. - Connect to fuel node with preinstalled plugin via ssh. - Remove plugin from master node - Connect to the Fuel web UI. - Create a new environment. - Click on the Settings tab and check that section of NSXv plugin is not displayed. -**Expected result** +Expected result +############### -Verify that plugin is removed, run command 'fuel plugins'. Section of NSXv plugin is not displayed. +Verify that plugin is removed, run command 'fuel plugins'. -TC-003: Deploy cluster with plugin and vmware datastore backend. + +Verify that all elements of NSXv plugin section meets the requirements. +----------------------------------------------------------------------- + + +ID +## + +nsxv_gui + + +Description +########### + +Verify that all elements of NSXv plugin section meets the requirements. + + +Complexity +########## + +smoke + + +Steps +##### + + 1. Login to the Fuel web UI. + 2. Click on the Settings tab. + 3. Verify that section of NSXv plugin is present on the Settings tab. + 4. Verify that check box 'NSXv plugin' is disabled by default. + 5. Verify that user can enabled. Enable NSX-v plugin by click on check box 'NSXv plugin'. + 6. Verify that all labels of NSX-v plugin section have same font style and color. + 7. Verify that all elements of NSX-v plugin section are vertical aligned. 
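The install and uninstall checks earlier in this suite both verify the result with the ``fuel plugins`` command. A minimal sketch of that verification in Python, assuming the plugin row contains the name ``nsxv`` and version ``2.0.0`` (placeholders; use the values from the plugin package)::

    import subprocess

    # List plugins installed on the Fuel master node.
    output = subprocess.check_output(['fuel', 'plugins']).decode()

    # After installation the entry must be present; after removal it must be gone.
    assert 'nsxv' in output and '2.0.0' in output, output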
+ + +Expected result +############### + +All elements of NSX-v plugin section are required GUI regiments. + + +Deployment with plugin, controller and vmware datastore backend. ---------------------------------------------------------------- -**ID** + +ID +## nsxv_smoke -**Description** -:: - Test verifies installation with base configuration. +Description +########### -**Complexity** +Check deployment with NSXv plugin and one controller. + + +Complexity +########## smoke -**Requre to automate** -No +Steps +##### -**Steps** -:: + 1. Log into Fuel with preinstalled plugin. + 2. Create a new environment with following parameters: + * Compute: KVM/QEMU with vCenter + * Networking: Neutron with tunnel segmentation + * Storage: default + * Additional services: default + 3. Add nodes with following roles: + * Controller + 4. Configure interfaces on nodes. + 5. Configure network settings. + 6. Enable and configure NSXv plugin. + 7. Configure settings: + * Enable VMWare vCenter/ESXi datastore for images (Glance). + 8. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on controllers. + 9. Verify networks. + 10. Deploy cluster. + 11. Run OSTF. - Create a new environment using the Fuel UI Wizard. - add name of env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default - In Settings tab: - enable NSXv plugin - select Vmware vcenter esxi datastore for images (glance) - Add nodes: - 1 controller - 1 compute-vmware - Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - Verify networks. - Fill vcenter credentials: - Availability zone: vcenter - vCenter host: '172.16.0.254' - vCenter username: - vCenter password: - - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Fill Glance credentials: - vCenter host: 172.16.0.254 - vCenter username: - vCenter password: - Datacenter name: Datacenter - Datastore name: nfs - - Deploy cluster - - Run OSTF - -**Expected result** +Expected result +############### Cluster should be deployed and all OSTF test cases should be passed. + + +Deploy HA cluster with NSX-v plugin. +------------------------------------ + + +ID +## + +nsxv_bvt + + +Description +########### + +Check deployment with NSXv plugin, 3 Controllers, 2 CephOSD, CinderVMware and computeVMware roles. + + +Complexity +########## + +smoke + + +Steps +##### + + 1. Connect to a Fuel web UI with preinstalled plugin. + 2. Create a new environment with following parameters: + * Compute: KVM/QEMU with vCenter + * Networking: Neutron with tunnel segmentation + * Storage: Ceph + * Additional services: default + 3. Add nodes with following roles: + * Controller + * Controller + * Controller + * CephOSD + * CephOSD + * CinderVMware + * ComputeVMware + 4. Configure interfaces on nodes. + 5. Configure network settings. 
+ 6. Enable and configure NSXv plugin. + 7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware. + 8. Verify networks. + 9. Deploy cluster. + 10. Run OSTF. + + +Expected result +############### + +Cluster should be deployed and all OSTF test cases should be passed. + + +Verify that nsxv driver configured properly after enabling NSX-v plugin +----------------------------------------------------------------------- + + +ID +## + +nsxv_config_ok + + +Description +########### + +Need to check that all parameters of nsxv driver config files have been filled +up with values entered from GUI. Applicable values that are typically used are +described in plugin docs. Root and intermediate certificate are signed, in +attachment. + + +Complexity +########## + +smoke + + +Steps +##### + + 1. Install NSXv plugin. + 2. Enable plugin on tab Settings -> NSXv plugin. + 3. Fill the form with corresponding values. + 4. Uncheck option "Bypass NSX Manager certificate verification". + 5. Do all things that are necessary to provide interoperability of NSXv plugin and NSX Manager with certificate. + 6. Check Additional settings. Fill the form with corresponding values. Save settings by pressing the button. + + +Expected result +############### + +Check that nsx.ini on controller nodes is properly configured. + diff --git a/doc/test/source/test_suite_system.rst b/doc/test/source/test_suite_system.rst index a2a365e..dbe8c94 100644 --- a/doc/test/source/test_suite_system.rst +++ b/doc/test/source/test_suite_system.rst @@ -1,718 +1,918 @@ System ====== + Setup for system tests ---------------------- -**ID** + +ID +## nsxv_setup_system -**Description** -:: - It is a config for all system tests. +Description +########### -**Complexity** +Deploy environment in DualHypervisors mode with 3 controllers and 1 +compute-vmware nodes. Nova Compute instances are running on controllers and +compute-vmware nodes. It is a config for all system tests. -advanced -**Requre to automate** +Complexity +########## -Yes +core -**Steps** -:: - Install NSXv plugin on master node. - Launch instances from tcl.vmdk image which is included in plugin package and is available under Horizon. - Create a new environment using the Fuel UI Wizard. - add name of an env and select release version with OS - as hypervisor type: select vcenter check box and QEMU/KVM radio button - network setup : Neutron with tunnel segmentation. - storage backends: default - additional services: all by default +Steps +##### - In Settings tab: - enable NSXv plugin - Add nodes: - 3 controller - 1 compute-vmware + 1. Log into Fuel with preinstalled plugin. + 2. Create a new environment with following parameters: + * Compute: KVM/QEMU with vCenter + * Networking: Neutron with tunnel segmentation + * Storage: default + * Additional services: default + 3. Add nodes with following roles: + * Controller + * Controller + * Controller + * ComputeVMware + 4. Configure interfaces on nodes. + 5. Configure network settings. + 6. Enable and configure NSXv plugin. + 7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware. + 8. Verify networks. + 9. Deploy cluster. + 10. Run OSTF. Launch instances from "Test-VMDK" image which is included in plugin package and is available under Horizon. Use m1.tiny flavor. 
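The instance launch in step 10 can also be scripted instead of using Horizon. A minimal sketch with the Kilo-era python-novaclient API, assuming the TestVM-VMDK image, the m1.tiny flavor and the vcenter1/vcenter2 availability zones referenced in the test steps are already available (credentials and endpoint below are placeholders)::

    from novaclient import client as nova_client

    # Placeholder credentials; substitute the values of the deployed environment.
    nova = nova_client.Client('2', 'admin', 'admin', 'admin',
                              'http://<public-vip>:5000/v2.0')

    image = nova.images.find(name='TestVM-VMDK')
    flavor = nova.flavors.find(name='m1.tiny')

    # Boot one instance per vSphere cluster, one in each availability zone.
    for zone in ('vcenter1', 'vcenter2'):
        nova.servers.create(name='systest-%s' % zone, image=image,
                            flavor=flavor, availability_zone=zone)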
- Interfaces on slaves should be setup this way in Fuel interface: - eth0 - admin(PXE) - eth1 - public - eth2 - management - eth3 - VM(Fixed) ID:103 - eth4 – storage - Networks tab: - Public network: start '172.16.0.2' end '172.16.0.126' - CIDR '172.16.0.0/24' - Gateway 172.16.0.1 - Floating ip range start '172.16.0.130' end '172.16.0.254' - Storage: CIDR '192.168.1.0/24' - Vlan tag is not set-Management: CIDR '192.168.0.0/24' - Vlan tag is not set - Neutron L2 configuration by default - Neutron L3 configuration by default - - Verify networks. - Add 2 vSphere Clusters: - vSphere Cluster: Cluster1 - Service name: vmcluster1 - Datastore regex:.* - vSphere Cluster: Cluster2 - Service name: vmcluster2 - Datastore regex: .* - - Deploy cluster - - Run OSTF - -**Expected result** +Expected result +############### Cluster should be deployed and all OSTF test cases should be passed. -TC-061: Check abilities to create and terminate networks on NSX. ----------------------------------------------------------------- -**ID** +Check abilities to create and terminate networks on NSX. +-------------------------------------------------------- + + +ID +## nsxv_create_terminate_networks -**Description** -:: - Verifies that creation of network is translated to vcenter. +Description +########### -**Complexity** +Verifies that creation of network is translated to vcenter. + + +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Setup for system tests. + 2. Log in to Horizon Dashboard. + 3. Add private networks net_01 and net_02. + 4. Check that networks are present in the vSphere. + 5. Remove private network net_01. + 6. Check that network net_01 is not present in the vSphere. + 7. Add private network net_01. + 8. Check that networks is present in the vSphere. - Log in to Horizon Dashboard. - Add private networks net_01 and net_02. +Expected result +############### - Check that networks are present in the vSphere. +Networks net_01 and net_02 should be added. - Remove private network net_01. - Check that network net_01 is not present in the vSphere. - Add private network net_01. +Check abilities to bind port on NSX-v to VM, disable and enable this port. +-------------------------------------------------------------------------- - Check that networks is present in the vSphere. -**Expected result** +ID +## -Networks net_01 and net_02 should be added. +nsxv_ability_to_bind_port -TC-062: Check abilities to assign multiple vNIC to a single VM. ---------------------------------------------------------------- -**ID** +Description +########### -nsxv_assign_multiple_vnic +Verifies that system could manipulate with port. -**Description** -:: - It is possible to assign multiple vNICs. - -**Complexity** +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Log in to Horizon Dashboard. + 2. Navigate to Project -> Compute -> Instances + 3. Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny. + 4. Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny. + 5. Verify that VMs should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. + 6. Disable NSX-v_port of VM_1. + 7. Verify that VMs should not communicate between each other. Send icmp ping from VM _2 to VM_1 and vice versa. + 8. Enable NSX-v_port of VM_1. + 9. Verify that VMs should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - Log in to Horizon Dashboard. - Add two private networks (net01, and net02). 
- Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01, 192.168.102.0/24) to each network. - Launch instance VM_1 with image TestVM-TCL and flavor m1.tiny in vcenter1 az. - Launch instance VM_2 with image TestVM-TCL and flavor m1.tiny vcenter2 az. - Check abilities to assign multiple vNIC net01 and net02 to VM_1 . - Check abilities to assign multiple vNIC net01 and net02 to VM_2. - Send icmp ping from VM _1 to VM_2 and vice versa.VM_1 and VM_2 should be attached to multiple vNIC net01 and net02. - -**Expected result** - -Pings should get a response. - -TC-063: Check connection between VMs in one tenant. ---------------------------------------------------- - -**ID** - -nsxv_connectivity_in_one_tenant - -**Description** -:: - - Checks connections between VMs inside a tenant. - -**Complexity** - -core - -**Requre to automate** - -Yes - -**Steps** -:: - - Log in to Horizon Dashboard. - - Navigate to Project -> Compute -> Instances - - Launch instance VM_1 with image TestVM-TCL and flavor m1.tiny in vcenter1 az. - - Launch instance VM_2 with image TestVM-TCL and flavor m1.tiny in vcenter2 az. - - Verify that VMs on same tenants should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - -**Expected result** +Expected result +############### Pings should get a response -TC-064: Check connectivity between VMs attached to different networks with a router between them. -------------------------------------------------------------------------------------------------- -**ID** +Check abilities to assign multiple vNIC to a single VM. +------------------------------------------------------- -nsxv_connectivity_between_different_networks -**Description** -:: +ID +## - Verifies that there is a connection between networks connected through the router. +nsxv_multi_vnic -**Complexity** + +Description +########### + +Check abilities to assign multiple vNICs to a single VM. + + +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Setup for system tests. + 2. Log in to Horizon Dashboard. + 3. Add two private networks (net01, and net02). + 4. Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01, 192.168.102.0/24) to each network. + 5. Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny in vcenter1 az. + 6. Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny vcenter2 az. + 7. Check abilities to assign multiple vNIC net01 and net02 to VM_1. + 8. Check abilities to assign multiple vNIC net01 and net02 to VM_2. + 9. Send icmp ping from VM _1 to VM_2 and vice versa. - Log in to Horizon Dashboard. - Add two private networks (net01, and net02). +Expected result +############### - Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01, 192.168.102.0/24) to each network. +VM_1 and VM_2 should be attached to multiple vNIC net01 and net02. Pings should get a response. - Navigate to Project -> Compute -> Instances - Launch instances VM_1 and VM_2 in the network192.168.101.0/24 with image TestVM-TCL and flavor m1.tiny in vcenter1 az. +Check connection between VMs in one tenant. +------------------------------------------- - Launch instances VM_3 and VM_4 in the 192.168.102.0/24 with image TestVM-TCL and flavor m1.tiny in vcenter2 az. - Verify that VMs of same networks should communicate - between each other. Send icmp ping from VM 1 to VM2, VM 3 to VM4 and vice versa. - Verify that VMs of different networks should not communicate - between each other. 
Send icmp ping from VM 1 to VM3, VM_4 to VM_2 and vice versa. - Create Router_01, set gateway and add interface to external network. - Attach private networks to router. +ID +## - Verify that VMs of different networks should communicate between each other. Send icmp ping from VM 1 to VM3, VM_4 to VM_2 and vice versa. - Add new Router_02, set gateway and add interface to external network. - Detach net_02 from Router_01 and attach to Router_02 +nsxv_connectivity_default_tenant - Verify that VMs of different networks should communicate between each other. Send icmp ping from VM 1 to VM3, VM_4 to VM_2 and vice versa -**Expected result** +Description +########### + +Checks connections between VMs inside a tenant. + + +Complexity +########## + +core + + +Steps +##### + + 1. Setup for system tests. + 2. Log in to Horizon Dashboard. + 3. Navigate to Project -> Compute -> Instances + 4. Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny in vcenter1 az. + 5. Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny in vcenter2 az. + 6. Verify that VMs on same tenants should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. + + +Expected result +############### Pings should get a response. -TC-065: Check connectivity between VMs attached on the same provider network with shared router. ------------------------------------------------------------------------------------------------- -**ID** +Check connectivity between VMs attached to different networks with a router between them. +----------------------------------------------------------------------------------------- + + +ID +## + +nsxv_connectivity_diff_networks + + +Description +########### + +Verifies that there is a connection between networks connected through the router. + + +Complexity +########## + +core + + +Steps +##### + + 1. Setup for system tests. + 2. Log in to Horizon Dashboard. + 3. Add two private networks (net01, and net02). + 4. Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01, 192.168.102.0/24) to each network. + 5. Navigate to Project -> Compute -> Instances + 6. Launch instances VM_1 and VM_2 in the network192.168.101.0/24 with image TestVM-VMDK and flavor m1.tiny in vcenter1 az. + 7. Launch instances VM_3 and VM_4 in the 192.168.102.0/24 with image TestVM-VMDK and flavor m1.tiny in vcenter2 az. + 8. Verify that VMs of same networks should communicate + between each other. Send icmp ping from VM 1 to VM2, VM 3 to VM4 and vice versa. + 9. Verify that VMs of different networks should not communicate + between each other. Send icmp ping from VM 1 to VM3, VM_4 to VM_2 and vice versa. + 10. Create Router_01, set gateway and add interface to external network. + 11. Attach private networks to router. + 12. Verify that VMs of different networks should communicate between each other. Send icmp ping from VM 1 to VM3, VM_4 to VM_2 and vice versa. + 13. Add new Router_02, set gateway and add interface to external network. + 14. Detach net_02 from Router_01 and attach to Router_02 + 15. Assign floating IPs for all created VMs. + 16. Verify that VMs of different networks should communicate between each other. Send icmp ping from VM 1 to VM3, VM_4 to VM_2 and vice versa + + +Expected result +############### + +Pings should get a response. + + +Check connectivity between VMs attached on the same provider network with shared router. 
+---------------------------------------------------------------------------------------- + + +ID +## nsxv_connectivity_via_shared_router -**Description** -:: - Checks that it is possible to connect via shared router type. +Description +########### -**Complexity** +Checks that it is possible to connect via shared router type. + + +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Setup for system tests. + 2. Add provider network via cli. + 3. Log in to Horizon Dashboard. + 4. Create shared router(default type) and use it for routing between instances. + 5. Navigate to Project -> compute -> Instances + 6. Launch instance VM_1 in the provider network with image TestVM-VMDK and flavor m1.tiny in the vcenter1 az. + 7. Launch instance VM_2 in the provider network with image TestVM-VMDK and flavor m1.tiny in the vcenter2 az. + 8. Verify that VMs of same provider network should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - Add provider network via cli. - Log in to Horizon Dashboard. - Create shared router(default type) and use it for routing between instances. - Navigate to Project -> compute -> Instances - Launch instance VM_1 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter1 az. - - Launch instance VM_2 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter2 az. - - Verify that VMs of same provider network should communicate - between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - -**Expected result** +Expected result +############### Pings should get a response. -TC-066: Check connectivity between VMs attached on the same provider network with distributed router. ------------------------------------------------------------------------------------------------------ -**ID** +Check connectivity between VMs attached on the same provider network with distributed router. +--------------------------------------------------------------------------------------------- + + +ID +## nsxv_connectivity_via_distributed_router -**Description** -:: - Verifies that there is possibility to connect via distributed router type. +Description +########### -**Complexity** +Verifies that there is possibility to connect via distributed router type. + + +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Setup for system tests. + 2. Add provider network via cli. + 3. Log in to Horizon Dashboard. + 4. Create distributed router and use it for routing between instances. Only available via CLI: + neutron router-create rdistributed --distributed True + 5. Navigate to Project -> compute -> Instances + 6. Launch instance VM_1 in the provider network with image TestVM-VMDK and flavor m1.tiny in the vcenter1 az. + 7. Launch instance VM_2 in the provider network with image TestVM-VMDK and flavor m1.tiny in the vcenter2 az. + 8. Verify that VMs of same provider network should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - Add provider network via cli. - Log in to Horizon Dashboard. - - Create distributed router and use it for routing between instances. Only available via CLI: - neutron router-create rdistributed --distributed True - - Navigate to Project -> compute -> Instances - Launch instance VM_1 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter1 az. - - Launch instance VM_2 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter2 az. 
- - Verify that VMs of same provider network should communicate - between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - -**Expected result** +Expected result +############### Pings should get a response. -TC-067: Check connectivity between VMs attached on the same provider network with exclusive router. ---------------------------------------------------------------------------------------------------- -**ID** +Check connectivity between VMs attached on the same provider network with exclusive router. +------------------------------------------------------------------------------------------- + + +ID +## nsxv_connectivity_via_exclusive_router -**Description** -:: - Verifies that there is possibility to connect via exclusive router type. +Description +########### -**Complexity** +Verifies that there is possibility to connect via exclusive router type. + + +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Setup for system tests. + 2. Add provider network via cli. + 3. Log in to Horizon Dashboard. + 4. Create exclusive router and use it for routing between instances. Only available via CLI: + neutron router-create rexclusive --router_type exclusive + 5. Navigate to Project -> compute -> Instances + 6. Launch instance VM_1 in the provider network with image TestVM-VMDK and flavor m1.tiny in the vcenter1 az. + 7. Launch instance VM_2 in the provider network with image TestVM-VMDK and flavor m1.tiny in the vcenter2 az. + 8. Verify that VMs of same provider network should communicate between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - Add provider network via cli. - Log in to Horizon Dashboard. - - Create exclusive router and use it for routing between instances. Only available via CLI: - neutron router-create rexclusive --router_type exclusive - - Navigate to Project -> compute -> Instances - Launch instance VM_1 in the provider network with image TestVMDK-TCL and flavor m1.tiny in the vcenter1 az. - - Launch instance VM_2 in the provider network with image TestVMDK-TCL and flavor m1.tiny in the vcenter2 az. - - Verify that VMs of same provider network should communicate - between each other. Send icmp ping from VM _1 to VM_2 and vice versa. - -**Expected result** +Expected result +############### Pings should get a response. -TC-068: Check isolation between VMs in different tenants. ---------------------------------------------------------- -**ID** +Check isolation between VMs in different tenants. +------------------------------------------------- + + +ID +## nsxv_different_tenants -**Description** -:: - Verifies isolation in different tenants. +Description +########### -**Complexity** +Verifies isolation in different tenants. + + +Complexity +########## core -**Requre to automate** -Yes +Steps +##### -**Steps** -:: + 1. Setup for system tests. + 2. Log in to Horizon Dashboard. + 3. Create non-admin tenant test_tenant. + 4. Navigate to Identity -> Projects. + 5. Click on Create Project. + 6. Type name test_tenant. + 7. On tab Project Members add admin with admin and member. + Activate test_tenant project by selecting at the top panel. + 8. Navigate to Project -> Network -> Networks + 9. Create network with 2 subnet. + Create Router, set gateway and add interface. + 10. Navigate to Project -> Compute -> Instances + 11. Launch instance VM_1 + 12. Activate default tenant. + 13. Navigate to Project -> Network -> Networks + 14. Create network with subnet. + Create Router, set gateway and add interface. + 15. 
-    Log in to Horizon Dashboard.
-    Create non-admin tenant test_tenant.
-    Navigate to Identity -> Projects.
-
-    Click on Create Project.
-    Type name test_tenant.
-
-    On tab Project Members add admin with admin and member
-
-    Navigate to Project -> Network -> Networks
-
-    Create network with 2 subnet
-    Navigate to Project -> compute -> Instances
-    Launch instance VM_1
-    Navigate to test_tenant
-
-    Navigate to Project -> Network -> Networks
-
-    Create network with subnet.
-    Create Router, set gateway and add interface
-
-    Navigate to Project -> compute -> Instances
-
-    Launch instance VM_2
-
-    Verify that VMs on different tenants should not communicate
-    between each other. Send icmp ping from VM _1 of admin tenant to VM_2 of test_tenant and vice versa.
-
-**Expected result**
+Expected result
+###############

 Pings should not get a response.

-TC-069: Check connectivity between VMs with same ip in different tenants.
---------------------------------------------------------------------------
-**ID**
+Check connectivity between VMs with same ip in different tenants.
+-------------------------------------------------------------------
+
+
+ID
+##

 nsxv_same_ip_different_tenants

-**Description**
-::
-    Verifies connectivity with same IP in different tenants.
+Description
+###########
+
-**Complexity**
+Verifies connectivity between VMs with the same IP in different tenants.
+
+
+Complexity
+##########

 advanced

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Log in to Horizon Dashboard.
+    3. Create 2 non-admin tenants 'test_1' and 'test_2'.
+    4. Navigate to Identity -> Projects.
+    5. Click on Create Project.
+    6. Type name 'test_1' of tenant.
+    7. Click on Create Project.
+    8. Type name 'test_2' of tenant.
+    9. On tab Project Members add admin with admin and member roles.
+    10. In tenant 'test_1' create net1 and subnet1 with CIDR 10.0.0.0/24
+    11. In tenant 'test_1' create security group 'SG_1' and add rule that allows ingress icmp traffic
+    12. In tenant 'test_2' create net2 and subnet2 with CIDR 10.0.0.0/24
+    13. In tenant 'test_2' create security group 'SG_2'
+    14. In tenant 'test_1' add VM_1 of vcenter1 in net1 with ip 10.0.0.4 and 'SG_1' as security group
+        (steps 10-14 can also be done from the CLI, see the sketch after this list).
+    15. In tenant 'test_1' add VM_2 of vcenter2 in net1 with ip 10.0.0.5 and 'SG_1' as security group.
+    16. In tenant 'test_2' create net1 and subnet1 with CIDR 10.0.0.0/24
+    17. In tenant 'test_2' create security group 'SG_1' and add rule that allows ingress icmp traffic
+    18. In tenant 'test_2' add VM_3 of vcenter1 in net1 with ip 10.0.0.4 and 'SG_1' as security group.
+    19. In tenant 'test_2' add VM_4 of vcenter2 in net1 with ip 10.0.0.5 and 'SG_1' as security group.
+    20. Assign floating IPs for all created VMs.
+    21. Verify that VMs with the same IP in different tenants communicate with each other:
+        send an ICMP ping from VM_1 to VM_3 and from VM_2 to VM_4, and vice versa.
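+
+A minimal CLI sketch of steps 10, 11 and 14 for tenant 'test_1'; it assumes the
+credentials of 'test_1' are sourced, the created net1 ID is exported as NET1_ID,
+and the names, addresses and availability zones are the ones used above::
+
+    neutron net-create net1
+    neutron subnet-create net1 10.0.0.0/24 --name subnet1
+    neutron security-group-create SG_1
+    neutron security-group-rule-create --direction ingress --protocol icmp SG_1
+    nova boot VM_1 --image TestVM-VMDK --flavor m1.tiny \
+        --availability-zone vcenter1 \
+        --nic net-id=$NET1_ID,v4-fixed-ip=10.0.0.4 \
+        --security-groups SG_1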
-    Log in to Horizon Dashboard.
-    Create 2 non-admin tenants ‘test_1’ and ‘test_2’.
-    Navigate to Identity -> Projects.
-    Click on Create Project.
-
-    Type name ‘test_1’ of tenant.
-
-    Click on Create Project.
-
-    Type name ‘test_2’ of tenant.
-
-    On tab Project Members add admin with admin and member.
-
-    In tenant ‘test_1’ create net1 and subnet1 with CIDR 10.0.0.0/24
-    In tenant ‘test_1’ create security group ‘SG_1’ and add rule that allows ingress icmp traffic
-    In tenant ‘test_2’ create net2 and subnet2 with CIDR 10.0.0.0/24
-    In tenant ‘test_2’ create security group ‘SG_2’
-
-    In tenant ‘test_1’ add VM_1 of vcenter1 in net1 with ip 10.0.0.4 and ‘SG_1’ as security group.
-    In tenant ‘test_1’ add VM_2 of vcenter2 in net1 with ip 10.0.0.5 and ‘SG_1’ as security group.
-    In tenant ‘test_2’ create net1 and subnet1 with CIDR 10.0.0.0/24
-    In tenant ‘test_2’ create security group ‘SG_1’ and add rule that allows ingress icmp traffic
-    In tenant ‘test_2’ add VM_3 of vcenter1 in net1 with ip 10.0.0.4 and ‘SG_1’ as security group.
-    In tenant ‘test_2’ add VM_4 of vcenter2 in net1 with ip 10.0.0.5 and ‘SG_1’ as security group.
-    Verify that VMs with same ip on different tenants should communicate
-    between each other. Send icmp ping from VM _1 to VM_3, VM_2 to Vm_4 and vice versa.
-
-**Expected result**
+Expected result
+###############

 Pings should get a response.

-TC-070: Check connectivity Vms to public network.
---------------------------------------------------
-**ID**
+Check connectivity of VMs to public network.
+----------------------------------------------
+
+
+ID
+##

 nsxv_public_network_availability

-**Description**
-::
-    Verifies that public network is available.
+Description
+###########
+
-**Complexity**
+Verifies that the public network is available.
+
+
+Complexity
+##########

 core

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Log in to Horizon Dashboard.
+    3. Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
+    4. Launch instance VM_1 of vcenter1 AZ with image TestVM-VMDK and flavor m1.tiny in the net_04.
+    5. Launch instance VM_2 of vcenter2 AZ with image TestVM-VMDK and flavor m1.tiny in the net_01.
+    6. Send a ping from instances VM_1 and VM_2 to 8.8.8.8 or another outside IP.

-    Log in to Horizon Dashboard.
-    Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
-    Launch instance VM_1 of vcenter1 AZ with image TestVM-TCL and flavor m1.tiny in the net_04.
-    Launch instance VM_1 of vcenter2 AZ with image TestVM-TCL and flavor m1.tiny in the net_01.
-    Send ping from instances VM_1 and VM_2 to 8.8.8.8 or other outside ip.
-
-**Expected result**
+Expected result
+###############

 Pings should get a response

-TC-071: Check connectivity Vms to public network with floating ip.
--------------------------------------------------------------------
-**ID**
+Check connectivity of VMs to public network with floating ip.
+----------------------------------------------------------------
+
+
+ID
+##

 nsxv_floating_ip_to_public

-**Description**
-::
-    Verifies that public network is available via floating ip.
+Description
+###########
+
-**Complexity**
+Verifies that the public network is available via floating ip.
+
+
+Complexity
+##########

 core

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Log in to Horizon Dashboard.
+    3. Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
+    4. Launch instance VM_1 of vcenter1 AZ with image TestVM-VMDK and flavor m1.tiny in the net_04.
+       Associate floating ip (see the CLI sketch after this list).
+    5. Launch instance VM_2 of vcenter2 AZ with image TestVM-VMDK and flavor m1.tiny in the net_01.
+       Associate floating ip.
+    6. Send a ping from instances VM_1 and VM_2 to 8.8.8.8 or another outside IP.
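+
+The floating IP association from steps 4 and 5 can also be done from the CLI.
+A minimal sketch; net04_ext as the external network name and the allocated
+address are assumptions that depend on the environment::
+
+    neutron floatingip-create net04_ext
+    nova floating-ip-associate VM_1 <allocated-floating-ip>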
-    Log in to Horizon Dashboard
-    Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
-    Launch instance VM_1 of vcenter1 AZ with image TestVM-TCL and flavor m1.tiny in the net_04. Associate floating ip.
-    Launch instance VM_1 of vcenter2 AZ with image TestVM-TCL and flavor m1.tiny in the net_01. Associate floating ip.
-
-    Send ping from instances VM_1 and VM_2 to 8.8.8.8 or other outside ip.
-
-**Expected result**
+Expected result
+###############

 Pings should get a response

-TC-072: Check abilities to create and delete security group.
--------------------------------------------------------------
-**ID**
+Check abilities to create and delete security group.
+-------------------------------------------------------
+
+
+ID
+##

 nsxv_create_and_delete_secgroups

-**Description**
-::
-    Verifies that creation and deletion security group works fine.
+Description
+###########
+
-**Complexity**
+Verifies that creation and deletion of security groups work fine.
+
+
+Complexity
+##########

 advanced

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Log in to Horizon Dashboard.
+    3. Launch instance VM_1 in the tenant network net_02 with image TestVM-VMDK and flavor m1.tiny in the vcenter1 az.
+    4. Launch instance VM_2 in the tenant network net_02 with image TestVM-VMDK and flavor m1.tiny in the vcenter2 az.
+    5. Create security group SG_1 to allow ICMP traffic.
+    6. Add an ingress rule for the ICMP protocol to SG_1.
+    7. Attach SG_1 to the VMs.
+    8. Check ping between VM_1 and VM_2 and vice versa.
+    9. Create security group SG_2 to allow TCP traffic on port 22.
+       Add an ingress rule for the TCP protocol to SG_2 (see the CLI sketch after this list).
+    10. Attach SG_2 to the VMs.
+    11. SSH from VM_1 to VM_2 and vice versa.
+    12. Delete custom rules from SG_1 and SG_2.
+    13. Check that ping and SSH are not available from VM_1 to VM_2 and vice versa.
+    14. Add an ingress rule for the ICMP protocol to SG_1.
+    15. Add an ingress rule for SSH to SG_2.
+    16. Check ping between VM_1 and VM_2 and vice versa.
+    17. Check SSH from VM_1 to VM_2 and vice versa.
+    18. Attach the VMs to the default security group.
+    19. Delete security groups SG_1 and SG_2.
+    20. Check ping between VM_1 and VM_2 and vice versa.
+    21. Check SSH from VM_1 to VM_2 and vice versa.
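+
+The ingress rules from steps 6 and 9 can also be managed from the CLI; the rule
+IDs for the deletion in step 12 can be listed with neutron
+security-group-rule-list. A minimal sketch::
+
+    neutron security-group-create SG_1
+    neutron security-group-rule-create --direction ingress --protocol icmp SG_1
+    neutron security-group-create SG_2
+    neutron security-group-rule-create --direction ingress --protocol tcp \
+        --port-range-min 22 --port-range-max 22 SG_2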
-    Log in to Horizon Dashboard.
-    Launch instance VM_1 in the tenant network net_02 with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
-    Launch instance VM_2 in the tenant net_02 with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
-    Create security groups SG_1 to allow ICMP traffic.
-    Add Ingress rule for ICMP protocol to SG_1
-
-    Attach SG_1 to VMs
-
-    Check ping between VM_1 and VM_2 and vice verse
-
-    Create security groups SG_2 to allow TCP traffic 80 port.
-    Add Ingress rule for TCP protocol to SG_2
-
-    Attach SG_2 to VMs
-
-    ssh from VM_1 to VM_2 and vice verse
-    Delete all rules from SG_1 and SG_2
-
-    Check ping and ssh aren’t available from VM_1 to VM_2 and vice verse
-    Add Ingress rule for ICMP protocol to SG_1
-
-    Add Ingress rule for TCP protocol to SG_2
-
-    Check ping between VM_1 and VM_2 and vice verse
-
-    Check ssh from VM_1 to VM_2 and vice verse
-    Delete security groups.
-    Attach Vms to default security group.
-
-    Check ping between VM_1 and VM_2 and vice verse
-    Check SSH from VM_1 to VM_2 and vice verse
-
-**Expected result**
+Expected result
+###############

 We should have the ability to send ICMP and TCP traffic between VMs in different tenants.

-TC-073: Verify that only the associated MAC and IP addresses can communicate on the logical port.
---------------------------------------------------------------------------------------------------
-**ID**
+Verify that only the associated MAC and IP addresses can communicate on the logical port.
+--------------------------------------------------------------------------------------------
+
+
+ID
+##

 nsxv_associated_addresses_communication_on_port

-**Description**
-::
-    Verifies that only associated addresses can communicate on the logical port.
+Description
+###########
+
-**Complexity**
+Verifies that only the associated MAC and IP addresses can communicate on the logical port.
+
+
+Complexity
+##########

 core

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Log in to Horizon Dashboard.
+    3. Launch 2 instances in each AZ.
+    4. Verify that traffic can be successfully sent from and received on the MAC and IP address associated with the logical port.
+    5. Configure a new IP address, from the same subnet but different from the original one, on the instance associated with the logical port.
+       * ifconfig eth0 down
+       * ifconfig eth0 192.168.99.14 netmask 255.255.255.0
+       * ifconfig eth0 up
+    6. Confirm that the instance cannot communicate with that IP address.
+    7. Configure a new MAC address on the instance associated with the logical port.
+       * ifconfig eth0 down
+       * ifconfig eth0 hw ether 00:80:48:BA:d1:30
+       * ifconfig eth0 up
+    8. Confirm that the instance cannot communicate with that MAC address and the original IP address.

-    Log in to Horizon Dashboard.
-    Launch 2 instances.
-    Verify that traffic can be successfully sent from and received on the MAC and IP address associated with the logical port.
-    Configure a new IP address on the instance associated with the logical port.
-    Confirm that the instance cannot communicate with that IP address.
-    Configure a new MAC address on the instance associated with the logical port.
-    Confirm that the instance cannot communicate with that MAC address and the original IP address.
-
-**Expected result**
+Expected result
+###############

 Instance should not communicate with new ip and mac addresses but it should communicate with old IP.

-TC-075: Check creation instance in the one group simultaneously.
-----------------------------------------------------------------
-**ID**
+Check creation of instances in one group simultaneously.
+-----------------------------------------------------------
+
+
+ID
+##

 nsxv_create_and_delete_vms

-**Description**
-::
-    Verifies that system could create and delete several instances simultaneously.
+Description
+###########
+
-**Complexity**
+Verifies that the system can create and delete several instances simultaneously.
+
+
+Complexity
+##########

 core

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Navigate to Project -> Compute -> Instances
+    3. Launch 5 instances VM_1 simultaneously with image TestVM-VMDK and flavor m1.tiny in the vcenter1 az in the default net_04
+       (see the CLI sketch after this list).
+    4. All instances should be created without any errors.
+    5. Launch 5 instances VM_2 simultaneously with image TestVM-VMDK and flavor m1.tiny in the vcenter2 az in the default net_04.
+    6. All instances should be created without any errors.
+    7. Check connection between VMs (ping, ssh).
+    8. Delete all VMs from Horizon simultaneously.
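+
+Steps 3 and 5 can also be done with a single command per availability zone. A
+minimal sketch; it assumes the net_04 ID is exported as NET04_ID, and the image,
+flavor and availability zone names are the ones used in the steps above::
+
+    nova boot VM_1 --image TestVM-VMDK --flavor m1.tiny \
+        --availability-zone vcenter1 --nic net-id=$NET04_ID \
+        --min-count 5 --max-count 5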
-    Navigate to Project -> Compute -> Instances
-    Launch 5 instance VM_1 simultaneously with image TestVM-TCL and flavor m1.micro in vcenter1 az in default net_04
-    All instance should be created without any error.
-
-    Launch 5 instance VM_2 simultaneously with image TestVM-TCL and flavor m1.micro in vcenter2 az in default net_04
-
-    All instance should be created without any error.
-
-    Check connection between VMs (ping, ssh)
-
-    Delete all VMs from horizon simultaneously.
-
-**Expected result**
+Expected result
+###############

 All instances should be created without any errors.

-TC-076: Check that environment support assigning public network to all nodes
------------------------------------------------------------------------------
-**ID**
+Check that environment supports assigning public network to all nodes
+------------------------------------------------------------------------
+
+
+ID
+##

 nsxv_public_network_to_all_nodes

-**Description**
-::
-    Verifies that checkbox "Assign public network to all nodes" works as designed.
-    Assuming default installation has been done with unchecked option "Assign public network to all nodes".
+Description
+###########
+
-**Complexity**
+Verifies that the checkbox "Assign public network to all nodes" works as designed.
+
+It is assumed that the default installation has been done with the option
+"Assign public network to all nodes" unchecked.
+
+
+Complexity
+##########

 core

-**Requre to automate**
-Yes
+Steps
+#####
+
-**Steps**
-::
+    1. Setup for system tests.
+    2. Connect through ssh to the controller node. Run 'ifconfig'.
+    3. Connect through ssh to the compute-vmware node. Run 'ifconfig'.
+    4. Redeploy the environment with the option Public network assignment -> Assign public network to all nodes checked.
+    5. Connect through ssh to the controller node. Run 'ifconfig'.
+    6. Connect through ssh to the compute-vmware node. Run 'ifconfig'.

-    Connect through ssh to Controller node.
-    Run 'ifconfig'. There is an interface with ip from public network IP Range (Networks tab).
-    Connect through ssh to compute-vmware node.
-    Run 'ifconfig'. There is no interface with ip from public network IP Range.
-    Redeploy environment with checked option Public network assignment -> Assign public network to all nodes.Option is checked after deploy.
-    Connect through ssh to Controller node.
-    Run 'ifconfig'. There is an interface with ip from public network IP Range.
-    Connect through ssh to compute-vmware node.
-    Run 'ifconfig'. There is an interface with ip from public network IP Range also.
-**Expected result**
+Expected result
+###############
+
-"Assign public network to all nodes" works as designed.
+Verify that before the cluster redeployment (option unchecked) only the controllers
+have an IP from the public network IP range and the other nodes do not.
+Verify that after the cluster redeployment with the option checked all nodes have
+an IP from the public IP range.
+
+
+Verify LBaaS functionality
+----------------------------
+
+
+ID
+##
+
+nsxv_lbaas
+
+
+Description
+###########
+
+Set up LBaaS before the test. The plugin requires an exclusive router to be
+attached to the subnet prior to provisioning of a load balancer. You cannot use
+port 22 as the VIP port if ssh access is enabled on the edge.
+
+
+Complexity
+##########
+
+advanced
+
+
+Steps
+#####
+
+    1. Setup for system tests.
+    2. * Create private network.
+       * Create exclusive router (neutron router-create rexclusive --router_type exclusive).
+       * Attach the router to the external and private networks.
+    3. Create a security group that allows SSH (on a port other than 22, e.g. 6022) and HTTP traffic.
+    4. * Create three instances based on the TestVM-VMDK image.
+       * Use the created private network and security group.
+    5. Configure a Load Balancer, or several for different protocols. Here is an example for TCP.
+       * From Networks -> Load Balancers press button Add Pool.
+         Example of settings:
+         Provider               vmwareedge
+         Subnet                 subnet 10.130.0.0/24
+         Protocol               TCP
+         Load Balancing Method  ROUND_ROBIN
+       * Add members.
+         Members:
+         10.130.0.3:22
+         10.130.0.4:22
+         10.130.0.5:22
+       * Add Monitor:
+         Health Monitors  PING delay:2 retries:2 timeout:2
+    6. Add VIP.
+       Example of settings:
+       Subnet                    subnet 10.130.0.0/24
+       Address                   10.130.0.6
+       Floating IP               172.16.211.103
+       Protocol Port             6022
+       Protocol                  TCP
+       Pool                      Name_from_step5
+       Session Persistence Type: ROUND_ROBIN
+       Connection Limit          -1
+    7. If an LB with TCP was configured:
+       try to connect to the floating IP 172.16.211.103 on the VIP port using any TCP tool,
+       e.g. Mausezahn (the mz package in Ubuntu) or another one.
+    8. If an LB with HTTP was configured:
+       create a file index.html on the instances, like::
+
+          <html>
+          <body>
+          Hi
+          </body>
+          </html>
+
+       Run a simple HTTP server on the instances::
+
+          while true; do { echo -e 'HTTP/1.1 200 OK\r\n'; cat index.html; } | sudo nc -l -p 80; done
+
+       Generate HTTP traffic on the VIP floating IP.
+
+       Script to send HTTP GET requests in parallel::
+
+          #!/bin/bash
+          LIMIT=100
+          for ((a=1; a <= LIMIT; a++)); do
+             curl http://172.16.211.127/ &
+          done
+    9. * Change Load Balancing Method to SOURCE_IP.
+       * Generate traffic.
+    10. * Delete one instance from Members.
+        * Generate traffic.
+    11. * Add this member again.
+        * Generate traffic.
+
+
+Expected result
+###############
+
+All steps passed without errors.
+
+
+Deploy cluster with enabled SpoofGuard
+----------------------------------------
+
+
+ID
+##
+
+nsxv_spoofguard
+
+
+Description
+###########
+
+The NSXv SpoofGuard component is used to implement the port-security feature.
+If a virtual machine has been compromised,
+the IP address can be spoofed and malicious transmissions can bypass firewall policies.
+See http://pubs.vmware.com/NSX-62/topic/com.vmware.ICbase/PDF/nsx_62_admin.pdf p.137.
+
+
+Complexity
+##########
+
+core
+
+
+Steps
+#####
+
+    1. Deploy cluster with enabled SpoofGuard.
+    2. Run OSTF.
+    3. Set up SpoofGuard:
+       * In the vSphere Web Client, navigate to Networking & Security > SpoofGuard.
+       * Click the Add icon.
+       * Type a name for the policy.
+       * Select Enabled or Disabled to indicate whether the policy is enabled.
+       * For Operation Mode, select
+         Automatically Trust IP Assignments
+         on Their First Use
+       * Click Allow local address as valid address in this namespace to allow local IP addresses in your setup.
+         When you power on a virtual machine and it is unable to connect to the DHCP server, a local IP address
+         is assigned to it. This local IP address is considered valid only if the SpoofGuard mode is set to
+         Allow local address as valid address in this namespace. Otherwise, the local IP address is ignored.
+       * Click Next.
+       * To specify the scope for the policy, click Add and select the networks, distributed port groups, or
+         logical switches that this policy should apply to.
+         A port group or logical switch can belong to only one SpoofGuard policy.
+       * Click OK and then click Finish.
+    4. Run OSTF.
+
+
+Expected result
+###############
+
+All OSTF test cases should pass, except for the exceptions described in the
+Limitations section of the test plan.
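+
+A quick way to check that SpoofGuard is actually enforcing is to repeat the
+address-change check from the nsxv_associated_addresses_communication_on_port
+test case on an instance attached to a protected logical switch; the addresses
+below are examples only::
+
+    ifconfig eth0 down
+    ifconfig eth0 192.168.99.14 netmask 255.255.255.0
+    ifconfig eth0 up
+    ping -c 4 192.168.99.1    # expected to fail until the new IP is approved in SpoofGuard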