Merge "Add scale and integration autotests from Test Plan"

Jenkins 2016-10-24 11:39:56 +00:00 committed by Gerrit Code Review
commit 042d03e583
8 changed files with 941 additions and 13 deletions

View File

@@ -65,7 +65,7 @@ core
 Steps
 #####
-1. Log in to the Fuel with preinstalled plugin and deployed enviroment with 3 controllers.
+1. Log in to the Fuel with preinstalled plugin and deployed enviroment with 3 controllers and 1 compute.
 2. Log in to Horizon.
 3. Create vcenter VM and check connectivity to outside world from VM.
 4. Shutdown primary controller.

View File

@@ -42,7 +42,7 @@ Steps
 4. Configure interfaces on nodes.
 5. Configure network settings.
 6. Enable and configure NSX-T plugin.
-7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on conrollers and compute-vmware.
+7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
 8. Verify networks.
 9. Deploy cluster.
 10. Run OSTF.

View File

@@ -36,21 +36,19 @@ Steps
 * Controller
 * Controller
 * Controller
-* Controller
-* Cinder-vmware
-* Compute-vmware
+* Compute
 4. Configure interfaces on nodes.
 5. Configure network settings.
 6. Enable and configure NSX-T plugin.
-7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on conrollers and compute-vmware.
+7. Configure VMware vCenter Settings. Add vSphere clusters and configure Nova Compute instance on controllers.
 8. Deploy cluster.
 9. Run OSTF.
-10. Launch 1 KVM and 1 vcenter VMs.
-11. Remove node with controller role.
+10. Launch 1 vcenter instance and 1 nova instance.
+11. Add 2 controller nodes.
 12. Redeploy cluster.
 13. Check that all instances are in place.
 14. Run OSTF.
-15. Add controller.
+15. Delete 2 controller nodes.
 16. Redeploy cluster.
 17. Check that all instances are in place.
 18. Run OSTF.
@@ -103,7 +101,7 @@ Steps
 6. Enable and configure NSX-T plugin.
 7. Deploy cluster.
 8. Run OSTF.
-9. Launch KVM vm.
+9. Launch instance.
 10. Add node with compute role.
 11. Redeploy cluster.
 12. Check that all instances are in place.
@@ -163,12 +161,12 @@ Steps
 8. Deploy cluster.
 9. Run OSTF.
 10. Launch vcenter vm.
-11. Remove node with compute-vmware role.
+11. Add node with compute-vmware role.
 12. Reconfigure vcenter compute clusters.
 13. Redeploy cluster.
 14. Check vm instance has been removed.
 15. Run OSTF.
-16. Add node with compute-vmware role.
+16. Remove node with compute-vmware role from base installation.
 17. Reconfigure vcenter compute clusters.
 18. Redeploy cluster.
 19. Run OSTF.

View File

@@ -41,7 +41,7 @@ Steps
 4. Configure interfaces on nodes.
 5. Configure network settings.
 6. Enable and configure NSX-T plugin.
-7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instance on conrollers.
+7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instance on controllers.
 8. Verify networks.
 9. Deploy cluster.
 10. Run OSTF.

View File

@@ -0,0 +1,394 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

import time

import paramiko
from proboscis.asserts import assert_true

from devops.helpers.helpers import icmp_ping
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait

from fuelweb_test import logger
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.helpers.utils import pretty_log

from helpers import settings

# Defaults
external_net_name = settings.ADMIN_NET
zone_image_maps = {
    'vcenter': 'TestVM-VMDK',
    'nova': 'TestVM',
    'vcenter-cinder': 'TestVM-VMDK'
}
instance_creds = (settings.VM_USER, settings.VM_PASS)


def create_instance(os_conn, net=None, az='nova', sg_names=None,
                    flavor_name='m1.micro', timeout=180, **kwargs):
    """Create instance with specified az and flavor.

    :param os_conn: OpenStack
    :param net: network object (default is private net)
    :param az: availability zone name
    :param sg_names: list of security group names
    :param flavor_name: name of flavor
    :param timeout: seconds to wait for creation
    :return: vm
    """
    sg_names = sg_names if sg_names else ['default']

    def find_by_name(objects, name):
        for obj in objects:
            if obj.name == name:
                return obj

    image = find_by_name(os_conn.nova.images.list(), zone_image_maps[az])
    flavor = find_by_name(os_conn.nova.flavors.list(), flavor_name)
    net = net if net else os_conn.get_network(settings.PRIVATE_NET)
    sg = [os_conn.get_security_group(name) for name in sg_names]

    vm = os_conn.create_server(availability_zone=az,
                               timeout=timeout,
                               image=image,
                               net_id=net['id'],
                               security_groups=sg,
                               flavor_id=flavor.id,
                               **kwargs)
    return vm


def check_instances_state(os_conn):
    """Check that instances were not deleted and have 'active' status."""
    instances = os_conn.nova.servers.list()
    for inst in instances:
        assert_true(not os_conn.is_srv_deleted(inst))
        assert_true(os_conn.get_instance_detail(inst).status == 'ACTIVE')


def check_connection_vms(ip_pair, command='pingv4', result_of_command=0,
                         timeout=30, interval=5):
    """Check network connectivity between instances.

    :param ip_pair: type dict, {ip_from: [ip_to1, ip_to2, etc.]}
    :param command: type string, key 'pingv4', 'pingv6' or 'arping'
    :param result_of_command: type integer, exit code of command execution
    :param timeout: wait to get expected result
    :param interval: interval of executing command
    """
    commands = {
        'pingv4': 'ping -c 5 {}',
        'pingv6': 'ping6 -c 5 {}',
        'arping': 'sudo arping -I eth0 {}'
    }
    msg = 'Command "{0}", Actual exit code is NOT {1}'

    for ip_from in ip_pair:
        with get_ssh_connection(ip_from, instance_creds[0],
                                instance_creds[1]) as ssh:
            for ip_to in ip_pair[ip_from]:
                logger.info('Check connection from {0} to {1}'.format(
                    ip_from, ip_to))
                cmd = commands[command].format(ip_to)

                wait(lambda:
                     execute(ssh, cmd)['exit_code'] == result_of_command,
                     interval=interval,
                     timeout=timeout,
                     timeout_msg=msg.format(cmd, result_of_command))


def check_connection_through_host(remote, ip_pair, command='pingv4',
                                  result_of_command=0, timeout=30,
                                  interval=5):
    """Check network connectivity between instances through access point.

    :param remote: access point IP
    :param ip_pair: type dict, {ip_from: [ip_to1, ip_to2, etc.]}
    :param command: type string, key 'pingv4', 'pingv6' or 'arping'
    :param result_of_command: type integer, exit code of command execution
    :param timeout: wait to get expected result
    :param interval: interval of executing command
    """
    commands = {
        'pingv4': 'ping -c 5 {}',
        'pingv6': 'ping6 -c 5 {}',
        'arping': 'sudo arping -I eth0 {}'
    }
    msg = 'Command "{0}", Actual exit code is NOT {1}'

    for ip_from in ip_pair:
        for ip_to in ip_pair[ip_from]:
            logger.info('Check ping from {0} to {1}'.format(ip_from, ip_to))
            cmd = commands[command].format(ip_to)

            wait(lambda:
                 remote_execute_command(
                     remote,
                     ip_from,
                     cmd,
                     wait=timeout)['exit_code'] == result_of_command,
                 interval=interval,
                 timeout=timeout,
                 timeout_msg=msg.format(cmd, result_of_command))


def ping_each_other(ips, command='pingv4', expected_ec=0,
                    timeout=30, interval=5, access_point_ip=None):
    """Check network connectivity between instances.

    :param ips: list, list of ips
    :param command: type string, key 'pingv4', 'pingv6' or 'arping'
    :param expected_ec: type integer, exit code of command execution
    :param timeout: wait to get expected result
    :param interval: interval of executing command
    :param access_point_ip: it is used if check is done via access point host
    """
    ip_pair = {key: [ip for ip in ips if ip != key] for key in ips}

    if access_point_ip:
        check_connection_through_host(remote=access_point_ip,
                                      ip_pair=ip_pair,
                                      command=command,
                                      result_of_command=expected_ec,
                                      timeout=timeout,
                                      interval=interval)
    else:
        check_connection_vms(ip_pair=ip_pair,
                             command=command,
                             result_of_command=expected_ec,
                             timeout=timeout,
                             interval=interval)


def create_and_assign_floating_ips(os_conn, instances):
    """Associate floating ips with specified instances.

    :param os_conn: type object, openstack
    :param instances: type list, instances
    """
    fips = []
    for instance in instances:
        ip = os_conn.assign_floating_ip(instance).ip
        fips.append(ip)
        wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
    return fips


def get_ssh_connection(ip, username, userpassword, timeout=30, port=22):
    """Get ssh connection to host.

    :param ip: string, host ip to connect to
    :param username: string, a username to use for authentication
    :param userpassword: string, a password to use for authentication
    :param timeout: timeout (in seconds) for the TCP connection
    :param port: host port to connect to
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip, port=port, username=username,
                password=userpassword, timeout=timeout)
    return ssh


def execute(ssh_client, command):
    """Execute command on remote host.

    :param ssh_client: SSHClient to instance
    :param command: type string, command to execute
    """
    channel = ssh_client.get_transport().open_session()
    channel.exec_command(command)
    result = {
        'stdout': channel.recv(1024),
        'stderr': channel.recv_stderr(1024),
        'exit_code': channel.recv_exit_status()
    }
    return result


def remote_execute_command(instance1_ip, instance2_ip, command, wait=30):
    """Execute remote command on an instance through another instance.

    :param instance1_ip: string, instance ip to connect from
    :param instance2_ip: string, instance ip to connect to
    :param command: string, remote command
    :param wait: integer, time to wait for available ip of instances
    """
    with get_ssh_connection(instance1_ip, *instance_creds) as ssh:
        interm_transp = ssh.get_transport()
        try:
            logger.info('Opening channel between VMs {0} and {1}'.format(
                instance1_ip, instance2_ip))
            interm_chan = interm_transp.open_channel('direct-tcpip',
                                                     (instance2_ip, 22),
                                                     (instance1_ip, 0))
        except Exception as e:
            message = '{} Wait to update sg rules. Try to open channel again'
            logger.info(message.format(e))
            time.sleep(wait)
            interm_chan = interm_transp.open_channel('direct-tcpip',
                                                     (instance2_ip, 22),
                                                     (instance1_ip, 0))
        transport = paramiko.Transport(interm_chan)
        transport.start_client()

        logger.info('Passing authentication to VM')
        transport.auth_password(instance_creds[0], instance_creds[1])

        channel = transport.open_session()
        channel.get_pty()
        channel.fileno()
        channel.exec_command(command)

        logger.debug('Receiving exit_code, stdout, stderr')
        result = {
            'stdout': channel.recv(1024),
            'stderr': channel.recv_stderr(1024),
            'exit_code': channel.recv_exit_status()
        }
        logger.debug('Command: {}'.format(command))
        logger.debug(pretty_log(result))

        logger.debug('Closing channel')
        channel.close()

    return result


def get_role(os_conn, role_name):
    """Get role by name."""
    role_list = os_conn.keystone.roles.list()
    for role in role_list:
        if role.name == role_name:
            return role


def add_role_to_user(os_conn, user_name, role_name, tenant_name):
    """Assign role to user.

    :param os_conn: type object
    :param user_name: type string
    :param role_name: type string
    :param tenant_name: type string
    """
    tenant_id = os_conn.get_tenant(tenant_name).id
    user_id = os_conn.get_user(user_name).id
    role_id = get_role(os_conn, role_name).id
    os_conn.keystone.roles.add_user_role(user_id, role_id, tenant_id)


def check_service(ip, commands):
    """Check that required nova services are running on controller.

    :param ip: ip address of node
    :param commands: type list, nova commands to execute on controller,
                     example of commands:
                     ['nova-manage service list | grep vcenter-vmcluster1']
    """
    ssh_manager = SSHManager()
    ssh_manager.check_call(ip=ip, command='source openrc')

    for cmd in commands:
        wait(lambda:
             ':-)' in ssh_manager.check_call(ip=ip, command=cmd).stdout[-1],
             timeout=200)


def create_instances(os_conn, nics, vm_count=1,
                     security_groups=None, available_hosts=None,
                     flavor_name='m1.micro'):
    """Create VMs on available hypervisors.

    :param os_conn: type object, openstack
    :param nics: type dictionary, neutron networks to assign to instance
    :param vm_count: type integer, count of VMs to create
    :param security_groups: list of security group names
    :param available_hosts: available hosts for creating instances
    :param flavor_name: name of flavor
    """
    def find_by_name(objects, name):
        for obj in objects:
            if obj.name == name:
                return obj

    # Get list of available images, flavors and hypervisors
    instances = []
    images = os_conn.nova.images.list()
    flavor = find_by_name(os_conn.nova.flavors.list(), flavor_name)

    if not available_hosts:
        available_hosts = os_conn.nova.services.list(binary='nova-compute')

    for host in available_hosts:
        image = find_by_name(images, zone_image_maps[host.zone])
        instance = os_conn.nova.servers.create(
            flavor=flavor,
            name='test_{0}'.format(image.name),
            image=image,
            min_count=vm_count,
            availability_zone='{0}:{1}'.format(host.zone, host.host),
            nics=nics, security_groups=security_groups)
        instances.append(instance)
    return instances


def verify_instance_state(os_conn, instances=None, expected_state='ACTIVE',
                          boot_timeout=300):
    """Verify that the current state of each instance is the expected one.

    :param os_conn: type object, openstack
    :param instances: type list, list of created instances
    :param expected_state: type string, expected state of instance
    :param boot_timeout: type int, time in seconds to build instance
    """
    if not instances:
        instances = os_conn.nova.servers.list()
    for instance in instances:
        wait(lambda:
             os_conn.get_instance_detail(instance).status == expected_state,
             timeout=boot_timeout,
             timeout_msg='Timeout is reached. '
                         'Current state of VM {0} is {1}. '
                         'Expected state is {2}'.format(
                             instance.name,
                             os_conn.get_instance_detail(instance).status,
                             expected_state))


def create_access_point(os_conn, nics, security_groups):
    """Create access point.

    Creating instance with floating ip as access point to instances
    with private ip in the same network.

    :param os_conn: type object, openstack
    :param nics: type dictionary, neutron networks to assign to instance
    :param security_groups: list of security group names
    """
    # Get any available host
    host = os_conn.nova.services.list(binary='nova-compute')[0]

    # Create access point server
    access_point = create_instances(os_conn=os_conn, nics=nics,
                                    vm_count=1,
                                    security_groups=security_groups,
                                    available_hosts=[host]).pop()
    verify_instance_state(os_conn)

    access_point_ip = os_conn.assign_floating_ip(
        access_point, use_neutron=True)['floating_ip_address']
    wait(lambda: tcp_ping(access_point_ip, 22), timeout=60 * 5, interval=5)
    return access_point, access_point_ip
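
The helpers above are meant to be composed: create_access_point boots a VM with a floating IP that fronts instances holding only private addresses, and ping_each_other then routes its connectivity checks through that host. A minimal sketch of the intended flow, assuming an os_conn wrapper like the os_actions.OpenStackActions object the tests build; the network name and the get_nova_instance_ip() accessor are assumptions, not part of this commit:

    # Hypothetical usage sketch of the helpers in this file.
    from helpers import openstack as os_help

    def check_private_connectivity(os_conn, net_name='admin_internal_net'):
        net = os_conn.get_network(net_name)  # assumed network name
        nics = [{'net-id': net['id']}]
        sg = ['default']

        # One instance per available hypervisor, all on the private network
        instances = os_help.create_instances(os_conn, nics=nics,
                                             security_groups=sg)
        os_help.verify_instance_state(os_conn)

        # Access point VM with a floating IP fronting the private network
        _, access_point_ip = os_help.create_access_point(os_conn, nics, sg)

        # Pings between private IPs are tunneled through the access point
        ips = [os_conn.get_nova_instance_ip(vm, net_name=net_name)  # assumed
               for vm in instances]
        os_help.ping_each_other(ips, access_point_ip=access_point_ip)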

View File

@@ -43,6 +43,8 @@ class CloseSSHConnectionsPlugin(Plugin):
 def import_tests():
     from tests import test_plugin_nsxt  # noqa
+    from tests import test_plugin_integration  # noqa
+    from tests import test_plugin_scale  # noqa


 def run_tests():
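
For context, the two added imports register the new suites with proboscis at import time; run_tests() then hands control to the proboscis runner. A sketch of what such an entry point typically looks like; only the two added imports are from this commit, the rest is an assumption about the surrounding runner:

    # Sketch of a proboscis entry point in the style of the hunk above.
    from proboscis import TestProgram


    def import_tests():
        from tests import test_plugin_nsxt  # noqa
        from tests import test_plugin_integration  # noqa
        from tests import test_plugin_scale  # noqa


    def run_tests():
        # Importing the modules registers every @test-decorated case;
        # proboscis then resolves groups and dependencies and runs them.
        import_tests()
        TestProgram().run_and_exit()


    if __name__ == '__main__':
        # Select only the new suites with, e.g.:
        #   python run_tests.py --group=nsxt_integration --group=nsxt_scale
        run_tests()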

View File

@@ -0,0 +1,175 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

from proboscis import test

from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from tests.base_plugin_test import TestNSXtBase


@test(groups=['nsxt_plugin', 'nsxt_integration'])
class TestNSXtIntegration(TestNSXtBase):
    """Tests from test plan that have been marked as 'Automated'."""

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_ceilometer'])
    @log_snapshot_after_test
    def nsxt_ceilometer(self):
        """Check environment deployment with Fuel NSX-T plugin and Ceilometer.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU with vCenter
                * Networking: Neutron with NSX-T plugin
                * Storage: default
                * Additional services: Ceilometer
            3. Add nodes with the following roles:
                * Controller + Mongo
                * Controller + Mongo
                * Controller + Mongo
                * Compute-vmware
                * Compute
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Configure VMware vCenter Settings.
               Add 2 vSphere clusters and configure Nova Compute instances
               on controllers and compute-vmware.
            7. Verify networks.
            8. Deploy cluster.
            9. Run OSTF.

        Duration: 180
        """
        # Install NSX-T plugin to Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment with Ceilometer
        settings = self.default.cluster_settings
        settings['ceilometer'] = True

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=settings)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller', 'mongo'],
                                    'slave-02': ['controller', 'mongo'],
                                    'slave-03': ['controller', 'mongo'],
                                    'slave-04': ['compute-vmware'],
                                    'slave-05': ['compute']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        # Configure VMware settings. 2 clusters, 2 Nova Compute instances:
        # 1 on controllers and 1 on compute-vmware
        self.show_step(6)
        target_node = self.fuel_web.get_nailgun_node_by_name('slave-04')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_2=target_node['hostname'],
                                        multiclusters=True)

        self.show_step(7)  # Verify networks
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(9)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id, timeout=3600,
                               test_sets=['smoke', 'sanity', 'ha',
                                          'tests_platform'])

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_ceph'])
    @log_snapshot_after_test
    def nsxt_ceph(self):
        """Check environment deployment with Fuel NSX-T plugin and Ceph.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU with vCenter
                * Networking: Neutron with NSX-T plugin
                * Storage: Ceph
                * Additional services: default
            3. Add nodes with the following roles:
                * Controller
                * Ceph-OSD
                * Ceph-OSD
                * Ceph-OSD
                * Compute
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Configure VMware vCenter Settings. Add 1 vSphere cluster and
               configure Nova Compute instance on controller.
            7. Verify networks.
            8. Deploy cluster.
            9. Run OSTF.

        Duration: 180
        """
        # Install NSX-T plugin to Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment with Ceph
        settings = self.default.cluster_settings
        settings['volumes_lvm'] = False
        settings['volumes_ceph'] = True
        settings['images_ceph'] = True
        settings['ephemeral_ceph'] = True
        settings['objects_ceph'] = True

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=settings)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['ceph-osd'],
                                    'slave-03': ['ceph-osd'],
                                    'slave-04': ['ceph-osd'],
                                    'slave-05': ['compute']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        # Configure VMware settings. 1 cluster, 1 Compute instance: controller
        self.show_step(6)
        self.fuel_web.vcenter_configure(cluster_id)

        self.show_step(7)  # Verify networks
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(9)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

View File

@@ -0,0 +1,359 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""

from proboscis import test
from proboscis.asserts import assert_true

from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import SERVTEST_PASSWORD
from fuelweb_test.settings import SERVTEST_TENANT
from fuelweb_test.settings import SERVTEST_USERNAME
from fuelweb_test.tests.base_test_case import SetupEnvironment
from helpers import openstack as os_help
from tests.base_plugin_test import TestNSXtBase


@test(groups=['nsxt_plugin', 'nsxt_scale'])
class TestNSXtScale(TestNSXtBase):
    """Tests from test plan that have been marked as 'Automated'."""

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=['nsxt_add_delete_controller'])
    @log_snapshot_after_test
    def nsxt_add_delete_controller(self):
        """Check functionality when controller has been removed or added.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 9 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU with vCenter
                * Networking: Neutron with NSX-T plugin
                * Storage: default
            3. Add nodes with the following roles:
                * Controller
                * Controller
                * Controller
                * Compute
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Configure VMware vCenter Settings. Add vSphere cluster and
               configure Nova Compute instance on controllers.
            7. Deploy cluster.
            8. Run OSTF.
            9. Launch 1 vcenter instance and 1 KVM instance.
            10. Add 2 controller nodes.
            11. Redeploy cluster.
            12. Check that all instances are in place.
            13. Run OSTF.
            14. Remove 2 controller nodes.
            15. Redeploy cluster.
            16. Check that all instances are in place.
            17. Run OSTF.

        Duration: 180 min
        """
        # Install NSX-T plugin to Fuel Master node with 9 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_9_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller'],
                                    'slave-03': ['controller'],
                                    'slave-04': ['compute']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        # Configure VMware settings. 1 cluster, 1 Nova Compute on controllers
        self.show_step(6)
        self.fuel_web.vcenter_configure(cluster_id)

        self.show_step(7)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        # Launch 1 vcenter instance and 1 KVM instance
        self.show_step(9)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        os_help.create_instance(os_conn)
        os_help.create_instance(os_conn, az='vcenter')

        self.show_step(10)  # Add 2 controller nodes
        self.fuel_web.update_nodes(cluster_id, {'slave-05': ['controller'],
                                                'slave-06': ['controller']})

        self.show_step(11)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(12)  # Check that all instances are in place
        os_help.check_instances_state(os_conn)

        self.show_step(13)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(14)  # Remove 2 controller nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller']},
                                   False, True)

        self.show_step(15)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(16)  # Check that all instances are in place
        os_help.check_instances_state(os_conn)

        self.show_step(17)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_add_delete_compute_node'])
    @log_snapshot_after_test
    def nsxt_add_delete_compute_node(self):
        """Verify functionality when compute node has been removed or added.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU
                * Networking: Neutron with NSX-T plugin
                * Storage: default
                * Additional services: default
            3. Add nodes with the following roles:
                * Controller
                * Controller
                * Controller
                * Compute
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Deploy cluster.
            7. Run OSTF.
            8. Launch KVM vm.
            9. Add node with compute role.
            10. Redeploy cluster.
            11. Check that instance is in place.
            12. Run OSTF.
            13. Remove node with compute role.
            14. Redeploy cluster.
            15. Check that instance is in place.
            16. Run OSTF.

        Duration: 180 min
        """
        # Install NSX-T plugin to Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller'],
                                    'slave-03': ['controller'],
                                    'slave-04': ['compute']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        self.show_step(6)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(8)  # Launch KVM vm
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        os_help.create_instance(os_conn)

        self.show_step(9)  # Add node with compute role
        self.fuel_web.update_nodes(cluster_id, {'slave-05': ['compute']})

        self.show_step(10)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(11)  # Check that instance is in place
        os_help.check_instances_state(os_conn)

        self.show_step(12)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(13)  # Remove node with compute role
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-04': ['compute']},
                                   False, True)

        self.show_step(14)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(15)  # Check that instance is in place
        os_help.check_instances_state(os_conn)

        self.show_step(16)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_add_delete_compute_vmware_node'])
    @log_snapshot_after_test
    def nsxt_add_delete_compute_vmware_node(self):
        """Verify functionality when compute-vmware has been removed or added.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU with vCenter
                * Networking: Neutron with NSX-T plugin
                * Storage: default
                * Additional services: default
            3. Add nodes with the following roles:
                * Controller
                * Controller
                * Controller
                * Compute-vmware
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Configure VMware vCenter Settings. Add 1 vSphere cluster and
               configure Nova Compute instance on compute-vmware.
            7. Deploy cluster.
            8. Run OSTF.
            9. Launch vcenter vm.
            10. Add node with compute-vmware role.
            11. Reconfigure vcenter compute clusters.
            12. Redeploy cluster.
            13. Check that instance has been removed.
            14. Run OSTF.
            15. Remove node with compute-vmware role.
            16. Reconfigure vcenter compute clusters.
            17. Redeploy cluster.
            18. Run OSTF.

        Duration: 240 min
        """
        # Install NSX-T plugin to Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller'],
                                    'slave-03': ['controller'],
                                    'slave-04': ['compute-vmware']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        # Configure VMware settings. 1 cluster, 1 Nova Compute: compute-vmware
        self.show_step(6)
        target_node1 = self.fuel_web.get_nailgun_node_by_name('slave-04')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_1=target_node1['hostname'])

        self.show_step(7)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(9)  # Launch vcenter vm
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)
        vcenter_vm = os_help.create_instance(os_conn, az='vcenter')

        self.show_step(10)  # Add node with compute-vmware role
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-05': ['compute-vmware']})

        self.show_step(11)  # Reconfigure vcenter compute clusters
        target_node2 = self.fuel_web.get_nailgun_node_by_name('slave-05')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_1=target_node1['hostname'],
                                        target_node_2=target_node2['hostname'])

        self.show_step(12)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(13)  # Check that instance has been removed
        assert_true(os_conn.is_srv_deleted(vcenter_vm))

        self.show_step(14)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(15)  # Remove node with compute-vmware role
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-04': ['compute-vmware']},
                                   False, True)

        self.show_step(16)  # Reconfigure vcenter compute clusters
        target_node2 = self.fuel_web.get_nailgun_node_by_name('slave-04')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_1=target_node2['hostname'])

        self.show_step(17)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(18)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)
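
One detail worth calling out in the scale tests: the trailing positional `False, True` in the removal calls maps to the pending_addition/pending_deletion flags of fuel-qa's update_nodes. Spelled out with keywords, a sketch under that assumption about the signature:

    def remove_compute_vmware(fuel_web, cluster_id):
        """Keyword form of the positional `False, True` used above, assuming
        fuel-qa's update_nodes(cluster_id, nodes_dict,
        pending_addition=True, pending_deletion=False) signature."""
        fuel_web.update_nodes(
            cluster_id,
            {'slave-04': ['compute-vmware']},
            pending_addition=False,  # the node is not being added
            pending_deletion=True)   # it is scheduled for removal
        # The removal only takes effect on the next deployment pass
        fuel_web.deploy_cluster_wait(cluster_id)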