Delete nova_network class from test_failover and test_failover_with_ceph

Change-Id: Ibce509a2865bf128bec42dceb24ddda336dfc0f9
Closes-bug: #1490492
Veronica Krayneva 2015-08-31 15:44:46 +03:00
parent 653ebd97d0
commit f89596126e
3 changed files with 15 additions and 340 deletions


@@ -42,7 +42,7 @@ class TestHaNeutronFailover(TestHaFailoverBase):
Duration 70m
Snapshot prepare_ha_neutron
"""
super(self.__class__, self).deploy_ha(network='neutron')
super(self.__class__, self).deploy_ha()
@test(depends_on_groups=['prepare_ha_neutron'],
groups=["ha_neutron_destroy_controllers", "ha_destroy_controllers"])
@@ -329,272 +329,3 @@ class TestHaNeutronFailover(TestHaFailoverBase):
"""
super(self.__class__, self).ha_corosync_stability_check()
@test(enabled=False,
groups=["thread_5", "ha", "ha_nova_destructive"])
class TestHaNovaFailover(TestHaFailoverBase):
# REMOVE THIS NOVA_NETWORK CLASS WHEN NEUTRON BECOMES THE DEFAULT
snapshot_name = "prepare_ha_nova"
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["prepare_ha_nova", "nova", "cinder", "swift", "glance",
"deployment"])
@log_snapshot_after_test
def prepare_ha_nova(self):
"""Prepare cluster in HA/Nova mode for failover tests
Scenario:
1. Create cluster
2. Add 3 nodes with controller roles
3. Add 2 nodes with compute roles
4. Deploy the cluster
5. Make snapshot
Duration 70m
Snapshot prepare_ha_nova
"""
super(self.__class__, self).deploy_ha(network='nova_network')
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_destroy_controllers"])
@log_snapshot_after_test
def ha_nova_destroy_controllers(self):
"""Destroy two controllers and check pacemaker status is correct
Scenario:
1. Destroy first controller
2. Check pacemaker status
3. Run OSTF
4. Revert environment
5. Destroy second controller
6. Check pacemaker status
7. Run OSTF
Duration 35m
"""
super(self.__class__, self).ha_destroy_controllers()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_disconnect_controllers"])
@log_snapshot_after_test
def ha_nova_disconnect_controllers(self):
"""Disconnect controllers on environment with nova network
Scenario:
1. Block traffic on br-mgmt of the first controller
2. Check pacemaker status
3. Revert environment
4. Block traffic on br-mgmt of the second controller
5. Check pacemaker status
6. Wait until MySQL Galera is UP on some controller
7. Run OSTF
Duration 45m
"""
super(self.__class__, self).ha_disconnect_controllers()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_delete_vips", "ha_delete_vips"])
@log_snapshot_after_test
def ha_nova_delete_vips(self):
"""Delete management and public VIPs 10 times.
Verify that they are restored.
Verify cluster by OSTF
Scenario:
1. Delete public and management VIPs 10 times
2. Wait while they are being restored
3. Verify they are restored
4. Run OSTF
Duration 30m
"""
super(self.__class__, self).ha_delete_vips()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_mysql_termination"])
@log_snapshot_after_test
def ha_nova_mysql_termination(self):
"""Terminate mysql on all controllers one by one
Scenario:
1. Terminate mysql
2. Wait while it is being restarted
3. Verify it is restarted
4. Go to another controller
5. Run OSTF
Duration 15m
"""
super(self.__class__, self).ha_mysql_termination()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_haproxy_termination"])
@log_snapshot_after_test
def ha_nova_haproxy_termination(self):
"""Terminate haproxy on all controllers one by one
Scenario:
1. Terminate haproxy
2. Wait while it is being restarted
3. Verify it is restarted
4. Go to another controller
5. Run OSTF
Duration 25m
"""
super(self.__class__, self).ha_haproxy_termination()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_pacemaker_configuration"])
@log_snapshot_after_test
def ha_nova_pacemaker_configuration(self):
"""Verify resources are configured
Scenario:
1. SSH to controller node
2. Verify resources are configured
3. Go to next controller
Duration 15m
"""
super(self.__class__, self).ha_pacemaker_configuration()
@test(enabled=False, depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_pacemaker_restart_heat_engine"])
@log_snapshot_after_test
def ha_nova_pacemaker_restart_heat_engine(self):
"""Verify heat engine service is restarted
by pacemaker on amqp connection loss
Scenario:
1. SSH to any controller
2. Check heat-engine status
3. Block heat-engine amqp connections
4. Check heat-engine was stopped on current controller
5. Unblock heat-engine amqp connections
6. Check heat-engine process is running with new pid
7. Check amqp connection re-appears for heat-engine
Duration 15m
"""
super(self.__class__, self).ha_pacemaker_restart_heat_engine()
@test(enabled=False, depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_check_monit"])
@log_snapshot_after_test
def ha_nova_check_monit(self):
"""Verify monit restarted nova
service if it was killed
Scenario:
1. SSH to every compute node in cluster
2. Kill nova-compute service
3. Check service is restarted by monit
Duration 25m
"""
super(self.__class__, self).ha_check_monit()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_firewall"])
@log_snapshot_after_test
def ha_nova_firewall(self):
"""Check firewall vulnerability on nova network
Scenario:
1. Start 'socat' on a cluster node listening on a free random port
2. Send a string to this port with 'nc' from the admin node
3. Check that the string appeared on the cluster node
4. Repeat for each cluster node
Duration 25m
"""
super(self.__class__, self).check_firewall_vulnerability()
@test(enabled=False, depends_on_groups=['prepare_ha_nova'],
groups=["check_nova_package_loss"])
@log_snapshot_after_test
def ha_nova_packages_loss(self):
"""Check cluster recovery if br-mgmt loss 5% packages
Scenario:
1. SSH to controller
2. set 5 % package loss on br-mgmt
3. run ostf
Duration
"""
# TODO: enable tests when fencing is implemented
super(self.__class__, self).ha_controller_loss_packages()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_check_alive_rabbit"])
@log_snapshot_after_test
def ha_nova_check_alive_rabbit(self):
"""Check alive rabbit node is not kicked from cluster
when corosync service on node dies
Scenario:
1. SSH to first controller and put corosync cluster to
maintenance mode:
crm configure property maintenance-mode=true
2. Stop corosync service on first controller
3. Check on master node that rabbit-fence.log contains
Ignoring alive node rabbit@node-1
4. On second controller check that rabbitmq cluster_status
contains all 3 nodes
5. On first controller start corosync service and restart pacemaker
6. Check that pcs status contains all 3 nodes
Duration 25m
"""
super(self.__class__, self).check_alive_rabbit_node_not_kicked()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_check_dead_rabbit"])
@log_snapshot_after_test
def ha_nova_check_dead_rabbit(self):
"""Check dead rabbit node is kicked from cluster
when corosync service on node dies
Scenario:
1. SSH to first controller and put corosync cluster to
maintenance mode:
crm configure property maintenance-mode=true
2. Stop rabbit and corosync service on first controller
3. Check on master node that rabbit-fence.log contains
Disconnecting rabbit@node-1
4. On second controller check that rabbitmq cluster_status
contains only 2 nodes
Duration 25m
"""
super(self.__class__, self).check_dead_rabbit_node_kicked()
@test(depends_on_groups=['prepare_ha_nova'],
groups=["ha_nova_test_3_1_rabbit_failover"])
@log_snapshot_after_test
def ha_nova_test_3_1_rabbit_failover(self):
"""Check 3 in 1 rabbit failover
Scenario:
1. SSH to controller and get rabbit master
2. Destroy a node that is not the rabbit master
3. Check that the rabbit master stays the same
4. Run ostf ha
5. Turn on destroyed slave
6. Check rabbit master is the same
7. Run ostf ha
8. Destroy rabbit master node
9. Check that new rabbit-master appears
10. Run ostf ha
11. Power on destroyed node
12. Check that new rabbit-master was not elected
13. Run ostf ha
Duration 25m
"""
super(self.__class__, self).test_3_1_rabbit_failover()
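
For reference, the rabbit fencing scenarios above boil down to a short sequence of operator commands. Below is a minimal sketch that drives them over SSH; root access and the node-1/node-2 controller names from the docstrings are assumptions, and the actual tests run these steps through the fuelweb_test helpers rather than raw SSH.

import subprocess


def run_on(host, command):
    """Run a shell command on a controller over SSH and return its output."""
    return subprocess.check_output(["ssh", "root@" + host, command])


# Put the corosync cluster into maintenance mode on the first controller,
# then stop corosync there so pacemaker no longer manages the node.
run_on("node-1", "crm configure property maintenance-mode=true")
run_on("node-1", "service corosync stop")

# On another controller the rabbit cluster status should still list every
# member while the stopped node is merely unmanaged, not fenced.
print(run_on("node-2", "rabbitmqctl cluster_status"))

# Bring corosync back, restart pacemaker and confirm cluster membership.
run_on("node-1", "service corosync start")
run_on("node-1", "service pacemaker restart")
print(run_on("node-1", "pcs status"))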


@@ -14,29 +14,29 @@
import re
import time
import yaml
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
from devops.helpers.helpers import _wait
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis.asserts import assert_false
from proboscis import SkipTest
import yaml
from fuelweb_test.helpers.checkers import get_file_size
from fuelweb_test.helpers.checkers import check_ping
from fuelweb_test.helpers.checkers import check_mysql
from fuelweb_test.helpers.checkers import check_ping
from fuelweb_test.helpers.checkers import check_public_ping
from fuelweb_test.helpers.checkers import get_file_size
from fuelweb_test.helpers import os_actions
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import DOWNLOAD_LINK
from fuelweb_test.settings import DNS
from fuelweb_test.settings import DNS_SUFFIX
from fuelweb_test.settings import DOWNLOAD_LINK
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_UBUNTU
@@ -46,18 +46,16 @@ from fuelweb_test.tests.base_test_case import TestBasic
class TestHaFailoverBase(TestBasic):
"""TestHaFailoverBase.""" # TODO documentation
def deploy_ha(self, network='neutron'):
def deploy_ha(self):
self.check_run(self.snapshot_name)
self.env.revert_snapshot("ready_with_5_slaves")
settings = None
settings = {
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE
}
if network == 'neutron':
settings = {
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE
}
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
@@ -81,7 +79,7 @@ class TestHaFailoverBase(TestBasic):
self.env.make_snapshot(self.snapshot_name, is_make=True)
def deploy_ha_ceph(self, network='neutron'):
def deploy_ha_ceph(self):
self.check_run(self.snapshot_name)
self.env.revert_snapshot("ready_with_5_slaves")
@ -90,11 +88,10 @@ class TestHaFailoverBase(TestBasic):
'volumes_ceph': True,
'images_ceph': True,
'volumes_lvm': False,
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE
}
if network == 'neutron':
settings["net_provider"] = 'neutron'
settings["net_segment_type"] = NEUTRON_SEGMENT_TYPE
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
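
Pieced together from the hunks above, deploy_ha now always provisions Neutron. A rough sketch of the resulting method on TestHaFailoverBase follows; the settings keyword of create_cluster and the node-assignment/deployment steps are truncated in the diff and are only assumed here.

from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE


def deploy_ha(self):
    self.check_run(self.snapshot_name)
    self.env.revert_snapshot("ready_with_5_slaves")

    # Neutron is now the only network provider exercised by these tests.
    settings = {
        "net_provider": 'neutron',
        "net_segment_type": NEUTRON_SEGMENT_TYPE,
    }
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings=settings,  # assumed keyword; the diff truncates this call
    )
    # ... node-role assignment and cluster deployment elided in the diff ...
    self.env.make_snapshot(self.snapshot_name, is_make=True)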


@@ -69,56 +69,3 @@ class TestHaCephNeutronFailover(TestHaFailoverBase):
Duration 40m
"""
super(self.__class__, self).ha_sequential_rabbit_master_failover()
@test(enabled=False,
groups=["ha_destructive_ceph_nova"])
class TestHaCephNovaFailover(TestHaFailoverBase):
# REMOVE THIS NOVA_NETWORK CLASS WHEN NEUTRON BECOMES THE DEFAULT
snapshot_name = "prepare_ha_ceph_nova"
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_ceph_ha", "prepare_ha_ceph_nova"])
@log_snapshot_after_test
def prepare_ha_ceph_nova(self):
"""Prepare cluster in HA/Nova mode with ceph for failover tests
Scenario:
1. Create cluster
2. Add 2 nodes with controller roles, 1 node controller + ceph-osd
3. Add 1 node with compute role, 1 node compute + ceph-osd
4. Deploy the cluster
5. Make snapshot
Duration 70m
Snapshot prepare_ha_ceph_nova
"""
super(self.__class__, self).deploy_ha_ceph(network='nova_network')
@test(depends_on_groups=['prepare_ha_ceph_nova'],
groups=["ha_ceph_nova_sequential_destroy_controllers"])
@log_snapshot_after_test
def ha_ceph_nova_rabbit_master_destroy(self):
"""Suspend rabbit master 2 times, check cluster,
resume nodes, check cluster
Scenario:
1. Revert snapshot prepare_ha_ceph_nova
2. Wait galera is up, keystone re-trigger tokens
3. Create instance, assign floating ip
4. Ping instance by floating ip
5. Suspend rabbit-master controller
6. Run OSTF ha suite
7. Ping created instance
8. Suspend second rabbit-master controller
9. Turn on controller from step 5
10. Run OSTF ha suite
11. Ping instance
12. Turn on controller from step 8
13. Run OSTF ha suite
14. Ping instance
15. Run OSTF
Duration 40m
"""
super(self.__class__, self).ha_sequential_rabbit_master_failover()