Add OL tests failover group
- Add test with warm OL compute reboot
- Add test with hard OL compute reboot

Change-Id: I2168353a93daf7ca72a6e75f3adbbc9aa9de83ab
Related-Bug: #1569374
This commit is contained in:
parent
b4ba7a3af1
commit
3adf7139fb
@ -12,6 +12,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from devops.helpers.helpers import tcp_ping
|
||||
from devops.helpers.helpers import wait
|
||||
from proboscis import asserts
|
||||
from proboscis import test
|
||||
|
||||
@ -164,3 +166,171 @@ class OlHaOneController(ExtraComputesBase):
|
||||
|
||||
self.env.make_snapshot("ready_ha_one_controller_with_ol_compute",
|
||||
is_make=True)
|
||||
|
||||
|
||||
@test(groups=['ol', 'ol.failover_group'])
class OlFailoverGroup(ExtraComputesBase):
    """Failover tests for OL-based computes.

    Both tests share the same scenario skeleton (revert snapshot, check
    services, boot a VM, reboot the compute, verify the VM survived), so the
    common steps live in private helpers and each test only contributes the
    reboot strategy (warm vs. hard).
    """

    def _revert_env(self):
        """Step 1: revert the OL-compute snapshot and wait for all slaves."""
        self.env.revert_snapshot('ready_ha_one_controller_with_ol_compute',
                                 skip_timesync=True, skip_slaves_check=True)
        self.check_slaves_are_ready()
        logger.debug('All slaves online.')

    def _check_services(self):
        """Step 2: assert the cluster is healthy.

        Returns:
            tuple: (cluster_id, os_conn) for reuse by later steps — avoids
            re-querying nailgun and re-creating the OpenStack client.
        """
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.debug('Cluster up and ready.')
        return cluster_id, os_conn

    @staticmethod
    def _wait_vm_accessible(vm_floating_ip):
        """Wait until the VM answers on ssh port 22 via its floating ip."""
        wait(lambda: tcp_ping(vm_floating_ip.ip, 22),
             timeout=120,
             timeout_msg='Can not ping instance '
                         'by floating ip {0}'.format(vm_floating_ip.ip))

    def _boot_vm_on_compute(self, cluster_id, os_conn):
        """Step 3: boot a VM on the single compute and check connectivity.

        Verifies the environment really has exactly one controller, boots a
        VM on the private network, assigns a floating ip and waits for ssh
        to answer.

        Returns:
            tuple: (vm, vm_floating_ip, target_node) where target_node is
            the devops node backing the OL compute to be rebooted.
        """
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=('controller',))
        asserts.assert_equal(len(controllers), 1,
                             'Environment does not have 1 controller node, '
                             'found {} nodes!'.format(len(controllers)))
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        target_node = self.fuel_web.get_devops_node_by_nailgun_node(
            compute)
        net_label = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        vm = os_conn.create_server_for_migration(
            neutron=True, label=net_label)
        vm_floating_ip = os_conn.assign_floating_ip(vm)
        logger.info('Trying to get vm via tcp.')
        self._wait_vm_accessible(vm_floating_ip)
        logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip))
        return vm, vm_floating_ip, target_node

    def _verify_vm_and_cleanup(self, os_conn, vm, vm_floating_ip):
        """Step 5: check the VM is ACTIVE and reachable, then delete it."""
        asserts.assert_equal(
            os_conn.get_instance_detail(vm).status, "ACTIVE",
            "Instance did not reach active state after compute back online, "
            "current state is {0}".format(
                os_conn.get_instance_detail(vm).status))
        logger.info('Spawned VM is ACTIVE. Trying to '
                    'access it via ip: {0}'.format(vm_floating_ip.ip))
        self._wait_vm_accessible(vm_floating_ip)
        logger.info('VM is accessible. Deleting it.')
        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)

    @test(depends_on_groups=['deploy_ol_compute_ha_one_controller_tun'],
          groups=['check_ol_warm_reboot'])
    @log_snapshot_after_test
    def check_ol_warm_reboot(self):
        """Resume VM after warm reboot of OL-based compute

        Scenario:
            1. Revert environment with OL-compute.
            2. Check that services are ready.
            3. Boot VM on compute and check its connectivity via floating ip.
            4. Warm reboot OL-based compute.
            5. Verify VM connectivity via floating ip after successful reboot
               and VM resume action.

        Duration: 20m
        Snapshot: check_ol_warm_reboot
        """

        self.show_step(1)
        self._revert_env()

        self.show_step(2)
        cluster_id, os_conn = self._check_services()

        self.show_step(3)
        vm, vm_floating_ip, target_node = self._boot_vm_on_compute(
            cluster_id, os_conn)

        self.show_step(4)
        self.warm_restart_nodes([target_node])
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.info('All cluster services up and '
                    'running after compute reboot.')

        self.show_step(5)
        self._verify_vm_and_cleanup(os_conn, vm, vm_floating_ip)

        self.env.make_snapshot("check_ol_warm_reboot")

    @test(depends_on_groups=['deploy_ol_compute_ha_one_controller_tun'],
          groups=['check_ol_hard_reboot'])
    @log_snapshot_after_test
    def check_ol_hard_reboot(self):
        """Resume VM after hard reboot of OL-based compute

        Scenario:
            1. Revert environment with OL-compute.
            2. Check that services are ready.
            3. Boot VM on compute and check its connectivity via floating ip.
            4. Hard reboot OL-based compute.
            5. Verify VM connectivity via floating ip after successful reboot
               and VM resume action.

        Duration: 20m
        Snapshot: check_ol_hard_reboot
        """

        self.show_step(1)
        self._revert_env()

        self.show_step(2)
        # Reuse cluster_id/os_conn for step 3 as well — the original code
        # redundantly re-queried the cluster and rebuilt the client there.
        cluster_id, os_conn = self._check_services()

        self.show_step(3)
        vm, vm_floating_ip, target_node = self._boot_vm_on_compute(
            cluster_id, os_conn)
        # Needed to detect when the node is back on the network after the
        # power-cycle below.
        target_node_ip = self.fuel_web.get_node_ip_by_devops_name(
            target_node.name)

        self.show_step(4)
        # Hard reboot = power off (destroy) then power on, with explicit
        # checks that each transition actually happened.
        target_node.destroy()
        asserts.assert_false(target_node.driver.node_active(node=target_node),
                             'Target node still active')
        target_node.start()
        asserts.assert_true(target_node.driver.node_active(node=target_node),
                            'Target node did not start')
        self.wait_for_slave_provision(target_node_ip)
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.info('All cluster services up and '
                    'running after compute hard reboot.')

        self.show_step(5)
        self._verify_vm_and_cleanup(os_conn, vm, vm_floating_ip)

        self.env.make_snapshot("check_ol_hard_reboot")
|
||||
|
Loading…
Reference in New Issue
Block a user