Merge "Fix duplicate pacemaker constraint command invocation"

Authored by Jenkins on 2016-02-18 17:36:51 +00:00; committed by Gerrit Code Review
commit 2c57305276
2 changed files with 41 additions and 22 deletions


@@ -359,15 +359,21 @@ class TestHaNeutronFailover(TestHaFailoverBase):
         Scenario:
             1. Deploy environment with at least 3 controllers
-            2. Get rabbit master node
-            3. Move master rabbit resource to slave with pcs
-            4. Delete pcs constraint for rabbit resource
-            5. Run OSTF
-            6. Get new rabbit master node
-            7. Destroy it
-            8. Run OSTF
-            9. Power on destroyed node
-            10. Run OSTF
+               (Or revert existing snapshot)
+            2. Wait for mysql cluster to become active
+            3. Run ostf tests before destructive actions
+            4. Get rabbit master node
+            5. Move master rabbit resource to slave with pcs
+            6. Delete pcs constraint for rabbit resource
+            7. Assert HA services ready
+            8. Get new rabbit master node
+            9. Destroy it
+            10. Assert HA services ready
+            11. Run sanity and smoke OSTF sets
+            12. Power on destroyed node
+            13. Assert HA services ready
+            14. Assert OS services ready
+            15. Run OSTF

         Duration 80 min
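The renumbered steps above are what the self.show_step(...) calls added to the test body below refer to: each call logs the matching scenario step, the details argument tags the log line (here with the retry count), and initialize=True restarts the numbering at the top of a run. As a minimal, hypothetical sketch of that pattern only (not the actual fuel-qa implementation; the class name and docstring parsing here are assumptions), a step logger could look like this:

import logging
import re

logger = logging.getLogger(__name__)


class ScenarioSteps(object):
    """Illustrative stand-in for a show_step-style helper (assumed API)."""

    def __init__(self, docstring):
        # Map step number -> description from lines like
        # '5. Move master rabbit resource to slave with pcs'
        self.steps = dict(
            (int(num), text.strip())
            for num, text in re.findall(r'(\d+)\.\s+(.+)', docstring))

    def show_step(self, step, details='', initialize=False):
        # initialize=True marks the start of a (re)run, e.g. a new loop iteration
        prefix = 'Start scenario. ' if initialize else ''
        logger.info('%sStep %d. %s %s',
                    prefix, step, self.steps.get(step, ''), details)

With the docstring above, show_step(5, details='Run count: 1') would log 'Step 5. Move master rabbit resource to slave with pcs Run count: 1'.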


@@ -1305,6 +1305,7 @@ class TestHaFailoverBase(TestBasic):
         if not self.env.d_env.has_snapshot(self.snapshot_name):
             raise SkipTest()
         logger.info('Revert environment started...')
+        self.show_step(1, initialize=True)
         self.env.revert_snapshot(self.snapshot_name)

         cluster_id = self.fuel_web.client.get_cluster_id(

@@ -1312,10 +1313,12 @@ class TestHaFailoverBase(TestBasic):
         logger.info('Waiting for mysql cluster is up')
+        self.show_step(2)
         # Wait until MySQL Galera is UP on some controller
         self.fuel_web.wait_mysql_galera_is_up(['slave-02'])

         # Check ha ans services are fine after revert
+        self.show_step(3)
         logger.info('Run ostf tests before destructive actions')
         self.fuel_web.assert_ha_services_ready(cluster_id, timeout=600)
         self.fuel_web.assert_os_services_ready(cluster_id)

@@ -1326,7 +1329,9 @@ class TestHaFailoverBase(TestBasic):
             # Get primary controller from nailgun
             p_d_ctrl = self.fuel_web.get_nailgun_primary_node(
                 self.env.d_env.nodes().slaves[0])
+            self.show_step(4,
+                           details='Run count: {0}'.format(count),
+                           initialize=True)
             # get master rabbit controller
             master_rabbit = self.fuel_web.get_rabbit_master_node(p_d_ctrl.name)
             logger.info('Master rabbit is on {0} for attempt {1}'.format(

@@ -1362,30 +1367,32 @@ class TestHaFailoverBase(TestBasic):
                    'location-p_rabbitmq-server 2>&1 >/dev/null| true')
             remote_master_rabbit.execute(cmd)

+            self.show_step(5, details='Run count: {0}'.format(count))
             # Move resource to rabbit slave
             cmd_move = ('pcs constraint location p_rabbitmq-server '
                         'rule role=master score=-INFINITY \#uname '
                         'ne {0}').format(slaves_rabbit_fqdn[0])
-            _wait(lambda: assert_equal(
-                remote_master_rabbit.execute(cmd_move)['exit_code'], 0,
-                'Fail to move p_rabbitmq-server with {0} on '
-                'count {1}'.format(
-                    remote_master_rabbit.execute(cmd_move), count)),
-                timeout=20)
+            result = remote_master_rabbit.execute(cmd_move)
+            assert_equal(
+                result['exit_code'], 0,
+                'Fail to move p_rabbitmq-server with {0} on '
+                'count {1}'.format(result, count))

             # Clear all
+            self.show_step(6, details='Run count: {0}'.format(count))
             cmd_clear = ('pcs constraint delete '
                          'location-p_rabbitmq-server')
-            _wait(lambda: assert_equal(
-                remote_master_rabbit.execute(cmd_clear)['exit_code'], 0,
-                'Fail to delete pcs constraint {0} on count {1}'.format(
-                    remote_master_rabbit.execute(cmd_clear), count)),
-                timeout=20)
+            result = remote_master_rabbit.execute(cmd_clear)
+            assert_equal(
+                result['exit_code'], 0,
+                'Fail to delete pcs constraint using {0} on '
+                'count {1}'.format(cmd_clear, count))

             # check ha
+            self.show_step(7)
             self.fuel_web.assert_ha_services_ready(cluster_id, timeout=700)

             # get new rabbit master node
+            self.show_step(8)
             master_rabbit_2 = self.fuel_web.get_rabbit_master_node(
                 p_d_ctrl.name)
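This hunk is the fix the commit title refers to. The old code passed remote_master_rabbit.execute(cmd_move) (and likewise cmd_clear) both to the exit-code check and to the failure message's format() call, and wrapped the whole assertion in _wait(..., timeout=20); because assert_equal is an ordinary function, its message argument was built eagerly, so each pacemaker constraint command was issued twice per attempt, and potentially again on retry. The new code runs each pcs command exactly once, keeps the result, and asserts on that. A condensed sketch of the two patterns, with generic helper names rather than the fuel-qa API:

def assert_equal(actual, expected, message):
    # Minimal stand-in for the assert_equal used in the test: a plain
    # function, so its message argument is evaluated eagerly.
    if actual != expected:
        raise AssertionError(message)


def run_constraint_cmd(remote, cmd, count):
    # Fixed pattern from the hunk above: execute the pcs command once and
    # reuse the captured result for both the check and the failure message.
    result = remote.execute(cmd)
    assert_equal(result['exit_code'], 0,
                 'Fail to run {0} on count {1}: {2}'.format(cmd, count, result))
    return result


def run_constraint_cmd_buggy(remote, cmd, count):
    # Old pattern: the message is built before assert_equal is called, so the
    # mutating pcs command runs a second time on every attempt.
    assert_equal(remote.execute(cmd)['exit_code'], 0,
                 'Fail to run {0} on count {1}: {2}'.format(
                     cmd, count, remote.execute(cmd)))

For context, the two commands being guarded here: the location rule with score=-INFINITY and '#uname ne <fqdn>' bans the master role from every node except the chosen slave, which forces the rabbit master to move there, while pcs constraint delete location-p_rabbitmq-server removes that ban so Pacemaker can place the master freely again. If a retry loop were still wanted, polling a read-only listing such as pcs constraint would avoid re-issuing the mutating commands.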
@@ -1397,6 +1404,7 @@ class TestHaFailoverBase(TestBasic):
                 master_rabbit_2.name, count))

             # destroy devops node with rabbit master
+            self.show_step(9)
             master_rabbit_2.destroy()

             # Wait until Nailgun marked suspended controller as offline

@@ -1411,15 +1419,17 @@ class TestHaFailoverBase(TestBasic):
             # check ha, should fail 1 test according
             # to haproxy backend from destroyed will be down
+            self.show_step(10)
             self.fuel_web.assert_ha_services_ready(
                 cluster_id, timeout=800, should_fail=1)

             # Run sanity and smoke tests to see if cluster operable
+            self.show_step(11)
             self.fuel_web.run_ostf(cluster_id=cluster_id,
                                    should_fail=1)

             # turn on destroyed node
+            self.show_step(12)
             master_rabbit_2.start()

             # Wait until Nailgun marked suspended controller as online

@@ -1432,9 +1442,12 @@ class TestHaFailoverBase(TestBasic):
                 'in nailgun'.format(master_rabbit_2.name))

             # check ha
+            self.show_step(13)
             self.fuel_web.assert_ha_services_ready(cluster_id, timeout=800)

             # check os
+            self.show_step(14)
             self.fuel_web.assert_os_services_ready(cluster_id)

             # run ostf smoke and sanity
+            self.show_step(15)
             self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])