[IT] Fix deleting transient cluster when cluster is in error state

Change-Id: I6e21847d4f19bee32b1872abac3a26c612103348
Closes-bug: #1378831
commit 19e946feb1
parent 83cfb81c32
Author: Sergey Reshetnyak
Date:   2014-10-08 17:39:58 +04:00

8 changed files with 19 additions and 8 deletions

@@ -176,7 +176,7 @@ class ITestCase(testcase.WithAttributes, base.BaseTestCase):
             description, cluster_configs, node_groups,
             self.common_config.USER_KEYPAIR_ID, anti_affinity, net_id)
         self.cluster_id = data.id
-        self.poll_cluster_state(self.cluster_id)
+        return self.cluster_id

     def get_cluster_info(self, plugin_config):
         node_ip_list_with_node_processes = (
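
The hunk above changes create_cluster to return the new cluster id instead of polling inline, so each caller decides when to block on cluster state. For reference, a state poller of this shape loops on the cluster status until it becomes Active or errors out; the sketch below is a minimal stand-in, not Sahara's actual implementation (get_status, the status strings, and the timings are all assumptions):

```python
import time


class ClusterError(Exception):
    """Raised when the cluster errors out or never becomes Active."""


def poll_cluster_state(get_status, timeout=300, interval=5):
    """Poll get_status() until it returns 'Active'.

    get_status is a zero-argument callable returning the current cluster
    status string. Raises ClusterError if the cluster enters 'Error' state
    or the timeout elapses first.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = get_status()
        if status == 'Active':
            return
        if status == 'Error':
            raise ClusterError('cluster went into Error state')
        time.sleep(interval)
    raise ClusterError('cluster not Active after %s seconds' % timeout)
```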
@@ -557,7 +557,11 @@ class ITestCase(testcase.WithAttributes, base.BaseTestCase):
                        node_group_template_id_list=None):
         if not self.common_config.RETAIN_CLUSTER_AFTER_TEST:
             if cluster_id:
-                self.sahara.clusters.delete(cluster_id)
+                try:
+                    self.sahara.clusters.delete(cluster_id)
+                except client_base.APIException:
+                    # cluster in deleting state or deleted
+                    pass
                 try:
                     # waiting roughly for 300 seconds for cluster to terminate
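
This hunk makes cleanup tolerant of delete races: a transient cluster tears itself down, so by the time delete_objects runs, clusters.delete() may raise APIException because the cluster is already deleting or gone, and that now counts as success. A minimal sketch of the pattern, assuming client_base is the module's usual alias for saharaclient.api.base:

```python
from saharaclient.api import base as client_base  # assumed import alias


def delete_cluster_quietly(sahara, cluster_id):
    """Request cluster deletion, tolerating already-deleting/deleted clusters."""
    try:
        sahara.clusters.delete(cluster_id)
    except client_base.APIException:
        # The cluster is in Deleting state or already deleted; nothing to do.
        pass
```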

@@ -180,7 +180,8 @@ class CDHGatingTest(cluster_configs.ClusterConfigTest,
                 }
             }
         }
-        self.create_cluster(**cluster)
+        cluster_id = self.create_cluster(**cluster)
+        self.poll_cluster_state(cluster_id)
         self.cluster_info = self.get_cluster_info(self.cdh_config)
         self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                                self.cdh_config)

@@ -113,7 +113,8 @@ class HDP2GatingTest(swift.SwiftTest, scaling.ScalingTest,
             'description': 'test cluster',
             'cluster_configs': {}
         }
-        self.create_cluster(**cluster)
+        cluster_id = self.create_cluster(**cluster)
+        self.poll_cluster_state(cluster_id)
         self.cluster_info = self.get_cluster_info(self.hdp2_config)
         self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                                self.hdp2_config)

@@ -120,13 +120,14 @@ class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
         cluster_name = (self.common_config.CLUSTER_NAME + '-' +
                         self.hdp_config.PLUGIN_NAME)
         try:
-            self.create_cluster(
+            cluster_id = self.create_cluster(
                 name=cluster_name,
                 plugin_config=self.hdp_config,
                 cluster_template_id=cluster_template_id,
                 description='test cluster',
                 cluster_configs={}
             )
+            self.poll_cluster_state(cluster_id)
             cluster_info = self.get_cluster_info(self.hdp_config)
             self.await_active_workers_for_namenode(cluster_info['node_info'],

@@ -108,7 +108,8 @@ class SparkGatingTest(swift.SwiftTest, scaling.ScalingTest,
             'description': 'test cluster',
             'cluster_configs': {}
         }
-        self.create_cluster(**cluster)
+        cluster_id = self.create_cluster(**cluster)
+        self.poll_cluster_state(cluster_id)
         self.cluster_info = self.get_cluster_info(self.spark_config)
         self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                                self.spark_config)

@@ -96,6 +96,7 @@ class TransientGatingTest(edp.EDPTest):
         }
         cluster_id = self.create_cluster(**cluster)
         self.addCleanup(self.delete_objects, cluster_id=cluster_id)
+        self.poll_cluster_state(cluster_id)

     @b.errormsg("Failure while transient cluster testing: ")
     def _check_transient(self):
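
This hunk is the heart of the fix: the cleanup is registered with addCleanup before poll_cluster_state runs, so if the transient cluster goes into Error state and polling raises, the test framework still executes the cleanup and the broken cluster gets deleted. A stripped-down, self-contained illustration of that ordering (all names are hypothetical):

```python
import unittest


class CleanupOrderingSketch(unittest.TestCase):
    """Demonstrates that a cleanup registered before a failing step still runs."""

    deleted = []  # records which clusters were "deleted" by cleanup

    def test_transient(self):
        cluster_id = 'fake-cluster'
        # Register cleanup first, exactly as the gating test now does.
        self.addCleanup(self.deleted.append, cluster_id)
        # Polling fails because the cluster went into Error state...
        raise RuntimeError('cluster in Error state')

# The test itself fails, but 'fake-cluster' still ends up in
# CleanupOrderingSketch.deleted: addCleanup fires even on failure.
```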

@@ -197,13 +197,14 @@ class VanillaGatingTest(cinder.CinderVolumeTest,
         try:
             cluster_name = "%s-%s-v1" % (self.common_config.CLUSTER_NAME,
                                          self.vanilla_config.PLUGIN_NAME)
-            self.create_cluster(
+            cluster_id = self.create_cluster(
                 name=cluster_name,
                 plugin_config=self.vanilla_config,
                 cluster_template_id=cluster_template_id,
                 description='test cluster',
                 cluster_configs={}
             )
+            self.poll_cluster_state(cluster_id)
             cluster_info = self.get_cluster_info(self.vanilla_config)
             self.await_active_workers_for_namenode(cluster_info['node_info'],

@@ -177,7 +177,8 @@ class VanillaTwoGatingTest(cluster_configs.ClusterConfigTest,
             'description': 'test cluster',
             'cluster_configs': {}
         }
-        self.create_cluster(**cluster)
+        cluster_id = self.create_cluster(**cluster)
+        self.poll_cluster_state(cluster_id)
         self.cluster_info = self.get_cluster_info(self.vanilla_two_config)
         self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                                self.vanilla_two_config)