Replace all hard-coded cluster statuses with cluster_utils constants
Change-Id: I9550e98553da9f7bfc57923e44a789e07a912ff4
Closes-Bug: #1517061
parent 6202f07d3e
commit 3ada667eaf
@@ -25,6 +25,10 @@ from tempest.scenario import manager
 
 TEMPEST_CONF = config.CONF
 
+# cluster status
+CLUSTER_STATUS_ACTIVE = "Active"
+CLUSTER_STATUS_ERROR = "Error"
+
 LOG = logging.getLogger(__name__)
 
 
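The hunks below reference status constants from Sahara's cluster utilities module (imported as c_u or cluster_utils); their definitions are not part of this diff. As a rough sketch, assuming the constant values simply mirror the display strings that were previously hard coded, such a module would contain:

    # Hypothetical sketch of the constants referenced below as
    # c_u.CLUSTER_STATUS_*; the real Sahara module defines more
    # statuses and helper functions.
    CLUSTER_STATUS_VALIDATING = "Validating"
    CLUSTER_STATUS_SPAWNING = "Spawning"
    CLUSTER_STATUS_ACTIVE = "Active"
    CLUSTER_STATUS_DELETING = "Deleting"
    CLUSTER_STATUS_ERROR = "Error"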
@@ -218,15 +222,16 @@ class BaseDataProcessingTest(manager.ScenarioTest):
         s_time = timeutils.utcnow()
         while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
             cluster = self.client.clusters.get(cluster_id)
-            if cluster.status == 'Active':
+            if cluster.status == CLUSTER_STATUS_ACTIVE:
                 return
-            if cluster.status == 'Error':
+            if cluster.status == CLUSTER_STATUS_ERROR:
                 raise exceptions.BuildErrorException(
-                    'Cluster failed to build and is in "Error" status.')
+                    'Cluster failed to build and is in %s status.' %
+                    CLUSTER_STATUS_ERROR)
             time.sleep(TEMPEST_CONF.data_processing.request_timeout)
         raise exceptions.TimeoutException(
-            'Cluster failed to get to "Active status within %d seconds.'
-            % timeout)
+            'Cluster failed to get to %s status within %d seconds.'
+            % (CLUSTER_STATUS_ACTIVE, timeout))
 
     def create_job_execution(self, **kwargs):
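The loop above polls the cluster until it reaches the Active status, fails fast on Error, and raises a timeout otherwise. A generic, self-contained sketch of the same polling pattern (names and timing values here are illustrative, not part of the patch):

    import time

    def wait_for_status(get_status, wanted, failed, timeout=300, interval=5):
        # Poll get_status() until it returns `wanted`; raise if `failed`
        # is seen or `timeout` seconds elapse.
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = get_status()
            if status == wanted:
                return
            if status == failed:
                raise RuntimeError('resource went to %s status' % failed)
            time.sleep(interval)
        raise RuntimeError('resource did not reach %s status within %d seconds'
                           % (wanted, timeout))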
@@ -131,7 +131,7 @@ class TestPeriodicBack(base.SaharaWithDbTestCase):
 
         timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
 
-        self._make_cluster('1', status='Pending')
+        self._make_cluster('1', c_u.CLUSTER_STATUS_SPAWNING)
 
         timeutils.set_time_override(datetime.datetime(
             2005, 2, 1, minute=59, second=50))
@@ -147,7 +147,7 @@ class TestPeriodicBack(base.SaharaWithDbTestCase):
         self.override_config('cleanup_time_for_incomplete_clusters', 1)
         timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
 
-        self._make_cluster('1', status='Pending')
+        self._make_cluster('1', c_u.CLUSTER_STATUS_SPAWNING)
 
         timeutils.set_time_override(datetime.datetime(
             2005, 2, 1, hour=1, second=10))
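Both TestPeriodicBack hunks drive the periodic cleanup by freezing and then advancing the clock. A minimal illustration of that technique, assuming the tests rely on oslo_utils.timeutils (the exact import path is not shown in this diff):

    import datetime

    from oslo_utils import timeutils

    # Freeze "now", record a timestamp, then jump the clock forward so
    # age-based checks (such as cleanup of incomplete clusters) fire.
    timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
    created_at = timeutils.utcnow()          # 2005-02-01 00:00:00
    timeutils.set_time_override(datetime.datetime(2005, 2, 1, hour=1, second=10))
    age = timeutils.delta_seconds(created_at, timeutils.utcnow())  # 3610.0
    timeutils.clear_time_override()          # restore the real clock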
@@ -68,14 +68,16 @@ class TestScalingValidation(u.ValidationTestCase):
         ops.get_engine_type_and_version.return_value = "direct.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
         cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
-                                    [ng1], status='Validating', id='12321')
+                                    [ng1],
+                                    status=c_u.CLUSTER_STATUS_VALIDATING,
+                                    id='12321')
 
         self._assert_check_scaling(
             data={}, cluster=cluster,
             expected_message="Cluster cannot be scaled "
-                             "not in 'Active' "
-                             "status. Cluster status: "
-                             "Validating")
+                             "not in '" + c_u.CLUSTER_STATUS_ACTIVE +
+                             "' status. Cluster status: " +
+                             c_u.CLUSTER_STATUS_VALIDATING)
 
         cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                     [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
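Composing the expected message from the same constants the production code uses keeps the assertion in sync if a display string ever changes. A standalone check that the concatenation above still yields the original literal text (illustration only, not part of the patch):

    CLUSTER_STATUS_ACTIVE = "Active"
    CLUSTER_STATUS_VALIDATING = "Validating"

    expected_message = ("Cluster cannot be scaled "
                        "not in '" + CLUSTER_STATUS_ACTIVE +
                        "' status. Cluster status: " +
                        CLUSTER_STATUS_VALIDATING)
    assert expected_message == ("Cluster cannot be scaled not in 'Active' "
                                "status. Cluster status: Validating")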
@@ -35,11 +35,12 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
     def test_change_cluster_status(self):
         cluster = self._make_sample()
         cluster = cluster_utils.change_cluster_status(
-            cluster, "Deleting", "desc")
-        self.assertEqual("Deleting", cluster.status)
+            cluster, cluster_utils.CLUSTER_STATUS_DELETING, "desc")
+        self.assertEqual(cluster_utils.CLUSTER_STATUS_DELETING, cluster.status)
         self.assertEqual("desc", cluster.status_description)
-        cluster_utils.change_cluster_status(cluster, "Spawning")
-        self.assertEqual("Deleting", cluster.status)
+        cluster_utils.change_cluster_status(
+            cluster, cluster_utils.CLUSTER_STATUS_SPAWNING)
+        self.assertEqual(cluster_utils.CLUSTER_STATUS_DELETING, cluster.status)
 
     def test_change_status_description(self):
         ctx = context.ctx()
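The second assertion above is intentional: once the cluster has been moved to the Deleting status, a later request to set it to Spawning is expected to be ignored. A hypothetical, self-contained sketch of that guard (the real cluster_utils.change_cluster_status works against the database and does more than this):

    from types import SimpleNamespace

    CLUSTER_STATUS_DELETING = "Deleting"

    def change_cluster_status(cluster, status, status_description=None):
        # Once a cluster is being deleted, further status updates are ignored.
        if cluster is None or cluster.status == CLUSTER_STATUS_DELETING:
            return cluster
        cluster.status = status
        if status_description is not None:
            cluster.status_description = status_description
        return cluster

    cluster = SimpleNamespace(status="Validating", status_description=None)
    change_cluster_status(cluster, CLUSTER_STATUS_DELETING, "desc")
    change_cluster_status(cluster, "Spawning")
    assert cluster.status == CLUSTER_STATUS_DELETING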