Merge "Migrate integration tests to oslotest"

commit cdeafed735
Jenkins, 2014-07-24 01:36:34 +00:00, committed by Gerrit Code Review
2 changed files with 105 additions and 89 deletions
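
In short, this change swaps the integration tests' base class from testtools.TestCase to oslotest's base.BaseTestCase and replaces the hand-rolled countdown loops (a timeout counter decremented next to each time.sleep) with the fixtures.Timeout fixture, turning its TimeoutException into a test failure. A minimal sketch of that pattern, using a hypothetical wait_for_active helper rather than any of the real test methods below:

import time

import fixtures
from oslotest import base


class PollingExample(base.BaseTestCase):
    # Hypothetical helper illustrating the pattern adopted throughout
    # this commit: wrap the polling loop in fixtures.Timeout and turn
    # the resulting TimeoutException into a test failure.
    def wait_for_active(self, get_status, timeout_minutes=5):
        try:
            # gentle=True makes the fixture raise TimeoutException
            # instead of terminating the test process.
            with fixtures.Timeout(timeout_minutes * 60, gentle=True):
                while get_status() != 'Active':
                    time.sleep(10)
        except fixtures.TimeoutException:
            self.fail('Resource did not become Active within '
                      '%d minute(s).' % timeout_minutes)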

View File

@@ -14,18 +14,19 @@
 # limitations under the License.
 
 import logging
-import socket
 import telnetlib
 import time
 import uuid
 
+import fixtures
 from keystoneclient.v2_0 import client as keystone_client
 from neutronclient.v2_0 import client as neutron_client
 from novaclient.v1_1 import client as nova_client
+from oslotest import base
 from saharaclient.api import base as client_base
 import saharaclient.client as sahara_client
+import six
 from swiftclient import client as swift_client
-import testtools
 from testtools import testcase
 
 from sahara.openstack.common import excutils
@@ -69,7 +70,7 @@ def skip_test(config_name, message=''):
     return handle
 
 
-class ITestCase(testcase.WithAttributes, testtools.TestCase):
+class ITestCase(testcase.WithAttributes, base.BaseTestCase):
     def setUp(self):
         super(ITestCase, self).setUp()
         self.common_config = cfg.ITConfig().common_config
@@ -222,19 +223,25 @@ class ITestCase(testcase.WithAttributes, testtools.TestCase):
     def poll_cluster_state(self, cluster_id):
         data = self.sahara.clusters.get(cluster_id)
         timeout = self.common_config.CLUSTER_CREATION_TIMEOUT * 60
-        while str(data.status) != 'Active':
-            if str(data.status) == 'Error':
-                self.fail('Cluster state == \'Error\'.')
-            if timeout <= 0:
-                self.fail(
-                    'Cluster did not return to \'Active\' state '
-                    'within %d minutes.'
-                    % self.common_config.CLUSTER_CREATION_TIMEOUT
-                )
-            data = self.sahara.clusters.get(cluster_id)
-            time.sleep(10)
-            timeout -= 10
-        return str(data.status)
+        try:
+            with fixtures.Timeout(timeout, gentle=True):
+                while True:
+                    status = str(data.status)
+                    if status == 'Active':
+                        break
+                    if status == 'Error':
+                        self.fail('Cluster state == \'Error\'.')
+
+                    time.sleep(10)
+                    data = self.sahara.clusters.get(cluster_id)
+
+        except fixtures.TimeoutException:
+            self.fail("Cluster did not return to 'Active' state "
+                      "within %d minutes." %
+                      self.common_config.CLUSTER_CREATION_TIMEOUT)
+        return status
 
     def get_cluster_node_ip_list_with_node_processes(self, cluster_id):
         data = self.sahara.clusters.get(cluster_id)
@@ -272,80 +279,83 @@ class ITestCase(testcase.WithAttributes, testtools.TestCase):
     def get_node_info(self, node_ip_list_with_node_processes, plugin_config):
         tasktracker_count = 0
         datanode_count = 0
-        node_count = 0
-        for node_ip, processes in node_ip_list_with_node_processes.items():
-            self.try_telnet(node_ip, '22')
-            node_count += 1
-            for process in processes:
-                if process in plugin_config.HADOOP_PROCESSES_WITH_PORTS:
-                    for i in range(self.common_config.TELNET_TIMEOUT * 60):
-                        try:
-                            time.sleep(1)
-                            telnetlib.Telnet(
-                                node_ip,
-                                plugin_config.HADOOP_PROCESSES_WITH_PORTS[
-                                    process]
-                            )
-                            break
-                        except socket.error:
-                            print(
-                                'Connection attempt. NODE PROCESS: %s, '
-                                'PORT: %s.'
-                                % (process,
-                                   plugin_config.HADOOP_PROCESSES_WITH_PORTS[
-                                       process])
-                            )
-                    else:
-                        self.try_telnet(
-                            node_ip,
-                            plugin_config.HADOOP_PROCESSES_WITH_PORTS[process]
-                        )
+        timeout = self.common_config.TELNET_TIMEOUT * 60
+        with fixtures.Timeout(timeout, gentle=True):
+            accessible = False
+            proc_with_ports = plugin_config.HADOOP_PROCESSES_WITH_PORTS
+            while not accessible:
+                accessible = True
+                for node_ip, processes in six.iteritems(
+                        node_ip_list_with_node_processes):
+                    try:
+                        self.try_telnet(node_ip, '22')
+                    except Exception:
+                        accessible = False
+
+                    for process in processes:
+                        if process in proc_with_ports:
+                            try:
+                                self.try_telnet(node_ip,
+                                                proc_with_ports[process])
+                            except Exception:
+                                print('Connection attempt. NODE PROCESS: %s, '
+                                      'PORT: %s.' % (
+                                          process, proc_with_ports[process]))
+                                accessible = False
+
+                if not accessible:
+                    time.sleep(1)
+
+        for node_ip, processes in six.iteritems(
+                node_ip_list_with_node_processes):
             if plugin_config.PROCESS_NAMES['tt'] in processes:
                 tasktracker_count += 1
             if plugin_config.PROCESS_NAMES['dn'] in processes:
                 datanode_count += 1
             if plugin_config.PROCESS_NAMES['nn'] in processes:
                 namenode_ip = node_ip
+
         return {
             'namenode_ip': namenode_ip,
             'tasktracker_count': tasktracker_count,
             'datanode_count': datanode_count,
-            'node_count': node_count
+            'node_count': len(node_ip_list_with_node_processes)
         }
 
     def await_active_workers_for_namenode(self, node_info, plugin_config):
         self.open_ssh_connection(
-            node_info['namenode_ip'], plugin_config.SSH_USERNAME
-        )
-        for i in range(self.common_config.HDFS_INITIALIZATION_TIMEOUT * 6):
-            time.sleep(10)
-            active_tasktracker_count = self.execute_command(
-                'sudo -u %s bash -lc "hadoop job -list-active-trackers" '
-                '| grep "^tracker_" | wc -l'
-                % plugin_config.HADOOP_USER)[1]
-            active_tasktracker_count = int(active_tasktracker_count)
-            active_datanode_count = int(
-                self.execute_command(
-                    'sudo -u %s bash -lc "hadoop dfsadmin -report" \
-                    | grep "Datanodes available:.*" | awk \'{print $3}\''
-                    % plugin_config.HADOOP_USER)[1])
-            if (
-                active_tasktracker_count == node_info['tasktracker_count']
-            ) and (
-                active_datanode_count == node_info['datanode_count']
-            ):
-                break
-        else:
+            node_info['namenode_ip'], plugin_config.SSH_USERNAME)
+        timeout = self.common_config.HDFS_INITIALIZATION_TIMEOUT * 60
+        try:
+            with fixtures.Timeout(timeout, gentle=True):
+                while True:
+                    active_tasktracker_count = self.execute_command(
+                        'sudo -u %s bash -lc "hadoop job -list-active-trackers'
+                        '" | grep "^tracker_" | wc -l'
+                        % plugin_config.HADOOP_USER)[1]
+                    active_tasktracker_count = int(active_tasktracker_count)
+                    active_datanode_count = self.execute_command(
+                        'sudo -u %s bash -lc "hadoop dfsadmin -report" | '
+                        'grep "Datanodes available:.*" | awk \'{print $3}\''
+                        % plugin_config.HADOOP_USER)[1]
+                    active_datanode_count = int(active_datanode_count)
+
+                    if (active_tasktracker_count ==
+                            node_info['tasktracker_count'] and
+                            active_datanode_count ==
+                            node_info['datanode_count']):
+                        break
+
+                    time.sleep(10)
+        except fixtures.TimeoutException:
             self.fail(
                 'Tasktracker or datanode cannot be started within '
                 '%s minute(s) for namenode.'
                 % self.common_config.HDFS_INITIALIZATION_TIMEOUT
             )
-        self.close_ssh_connection()
+        finally:
+            self.close_ssh_connection()
 
 # --------------------------------Remote---------------------------------------
@@ -540,19 +550,19 @@ class ITestCase(testcase.WithAttributes, testtools.TestCase):
         if cluster_id:
             self.sahara.clusters.delete(cluster_id)
 
-            # waiting roughly for 300 seconds for cluster to terminate
-            attempts = 60
-            while attempts > 0:
-                try:
-                    self.sahara.clusters.get(cluster_id)
-                except client_base.APIException:
-                    # Cluster is finally deleted
-                    break
-
-                attempts -= 1
-                time.sleep(5)
-
-            if attempts == 0:
-                self.fail('Cluster failed to terminate in 300 seconds: '
-                          '%s' % cluster_id)
+            try:
+                # waiting roughly for 300 seconds for cluster to terminate
+                with fixtures.Timeout(300, gentle=True):
+                    while True:
+                        try:
+                            self.sahara.clusters.get(cluster_id)
+                        except client_base.APIException:
+                            # Cluster is finally deleted
+                            break
+
+                        time.sleep(5)
+
+            except fixtures.TimeoutException:
+                self.fail('Cluster failed to terminate in 300 seconds: '
+                          '%s' % cluster_id)
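
As a side note (this is an assumption about how the fixtures library works, not something stated in the change): with gentle=True the Timeout fixture arms a SIGALRM-based alarm and raises fixtures.TimeoutException in the running code once the deadline passes, which is why the rewritten loops above no longer need manual timeout bookkeeping; it therefore only takes effect on Unix and in the main thread, and with gentle=False the process is terminated instead. A standalone sketch:

import time

import fixtures


def poll_forever():
    # Stands in for a polling loop that has no exit condition of its own.
    while True:
        time.sleep(1)


# The fixture works as a context manager; gentle=True converts the alarm
# into a TimeoutException raised in this thread (Unix main thread only,
# per the assumption above) rather than killing the process.
try:
    with fixtures.Timeout(3, gentle=True):
        poll_forever()
except fixtures.TimeoutException:
    print('timed out after roughly 3 seconds, as expected')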

View File

@@ -18,6 +18,8 @@ import string
 import time
 import uuid
 
+import fixtures
+
 from sahara.openstack.common import excutils
 from sahara.swift import swift_helper as sw
 from sahara.tests.integration.tests import base
@@ -45,17 +47,21 @@ class EDPTest(base.ITestCase):
     def _await_job_execution(self, job):
         timeout = self.common_config.JOB_LAUNCH_TIMEOUT * 60
         status = self.sahara.job_executions.get(job.id).info['status']
-        while status != 'SUCCEEDED':
-            if status == 'KILLED':
-                self.fail('Job status == \'KILLED\'.')
-            if timeout <= 0:
-                self.fail(
-                    'Job did not return to \'SUCCEEDED\' status within '
-                    '%d minute(s).' % self.common_config.JOB_LAUNCH_TIMEOUT
-                )
-            status = self.sahara.job_executions.get(job.id).info['status']
-            time.sleep(10)
-            timeout -= 10
+        try:
+            with fixtures.Timeout(timeout, gentle=True):
+                while status != 'SUCCEEDED':
+                    if status == 'KILLED':
+                        self.fail('Job status == \'KILLED\'.')
+
+                    time.sleep(10)
+                    status = self.sahara.job_executions.get(
+                        job.id).info['status']
+
+        except fixtures.TimeoutException:
+            self.fail(
+                'Job did not return to \'SUCCEEDED\' status within '
+                '%d minute(s).' % self.common_config.JOB_LAUNCH_TIMEOUT
+            )
 
     def _create_job_binaries(self, job_data_list, job_binary_internal_list,
                              job_binary_list, swift_connection=None,
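
The same fixture can also be attached to a whole test through useFixture(), which base.BaseTestCase inherits from testtools. The commit keeps the timeouts inline around each polling loop instead, but a whole-test variant would look roughly like this (hypothetical test, not part of the change):

import time

import fixtures
from oslotest import base


class WholeTestTimeoutExample(base.BaseTestCase):

    def test_slow_operation_errors_out(self):
        # If the remainder of the test runs longer than five seconds,
        # fixtures.TimeoutException is raised and the test errors out
        # instead of hanging.
        self.useFixture(fixtures.Timeout(5, gentle=True))
        time.sleep(1)  # stands in for real work that normally finishes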