Merge "Add integration tests to Intel plugin"

Jenkins 2014-02-18 01:44:11 +00:00 committed by Gerrit Code Review
commit 640b250b99
5 changed files with 365 additions and 28 deletions

View File

@@ -285,6 +285,99 @@ HDP_CONFIG_OPTS = [
cfg.BoolOpt('SKIP_SCALING_TEST', default=False)
]
IDH_CONFIG_GROUP = cfg.OptGroup(name='IDH')
IDH_CONFIG_OPTS = [
cfg.StrOpt('PLUGIN_NAME',
default='idh',
help='Name of the plugin.'),
cfg.StrOpt('MANAGER_FLAVOR_ID',
default='3',
help='Flavor ID for the Intel Manager instance. Intel Manager '
'requires >= 4 GB of RAM.'),
cfg.StrOpt('IMAGE_ID',
default=None,
help='ID of the image used for cluster creation. You can also '
'specify an image name or image tag instead of the image '
'ID. If no image-related parameters are specified, the '
'image for cluster creation will be chosen by the tag '
'"savanna_i_tests".'),
cfg.StrOpt('IMAGE_NAME',
default=None,
help='Name of the image used for cluster creation. You can '
'also specify an image ID or image tag instead of the '
'image name. If no image-related parameters are '
'specified, the image for cluster creation will be '
'chosen by the tag "savanna_i_tests".'),
cfg.StrOpt('IMAGE_TAG',
default=None,
help='Tag of the image used for cluster creation. You can also '
'specify an image ID or image name instead of the image '
'tag. If no image-related parameters are specified, the '
'image for cluster creation will be chosen by the tag '
'"savanna_i_tests".'),
cfg.StrOpt('SSH_USERNAME',
default=None,
help='Username used to access cluster nodes via SSH.'),
cfg.StrOpt('HADOOP_VERSION',
default='2.5.1',
help='Version of Hadoop.'),
cfg.StrOpt('HADOOP_USER',
default='hdfs',
help='Username used to access Hadoop services.'),
cfg.StrOpt('HADOOP_DIRECTORY',
default='/usr/lib/hadoop',
help='Directory where Hadoop jar files are located.'),
cfg.StrOpt('HADOOP_LOG_DIRECTORY',
default='/var/log/hadoop/userlogs',
help='Directory where logs of completed jobs are located.'),
cfg.DictOpt('HADOOP_PROCESSES_WITH_PORTS',
default={
'jobtracker': 54311,
'namenode': 50070,
'tasktracker': 50060,
'datanode': 50075,
'secondarynamenode': 50090,
'oozie': 11000
},
help='Map of Hadoop process names to ports for the IDH plugin.'),
cfg.DictOpt('PROCESS_NAMES',
default={
'nn': 'namenode',
'tt': 'tasktracker',
'dn': 'datanode'
},
help='Names for namenode, tasktracker and datanode '
'processes.'),
cfg.StrOpt(
'IDH_TARBALL_URL',
default='http://repo1.intelhadoop.com:3424/setup/'
'setup-intelhadoop-2.5.1-en-evaluation.RHEL.tar.gz'
),
cfg.StrOpt(
'IDH_REPO_URL',
default='http://repo1.intelhadoop.com:3424/evaluation/'
'en/RHEL/2.5.1/rpm'
),
cfg.StrOpt(
'OS_REPO_URL',
default='http://mirror.centos.org/centos-6/6/os/x86_64'
),
cfg.BoolOpt('SKIP_ALL_TESTS_FOR_PLUGIN',
default=False,
help='If this flag is True, all tests for the IDH plugin '
'will be skipped.'),
cfg.BoolOpt('SKIP_MAP_REDUCE_TEST', default=False),
cfg.BoolOpt('SKIP_SWIFT_TEST', default=True),
cfg.BoolOpt('SKIP_SCALING_TEST', default=False)
]
def register_config(config, config_group, config_opts):
config.register_group(config_group)
@@ -313,10 +406,14 @@ class ITConfig:
register_config(cfg.CONF, COMMON_CONFIG_GROUP, COMMON_CONFIG_OPTS)
register_config(cfg.CONF, VANILLA_CONFIG_GROUP, VANILLA_CONFIG_OPTS)
register_config(cfg.CONF, HDP_CONFIG_GROUP, HDP_CONFIG_OPTS)
register_config(cfg.CONF, IDH_CONFIG_GROUP, IDH_CONFIG_OPTS)
cfg.CONF(
[], project='Savanna_integration_tests',
default_config_files=config_files
)
self.common_config = cfg.CONF.COMMON
self.vanilla_config = cfg.CONF.VANILLA
self.hdp_config = cfg.CONF.HDP
self.idh_config = cfg.CONF.IDH
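For orientation, a brief usage sketch (assuming the integration-test package layout shown in this change): once register_config has run, the new IDH group is read through ITConfig exactly like the VANILLA and HDP groups, and the defaults above apply unless a value is overridden in one of the default_config_files.

# Sketch only: reading the newly registered IDH options.
from savanna.tests.integration.configs import config as cfg

idh = cfg.ITConfig().idh_config

plugin = idh.PLUGIN_NAME                  # 'idh' unless overridden
manager_flavor = idh.MANAGER_FLAVOR_ID    # '3' by default
ports = idh.HADOOP_PROCESSES_WITH_PORTS   # {'jobtracker': 54311, 'namenode': 50070, ...}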

View File

@@ -57,6 +57,7 @@ class ITestCase(unittest2.TestCase):
self.common_config = cfg.ITConfig().common_config
self.vanilla_config = cfg.ITConfig().vanilla_config
self.hdp_config = cfg.ITConfig().hdp_config
self.idh_config = cfg.ITConfig().idh_config
telnetlib.Telnet(
self.common_config.SAVANNA_HOST, self.common_config.SAVANNA_PORT
@@ -294,11 +295,11 @@ class ITestCase(unittest2.TestCase):
for i in range(self.common_config.HDFS_INITIALIZATION_TIMEOUT * 6):
time.sleep(10)
active_tasktracker_count = self.execute_command(
'sudo su -c "hadoop job -list-active-trackers" %s'
'sudo -u %s bash -c "hadoop job -list-active-trackers"'
% plugin_config.HADOOP_USER)[1]
active_datanode_count = int(
self.execute_command(
'sudo su -c "hadoop dfsadmin -report" %s \
'sudo -u %s bash -c "hadoop dfsadmin -report" \
| grep "Datanodes available:.*" | awk \'{print $3}\''
% plugin_config.HADOOP_USER)[1]
)
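The two command strings above show the pattern applied throughout this change: Hadoop commands are now run as the configured HADOOP_USER via 'sudo -u <user> bash -c "<command>"' rather than 'sudo su -c "<command>" <user>'. A minimal sketch of that template follows; the helper name is illustrative only and is not part of the change.

def run_as_hadoop_user(cmd, hadoop_user):
    # Build the command form used above: the quoted command runs in a bash
    # shell started directly for the Hadoop service user.
    return 'sudo -u %s bash -c "%s"' % (hadoop_user, cmd)

# run_as_hadoop_user('hadoop job -list-active-trackers', 'hdfs')
# -> 'sudo -u hdfs bash -c "hadoop job -list-active-trackers"'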

View File

@@ -0,0 +1,243 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from testtools import testcase
import unittest2

from savanna.openstack.common import excutils
from savanna.tests.integration.configs import config as cfg
from savanna.tests.integration.tests import base
from savanna.tests.integration.tests import cluster_configs
from savanna.tests.integration.tests import edp
from savanna.tests.integration.tests import map_reduce
from savanna.tests.integration.tests import scaling
from savanna.tests.integration.tests import swift


def errormessage(message):
def decorator(fct):
def wrapper(*args, **kwargs):
try:
fct(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
base.ITestCase.print_error_log(message, e)
return wrapper
return decorator


class IDHGatingTest(cluster_configs.ClusterConfigTest, edp.EDPTest,
map_reduce.MapReduceTest, swift.SwiftTest,
scaling.ScalingTest):
idh_config = cfg.ITConfig().idh_config
SKIP_MAP_REDUCE_TEST = idh_config.SKIP_MAP_REDUCE_TEST
SKIP_SWIFT_TEST = idh_config.SKIP_SWIFT_TEST
SKIP_SCALING_TEST = idh_config.SKIP_SCALING_TEST

def setUp(self):
super(IDHGatingTest, self).setUp()
self.idh_config = cfg.ITConfig().idh_config
self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
self.internal_neutron_net = None
if self.common_config.NEUTRON_ENABLED:
self.internal_neutron_net = self.get_internal_neutron_net_id()
self.floating_ip_pool = \
self.get_floating_ip_pool_id_for_neutron_net()
self.cluster_id = None
self.cluster_template_id = None
self.ng_template_ids = []
@errormessage("Failure while 'tt-dn' node group template creation: ")
def _create_tt_dn_ng_template(self):
template = {
'name': 'test-node-group-template-idh-tt-dn',
'plugin_config': self.idh_config,
'description': 'test node group template for Intel plugin',
'volumes_per_node': 0,
'volume_size': 0,
'node_processes': ['tasktracker', 'datanode'],
'floating_ip_pool': self.floating_ip_pool,
'node_configs': {}
}
self.ng_tmpl_tt_dn_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_tt_dn_id)
@errormessage("Failure while 'tt' node group template creation: ")
def _create_tt_ng_template(self):
template = {
'name': 'test-node-group-template-idh-tt',
'plugin_config': self.idh_config,
'description': 'test node group template for Intel plugin',
'volumes_per_node': 0,
'volume_size': 0,
'node_processes': ['tasktracker'],
'floating_ip_pool': self.floating_ip_pool,
'node_configs': {}
}
self.ng_tmpl_tt_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_tt_id)
@errormessage("Failure while 'dn' node group template creation: ")
def _create_dn_ng_template(self):
template = {
'name': 'test-node-group-template-idh-dn',
'plugin_config': self.idh_config,
'description': 'test node group template for Intel plugin',
'volumes_per_node': 0,
'volume_size': 0,
'node_processes': ['datanode'],
'floating_ip_pool': self.floating_ip_pool,
'node_configs': {}
}
self.ng_tmpl_dn_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_dn_id)
@errormessage("Failure while cluster template creation: ")
def _create_cluster_template(self):
template = {
'name': 'test-cluster-template-idh',
'plugin_config': self.idh_config,
'description': 'test cluster template for Intel plugin',
'cluster_configs': {
'general': {
'Enable Swift': True,
'IDH tarball URL': self.idh_config.IDH_TARBALL_URL,
'IDH repository URL': self.idh_config.IDH_REPO_URL,
'OS repository URL': self.idh_config.OS_REPO_URL
},
'HDFS': {
'dfs.replication': 1
}
},
'node_groups': [
{
'name': 'manager-node',
'flavor_id': self.idh_config.MANAGER_FLAVOR_ID,
'node_processes': ['manager'],
'floating_ip_pool': self.floating_ip_pool,
'count': 1
},
{
'name': 'master-node-jt-nn',
'flavor_id': self.flavor_id,
'node_processes': ['namenode', 'jobtracker'],
'floating_ip_pool': self.floating_ip_pool,
'count': 1
},
{
'name': 'worker-node-tt-dn',
'node_group_template_id': self.ng_tmpl_tt_dn_id,
'count': 2
},
{
'name': 'worker-node-dn',
'node_group_template_id': self.ng_tmpl_dn_id,
'count': 1
},
{
'name': 'worker-node-tt',
'node_group_template_id': self.ng_tmpl_tt_id,
'count': 1
}
],
'net_id': self.internal_neutron_net
}
self.cluster_template_id = self.create_cluster_template(**template)
@errormessage("Failure while cluster creation: ")
def _create_cluster(self):
cluster = {
'plugin_config': self.idh_config,
'cluster_template_id': self.cluster_template_id,
'description': 'test cluster',
'cluster_configs': {}
}
self.cluster_info = self.create_cluster_and_get_info(**cluster)
@errormessage("Failure while Map Reduce testing: ")
def _check_mapreduce(self):
self.map_reduce_testing(self.cluster_info)
@errormessage("Failure during check of Swift availability: ")
def _check_swift(self):
self.check_swift_availability(self.cluster_info)
@errormessage("Failure while cluster scaling: ")
def _check_scaling(self):
change_list = [
{
'operation': 'resize',
'info': ['worker-node-tt-dn', 4]
},
{
'operation': 'resize',
'info': ['worker-node-dn', 0]
},
{
'operation': 'resize',
'info': ['worker-node-tt', 0]
},
{
'operation': 'add',
'info': [
'new-worker-node-tt', 1, '%s' % self.ng_tmpl_tt_id
]
},
{
'operation': 'add',
'info': [
'new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id
]
}
]
self.cluster_info = self.cluster_scaling(self.cluster_info,
change_list)
@errormessage("Failure while Map Reduce testing after cluster scaling: ")
def _check_mapreduce_after_scaling(self):
if not self.idh_config.SKIP_SCALING_TEST:
self.map_reduce_testing(self.cluster_info)

@errormessage(
"Failure during check of Swift availability after cluster scaling: ")
def _check_swift_after_scaling(self):
if not self.idh_config.SKIP_SCALING_TEST:
self.check_swift_availability(self.cluster_info)

@unittest2.skipIf(cfg.ITConfig().idh_config.SKIP_ALL_TESTS_FOR_PLUGIN,
"All tests for the Intel plugin were skipped")
@testcase.attr('idh')
def test_idh_plugin_gating(self):
self._create_tt_dn_ng_template()
self._create_tt_ng_template()
self._create_dn_ng_template()
self._create_cluster_template()
self._create_cluster()
self._check_mapreduce()
self._check_swift()
self._check_scaling()
self._check_mapreduce_after_scaling()
self._check_swift_after_scaling()

def tearDown(self):
self.delete_objects(self.cluster_id, self.cluster_template_id,
self.ng_template_ids)
super(IDHGatingTest, self).tearDown()
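For reference, the errormessage decorator used above only logs a step-specific message through print_error_log and then re-raises the original exception, so a failing step still fails the gating test. Below is a standalone sketch of the same pattern; the functools.wraps call is an illustrative addition (it preserves the wrapped method's name in test output) and is not part of the committed code.

import functools

from savanna.openstack.common import excutils
from savanna.tests.integration.tests import base


def errormessage(message):
    def decorator(fct):
        @functools.wraps(fct)  # illustrative addition, not in this change
        def wrapper(*args, **kwargs):
            try:
                fct(*args, **kwargs)
            except Exception as e:
                # Print the step-specific message, then re-raise the original
                # exception so the gating test still fails.
                with excutils.save_and_reraise_exception():
                    base.ITestCase.print_error_log(message, e)
        return wrapper
    return decorator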

View File

@@ -74,23 +74,19 @@ run_pi_job() {
create_log_directory
echo -e "******************************** DPKG ****************************\n" >> $log
echo -e "`dpkg --get-selections | grep hadoop` \n\n\n" >> $log
echo -e "****************************** NETSTAT ***************************\n" >> $log
echo -e "`sudo netstat -plten | grep java` \n\n\n" >> $log
hadoop_version=-$HADOOP_VERSION
if [ "$PLUGIN_NAME" = "hdp" ]
hadoop_version=""
if [ "$PLUGIN_NAME" = "vanilla" ]
then
hadoop_version=""
hadoop_version=-$HADOOP_VERSION
fi
echo -e "************************ START OF \"PI\" JOB *********************\n" >> $log
echo -e "`sudo su -c \"cd $HADOOP_DIRECTORY && hadoop jar hadoop-examples$hadoop_version.jar pi $[$NODE_COUNT*10] $[$NODE_COUNT*1000]\" $HADOOP_USER` \n" >> $log
echo -e "`sudo -u $HADOOP_USER bash -c \"cd $HADOOP_DIRECTORY && hadoop jar hadoop-examples$hadoop_version.jar pi $[$NODE_COUNT*10] $[$NODE_COUNT*1000]\"` \n" >> $log
echo -e "************************ END OF \"PI\" JOB ***********************" >> $log
}
@@ -125,7 +121,7 @@ check_return_code_after_command_execution() {
then
if [ "$2" -ne 0 ]
then
sudo su -c "hadoop dfs -rmr /map-reduce-test" $HADOOP_USER && exit 1
sudo -u $HADOOP_USER bash -c "hadoop dfs -rmr /map-reduce-test" && exit 1
fi
fi
}
@@ -136,28 +132,28 @@ run_wordcount_job() {
dmesg > $dir/input
sudo su -c "hadoop dfs -ls /" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -ls /"
check_return_code_after_command_execution -exit `echo "$?"`
sudo su -c "hadoop dfs -mkdir /map-reduce-test" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -mkdir /map-reduce-test"
check_return_code_after_command_execution -exit `echo "$?"`
sudo su -c "hadoop dfs -copyFromLocal $dir/input /map-reduce-test/mydata" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -copyFromLocal $dir/input /map-reduce-test/mydata"
check_return_code_after_command_execution -clean_hdfs `echo "$?"`
hadoop_version=-$HADOOP_VERSION
if [ "$PLUGIN_NAME" = "hdp" ]
hadoop_version=""
if [ "$PLUGIN_NAME" = "vanilla" ]
then
hadoop_version=""
hadoop_version=-$HADOOP_VERSION
fi
sudo su -c "cd $HADOOP_DIRECTORY && hadoop jar hadoop-examples$hadoop_version.jar wordcount /map-reduce-test/mydata /map-reduce-test/output" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "cd $HADOOP_DIRECTORY && hadoop jar hadoop-examples$hadoop_version.jar wordcount /map-reduce-test/mydata /map-reduce-test/output"
check_return_code_after_command_execution -clean_hdfs `echo "$?"`
sudo su -c "hadoop dfs -copyToLocal /map-reduce-test/output/ $dir/output/" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -copyToLocal /map-reduce-test/output/ $dir/output/"
check_return_code_after_command_execution -exit `echo "$?"`
sudo su -c "hadoop dfs -rmr /map-reduce-test" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -rmr /map-reduce-test"
check_return_code_after_command_execution -exit `echo "$?"`
}
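The hadoop_version logic above, changed in both run_pi_job and run_wordcount_job, now defaults to the unversioned hadoop-examples.jar and appends the version suffix only for the vanilla plugin, so HDP and the new IDH clusters both pick up the plain jar name. A small Python rendering of the same selection, with an illustrative helper name:

def examples_jar(plugin_name, hadoop_version):
    # Mirrors the shell logic above: only the vanilla plugin uses the
    # versioned hadoop-examples jar; other plugins use hadoop-examples.jar.
    suffix = '-%s' % hadoop_version if plugin_name == 'vanilla' else ''
    return 'hadoop-examples%s.jar' % suffix

# examples_jar('vanilla', '1.2.1')  ->  'hadoop-examples-1.2.1.jar'
# examples_jar('idh', '2.5.1')      ->  'hadoop-examples.jar'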

View File

@@ -38,7 +38,7 @@ check_return_code_after_command_execution() {
then
if [ "$2" -ne 0 ]
then
sudo su -c "hadoop dfs -rmr /swift-test" $HADOOP_USER && exit 1
sudo -u $HADOOP_USER bash -c "hadoop dfs -rmr /swift-test" && exit 1
fi
fi
}
@@ -47,26 +47,26 @@ check_swift_availability() {
dd if=/dev/urandom of=/tmp/test-file bs=1048576 count=1
sudo su -c "hadoop dfs -mkdir /swift-test/" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -mkdir /swift-test/"
check_return_code_after_command_execution -exit `echo "$?"`
sudo su -c "hadoop dfs -copyFromLocal /tmp/test-file /swift-test/" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -copyFromLocal /tmp/test-file /swift-test/"
check_return_code_after_command_execution -clean_hdfs `echo "$?"`
sudo su -c "hadoop distcp -D fs.swift.service.savanna.username=$OS_USERNAME -D fs.swift.service.savanna.tenant=$OS_TENANT_NAME -D fs.swift.service.savanna.password=$OS_PASSWORD /swift-test/test-file swift://$SWIFT_CONTAINER_NAME.savanna/" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop distcp -D fs.swift.service.savanna.username=$OS_USERNAME -D fs.swift.service.savanna.tenant=$OS_TENANT_NAME -D fs.swift.service.savanna.password=$OS_PASSWORD /swift-test/test-file swift://$SWIFT_CONTAINER_NAME.savanna/"
check_return_code_after_command_execution -clean_hdfs `echo "$?"`
sudo su -c "hadoop distcp -D fs.swift.service.savanna.username=$OS_USERNAME -D fs.swift.service.savanna.tenant=$OS_TENANT_NAME -D fs.swift.service.savanna.password=$OS_PASSWORD swift://$SWIFT_CONTAINER_NAME.savanna/test-file /swift-test/swift-test-file" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop distcp -D fs.swift.service.savanna.username=$OS_USERNAME -D fs.swift.service.savanna.tenant=$OS_TENANT_NAME -D fs.swift.service.savanna.password=$OS_PASSWORD swift://$SWIFT_CONTAINER_NAME.savanna/test-file /swift-test/swift-test-file"
check_return_code_after_command_execution -clean_hdfs `echo "$?"`
sudo su -c "hadoop dfs -copyToLocal /swift-test/swift-test-file /tmp/swift-test-file" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -copyToLocal /swift-test/swift-test-file /tmp/swift-test-file"
check_return_code_after_command_execution -clean_hdfs `echo "$?"`
sudo su -c "hadoop dfs -rmr /swift-test" $HADOOP_USER
sudo -u $HADOOP_USER bash -c "hadoop dfs -rmr /swift-test"
compare_files /tmp/test-file /tmp/swift-test-file
sudo rm /tmp/test-file /tmp/swift-test-file
}
check_swift_availability