diff --git a/sahara/tests/integration/configs/config.py b/sahara/tests/integration/configs/config.py
index ff63cbaf..1bf221f8 100644
--- a/sahara/tests/integration/configs/config.py
+++ b/sahara/tests/integration/configs/config.py
@@ -176,9 +176,6 @@ VANILLA_CONFIG_OPTS = [
     cfg.StrOpt('HADOOP_USER',
                default='hadoop',
                help='Username which is used for access to Hadoop services.'),
-    cfg.StrOpt('HADOOP_DIRECTORY',
-               default='/usr/share/hadoop',
-               help='Directory where Hadoop jar files are located.'),
     cfg.StrOpt('HADOOP_EXAMPLES_JAR_PATH',
                default='/usr/share/hadoop/hadoop-examples-1.2.1.jar',
                help='Path to hadoop examples jar file.'),
@@ -253,9 +250,6 @@ VANILLA_TWO_CONFIG_OPTS = [
     cfg.StrOpt('HADOOP_USER',
                default='hadoop',
                help='Username which is used for access to Hadoop services.'),
-    cfg.StrOpt('HADOOP_DIRECTORY',
-               default='/opt/hadoop',
-               help='Directory where Hadoop jar files are located.'),
     cfg.StrOpt('HADOOP_EXAMPLES_JAR_PATH',
                default='/opt/hadoop/share/hadoop/mapreduce/'
                        'hadoop-mapreduce-examples-2.3.0.jar',
@@ -338,9 +332,6 @@ HDP_CONFIG_OPTS = [
     cfg.StrOpt('HADOOP_USER',
                default='hdfs',
                help='Username which is used for access to Hadoop services.'),
-    cfg.StrOpt('HADOOP_DIRECTORY',
-               default='/usr/lib/hadoop',
-               help='Directory where Hadoop jar files are located.'),
     cfg.StrOpt('HADOOP_EXAMPLES_JAR_PATH',
                default='/usr/lib/hadoop/hadoop-examples.jar',
                help='Path to hadoop examples jar file.'),
diff --git a/sahara/tests/integration/configs/itest.conf.sample-full b/sahara/tests/integration/configs/itest.conf.sample-full
index 400db16e..066433cd 100644
--- a/sahara/tests/integration/configs/itest.conf.sample-full
+++ b/sahara/tests/integration/configs/itest.conf.sample-full
@@ -141,9 +141,6 @@
 # Username which is used for access to Hadoop services (string value)
 #HADOOP_USER = 'hadoop'
 
-# Directory where Hadoop jar files are located (string value)
-#HADOOP_DIRECTORY = '/usr/share/hadoop'
-
 # Directory where logs of completed jobs are located (string value)
 #HADOOP_LOG_DIRECTORY = '/mnt/log/hadoop/hadoop/userlogs'
 
@@ -209,9 +206,6 @@
 # Username which is used for access to Hadoop services (string value)
 #HADOOP_USER = 'hdfs'
 
-# Directory where Hadoop jar files are located (string value)
-#HADOOP_DIRECTORY = '/usr/lib/hadoop'
-
 # Directory where logs of completed jobs are located (string value)
 #HADOOP_LOG_DIRECTORY = '/mnt/hadoop/mapred/userlogs'
 
@@ -294,4 +288,4 @@
 #SKIP_ALL_TESTS_FOR_PLUGIN = False
 #SKIP_EDP_TEST = False
 #SKIP_SWIFT_TEST = False
-#SKIP_SCALING_TEST = False
\ No newline at end of file
+#SKIP_SCALING_TEST = False
diff --git a/sahara/tests/integration/tests/map_reduce.py b/sahara/tests/integration/tests/map_reduce.py
index 417b5662..e0fcd048 100644
--- a/sahara/tests/integration/tests/map_reduce.py
+++ b/sahara/tests/integration/tests/map_reduce.py
@@ -67,13 +67,10 @@ class MapReduceTest(base.ITestCase):
             hadoop_log_directory = (
                 plugin_config.HADOOP_LOG_DIRECTORY_ON_VOLUME)
         extra_script_parameters = {
-            'HADOOP_VERSION': plugin_config.HADOOP_VERSION,
-            'HADOOP_DIRECTORY': plugin_config.HADOOP_DIRECTORY,
             'HADOOP_EXAMPLES_JAR_PATH': plugin_config.HADOOP_EXAMPLES_JAR_PATH,
             'HADOOP_LOG_DIRECTORY': hadoop_log_directory,
             'HADOOP_USER': plugin_config.HADOOP_USER,
-            'NODE_COUNT': cluster_info['node_info']['node_count'],
-            'PLUGIN_NAME': plugin_config.PLUGIN_NAME
+            'NODE_COUNT': cluster_info['node_info']['node_count']
         }
         for instance in node_group['instances']:
             try:
diff --git a/sahara/tests/integration/tests/resources/map_reduce_test_script.sh b/sahara/tests/integration/tests/resources/map_reduce_test_script.sh
index 575ae371..d2405b17 100644
--- a/sahara/tests/integration/tests/resources/map_reduce_test_script.sh
+++ b/sahara/tests/integration/tests/resources/map_reduce_test_script.sh
@@ -3,14 +3,11 @@
 dir=/tmp/MapReduceTestOutput
 log=$dir/log.txt
 
-HADOOP_VERSION=""
 HADOOP_EXAMPLES_JAR_PATH=""
-HADOOP_DIRECTORY=""
 HADOOP_LOG_DIRECTORY=""
 HADOOP_USER=""
 NODE_COUNT=""
-PLUGIN_NAME=""
 
 case $1 in
         run_pi_job)
@@ -80,12 +77,6 @@ run_pi_job() {
 
     echo -e "`sudo netstat -plten | grep java` \n\n\n" >> $log
 
-    hadoop_version=""
-    if [ "$PLUGIN_NAME" = "vanilla" ]
-    then
-        hadoop_version=-$HADOOP_VERSION
-    fi
-
     echo -e "************************ START OF \"PI\" JOB *********************\n" >> $log
 
     sudo -u $HADOOP_USER bash -lc "hadoop jar $HADOOP_EXAMPLES_JAR_PATH pi $[$NODE_COUNT*10] $[$NODE_COUNT*1000]" >> $log
@@ -143,12 +134,6 @@ run_wordcount_job() {
 
     sudo -u $HADOOP_USER bash -lc "hadoop dfs -copyFromLocal $dir/input /map-reduce-test/mydata"
     check_return_code_after_command_execution -clean_hdfs `echo "$?"`
 
-    hadoop_version=""
-    if [ "$PLUGIN_NAME" = "vanilla" ]
-    then
-        hadoop_version=-$HADOOP_VERSION
-    fi
-
     sudo -u $HADOOP_USER bash -lc "hadoop jar $HADOOP_EXAMPLES_JAR_PATH wordcount /map-reduce-test/mydata /map-reduce-test/output"
     check_return_code_after_command_execution -clean_hdfs `echo "$?"`
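Note for reviewers: the `extra_script_parameters` dict in map_reduce.py and the empty `NAME=""` placeholder assignments at the top of map_reduce_test_script.sh must stay in sync, which is why this change removes `HADOOP_VERSION`, `HADOOP_DIRECTORY`, and `PLUGIN_NAME` from both sides at once. The snippet below is a minimal sketch of that relationship, not Sahara's actual transfer helper; `render_script` and the sample values are hypothetical, and it assumes the harness fills placeholders by plain text substitution before pushing the script to each instance.

```python
import re


def render_script(script_text, parameters):
    """Fill NAME="" placeholders in a test script with configured values.

    Hypothetical stand-in for the harness's script-transfer step, shown
    only to illustrate why each dict key needs a matching placeholder
    line (and vice versa) in the shell script.
    """
    for name, value in parameters.items():
        # Replace e.g. HADOOP_USER="" with HADOOP_USER="hadoop".
        script_text = re.sub(r'^%s=""$' % re.escape(name),
                             '%s="%s"' % (name, value),
                             script_text,
                             flags=re.MULTILINE)
    return script_text


# Mirrors the trimmed extra_script_parameters dict; values are examples
# taken from the vanilla-plugin defaults above.
params = {
    'HADOOP_EXAMPLES_JAR_PATH': '/usr/share/hadoop/hadoop-examples-1.2.1.jar',
    'HADOOP_LOG_DIRECTORY': '/mnt/log/hadoop/hadoop/userlogs',
    'HADOOP_USER': 'hadoop',
    'NODE_COUNT': 3,
}

if __name__ == '__main__':
    sample = 'HADOOP_USER=""\nNODE_COUNT=""\n'
    print(render_script(sample, params))
```

Under that assumption, a leftover placeholder such as `PLUGIN_NAME=""` would simply stay empty once its dict key is gone, and the `if [ "$PLUGIN_NAME" = "vanilla" ]` branches could never fire, so deleting them along with the unused `hadoop_version` variable is safe.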