diff --git a/etc/edp-examples/README.rst b/etc/edp-examples/README.rst
new file mode 100644
index 0000000000..116a86a395
--- /dev/null
+++ b/etc/edp-examples/README.rst
@@ -0,0 +1,6 @@
+=====================
+Sahara files for EDP
+=====================
+
+All files from this directory have been moved to the new
+sahara-scenario repository: https://github.com/openstack/sahara-scenario
diff --git a/etc/edp-examples/edp-hive/expected_output.csv b/etc/edp-examples/edp-hive/expected_output.csv
deleted file mode 100644
index be6bf27cb3..0000000000
--- a/etc/edp-examples/edp-hive/expected_output.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-Boris
-Homer
diff --git a/etc/edp-examples/edp-hive/input.csv b/etc/edp-examples/edp-hive/input.csv
deleted file mode 100644
index d682e1304b..0000000000
--- a/etc/edp-examples/edp-hive/input.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-Mike,20
-Boris,42
-Bart,12
-Homer,40
diff --git a/etc/edp-examples/edp-hive/script.q b/etc/edp-examples/edp-hive/script.q
deleted file mode 100644
index 22a0428b0a..0000000000
--- a/etc/edp-examples/edp-hive/script.q
+++ /dev/null
@@ -1,6 +0,0 @@
-CREATE DATABASE IF NOT EXISTS tests LOCATION '/user/hive/warehouse/tests.db';
-CREATE EXTERNAL TABLE IF NOT EXISTS tests.students (name STRING, age INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE;
-LOAD DATA INPATH '${INPUT}' INTO TABLE tests.students;
-INSERT OVERWRITE DIRECTORY '${OUTPUT}' SELECT name FROM tests.students WHERE age > 30;
-DROP TABLE IF EXISTS tests.students;
-DROP DATABASE IF EXISTS tests;
diff --git a/etc/edp-examples/edp-java/README.rst b/etc/edp-examples/edp-java/README.rst
deleted file mode 100644
index 740229b612..0000000000
--- a/etc/edp-examples/edp-java/README.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-=====================
-EDP WordCount Example
-=====================
-
-Overview
---------
-
-``WordCount.java`` is a modified version of the WordCount example bundled with
-version 1.2.1 of Apache Hadoop. It has been extended for use from a java action
-in an Oozie workflow. The modification below allows any configuration values
-from the ``<configuration>`` tag in an Oozie workflow to be set in the
-Configuration object::
-
-    // This will add properties from the <configuration> tag specified
-    // in the Oozie workflow. For java actions, Oozie writes the
-    // configuration values to a file pointed to by ooze.action.conf.xml
-    conf.addResource(new Path("file:///",
-                     System.getProperty("oozie.action.conf.xml")));
-
-In the example workflow, we use the ``<configuration>`` tag to specify user and
-password configuration values for accessing swift objects.
-
-Compiling
----------
-
-To build the jar, add ``hadoop-core`` and ``commons-cli`` to the classpath.
-
-On a node running Ubuntu 13.04 with hadoop 1.2.1 the following commands
-will compile ``WordCount.java`` from within the ``src`` directory::
-
-    $ mkdir wordcount_classes
-    $ javac -classpath /usr/share/hadoop/hadoop-core-1.2.1.jar:/usr/share/hadoop/lib/commons-cli-1.2.jar -d wordcount_classes WordCount.java
-    $ jar -cvf edp-java.jar -C wordcount_classes/ .
- -Note, on a node with hadoop 2.3.0 the ``javac`` command above can be replaced with: - - $ javac -classpath /opt/hadoop-2.3.0/share/hadoop/common/hadoop-common-2.3.0.jar:/opt/hadoop-2.3.0/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.3.0.jar:/opt/hadoop-2.3.0/share/hadoop/common/lib/commons-cli-1.2.jar:/opt/hadoop-2.3.0/share/hadoop/mapreduce/lib/hadoop-annotations-2.3.0.jar -d wordcount_classes WordCount.java - -Running from the Sahara UI --------------------------- - -Running the WordCount example from the sahara UI is very similar to running a -Pig, Hive, or MapReduce job. - -1. Create a job binary that points to the ``edp-java.jar`` file -2. Create a ``Java`` job type and add the job binary to the ``libs`` value -3. Launch the job: - - 1. Add the input and output paths to ``args`` - 2. If swift input or output paths are used, set the - ``fs.swift.service.sahara.username`` and - ``fs.swift.service.sahara.password`` configuration values - 3. The Sahara UI will prompt for the required ``main_class`` value and - the optional ``java_opts`` value - - diff --git a/etc/edp-examples/edp-java/edp-java.jar b/etc/edp-examples/edp-java/edp-java.jar deleted file mode 100644 index 910a24aae8..0000000000 Binary files a/etc/edp-examples/edp-java/edp-java.jar and /dev/null differ diff --git a/etc/edp-examples/edp-java/oozie_command_line/README.rst b/etc/edp-examples/edp-java/oozie_command_line/README.rst deleted file mode 100644 index 78dee5392c..0000000000 --- a/etc/edp-examples/edp-java/oozie_command_line/README.rst +++ /dev/null @@ -1,28 +0,0 @@ -===================================================== -Running WordCount example from the Oozie command line -===================================================== - -1. Copy the ``edp-java.jar`` file from ``sahara/edp-examples/edp-java`` - to ``./wordcount/lib/edp-java.jar``. - -2. Modify the ``job.properties`` file to specify the correct ``jobTracker`` - and ``nameNode`` addresses for your cluster. - -3. Modify the ``workflow.xml`` file to contain the correct input and output - paths. These paths may be sahara swift urls or HDFS paths. - - * If swift urls are used, set the ``fs.swift.service.sahara.username`` - and ``fs.swift.service.sahara.password`` properties in the - ```` section. - -4. Upload your ``wordcount`` directory to the ``oozie.wf.application.path`` HDFS directory - (the ``oozie.wf.application.path`` directory is specified in the ``job.properties`` file): - - $ hadoop fs -put wordcount oozie.wf.application.path - -5. Launch the job, specifying the correct oozie server and port: - - $ oozie job -oozie http://oozie_server:port/oozie -config wordcount/job.properties -run - -6. Don't forget to create your swift input path! A sahara swift url looks - like ``swift://container.sahara/object``. diff --git a/etc/edp-examples/edp-java/oozie_command_line/wordcount/job.properties b/etc/edp-examples/edp-java/oozie_command_line/wordcount/job.properties deleted file mode 100644 index 5e9f4fb7fd..0000000000 --- a/etc/edp-examples/edp-java/oozie_command_line/wordcount/job.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -nameNode=hdfs://1.2.3.4:8020 -jobTracker=1.2.3.4:8021 -queueName=default - -oozie.wf.application.path=${nameNode}/user/${user.name}/wordcount diff --git a/etc/edp-examples/edp-java/oozie_command_line/wordcount/workflow.xml b/etc/edp-examples/edp-java/oozie_command_line/wordcount/workflow.xml deleted file mode 100644 index d8bc820ffd..0000000000 --- a/etc/edp-examples/edp-java/oozie_command_line/wordcount/workflow.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - ${jobTracker} - ${nameNode} - - - mapred.job.queue.name - ${queueName} - - - fs.swift.service.sahara.username - swiftuser - - - fs.swift.service.sahara.password - swiftpassword - - - org.openstack.sahara.examples.WordCount - swift://user.sahara/input - swift://user.sahara/output - - - - - - Java failed, error message[${wf:errorMessage(wf:lastErrorNode())}] - - - diff --git a/etc/edp-examples/edp-java/src/NOTICE.txt b/etc/edp-examples/edp-java/src/NOTICE.txt deleted file mode 100644 index 62fc5816c9..0000000000 --- a/etc/edp-examples/edp-java/src/NOTICE.txt +++ /dev/null @@ -1,2 +0,0 @@ -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). diff --git a/etc/edp-examples/edp-java/src/WordCount.java b/etc/edp-examples/edp-java/src/WordCount.java deleted file mode 100644 index 8f834b5b4d..0000000000 --- a/etc/edp-examples/edp-java/src/WordCount.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.openstack.sahara.examples; - -import java.io.IOException; -import java.util.StringTokenizer; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.Mapper; -import org.apache.hadoop.mapreduce.Reducer; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.hadoop.util.GenericOptionsParser; - -public class WordCount { - - public static class TokenizerMapper - extends Mapper{ - - private final static IntWritable one = new IntWritable(1); - private Text word = new Text(); - - public void map(Object key, Text value, Context context - ) throws IOException, InterruptedException { - StringTokenizer itr = new StringTokenizer(value.toString()); - while (itr.hasMoreTokens()) { - word.set(itr.nextToken()); - context.write(word, one); - } - } - } - - public static class IntSumReducer - extends Reducer { - private IntWritable result = new IntWritable(); - - public void reduce(Text key, Iterable values, - Context context - ) throws IOException, InterruptedException { - int sum = 0; - for (IntWritable val : values) { - sum += val.get(); - } - result.set(sum); - context.write(key, result); - } - } - - public static void main(String[] args) throws Exception { - Configuration conf = new Configuration(); - String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); - if (otherArgs.length != 2) { - System.err.println("Usage: wordcount "); - System.exit(2); - } - - // ---- Begin modifications for EDP ---- - // This will add properties from the tag specified - // in the Oozie workflow. For java actions, Oozie writes the - // configuration values to a file pointed to by ooze.action.conf.xml - conf.addResource(new Path("file:///", - System.getProperty("oozie.action.conf.xml"))); - // ---- End modifications for EDP ---- - - Job job = new Job(conf, "word count"); - job.setJarByClass(WordCount.class); - job.setMapperClass(TokenizerMapper.class); - job.setCombinerClass(IntSumReducer.class); - job.setReducerClass(IntSumReducer.class); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(IntWritable.class); - FileInputFormat.addInputPath(job, new Path(otherArgs[0])); - FileOutputFormat.setOutputPath(job, new Path(otherArgs[1])); - System.exit(job.waitForCompletion(true) ? 0 : 1); - } -} diff --git a/etc/edp-examples/edp-mapreduce/edp-mapreduce.jar b/etc/edp-examples/edp-mapreduce/edp-mapreduce.jar deleted file mode 100644 index 04388a9583..0000000000 Binary files a/etc/edp-examples/edp-mapreduce/edp-mapreduce.jar and /dev/null differ diff --git a/etc/edp-examples/edp-pig/top-todoers/README.rst b/etc/edp-examples/edp-pig/top-todoers/README.rst deleted file mode 100644 index 2c1cb3b9bd..0000000000 --- a/etc/edp-examples/edp-pig/top-todoers/README.rst +++ /dev/null @@ -1,68 +0,0 @@ -Top TODOers Pig job -=================== - -This script calculates top TODOers in input sources. - -Example of usage ----------------- - -This pig script can process as many input files (sources) as you want. -Just put all input files in a directory in HDFS or container in Swift and -give the path of the HDFS directory (Swift object) as input DataSource for EDP. - -Here are steps how to prepare input data: - -1. Create dir 'input' - -.. sourcecode:: console - - $ mkdir input - -2. 
Get some sources from GitHub and put it to 'input' directory: - -.. sourcecode:: console - - $ cd input - $ git clone "https://github.com/openstack/swift.git" - $ git clone "https://github.com/openstack/nova.git" - $ git clone "https://github.com/openstack/glance.git" - $ git clone "https://github.com/openstack/image-api.git" - $ git clone "https://github.com/openstack/neutron.git" - $ git clone "https://github.com/openstack/horizon.git" - $ git clone "https://github.com/openstack/python-novaclient.git" - $ git clone "https://github.com/openstack/python-keystoneclient.git" - $ git clone "https://github.com/openstack/oslo-incubator.git" - $ git clone "https://github.com/openstack/python-neutronclient.git" - $ git clone "https://github.com/openstack/python-glanceclient.git" - $ git clone "https://github.com/openstack/python-swiftclient.git" - $ git clone "https://github.com/openstack/python-cinderclient.git" - $ git clone "https://github.com/openstack/ceilometer.git" - $ git clone "https://github.com/openstack/cinder.git" - $ git clone "https://github.com/openstack/heat.git" - $ git clone "https://github.com/openstack/python-heatclient.git" - $ git clone "https://github.com/openstack/python-ceilometerclient.git" - $ git clone "https://github.com/openstack/oslo.config.git" - $ git clone "https://github.com/openstack/ironic.git" - $ git clone "https://github.com/openstack/python-ironicclient.git" - $ git clone "https://github.com/openstack/operations-guide.git" - $ git clone "https://github.com/openstack/keystone.git" - $ git clone "https://github.com/openstack/oslo.messaging.git" - $ git clone "https://github.com/openstack/oslo.sphinx.git" - $ git clone "https://github.com/openstack/oslo.version.git" - $ git clone "https://github.com/openstack/sahara.git" - $ git clone "https://github.com/openstack/python-saharaclient.git" - $ git clone "https://github.com/openstack/openstack.git" - $ cd .. - -3. Create single file containing all sources: - -.. sourcecode:: console - - tar -cf input.tar input/* - -.. note:: - - Pig can operate with raw files as well as with compressed data, so in this - step you might want to create *.gz file with sources and it should work. - -4. 
Upload input.tar to Swift or HDFS as input data source for EDP processing \ No newline at end of file diff --git a/etc/edp-examples/edp-pig/top-todoers/data/expected_output b/etc/edp-examples/edp-pig/top-todoers/data/expected_output deleted file mode 100644 index f2eb5ac34f..0000000000 --- a/etc/edp-examples/edp-pig/top-todoers/data/expected_output +++ /dev/null @@ -1,3 +0,0 @@ -2 https://launchpad.net/~slukjanov -1 https://launchpad.net/~aignatov -1 https://launchpad.net/~mimccune \ No newline at end of file diff --git a/etc/edp-examples/edp-pig/top-todoers/data/input b/etc/edp-examples/edp-pig/top-todoers/data/input deleted file mode 100644 index 91f25eea6e..0000000000 --- a/etc/edp-examples/edp-pig/top-todoers/data/input +++ /dev/null @@ -1,18 +0,0 @@ -# There is some source file with TODO labels inside - - -def sum(a, b): - # TODO(slukjanov): implement how to add numbers - return None - -def sum(a, b): - # TODO(slukjanov): implement how to subtract numbers - return None - -def divide(a, b): - # TODO(aignatov): implement how to divide numbers - return None - -def mul(a, b): - # TODO(mimccune): implement how to multiply numbers - return None diff --git a/etc/edp-examples/edp-pig/top-todoers/example.pig b/etc/edp-examples/edp-pig/top-todoers/example.pig deleted file mode 100644 index e35d05aa4d..0000000000 --- a/etc/edp-examples/edp-pig/top-todoers/example.pig +++ /dev/null @@ -1,17 +0,0 @@ -input_lines = LOAD '$INPUT' AS (line:chararray); - --- filter out any lines that are not with TODO -todo_lines = FILTER input_lines BY line MATCHES '.*TODO\\s*\\(\\w+\\)+.*'; -ids = FOREACH todo_lines GENERATE FLATTEN(REGEX_EXTRACT($0, '(.*)\\((.*)\\)(.*)', 2)); - --- create a group for each word -id_groups = GROUP ids BY $0; - --- count the entries in each group -atc_count = FOREACH id_groups GENERATE COUNT(ids) AS count, group AS atc; - --- order the records by count -result = ORDER atc_count BY count DESC; -result = FOREACH result GENERATE count, CONCAT('https://launchpad.net/~', atc); - -STORE result INTO '$OUTPUT' USING PigStorage(); diff --git a/etc/edp-examples/edp-pig/trim-spaces/README.rst b/etc/edp-examples/edp-pig/trim-spaces/README.rst deleted file mode 100644 index 2dfc5147aa..0000000000 --- a/etc/edp-examples/edp-pig/trim-spaces/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -Example Pig job -=============== - -This script trims spaces in input text - -This sample pig job is based on examples in Chapter 11 of -"Hadoop: The Definitive Guide" by Tom White, published by O'Reilly Media. 
-The original code can be found in a maven project at - -https://github.com/tomwhite/hadoop-book - diff --git a/etc/edp-examples/edp-pig/trim-spaces/data/expected_output b/etc/edp-examples/edp-pig/trim-spaces/data/expected_output deleted file mode 100644 index 60a0142703..0000000000 --- a/etc/edp-examples/edp-pig/trim-spaces/data/expected_output +++ /dev/null @@ -1,4 +0,0 @@ -pomegranate -banana -apple -lychee \ No newline at end of file diff --git a/etc/edp-examples/edp-pig/trim-spaces/data/input b/etc/edp-examples/edp-pig/trim-spaces/data/input deleted file mode 100644 index d2079b577e..0000000000 --- a/etc/edp-examples/edp-pig/trim-spaces/data/input +++ /dev/null @@ -1,4 +0,0 @@ - pomegranate - banana - apple - lychee \ No newline at end of file diff --git a/etc/edp-examples/edp-pig/trim-spaces/example.pig b/etc/edp-examples/edp-pig/trim-spaces/example.pig deleted file mode 100644 index 4141906ee7..0000000000 --- a/etc/edp-examples/edp-pig/trim-spaces/example.pig +++ /dev/null @@ -1,3 +0,0 @@ -A = load '$INPUT' using PigStorage(':') as (fruit: chararray); -B = foreach A generate com.hadoopbook.pig.Trim(fruit); -store B into '$OUTPUT' USING PigStorage(); diff --git a/etc/edp-examples/edp-pig/trim-spaces/udf.jar b/etc/edp-examples/edp-pig/trim-spaces/udf.jar deleted file mode 100644 index 970001c7ff..0000000000 Binary files a/etc/edp-examples/edp-pig/trim-spaces/udf.jar and /dev/null differ diff --git a/etc/edp-examples/edp-shell/shell-example.sh b/etc/edp-examples/edp-shell/shell-example.sh deleted file mode 100644 index cee6e3f9f1..0000000000 --- a/etc/edp-examples/edp-shell/shell-example.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -cat $EXTRA_FILE > $1 -echo $USER >> $1 \ No newline at end of file diff --git a/etc/edp-examples/edp-shell/shell-example.txt b/etc/edp-examples/edp-shell/shell-example.txt deleted file mode 100644 index fe9a086af8..0000000000 --- a/etc/edp-examples/edp-shell/shell-example.txt +++ /dev/null @@ -1 +0,0 @@ -The user running this shell script is: \ No newline at end of file diff --git a/etc/edp-examples/edp-spark/NOTICE.txt b/etc/edp-examples/edp-spark/NOTICE.txt deleted file mode 100644 index 87f2397f2a..0000000000 --- a/etc/edp-examples/edp-spark/NOTICE.txt +++ /dev/null @@ -1,2 +0,0 @@ -This example includes software developed by The Apache Software -Foundation (http://www.apache.org/). diff --git a/etc/edp-examples/edp-spark/README.rst b/etc/edp-examples/edp-spark/README.rst deleted file mode 100644 index 0f2228287c..0000000000 --- a/etc/edp-examples/edp-spark/README.rst +++ /dev/null @@ -1,66 +0,0 @@ -Example Spark Job -================= - -This example contains the compiled classes for SparkPi extracted from -the example jar distributed with Apache Spark version 1.3.1. - -SparkPi example estimates Pi. It can take a single optional integer -argument specifying the number of slices (tasks) to use. - -Example spark-wordcount Job -=========================== - -spark-wordcount is a modified version of the WordCount example from Apache -Spark. It can read input data from hdfs or swift container, then output the -number of occurrences of each word to standard output or hdfs. - -Launching wordcount job from Sahara UI --------------------------------------- - -1. Create a job binary that points to ``spark-wordcount.jar``. -2. Create a job template and set ``spark-wordcount.jar`` as the main binary - of the job template. -3. Create a Swift container with your input file. As example, you can upload - ``sample_input.txt``. -3. Launch job: - - 1. 
Put path to input file in ``args`` - 2. Put path to output file in ``args`` - 3. Fill the ``Main class`` input with the following class: - ``sahara.edp.spark.SparkWordCount`` - 4. Put the following values in the job's configs: - ``edp.spark.adapt_for_swift`` with value ``True``, - ``fs.swift.service.sahara.password`` with password for your username, - and ``fs.swift.service.sahara.username`` with your username. These - values are required for correct access to your input file, located in - Swift. - 5. Execute the job. You will be able to view your output in hdfs. - -Launching spark-kafka-example ------------------------------ - -0. Create a cluster with ``Kafka Broker``, ``ZooKeeper`` and - ``Spark History Server``. The Ambari plugin can be used for that purpose. - Please, use your keypair during cluster creation to have the ability to - ssh in instances with that processes. For simplicity, these services - should located on same the node. -1. Ssh to the node with the ``Kafka Broker`` service. Create a sample topic - using the following command: - ``path/kafka-topics.sh --create --zookeeper localhost:2181 \ - --replication-factor 1 --partitions 1 --topic test-topic``. - Also execute ``path/kafka-console-producer.sh --broker-list \ - localhost:6667 --topic test-topic`` and then put several messages in the - topic. Please, note that you need to replace the values ``localhost`` - and ``path`` with your own values. -2. Download the Spark Streaming utils to the node with your - ``Spark History Server`` from this URL: - ``http://central.maven.org/maven2/org/apache/spark/spark-streaming-kafka-assembly_2.10/1.4.1/spark-streaming-kafka-assembly_2.10-1.4.1.jar``. - Now you are ready to launch your job from sahara UI. -3. Create a job binary that points to ``spark-kafka-example.py``. - Also you need to create a job that uses this job binary as a main binary. -4. Execute the job with the following job configs: - ``edp.spark.driver.classpath`` with a value that points to the utils - downloaded during step 2. Also the job should be run with the following - arguments: ``localhost:2181`` as the first argument, ``test-topic`` as - the second, and ``30`` as the third. -5. Congratulations, your job was successfully launched! diff --git a/etc/edp-examples/edp-spark/sample_input.txt b/etc/edp-examples/edp-spark/sample_input.txt deleted file mode 100644 index 2577445083..0000000000 --- a/etc/edp-examples/edp-spark/sample_input.txt +++ /dev/null @@ -1,10 +0,0 @@ -one -one -one -one -two -two -two -three -three -four diff --git a/etc/edp-examples/edp-spark/spark-example.jar b/etc/edp-examples/edp-spark/spark-example.jar deleted file mode 100644 index a85d79cb23..0000000000 Binary files a/etc/edp-examples/edp-spark/spark-example.jar and /dev/null differ diff --git a/etc/edp-examples/edp-spark/spark-kafka-example.py b/etc/edp-examples/edp-spark/spark-kafka-example.py deleted file mode 100644 index 5d33660492..0000000000 --- a/etc/edp-examples/edp-spark/spark-kafka-example.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import sys - -from pyspark import SparkContext -from pyspark.streaming.kafka import KafkaUtils -from pyspark.streaming import StreamingContext - - -def main(): - if len(sys.argv) != 4: - print("Usage: kafka_wordcount.py ", - file=sys.stderr) - exit(-1) - - sc = SparkContext(appName="PythonStreamingKafkaWordCount") - ssc = StreamingContext(sc, 1) - timeout = None - if len(sys.argv) == 4: - zk, topic, timeout = sys.argv[1:] - timeout = int(timeout) - else: - zk, topic = sys.argv[1:] - kvs = KafkaUtils.createStream( - ssc, zk, "spark-streaming-consumer", {topic: 1}) - lines = kvs.map(lambda x: x[1]) - counts = lines.flatMap(lambda line: (line.split(" ")) - .map(lambda word: (word, 1)) - .reduceByKey(lambda a, b: a+b)) - counts.pprint() - kwargs = {} - if timeout: - kwargs['timeout'] = timeout - ssc.start() - ssc.awaitTermination(**kwargs) diff --git a/etc/edp-examples/edp-spark/spark-wordcount.jar b/etc/edp-examples/edp-spark/spark-wordcount.jar deleted file mode 100644 index b5802a40d2..0000000000 Binary files a/etc/edp-examples/edp-spark/spark-wordcount.jar and /dev/null differ diff --git a/etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.6.0.jar b/etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.6.0.jar deleted file mode 100644 index 80f9588183..0000000000 Binary files a/etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.6.0.jar and /dev/null differ diff --git a/etc/edp-examples/json-api-examples/v1.1/README.rst b/etc/edp-examples/json-api-examples/v1.1/README.rst deleted file mode 100644 index 3546272226..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/README.rst +++ /dev/null @@ -1,277 +0,0 @@ -============================= - Sahara EDP JSON API Examples -============================= ------- - v1.1 ------- - -Overview -======== - -This document provides a step-by-step guide to usage of the Sahara EDP API, -with JSON payload examples, covering: - -* Data source creation in both swift and HDFS, -* Binary storage in both swift and the sahara database, and -* Job creation for Pig, Map/Reduce, Java, and Spark jobs. - -Five example flows are provided: - -* A Pig job, using swift for both data and binary storage. -* A Map/Reduce job, using HDFS data sources registered in sahara and swift - for binary storage. -* A Java job, using raw HDFS data paths and the sahara database for binary - storage. -* A Spark job without data inputs, using swift for binary storage. -* A shell job without data inputs, using the sahara database for binary - storage. - -Many other combinations of data source storage, binary storage, and job type -are possible. These examples are intended purely as a point of departure for -modification and experimentation for any sahara user who prefers a -command-line interface to UI (or who intends to automate sahara usage.) - -Notes -===== - -Formatting ----------- - -The json files provided make many assumptions, allowing the examples to be as -literal as possible. However, where objects created by the flow must refer to -one another's generated ids, Python dictionary-style is used. - -Oozie is required for Hadoop ----------------------------- - -When the preconditions for a given example specify that you must have "an -active Hadoop cluster", that cluster must be running an Oozie process in all -cases, as sahara's EDP jobs are scheduled through Oozie in all Hadoop plugins. 
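
The JSON payloads referenced in the examples below use Python dictionary-style
placeholders (for example ``%(cluster_id)s`` or ``%(job_binary_id)s``) wherever
an object must refer to an id generated by an earlier call, as noted under
Formatting above. The following is a minimal sketch of filling such a payload
and POSTing it; it assumes a Python client using the ``requests`` library, a
hypothetical sahara endpoint URL, and a Keystone token obtained separately:

.. sourcecode:: python

    import requests

    # Hypothetical values: replace with your sahara endpoint and a token
    # issued by Keystone for your tenant and user.
    SAHARA_URL = "http://sahara.example.com:8386/v1.1/<tenant-id>"
    TOKEN = "<keystone-auth-token>"
    HEADERS = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}

    def create(path, payload_file, ids=None):
        """Fill the %()s placeholders in a JSON example and POST it."""
        with open(payload_file) as f:
            body = f.read() % (ids or {})
        resp = requests.post("%s/%s" % (SAHARA_URL, path),
                             headers=HEADERS, data=body)
        resp.raise_for_status()
        return resp.json()

For instance, ``create("data-sources", "data-sources/create.swift-pig-input.json")``
would roughly correspond to step 1 of Example 1 below, and the same helper can
serve the other POST steps once the generated ids are passed in.
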
- -Swift Objects -------------- - -Several of the examples here call for objects in Swift. To upload all of the -required files for all examples, use the following: - - $ cd etc/edp-examples - $ swift upload edp-examples ./* - -Swift credentials ------------------ - -For the sake of simplicity, these examples pass swift credentials to the API -when creating data sources, storing binaries, and executing jobs. Use of a -`swift proxy`_ can improve security by reducing the need to distribute and -store credentials. - -.. _swift proxy: http://docs.openstack.org/developer/sahara/userdoc/advanced.configuration.guide.html - -Swift containers ----------------- - -You may note that the container references in the data source creation JSON -examples for Swift have the ``.sahara`` suffix on the container name, even -though the data must be uploaded to the non-suffixed container. This suffix -informs Hadoop that ``sahara`` is the provider for this data source. See the -`hadoop swift`_ documentation for more information. - -.. _hadoop swift: http://docs.openstack.org/developer/sahara/userdoc/hadoop-swift.html - -REST API usage --------------- - -The CLI and Python sahara client provide their own authentication mechanisms -and endpoint discovery. If you wish to use the raw REST API, however, please -authenticate on all requests described below by passing an auth token provided -by Keystone for your tenant and user in header 'X-Auth-Token'. - -For new sahara REST users, reference to the `Sahara EDP API Documentation`_ -will be useful throughout these exercises. - -.. _Sahara EDP API Documentation: http://developer.openstack.org/api-ref-data-processing-v1.1.html - - -Example 1: Pig, using swift -=========================== - -Preconditions -------------- - -This example assumes the following: - -1. Usage of an OpenStack user named "demo", with password "password". -2. An active Hadoop cluster exists in the demo user's project. -3. In the demo user's project, the following files are stored in swift in the - container ``edp-examples``, as follows: - - * The file at ``edp-examples/edp-pig/trim-spaces/example.pig`` is stored - at path ``swift://edp-examples/edp-pig/trim-spaces/example.pig``. - * The file at ``edp-pig/trim-spaces/udf.jar`` is stored at - path ``swift://edp-examples/edp-pig/trim-spaces/udf.jar``. - * The file at ``edp-examples/edp-pig/trim-spaces/data/input`` is stored at - path ``swift://edp-examples/edp-pig/trim-spaces/data/input``. - -Steps ------ - -1. **Input**: POST the payload at ``data-sources/create.swift-pig-input.json`` - to your sahara endpoint's ``data-sources`` path. Note the new object's - id. -2. **Output**: POST the payload at - ``data-sources/create.swift-pig-output.json`` to your sahara endpoint's - ``data-sources`` path. Note the new object's id. -3. **Script**: POST the payload at ``job-binaries/create.pig-job.json`` to - your sahara endpoint's ``job-binaries`` path. Note the new object's id. -4. **UDF .jar**: POST the payload at ``job-binaries/create.pig-udf.json`` to - your sahara endpoint's ``job-binaries`` path. Note the new object's id. -5. **Job**: Insert the script binary id from step 3 and the UDF binary id from - step 4 into the payload at ``jobs/create.pig.json``. Then POST this file to - your sahara endpoint's ``jobs`` path. Note the new object's id. -6. **Job Execution**: Insert your Hadoop cluster id, the input id from step 1, - and the output id from step 2 into the payload at - ``job-executions/execute.pig.json``. 
Then POST this file to your sahara - endpoint at path ``jobs/{job id from step 5}/execute``. - -Note ----- - -Pig jobs can take both arguments and parameters, though neither are needed -for the example job. - - -Example 2: Map/Reduce, using HDFS and swift -=========================================== - -Preconditions -------------- - -This example assumes the following: - -1. Usage of an OpenStack user named "demo", with password "password". -2. An active Hadoop cluster exists in the demo user's project, with the - master node's HDFS available at URL - ``hdfs://hadoop-cluster-master-001:8020/``. -3. In the demo user's project, the file at - ``edp-examples/edp-mapreduce/edp-mapreduce.jar`` is stored in swift, at - path ``swift://edp-examples/edp-mapreduce/edp-mapreduce.jar``. -4. A text file exists in your Hadoop cluster's HDFS at path - ``/user/edp-examples/edp-map-reduce/input``. - -Steps ------ - -1. **Input**: POST the payload at - ``data-sources/create.hdfs-map-reduce-input.json`` to your sahara - endpoint's ``data-sources`` path. Note the new object's id. -2. **Output**: POST the payload at - ``data-sources/create.hdfs-map-reduce-output.json`` to your sahara - endpoint's ``data-sources`` path. Note the new object's id. -3. **Binary**: POST the payload at ``job-binaries/create.map-reduce.json`` to - your sahara endpoint's ``job-binaries`` path. Note the new object's id. -4. **Job**: Insert the binary id from step 3 into the payload at - ``jobs/create.map-reduce.json``. Then POST this file to your sahara - endpoint's ``jobs`` path. Note the new object's id. -5. **Job Execution**: Insert your Hadoop cluster id, the input id from step 1, - and the output id from step 2 into the payload at - ``job-executions/execute.map-reduce.json``. Then POST this file to your - sahara endpoint at path ``jobs/{job id from step 4}/execute``. - - -Example 3: Java, using raw HDFS and the sahara database -======================================================= - -Preconditions -------------- - -This example assumes the following: - -1. Usage of an OpenStack user named "demo", with password "password". -2. An active Hadoop cluster exists in the demo user's project, with the - master node's HDFS available at URL - ``hdfs://hadoop-cluster-master-001:8020/``. -3. A text file exists in your Hadoop cluster's HDFS at path - ``/user/edp-examples/edp-java/input``. - -Steps ------ - -1. **Internal Job Binary**: PUT the file at - ``edp-examples/edp-java/edp-java.jar`` into your sahara endpoint at path - ``job-binary-internals/edp-java.jar``. Note the new object's id. -2. **Job Binary**: Insert the internal job binary id from step 1 into the - payload at ``job-binaries/create.java.json``. Then POST this file to your - sahara endpoint's ``job-binaries`` path. Note the new object's id. -3. **Job**: Insert the binary id from step 2 into the payload at - ``jobs/create.java.json``. Then POST this file to your sahara endpoint's - ``jobs`` path. Note the new object's id. -4. **Job Execution**: Insert your Hadoop cluster id into the payload at - ``job-executions/execute.java.json``. Then POST this file to your sahara - endpoint at path ``jobs/{job id from step 3}/execute``. - - -Example 4: Spark, using swift -============================= - -Preconditions -------------- - -This example assumes the following: - -1. Usage of an OpenStack user named "demo", with password "password". -2. An active Spark cluster exists in the demo user's project. -3. 
In the demo user's project, the file at - ``edp-examples/edp-spark/spark-example.jar`` is stored in swift, at path - ``swift://edp-examples/edp-spark/spark-example.jar``. - -Steps ------ - -1. **Job Binary**: POST the payload at ``job-binaries/create.spark.json`` - to your sahara endpoint's ``job-binaries`` path. Note the new object's id. -2. **Job**: Insert the binary id from step 1 into the payload at - ``jobs/create.spark.json``. Then POST this file to your sahara endpoint's - ``jobs`` path. Note the new object's id. -3. **Job Execution**: Insert your Spark cluster id into the payload at - ``job-executions/execute.spark.json``. Then POST this file to your sahara - endpoint at path ``jobs/{job id from step 2}/execute``. - -Note ----- - -Spark jobs can use additional library binaries, but none are needed for the -example job. - - -Example 5: Shell script, using the sahara database -================================================== - -Preconditions -------------- - -This example assumes the following: - -1. Usage of an OpenStack user named "demo", with password "password". -2. An active Hadoop cluster exists in the demo user's project. - -Steps ------ - -1. **Script File**: PUT the file at - ``edp-examples/edp-shell/shell-example.sh`` into your sahara endpoint at - path ``job-binary-internals/shell-example.sh``. Note the new object's id. -2. **Text File**: PUT the file at - ``edp-examples/edp-shell/shell-example.txt`` into your sahara endpoint at - path ``job-binary-internals/shell-example.txt``. Note the new object's id. -3. **Script Binary**: Insert the script file's id from step 1 into the payload - at ``job-binaries/create.shell-script.json``. Then POST this file to your - sahara endpoint's ``job-binaries`` path. Note the new object's id. -4. **Text Binary**: Insert the text file's id from step 2 into the payload - at ``job-binaries/create.shell-text.json``. Then POST this file to your - sahara endpoint's ``job-binaries`` path. Note the new object's id. -5. **Job**: Insert the binary ids from steps 3 and 4 into the payload at - ``jobs/create.shell.json``. Then POST this file to your sahara endpoint's - ``jobs`` path. Note the new object's id. -6. **Job Execution**: Insert your Hadoop cluster id into the payload at - ``job-executions/execute.java.json``. Then POST this file to your sahara - endpoint at path ``jobs/{job id from step 5}/execute``. 
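
As a closing illustration, the sketch below strings the Example 5 steps
together in order. It is only a sketch under the same assumptions as the
earlier snippet: it reuses the hypothetical ``SAHARA_URL``, ``TOKEN`` and
``create()`` helper defined there, the payload paths are the ones named in the
steps above, and the response keys used to pull out ids
(``job_binary_internal``, ``job_binary``, ``job``) are assumptions that should
be checked against the Sahara EDP API documentation referenced earlier.

.. sourcecode:: python

    import requests

    def put_internal(name, local_path):
        # Steps 1-2: PUT a local file into the sahara database
        # (job-binary-internals) and return the generated id.
        with open(local_path, "rb") as f:
            resp = requests.put(
                "%s/job-binary-internals/%s" % (SAHARA_URL, name),
                headers={"X-Auth-Token": TOKEN}, data=f.read())
        resp.raise_for_status()
        # Assumed response key; verify against the API documentation.
        return resp.json()["job_binary_internal"]["id"]

    script_id = put_internal("shell-example.sh",
                             "edp-examples/edp-shell/shell-example.sh")
    text_id = put_internal("shell-example.txt",
                           "edp-examples/edp-shell/shell-example.txt")

    # Steps 3-4: register job binaries pointing at the stored files.
    script_binary = create(
        "job-binaries", "job-binaries/create.shell-script.json",
        {"script_binary_internal_id": script_id})["job_binary"]["id"]
    text_binary = create(
        "job-binaries", "job-binaries/create.shell-text.json",
        {"text_binary_internal_id": text_id})["job_binary"]["id"]

    # Step 5: create the Shell job from the two binaries.
    job_id = create("jobs", "jobs/create.shell.json",
                    {"script_binary_id": script_binary,
                     "text_binary_id": text_binary})["job"]["id"]

    # Step 6: run the job on an existing cluster.
    create("jobs/%s/execute" % job_id, "job-executions/execute.shell.json",
           {"cluster_id": "<your-hadoop-cluster-id>"})
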
diff --git a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.hdfs-map-reduce-input.json b/etc/edp-examples/json-api-examples/v1.1/data-sources/create.hdfs-map-reduce-input.json deleted file mode 100644 index 24b23ac58f..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.hdfs-map-reduce-input.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "demo-map-reduce-input", - "description": "A data source for Map/Reduce input, stored in HDFS", - "type": "hdfs", - "url": "hdfs://hadoop-cluster-master-001:8020/user/edp-examples/edp-map-reduce/input" -} diff --git a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.hdfs-map-reduce-output.json b/etc/edp-examples/json-api-examples/v1.1/data-sources/create.hdfs-map-reduce-output.json deleted file mode 100644 index 034fa0e303..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.hdfs-map-reduce-output.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "demo-map-reduce-output", - "description": "A data source for Map/Reduce output, stored in HDFS", - "type": "hdfs", - "url": "hdfs://hadoop-cluster-master-001:8020/user/edp-examples/edp-map-reduce/output" -} diff --git a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.swift-pig-input.json b/etc/edp-examples/json-api-examples/v1.1/data-sources/create.swift-pig-input.json deleted file mode 100644 index cbfe48916a..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.swift-pig-input.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "demo-pig-input", - "description": "A data source for Pig input, stored in Swift", - "type": "swift", - "url": "swift://edp-examples.sahara/edp-pig/trim-spaces/data/input", - "credentials": { - "user": "demo", - "password": "password" - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.swift-pig-output.json b/etc/edp-examples/json-api-examples/v1.1/data-sources/create.swift-pig-output.json deleted file mode 100644 index 4430a4b67b..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/data-sources/create.swift-pig-output.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "demo-pig-output", - "description": "A data source for Pig output, stored in Swift", - "type": "swift", - "url": "swift://edp-examples.sahara/edp-pig/trim-spaces/data/output", - "credentials": { - "user": "demo", - "password": "password" - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.java.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.java.json deleted file mode 100644 index ff08d54c22..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.java.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "edp-java.jar", - "description": "An example Java binary", - "url": "internal-db://%(job_binary_internal_id)s", - "extra": {} -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.map-reduce.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.map-reduce.json deleted file mode 100644 index bd47fb3f52..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.map-reduce.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name": "edp-mapreduce.jar", - "description": "An example map/reduce job binary", - "url": "swift://edp-examples/edp-mapreduce/edp-mapreduce.jar", - "extra": { - "user": "demo", - "password": "password" - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.pig-job.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.pig-job.json 
deleted file mode 100644 index 30cfafe5e4..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.pig-job.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name": "example.pig", - "description": "An example pig script", - "url": "swift://edp-examples/edp-pig/trim-spaces/example.pig", - "extra": { - "user": "demo", - "password": "password" - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.pig-udf.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.pig-udf.json deleted file mode 100644 index 93ad30855c..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.pig-udf.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name": "udf.jar", - "description": "An example pig UDF library", - "url": "swift://edp-examples/edp-pig/trim-spaces/udf.jar", - "extra": { - "user": "demo", - "password": "password" - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.shell-script.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.shell-script.json deleted file mode 100644 index 487dec0a2d..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.shell-script.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "shell-example.sh", - "description": "An example shell script", - "url": "internal-db://%(script_binary_internal_id)s", - "extra": {} -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.shell-text.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.shell-text.json deleted file mode 100644 index d53c6fba23..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.shell-text.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "shell-example.txt", - "description": "An example text file", - "url": "internal-db://%(text_binary_internal_id)s", - "extra": {} -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.spark.json b/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.spark.json deleted file mode 100644 index 8c33880f28..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-binaries/create.spark.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name": "spark-example.jar", - "description": "An example Spark binary", - "url": "swift://edp-examples/edp-spark/spark-example.jar", - "extra": { - "user": "demo", - "password": "password" - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.java.json b/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.java.json deleted file mode 100644 index b327fa1961..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.java.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "cluster_id": "%(cluster_id)s", - "job_configs": { - "configs": { - "edp.java.main_class": "org.openstack.sahara.examples.WordCount", - "mapred.map.tasks": "1", - "mapred.reduce.tasks": "1" - }, - "args": [ - "hdfs://hadoop-cluster-master-001:8020/user/edp-examples/edp-java/input", - "hdfs://hadoop-cluster-master-001:8020/user/edp-examples/edp-java/output" - ] - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.map-reduce.json b/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.map-reduce.json deleted file mode 100644 index 4cd7d0dfdb..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.map-reduce.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "cluster_id": "%(cluster_id)s", - "input_id": "%(input_source_id)s", - "output_id": "%(output_source_id)s", - "job_configs": { - 
"configs": { - "mapred.mapper.class": "org.apache.oozie.example.SampleMapper", - "mapred.reducer.class": "org.apache.oozie.example.SampleReducer", - "mapred.map.tasks": "1", - "mapred.reduce.tasks": "1", - "fs.swift.service.sahara.username": "demo", - "fs.swift.service.sahara.password": "password" - } - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.pig.json b/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.pig.json deleted file mode 100644 index 04ed9f0723..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.pig.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "cluster_id": "%(cluster_id)s", - "input_id": "%(input_source_id)s", - "output_id": "%(output_source_id)s", - "job_configs": { - "configs": { - "mapred.map.tasks": "1", - "mapred.reduce.tasks": "1", - "fs.swift.service.sahara.username": "demo", - "fs.swift.service.sahara.password": "password" - }, - "args": [], - "params": {} - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.shell.json b/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.shell.json deleted file mode 100644 index 7f939a3d0e..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.shell.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "cluster_id": "%(cluster_id)s", - "job_configs": { - "configs": {}, - "args": ["/tmp/edp-shell-example-output.txt"], - "params": {"EXTRA_FILE": "shell-example.txt"} - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.spark.json b/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.spark.json deleted file mode 100644 index 33013cfa97..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/job-executions/execute.spark.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "cluster_id": "%(cluster_id)s", - "job_configs": { - "configs": { - "edp.java.main_class": "org.apache.spark.examples.SparkPi" - }, - "args": ["2"] - } -} diff --git a/etc/edp-examples/json-api-examples/v1.1/jobs/create.java.json b/etc/edp-examples/json-api-examples/v1.1/jobs/create.java.json deleted file mode 100644 index b8f1bc133a..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/jobs/create.java.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "demo-java-job", - "type": "Java", - "description": "A runnable Java job", - "libs": ["%(job_binary_id)s"] -} diff --git a/etc/edp-examples/json-api-examples/v1.1/jobs/create.map-reduce.json b/etc/edp-examples/json-api-examples/v1.1/jobs/create.map-reduce.json deleted file mode 100644 index dc4bfd6eb1..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/jobs/create.map-reduce.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "demo-map-reduce-job", - "type": "MapReduce", - "description": "A runnable MapReduce job", - "libs": ["%(job_binary_id)s"] -} diff --git a/etc/edp-examples/json-api-examples/v1.1/jobs/create.pig.json b/etc/edp-examples/json-api-examples/v1.1/jobs/create.pig.json deleted file mode 100644 index 5a14909fb2..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/jobs/create.pig.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "demo-pig-job", - "type": "Pig", - "description": "A runnable Pig job", - "mains": ["%(job_binary_id)s"], - "libs": ["%(udf_binary_id)s"] -} diff --git a/etc/edp-examples/json-api-examples/v1.1/jobs/create.shell.json b/etc/edp-examples/json-api-examples/v1.1/jobs/create.shell.json deleted file mode 100644 index 747cee3e0c..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/jobs/create.shell.json +++ /dev/null @@ -1,7 
+0,0 @@ -{ - "name": "demo-shell-job", - "type": "Shell", - "description": "A runnable Shell job", - "mains": ["%(script_binary_id)s"], - "libs": ["%(text_binary_id)s"] -} diff --git a/etc/edp-examples/json-api-examples/v1.1/jobs/create.spark.json b/etc/edp-examples/json-api-examples/v1.1/jobs/create.spark.json deleted file mode 100644 index 7019a90b00..0000000000 --- a/etc/edp-examples/json-api-examples/v1.1/jobs/create.spark.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "demo-spark-job", - "type": "Spark", - "description": "A runnable Spark job", - "mains": ["%(job_binary_id)s"], - "libs": [] -} diff --git a/etc/scenario/gate/README.rst b/etc/scenario/gate/README.rst deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/etc/scenario/gate/credentials.yaml.mako b/etc/scenario/gate/credentials.yaml.mako deleted file mode 100644 index 2b7906183f..0000000000 --- a/etc/scenario/gate/credentials.yaml.mako +++ /dev/null @@ -1,10 +0,0 @@ -credentials: - os_username: ${OS_USERNAME} - os_password: ${OS_PASSWORD} - os_tenant: ${OS_TENANT_NAME} - os_auth_url: ${OS_AUTH_URL} - -network: - type: ${network_type} - private_network: ${network_private_name} - public_network: ${network_public_name} diff --git a/etc/scenario/gate/edp.yaml.mako b/etc/scenario/gate/edp.yaml.mako deleted file mode 100644 index 6a98b5681c..0000000000 --- a/etc/scenario/gate/edp.yaml.mako +++ /dev/null @@ -1,15 +0,0 @@ -edp_jobs_flow: - fake: - - type: Pig - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - main_lib: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/example.pig - additional_libs: - - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/udf.jar diff --git a/etc/scenario/gate/fake.yaml.mako b/etc/scenario/gate/fake.yaml.mako deleted file mode 100644 index 655b31b9a1..0000000000 --- a/etc/scenario/gate/fake.yaml.mako +++ /dev/null @@ -1,32 +0,0 @@ -clusters: - - plugin_name: fake - plugin_version: "0.1" - image: ${fake_plugin_image} - node_group_templates: - - name: aio - flavor: ${ci_flavor_id} - node_processes: - - namenode - - jobtracker - - datanode - - tasktracker - volumes_per_node: 2 - volumes_size: 1 - auto_security_group: true - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - datanode - - jobtracker - auto_security_group: true - cluster_template: - name: fake01 - node_group_templates: - aio: 1 - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - edp_jobs_flow: fake diff --git a/etc/scenario/sahara-ci/README.rst b/etc/scenario/sahara-ci/README.rst deleted file mode 100644 index 2dc24bf0a5..0000000000 --- a/etc/scenario/sahara-ci/README.rst +++ /dev/null @@ -1,30 +0,0 @@ -========= -Sahara CI -========= - -The files in this directory are needed for the sahara continuous -integration tests. Modifying these files will change the behavior of the -tests. - -Details -------- - -Key values (mako variables): - -* ${OS_USERNAME}, ${OS_PASSWORD}, ${OS_TENANT_NAME}, ${OS_AUTH_URL} - OpenStack credentials and access details -* ${network_type} - network type (neutron or nova-network); -* ${network_private_name}, ${network_public_name} - names of private (tenant) and public networks; -* ${cluster_name} - name of cluster, which generating from $HOST-$ZUUL_CHANGE-$CLUSTER_HASH. 
Where: - * $HOST - host id (c1 - with neutron, c2 - with nova-network); - * $ZUUL_CHANGE - change number; - * $CLUSTER_HASH - hash, which generating for each cluster by using "uuid" python module; -* ${_image} - name of image for each plugin; -* flavor ids: - * ${ci_flavor} - 2GB RAM, 1 VCPU, 40GB Root disk; - * ${medium_flavor} - 4GB RAM, 2 VCPUs, 40GB Root disk; - -Main URLs ---------- - -https://sahara.mirantis.com/jenkins - Sahara CI Jenkins -https://github.com/openstack/sahara-ci-config/ - Sahara CI Config repo diff --git a/etc/scenario/sahara-ci/ambari-2.3.yaml.mako b/etc/scenario/sahara-ci/ambari-2.3.yaml.mako deleted file mode 100644 index c8dfbf0f71..0000000000 --- a/etc/scenario/sahara-ci/ambari-2.3.yaml.mako +++ /dev/null @@ -1,65 +0,0 @@ -clusters: - - plugin_name: ambari - plugin_version: '2.3' - image: ${ambari_2_1_image} - node_group_templates: - - name: master - flavor: ${medium_flavor_id} - node_processes: - - Ambari - - MapReduce History Server - - Spark History Server - - NameNode - - ResourceManager - - SecondaryNameNode - - YARN Timeline Server - - ZooKeeper - - Kafka Broker - auto_security_group: true - - name: master-edp - flavor: ${ci_flavor_id} - node_processes: - - Hive Metastore - - HiveServer - - Oozie - auto_security_group: true - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - DataNode - - NodeManager - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - cluster_template: - name: ambari21 - node_group_templates: - master: 1 - master-edp: 1 - worker: 3 - cluster_configs: - HDFS: - dfs.datanode.du.reserved: 0 - custom_checks: - check_kafka: - zookeeper_process: ZooKeeper - kafka_process: Kafka Broker - spark_flow: - - type: Spark - main_lib: - type: database - source: etc/edp-examples/edp-spark/spark-kafka-example.jar - args: - - '{zookeeper_list}' - - '{topic}' - - '{timeout}' - timeout: 30 - cluster: - name: ${cluster_name} - scenario: - - run_jobs - - kafka - - edp_jobs_flow: - - java_job - - spark_pi diff --git a/etc/scenario/sahara-ci/cdh-5.3.0.yaml.mako b/etc/scenario/sahara-ci/cdh-5.3.0.yaml.mako deleted file mode 100644 index 9a01126224..0000000000 --- a/etc/scenario/sahara-ci/cdh-5.3.0.yaml.mako +++ /dev/null @@ -1,75 +0,0 @@ -clusters: - - plugin_name: cdh - plugin_version: 5.3.0 - image: ${cdh_image} - node_group_templates: - - name: worker-dn - flavor: ${ci_flavor_id} - node_processes: - - HDFS_DATANODE - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - &ng_configs - DATANODE: - dfs_datanode_du_reserved: 0 - - name: worker-nm - flavor: ${ci_flavor_id} - node_processes: - - YARN_NODEMANAGER - auto_security_group: true - - name: worker-nm-dn - flavor: ${ci_flavor_id} - node_processes: - - YARN_NODEMANAGER - - HDFS_DATANODE - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - *ng_configs - - name: manager - flavor: ${medium_flavor_id} - node_processes: - - CLOUDERA_MANAGER - auto_security_group: true - - name: master-core - flavor: ${medium_flavor_id} - node_processes: - - HDFS_NAMENODE - - YARN_RESOURCEMANAGER - - SENTRY_SERVER - - ZOOKEEPER_SERVER - auto_security_group: true - - name: master-additional - flavor: ${medium_flavor_id} - node_processes: - - OOZIE_SERVER - - YARN_JOBHISTORY - - HDFS_SECONDARYNAMENODE - - HIVE_METASTORE - - HIVE_SERVER2 - auto_security_group: true - cluster_template: - name: cdh530 - node_group_templates: - manager: 1 - master-core: 1 - master-additional: 1 - worker-nm-dn: 1 - worker-nm: 1 - worker-dn: 1 - cluster_configs: - 
HDFS: - dfs_replication: 1 - cluster: - name: ${cluster_name} - scenario: - - run_jobs - - sentry - edp_jobs_flow: - - pig_job - - mapreduce_job - - mapreduce_streaming_job - - java_job \ No newline at end of file diff --git a/etc/scenario/sahara-ci/cdh-5.4.0.yaml.mako b/etc/scenario/sahara-ci/cdh-5.4.0.yaml.mako deleted file mode 100644 index d82d7ae69e..0000000000 --- a/etc/scenario/sahara-ci/cdh-5.4.0.yaml.mako +++ /dev/null @@ -1,83 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - plugin_name: cdh - plugin_version: 5.4.0 - image: ${cdh_5_4_0_image} - node_group_templates: - - name: worker-dn - flavor: ${ci_flavor_id} - node_processes: - - HDFS_DATANODE - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - &ng_configs - DATANODE: - dfs_datanode_du_reserved: 0 - - name: worker-nm - flavor: ${ci_flavor_id} - node_processes: - - YARN_NODEMANAGER - auto_security_group: true - - name: worker-nm-dn - flavor: ${ci_flavor_id} - node_processes: - - YARN_NODEMANAGER - - HDFS_DATANODE - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - *ng_configs - - name: manager - flavor: ${large_flavor_id} - node_processes: - - CLOUDERA_MANAGER - - KMS - auto_security_group: true - - name: master-core - flavor: ${medium_flavor_id} - node_processes: - - HDFS_NAMENODE - - YARN_RESOURCEMANAGER - - SENTRY_SERVER - - YARN_NODEMANAGER - - ZOOKEEPER_SERVER - auto_security_group: true - is_proxy_gateway: ${is_proxy_gateway} - - name: master-additional - flavor: ${medium_flavor_id} - node_processes: - - OOZIE_SERVER - - YARN_JOBHISTORY - - YARN_NODEMANAGER - - HDFS_SECONDARYNAMENODE - - HIVE_METASTORE - - HIVE_SERVER2 - - SPARK_YARN_HISTORY_SERVER - auto_security_group: true - cluster_template: - name: cdh540 - node_group_templates: - manager: 1 - master-core: 1 - master-additional: 1 - worker-nm-dn: 1 - worker-nm: 1 - worker-dn: 1 - cluster_configs: - HDFS: - dfs_replication: 1 - cluster: - name: ${cluster_name} - scenario: - - run_jobs - - sentry - edp_jobs_flow: - - pig_job - - mapreduce_job - - mapreduce_streaming_job - - java_job - - spark_wordcount diff --git a/etc/scenario/sahara-ci/cdh-ha.yaml.mako b/etc/scenario/sahara-ci/cdh-ha.yaml.mako deleted file mode 100644 index 7a82b149c4..0000000000 --- a/etc/scenario/sahara-ci/cdh-ha.yaml.mako +++ /dev/null @@ -1,75 +0,0 @@ -clusters: - - plugin_name: cdh - plugin_version: 5.4.0 - image: ${cdh_5_4_0_image} - node_group_templates: - - name: worker-dn - flavor: ${ci_flavor_id} - node_processes: - - HDFS_DATANODE - - HDFS_JOURNALNODE - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - &ng_configs - DATANODE: - dfs_datanode_du_reserved: 0 - - name: worker-nm - flavor: ${ci_flavor_id} - node_processes: - - YARN_NODEMANAGER - auto_security_group: true - - name: worker-nm-dn - flavor: ${ci_flavor_id} - node_processes: - - YARN_NODEMANAGER - - HDFS_DATANODE - - HDFS_JOURNALNODE - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - *ng_configs - - name: manager - flavor: ${large_flavor_id} - node_processes: - - CLOUDERA_MANAGER - auto_security_group: true - - name: master-core - flavor: ${medium_flavor_id} - node_processes: - - HDFS_NAMENODE - - YARN_RESOURCEMANAGER - - ZOOKEEPER_SERVER - auto_security_group: true - - name: master-additional - flavor: ${large_flavor_id} - node_processes: - - OOZIE_SERVER - - ZOOKEEPER_SERVER - - YARN_JOBHISTORY - - HDFS_SECONDARYNAMENODE - - HDFS_JOURNALNODE - auto_security_group: true - 
cluster_template: - name: cdh540 - node_group_templates: - manager: 1 - master-core: 1 - master-additional: 1 - worker-nm-dn: 1 - worker-nm: 2 - worker-dn: 1 - cluster_configs: - HDFS: - dfs_replication: 1 - general: - 'Require Anti Affinity': False - cluster: - name: ${cluster_name} - scenario: - - run_jobs - edp_jobs_flow: - - mapreduce_job - - java_job diff --git a/etc/scenario/sahara-ci/credentials.yaml.mako b/etc/scenario/sahara-ci/credentials.yaml.mako deleted file mode 100644 index 2b7906183f..0000000000 --- a/etc/scenario/sahara-ci/credentials.yaml.mako +++ /dev/null @@ -1,10 +0,0 @@ -credentials: - os_username: ${OS_USERNAME} - os_password: ${OS_PASSWORD} - os_tenant: ${OS_TENANT_NAME} - os_auth_url: ${OS_AUTH_URL} - -network: - type: ${network_type} - private_network: ${network_private_name} - public_network: ${network_public_name} diff --git a/etc/scenario/sahara-ci/edp.yaml.mako b/etc/scenario/sahara-ci/edp.yaml.mako deleted file mode 100644 index 9097bf3501..0000000000 --- a/etc/scenario/sahara-ci/edp.yaml.mako +++ /dev/null @@ -1,132 +0,0 @@ -edp_jobs_flow: - pig_job: - - type: Pig - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - main_lib: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/example.pig - additional_libs: - - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/udf.jar - mapreduce_job: - - type: MapReduce - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - additional_libs: - - type: database - source: etc/edp-examples/edp-mapreduce/edp-mapreduce.jar - configs: - mapred.mapper.class: org.apache.oozie.example.SampleMapper - mapred.reducer.class: org.apache.oozie.example.SampleReducer - mapreduce_streaming_job: - - type: MapReduce.Streaming - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - configs: - edp.streaming.mapper: /bin/cat - edp.streaming.reducer: /usr/bin/wc - java_job: - - type: Java - additional_libs: - - type: database - source: etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.6.0.jar - configs: - edp.java.main_class: org.apache.hadoop.examples.QuasiMonteCarlo - args: - - 10 - - 10 - hive_job: - - type: Hive - main_lib: - type: swift - source: etc/edp-examples/edp-hive/script.q - input_datasource: - type: hdfs - hdfs_username: hadoop - source: etc/edp-examples/edp-hive/input.csv - output_datasource: - type: hdfs - destination: /user/edp-output - spark_pi: - - type: Spark - main_lib: - type: database - source: etc/edp-examples/edp-spark/spark-example.jar - configs: - edp.java.main_class: org.apache.spark.examples.SparkPi - args: - - 4 - spark_wordcount: - - type: Spark - input_datasource: - type: swift - source: etc/edp-examples/edp-spark/sample_input.txt - main_lib: - type: database - source: etc/edp-examples/edp-spark/spark-wordcount.jar - configs: - edp.java.main_class: sahara.edp.spark.SparkWordCount - edp.spark.adapt_for_swift: true - fs.swift.service.sahara.username: ${OS_USERNAME} - fs.swift.service.sahara.password: ${OS_PASSWORD} - args: - - '{input_datasource}' - mapr: - - type: Pig - input_datasource: - type: maprfs - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: maprfs - destination: /user/hadoop/edp-output - main_lib: - type: swift 
- source: etc/edp-examples/edp-pig/trim-spaces/example.pig - additional_libs: - - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/udf.jar - - type: MapReduce - input_datasource: - type: maprfs - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: maprfs - destination: /user/hadoop/edp-output - additional_libs: - - type: database - source: etc/edp-examples/edp-mapreduce/edp-mapreduce.jar - configs: - mapred.mapper.class: org.apache.oozie.example.SampleMapper - mapred.reducer.class: org.apache.oozie.example.SampleReducer - - type: MapReduce.Streaming - input_datasource: - type: maprfs - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: maprfs - destination: /user/hadoop/edp-output - configs: - edp.streaming.mapper: /bin/cat - edp.streaming.reducer: /usr/bin/wc - - type: Java - additional_libs: - - type: database - source: etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.6.0.jar - configs: - edp.java.main_class: org.apache.hadoop.examples.QuasiMonteCarlo - args: - - 10 - - 10 diff --git a/etc/scenario/sahara-ci/fake.yaml.mako b/etc/scenario/sahara-ci/fake.yaml.mako deleted file mode 100644 index 402969831d..0000000000 --- a/etc/scenario/sahara-ci/fake.yaml.mako +++ /dev/null @@ -1,34 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - plugin_name: fake - plugin_version: "0.1" - image: ${fake_plugin_image} - node_group_templates: - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - datanode - - tasktracker - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - - name: master - flavor: ${ci_flavor_id} - node_processes: - - jobtracker - - namenode - auto_security_group: true - is_proxy_gateway: ${is_proxy_gateway} - cluster_template: - name: fake01 - node_group_templates: - master: 1 - worker: 1 - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - edp_jobs_flow: pig_job diff --git a/etc/scenario/sahara-ci/hdp-2.0.6.yaml.mako b/etc/scenario/sahara-ci/hdp-2.0.6.yaml.mako deleted file mode 100644 index eba9391e0e..0000000000 --- a/etc/scenario/sahara-ci/hdp-2.0.6.yaml.mako +++ /dev/null @@ -1,54 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - plugin_name: hdp - plugin_version: 2.0.6 - image: ${hdp_two_image} - node_group_templates: - - name: master - flavor: ${ci_flavor_id} - node_processes: - - AMBARI_SERVER - - GANGLIA_SERVER - - HISTORYSERVER - - NAGIOS_SERVER - - NAMENODE - - OOZIE_SERVER - - RESOURCEMANAGER - - SECONDARY_NAMENODE - - ZOOKEEPER_SERVER - auto_security_group: true - is_proxy_gateway: ${is_proxy_gateway} - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - DATANODE - - HDFS_CLIENT - - MAPREDUCE2_CLIENT - - NODEMANAGER - - OOZIE_CLIENT - - PIG - - YARN_CLIENT - - ZOOKEEPER_CLIENT - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - cluster_template: - name: hdp206 - node_group_templates: - master: 1 - worker: 3 - cluster_configs: - YARN: - yarn.log-aggregation-enable: false - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - edp_jobs_flow: - - pig_job - - mapreduce_job - - mapreduce_streaming_job - - java_job diff --git a/etc/scenario/sahara-ci/mapr-4.0.2.mrv2.yaml.mako b/etc/scenario/sahara-ci/mapr-4.0.2.mrv2.yaml.mako deleted file mode 100644 index 2a23b7cc0a..0000000000 --- a/etc/scenario/sahara-ci/mapr-4.0.2.mrv2.yaml.mako +++ /dev/null @@ -1,50 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - 
plugin_name: mapr - plugin_version: 4.0.2.mrv2 - image: ${mapr_402mrv2_image} - node_group_templates: - - name: master - flavor: ${ci_flavor_id} - node_processes: - - Metrics - - Webserver - - ZooKeeper - - HTTPFS - - Oozie - - FileServer - - CLDB - - Flume - - Hue - - NodeManager - - HistoryServer - - ResourceManager - - HiveServer2 - - HiveMetastore - - Sqoop2-Client - - Sqoop2-Server - auto_security_group: true - volumes_per_node: 2 - volumes_size: 20 - is_proxy_gateway: ${is_proxy_gateway} - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - NodeManager - - FileServer - auto_security_group: true - volumes_per_node: 2 - volumes_size: 20 - cluster_template: - name: mapr402mrv2 - node_group_templates: - master: 1 - worker: 3 - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - edp_jobs_flow: mapr diff --git a/etc/scenario/sahara-ci/mapr-5.0.0.mrv2.yaml.mako b/etc/scenario/sahara-ci/mapr-5.0.0.mrv2.yaml.mako deleted file mode 100644 index db41ca2e5b..0000000000 --- a/etc/scenario/sahara-ci/mapr-5.0.0.mrv2.yaml.mako +++ /dev/null @@ -1,52 +0,0 @@ -clusters: - - plugin_name: mapr - plugin_version: 5.0.0.mrv2 - image: ${mapr_500mrv2_image} - node_group_templates: - - name: master - flavor: - vcpus: 4 - ram: 8192 - root_disk: 80 - ephemeral_disk: 40 - node_processes: - - Metrics - - Webserver - - ZooKeeper - - HTTPFS - - Oozie - - FileServer - - CLDB - - Flume - - Hue - - NodeManager - - HistoryServer - - ResourceManager - - HiveServer2 - - HiveMetastore - - Sqoop2-Client - - Sqoop2-Server - auto_security_group: true - - name: worker - flavor: - vcpus: 2 - ram: 4096 - root_disk: 40 - ephemeral_disk: 40 - node_processes: - - NodeManager - - FileServer - auto_security_group: true - cluster_template: - name: mapr500mrv2 - node_group_templates: - master: 1 - worker: 3 - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - scenario: - - scale diff --git a/etc/scenario/sahara-ci/spark-1.0.0.yaml.mako b/etc/scenario/sahara-ci/spark-1.0.0.yaml.mako deleted file mode 100644 index cac2d028db..0000000000 --- a/etc/scenario/sahara-ci/spark-1.0.0.yaml.mako +++ /dev/null @@ -1,34 +0,0 @@ -clusters: - - plugin_name: spark - plugin_version: 1.0.0 - image: ${spark_image} - node_group_templates: - - name: master - flavor: ${ci_flavor_id} - node_processes: - - master - - namenode - auto_security_group: true - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - datanode - - slave - auto_security_group: true - cluster_template: - name: spark100 - node_group_templates: - master: 1 - worker: 1 - cluster_configs: - HDFS: - dfs.replication: 1 - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - edp_jobs_flow: - - spark_pi - - spark_wordcount diff --git a/etc/scenario/sahara-ci/spark-1.3.1.yaml.mako b/etc/scenario/sahara-ci/spark-1.3.1.yaml.mako deleted file mode 100644 index ccf8d3bb2e..0000000000 --- a/etc/scenario/sahara-ci/spark-1.3.1.yaml.mako +++ /dev/null @@ -1,37 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - plugin_name: spark - plugin_version: 1.3.1 - image: ${spark_1_3_image} - node_group_templates: - - name: master - flavor: ${ci_flavor_id} - node_processes: - - master - - namenode - auto_security_group: true - is_proxy_gateway: ${is_proxy_gateway} - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - datanode - - slave - auto_security_group: true - cluster_template: - name: spark131 - node_group_templates: - master: 1 - 
worker: 1 - cluster_configs: - HDFS: - dfs.replication: 1 - cluster: - name: ${cluster_name} - scaling: - - operation: add - node_group: worker - size: 1 - edp_jobs_flow: - - spark_pi - - spark_wordcount diff --git a/etc/scenario/sahara-ci/transient.yaml.mako b/etc/scenario/sahara-ci/transient.yaml.mako deleted file mode 100644 index 571726d07e..0000000000 --- a/etc/scenario/sahara-ci/transient.yaml.mako +++ /dev/null @@ -1,54 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - plugin_name: vanilla - plugin_version: 2.7.1 - image: ${vanilla_two_seven_one_image} - node_group_templates: - - name: worker - flavor: ${ci_flavor_id} - node_processes: - - datanode - - nodemanager - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - node_configs: - &ng_configs - MapReduce: - yarn.app.mapreduce.am.resource.mb: 256 - yarn.app.mapreduce.am.command-opts: -Xmx256m - YARN: - yarn.scheduler.minimum-allocation-mb: 256 - yarn.scheduler.maximum-allocation-mb: 1024 - yarn.nodemanager.vmem-check-enabled: false - - name: master - flavor: ${ci_flavor_id} - node_processes: - - oozie - - historyserver - - resourcemanager - - namenode - auto_security_group: true - is_proxy_gateway: ${is_proxy_gateway} - cluster_template: - name: transient - node_group_templates: - master: 1 - worker: 3 - cluster_configs: - HDFS: - dfs.replication: 1 - MapReduce: - mapreduce.tasktracker.map.tasks.maximum: 16 - mapreduce.tasktracker.reduce.tasks.maximum: 16 - YARN: - yarn.resourcemanager.scheduler.class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler - cluster: - name: ${cluster_name} - is_transient: true - scenario: - - run_jobs - - transient - edp_jobs_flow: pig_job - diff --git a/etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako b/etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako deleted file mode 100644 index b1302bdd91..0000000000 --- a/etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako +++ /dev/null @@ -1,79 +0,0 @@ -<%page args="is_proxy_gateway='true'"/> - -clusters: - - plugin_name: vanilla - plugin_version: 2.7.1 - image: ${vanilla_two_seven_one_image} - node_group_templates: - - name: worker-dn-nm - flavor: ${ci_flavor_id} - node_processes: - - datanode - - nodemanager - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - - name: worker-nm - flavor: ${ci_flavor_id} - node_processes: - - nodemanager - auto_security_group: true - - name: worker-dn - flavor: ${ci_flavor_id} - node_processes: - - datanode - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - - name: master-rm-nn-hvs - flavor: ${ci_flavor_id} - node_processes: - - namenode - - resourcemanager - - hiveserver - - nodemanager - auto_security_group: true - - name: master-oo-hs-sn - flavor: ${ci_flavor_id} - node_processes: - - oozie - - historyserver - - secondarynamenode - - nodemanager - auto_security_group: true - is_proxy_gateway: ${is_proxy_gateway} - cluster_template: - name: vanilla271 - node_group_templates: - master-rm-nn-hvs: 1 - master-oo-hs-sn: 1 - worker-dn-nm: 2 - worker-dn: 1 - worker-nm: 1 - cluster_configs: - HDFS: - dfs.replication: 1 - cluster: - name: ${cluster_name} - scaling: - - operation: resize - node_group: worker-dn-nm - size: 1 - - operation: resize - node_group: worker-dn - size: 0 - - operation: resize - node_group: worker-nm - size: 0 - - operation: add - node_group: worker-dn - size: 1 - - operation: add - node_group: worker-nm - size: 2 - edp_jobs_flow: - - pig_job - - mapreduce_job - - mapreduce_streaming_job - - java_job - - hive_job \ No newline at 
end of file diff --git a/etc/scenario/simple-testcase.yaml b/etc/scenario/simple-testcase.yaml deleted file mode 100644 index 4a2b52d313..0000000000 --- a/etc/scenario/simple-testcase.yaml +++ /dev/null @@ -1,79 +0,0 @@ -concurrency: 1 - -credentials: - os_username: admin - os_password: nova - os_tenant: admin - os_auth_url: http://localhost:5000/v2.0 - -network: - private_network: private - public_network: public - -clusters: - - plugin_name: vanilla - plugin_version: 2.7.1 - image: sahara-liberty-vanilla-2.7.1-ubuntu-14.04 - edp_jobs_flow: test_flow - - plugin_name: hdp - plugin_version: 2.0.6 - image: f3c4a228-9ba4-41f1-b100-a0587689d4dd - scaling: - - operation: resize - node_group: hdp-worker - size: 5 - - plugin_name: cdh - plugin_version: 5.3.0 - image: ubuntu-cdh-5.3.0 - scaling: - - operation: add - node_group: cdh-worker - size: 1 - edp_jobs_flow: test_flow - -edp_jobs_flow: - test_flow: - - type: Pig - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - main_lib: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/example.pig - configs: - dfs.replication: 1 - - type: Java - additional_libs: - - type: database - source: etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.7.1.jar - configs: - edp.java.main_class: org.apache.hadoop.examples.QuasiMonteCarlo - args: - - 10 - - 10 - - type: MapReduce - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - additional_libs: - - type: database - source: etc/edp-examples/edp-mapreduce/edp-mapreduce.jar - configs: - mapred.mapper.class: org.apache.oozie.example.SampleMapper - mapred.reducer.class: org.apache.oozie.example.SampleReducer - - type: MapReduce.Streaming - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/trim-spaces/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - configs: - edp.streaming.mapper: /bin/cat - edp.streaming.reducer: /usr/bin/wc diff --git a/sahara/tests/scenario/README.rst b/sahara/tests/scenario/README.rst deleted file mode 100644 index 4c8a3d5374..0000000000 --- a/sahara/tests/scenario/README.rst +++ /dev/null @@ -1,417 +0,0 @@ -System(scenario) tests for Sahara project -========================================= - -How to run ----------- - -Create the YAML and/or the YAML mako template files for scenario tests -``etc/scenario/sahara-ci/simple-testcase.yaml``. -You can take a look at sample YAML files `How to write scenario files`_. - -If you want to run scenario tests for one plugin, you should use the -YAML files with a scenario for the specific plugin: - -.. sourcecode:: console - - $ tox -e scenario etc/scenario/sahara-ci/simple-testcase.yaml -.. - -or, if the file is a YAML Mako template: - -.. sourcecode:: console - - $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako -.. - -where templatevars.ini contains the values of the variables referenced -by ``vanilla-2.7.1.yaml.mako``. - -For example, you want to run tests for the Vanilla plugin with the Hadoop -version 2.7.1 In this case you should create ``templatevars.ini`` with -the appropriate values (see the section `Variables and sahara-ci templates`_) -and use the following tox env: - -.. sourcecode:: console - - $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako -.. 
- -If you want to run scenario tests for a few plugins or plugin versions, you -should pass several YAML and/or YAML Mako template files: - -.. sourcecode:: console - - $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/cdh-5.4.0.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako ... -.. - -Here are a few more examples. - -.. sourcecode:: console - - $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/credentials.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako - -.. - -will run tests for the Vanilla plugin with Hadoop version 2.7.1 and the credentials -located in ``etc/scenario/sahara-ci/credentials.yaml.mako``, replacing the variables -referenced in ``vanilla-2.7.1.yaml.mako`` with the values defined in -``templatevars.ini``. -For more information about writing scenario YAML files, see the -section `How to write scenario files`_. - -``tox -e scenario etc/scenario/sahara-ci`` will run the tests defined by all scenario files in that directory. - -You can also validate your YAML files by passing the ``--validate`` flag: - -.. sourcecode:: console - - $ tox -e scenario -- --validate -V templatevars.ini etc/scenario/sahara-ci/credentials.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako - -.. - -Template variables ------------------- -The variables used in the Mako template files are replaced with the values from a -config file, whose name is passed to the test runner through the ``-V`` parameter. - -The format of the config file is an INI-style file, as accepted by the Python -ConfigParser module. The key/values must be specified in the DEFAULT section. - -Example of a template variables file: - -.. sourcecode:: ini - - [DEFAULT] - OS_USERNAME: demo - OS_TENANT_NAME: demo - OS_PASSWORD: foobar - ... - network_type: neutron - ... - -..
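The test runner performs this substitution internally; as a rough illustration of the mechanism described above, the sketch below renders a scenario Mako template against the DEFAULT section of such an INI file. It is not the runner's actual implementation: it assumes the ``mako`` package is installed, uses Python 3's ``configparser``, and the file names are simply the ones used in the examples above.

.. sourcecode:: python

    import configparser

    from mako.template import Template


    def render_scenario_template(template_path, variables_path):
        """Render a .yaml.mako scenario file with values from an INI file."""
        parser = configparser.ConfigParser()
        # Keep the original case of keys such as OS_USERNAME; by default
        # ConfigParser lowercases option names, which would not match the
        # ${OS_USERNAME}-style references in the templates.
        parser.optionxform = str
        parser.read(variables_path)
        variables = dict(parser.defaults())  # key/values from [DEFAULT]
        with open(template_path) as f:
            return Template(f.read()).render(**variables)


    # Hypothetical usage with the files mentioned above (all referenced
    # variables must be present in templatevars.ini):
    # print(render_scenario_template(
    #     'etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako', 'templatevars.ini'))

..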
- -Variables and sahara-ci templates -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The following variables are currently used by sahara-ci templates: - -+-----------------------------+--------+--------------------------------------------------------------+ -| Variable | Type | Value | -+=============================+========+==============================================================+ -| OS_USERNAME | string | user name for login | -+-----------------------------+--------+--------------------------------------------------------------+ -| OS_PASSWORD | string | password for login | -+-----------------------------+--------+--------------------------------------------------------------+ -| OS_TENANT_NAME | string | tenant name | -+-----------------------------+--------+--------------------------------------------------------------+ -| OS_AUTH_URL | string | url for authentication | -+-----------------------------+--------+--------------------------------------------------------------+ -| network_type | string | neutron or nova-network | -+-----------------------------+--------+--------------------------------------------------------------+ -| network_private_name | string | private network name for OS_TENANT_NAME | -+-----------------------------+--------+--------------------------------------------------------------+ -| network_public_name | string | public network name | -+-----------------------------+--------+--------------------------------------------------------------+ -| _name | string | name of the image to be used for the specific plugin/version | -+-----------------------------+--------+--------------------------------------------------------------+ -| {ci,medium,large}_flavor_id | string | IDs of flavor with different size | -+-----------------------------+--------+--------------------------------------------------------------+ - - -_`How to write scenario files` -============================== - -You can write all sections in one or several files, which can be simple YAML files -or YAML-based Mako templates (.yaml.mako or yml.mako). - -Field "concurrency" -------------------- - -This field has integer value, and set concurrency for run tests - -For example: - ``concurrency: 2`` - -Section "credentials" --------------------- - -This section is dictionary-type. 
- -+---------------------+--------+----------+------------------------------+---------------------------------+ -| Fields | Type | Required | Default | Value | -+=====================+========+==========+==============================+=================================+ -| os_username | string | True | admin | user name for login | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| os_password | string | True | nova | password for login | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| os_tenant | string | True | admin | tenant name | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| os_auth_url | string | True | `http://localhost:5000/v2.0` | url for login | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| sahara_service_type | string | | data-processing | service type for sahara | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| sahara_url | string | | None | url of sahara | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| ssl_cert | string | | None | ssl certificate for all clients | -+---------------------+--------+----------+------------------------------+---------------------------------+ -| ssl_verify | boolean | | True | enable verify ssl for sahara | -+---------------------+--------+----------+------------------------------+---------------------------------+ - -Section "network" ----------------- - -This section is dictionary-type. - -+-----------------------------+---------+----------+----------+-------------------------------+ -| Fields | Type | Required | Default | Value | -+=============================+=========+==========+==========+===============================+ -| private_network | string | True | private | name or id of private network | -+-----------------------------+---------+----------+----------+-------------------------------+ -| public_network | string | True | public | name or id of public network | -+-----------------------------+---------+----------+----------+-------------------------------+ -| type | string | | neutron | "neutron" or "nova-network" | -+-----------------------------+---------+----------+----------+-------------------------------+ -| auto_assignment_floating_ip | boolean | | False | | -+-----------------------------+---------+----------+----------+-------------------------------+ - - -Section "clusters" ------------------ - -This section is an array-type.
- -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| Fields | Type | Required | Default | Value | -+=============================+=========+==========+===================================+================================================+ -| plugin_name | string | True | | name of plugin | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| plugin_version | string | True | | version of plugin | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| image | string | True | | name or id of image | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| existing_cluster | string | | | cluster name or id for testing | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| key_name | string | | | name of registered ssh key for testing cluster | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| node_group_templates | object | | | see `section "node_group_templates"`_ | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| cluster_template | object | | | see `section "cluster_template"`_ | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| cluster | object | | | see `section "cluster"`_ | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| scaling | object | | | see `section "scaling"`_ | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| timeout_check_transient | integer | | 300 | timeout for checking transient | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| timeout_poll_jobs_status | integer | | 1800 | timeout for polling jobs state | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| timeout_delete_resource | integer | | 300 | timeout for delete resource | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| timeout_poll_cluster_status | integer | | 1800 | timeout for polling cluster state | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| scenario | array | | ['run_jobs', 'scale', 'run_jobs'] | array of checks | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| edp_jobs_flow | string | | | name of edp job flow | -+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ -| retain_resources | boolean | | False | | 
-+-----------------------------+---------+----------+-----------------------------------+------------------------------------------------+ - - -Section "node_group_templates" ------------------------------- - -This section is an array-type. - -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| Fields | Type | Required | Default | Value | -+===========================+==================+==========+============+==================================================+ -| name | string | True | | name for node group template | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| flavor | string or object | True | | name or id of flavor, or see `section "flavor"`_ | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| node_processes | string | True | | name of process | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| description | string | | Empty | description for node group | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| volumes_per_node | integer | | 0 | minimum 0 | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| volumes_size | integer | | 0 | minimum 0 | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| auto_security_group | boolean | | True | | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| security_group | array | | | security group | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| node_configs | object | | | name_of_config_section: config: value | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| availability_zone | string | | | | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| volumes_availability_zone | string | | | | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| volume_type | string | | | | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| is_proxy_gateway | boolean | | False | use this node as proxy gateway | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ -| edp_batching | integer | | count jobs | use for batching jobs | -+---------------------------+------------------+----------+------------+--------------------------------------------------+ - -Section "flavor" ----------------- - -This section is an dictionary-type. 
- -+----------------+---------+----------+---------------+--------------------------------+ -| Fields | Type | Required | Default | Value | -+================+=========+==========+===============+================================+ -| name | string | | auto-generate | name for flavor | -+----------------+---------+----------+---------------+--------------------------------+ -| id | string | | auto-generate | id for flavor | -+----------------+---------+----------+---------------+--------------------------------+ -| vcpus | integer | | 1 | number of VCPUs for the flavor | -+----------------+---------+----------+---------------+--------------------------------+ -| ram | integer | | 1 | memory in MB for the flavor | -+----------------+---------+----------+---------------+--------------------------------+ -| root_disk | integer | | 0 | size of local disk in GB | -+----------------+---------+----------+---------------+--------------------------------+ -| ephemeral_disk | integer | | 0 | ephemeral space in MB | -+----------------+---------+----------+---------------+--------------------------------+ -| swap_disk | integer | | 0 | swap space in MB | -+----------------+---------+----------+---------------+--------------------------------+ - - -Section "cluster_template" --------------------------- - -This section is dictionary-type. - -+----------------------+--------+----------+-----------+---------------------------------------+ -| Fields | Type | Required | Default | Value | -+======================+========+==========+===========+=======================================+ -| name | string | True | | name for cluster template | -+----------------------+--------+----------+-----------+---------------------------------------+ -| description | string | | Empty | description | -+----------------------+--------+----------+-----------+---------------------------------------+ -| cluster_configs | object | | | name_of_config_section: config: value | -+----------------------+--------+----------+-----------+---------------------------------------+ -| node_group_templates | object | True | | name_of_node_group: count | -+----------------------+--------+----------+-----------+---------------------------------------+ -| anti_affinity | array | | Empty | array of roles | -+----------------------+--------+----------+-----------+---------------------------------------+ - - -Section "cluster" ------------------ - -This section is dictionary-type. - -+--------------+---------+----------+---------+------------------+ -| Fields | Type | Required | Default | Value | -+==============+=========+==========+=========+==================+ -| name | string | True | Empty | name for cluster | -+--------------+---------+----------+---------+------------------+ -| description | string | | Empty | description | -+--------------+---------+----------+---------+------------------+ -| is_transient | boolean | | False | value | -+--------------+---------+----------+---------+------------------+ - - -Section "scaling" ------------------ - -This section is an array-type. 
- -+------------+---------+----------+-----------+--------------------+ -| Fields | Type | Required | Default | Value | -+============+=========+==========+===========+====================+ -| operation | string | True | | "add" or "resize" | -+------------+---------+----------+-----------+--------------------+ -| node_group | string | True | Empty | name of node group | -+------------+---------+----------+-----------+--------------------+ -| size | integer | True | Empty | count node group | -+------------+---------+----------+-----------+--------------------+ - - -Section "edp_jobs_flow" ------------------------ - -This section has an object with a name from the `section "clusters"`_ field "edp_jobs_flows" -Object has sections of array-type. -Required: type - -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| Fields | Type | Required | Default | Value | -+===================+========+==========+===========+=============================================================================+ -| type | string | True | | "Pig", "Java", "MapReduce", "MapReduce.Streaming", "Hive", "Spark", "Shell" | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| input_datasource | object | | | see `section "input_datasource"`_ | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| output_datasource | object | | | see `section "output_datasource"`_ | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| main_lib | object | | | see `section "main_lib"`_ | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| additional_libs | object | | | see `section "additional_libs"`_ | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| configs | dict | | Empty | config: value | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ -| args | array | | Empty | array of args | -+-------------------+--------+----------+-----------+-----------------------------------------------------------------------------+ - - -Section "input_datasource" --------------------------- - -Required: type, source -This section is dictionary-type. - -+---------------+--------+----------+-----------+---------------------------+ -| Fields | Type | Required | Default | Value | -+===============+========+==========+===========+===========================+ -| type | string | True | | "swift", "hdfs", "maprfs" | -+---------------+--------+----------+-----------+---------------------------+ -| hdfs_username | string | | | username for hdfs | -+---------------+--------+----------+-----------+---------------------------+ -| source | string | True | | uri of source | -+---------------+--------+----------+-----------+---------------------------+ - - -Section "output_datasource" ---------------------------- - -Required: type, destination -This section is dictionary-type. 
- -+-------------+--------+----------+-----------+---------------------------+ -| Fields | Type | Required | Default | Value | -+=============+========+==========+===========+===========================+ -| type | string | True | | "swift", "hdfs", "maprfs" | -+-------------+--------+----------+-----------+---------------------------+ -| destination | string | True | | uri of source | -+-------------+--------+----------+-----------+---------------------------+ - - -Section "main_lib" ------------------- - -Required: type, source -This section is dictionary-type. - -+--------+--------+----------+-----------+----------------------+ -| Fields | Type | Required | Default | Value | -+========+========+==========+===========+======================+ -| type | string | True | | "swift or "database" | -+--------+--------+----------+-----------+----------------------+ -| source | string | True | | uri of source | -+--------+--------+----------+-----------+----------------------+ - - -Section "additional_libs" -------------------------- - -Required: type, source -This section is an array-type. - -+--------+--------+----------+-----------+----------------------+ -| Fields | Type | Required | Default | Value | -+========+========+==========+===========+======================+ -| type | string | True | | "swift or "database" | -+--------+--------+----------+-----------+----------------------+ -| source | string | True | | uri of source | -+--------+--------+----------+-----------+----------------------+ diff --git a/sahara/tests/scenario/__init__.py b/sahara/tests/scenario/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/tests/scenario/base.py b/sahara/tests/scenario/base.py deleted file mode 100644 index 7d99841c7f..0000000000 --- a/sahara/tests/scenario/base.py +++ /dev/null @@ -1,719 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function -import functools -import glob -import logging -import os -import sys -import time -import traceback - -import fixtures -from oslo_serialization import jsonutils as json -from oslo_utils import timeutils -import prettytable -import six -from tempest_lib import base -from tempest_lib.common import ssh as connection -from tempest_lib import exceptions as exc - -from sahara.tests.scenario import clients -from sahara.tests.scenario import timeouts -from sahara.tests.scenario import utils -from sahara.utils import crypto as ssh - -logger = logging.getLogger('swiftclient') -logger.setLevel(logging.CRITICAL) - -DEFAULT_TEMPLATES_PATH = ( - 'sahara/tests/scenario/templates/%(plugin_name)s/%(hadoop_version)s') -CHECK_OK_STATUS = "OK" -CHECK_FAILED_STATUS = "FAILED" -CLUSTER_STATUS_ACTIVE = "Active" -CLUSTER_STATUS_ERROR = "Error" - - -def track_result(check_name, exit_with_error=True): - def decorator(fct): - @functools.wraps(fct) - def wrapper(self, *args, **kwargs): - test_info = { - 'check_name': check_name, - 'status': CHECK_OK_STATUS, - 'duration': None, - 'traceback': None - } - self._results.append(test_info) - started_at = timeutils.utcnow() - try: - return fct(self, *args, **kwargs) - except Exception: - test_info['status'] = CHECK_FAILED_STATUS - test_info['traceback'] = traceback.format_exception( - *sys.exc_info()) - if exit_with_error: - raise - finally: - test_time = timeutils.utcnow() - started_at - test_info['duration'] = test_time.seconds - return wrapper - return decorator - - -class BaseTestCase(base.BaseTestCase): - @classmethod - def setUpClass(cls): - super(BaseTestCase, cls).setUpClass() - cls.network = None - cls.credentials = None - cls.testcase = None - cls._results = [] - - def setUp(self): - super(BaseTestCase, self).setUp() - self._init_clients() - timeouts.Defaults.init_defaults(self.testcase) - self.testcase['ssh_username'] = self.sahara.sahara_client.images.get( - self.nova.get_image_id(self.testcase['image'])).username - self.key = self.testcase.get('key_name') - if self.key is None: - self.private_key, self.public_key = ssh.generate_key_pair() - self.key_name = self.__create_keypair() - # save the private key if retain_resources is specified - # (useful for debugging purposes) - if self.testcase['retain_resources'] or self.key is None: - with open(self.key_name + '.key', 'a') as private_key_file: - private_key_file.write(self.private_key) - self.plugin_opts = { - 'plugin_name': self.testcase['plugin_name'], - 'hadoop_version': self.testcase['plugin_version'] - } - self.template_path = DEFAULT_TEMPLATES_PATH % self.plugin_opts - self.cinder = True - self.proxy_ng_name = False - self.proxy = False - - def _init_clients(self): - username = self.credentials['os_username'] - password = self.credentials['os_password'] - tenant_name = self.credentials['os_tenant'] - auth_url = self.credentials['os_auth_url'] - sahara_service_type = self.credentials['sahara_service_type'] - sahara_url = self.credentials['sahara_url'] - - session = clients.get_session(auth_url, username, password, - tenant_name, - self.credentials['ssl_verify'], - self.credentials['ssl_cert']) - - self.sahara = clients.SaharaClient(session=session, - service_type=sahara_service_type, - sahara_url=sahara_url) - self.nova = clients.NovaClient(session=session) - self.neutron = clients.NeutronClient(session=session) - # swiftclient doesn't support keystone sessions - self.swift = clients.SwiftClient( - authurl=auth_url, - user=username, - key=password, - insecure=not 
self.credentials['ssl_verify'], - cacert=self.credentials['ssl_cert'], - tenant_name=tenant_name) - - def create_cluster(self): - self.cluster_id = self.sahara.get_cluster_id( - self.testcase.get('existing_cluster')) - self.ng_id_map = {} - if self.cluster_id is None: - self.ng_id_map = self._create_node_group_templates() - cl_tmpl_id = self._create_cluster_template() - self.cluster_id = self._create_cluster(cl_tmpl_id) - elif self.key is None: - self.cinder = False - self._poll_cluster_status_tracked(self.cluster_id) - cluster = self.sahara.get_cluster(self.cluster_id, show_progress=True) - if self.proxy_ng_name: - for ng in cluster.node_groups: - if ng['name'] == self.proxy_ng_name: - self.proxy = ng['instances'][0]['management_ip'] - - self.check_cinder() - if not getattr(cluster, "provision_progress", None): - return - self._check_event_logs(cluster) - - @track_result("Check transient") - def check_transient(self): - with fixtures.Timeout( - timeouts.Defaults.instance.timeout_check_transient, - gentle=True): - while True: - if self.sahara.is_resource_deleted( - self.sahara.get_cluster_status, self.cluster_id): - break - time.sleep(5) - - def _inject_datasources_data(self, arg, input_url, output_url): - return arg.format( - input_datasource=input_url, output_datasource=output_url) - - def _put_io_data_to_configs(self, configs, input_id, output_id): - input_url, output_url = None, None - if input_id is not None: - input_url = self.sahara.get_datasource( - data_source_id=input_id).url - if output_id is not None: - output_url = self.sahara.get_datasource( - data_source_id=output_id).url - pl = lambda x: self._inject_datasources_data(x, input_url, output_url) - args = list(map(pl, configs.get('args', []))) - configs['args'] = args - return configs - - def _prepare_job_running(self, job): - input_id, output_id = self._create_datasources(job) - main_libs, additional_libs = self._create_job_binaries(job) - job_id = self._create_job(job['type'], main_libs, additional_libs) - configs = self._parse_job_configs(job) - configs = self._put_io_data_to_configs( - configs, input_id, output_id) - return [job_id, input_id, output_id, configs] - - @track_result("Check EDP jobs", False) - def check_run_jobs(self): - batching = self.testcase.get('edp_batching', - len(self.testcase['edp_jobs_flow'])) - batching_size = batching - jobs = self.testcase.get('edp_jobs_flow', []) - - pre_exec = [] - for job in jobs: - pre_exec.append(self._prepare_job_running(job)) - batching -= 1 - if not batching: - self._job_batching(pre_exec) - pre_exec = [] - batching = batching_size - - def _job_batching(self, pre_exec): - job_exec_ids = [] - for job_exec in pre_exec: - job_exec_ids.append(self._run_job(*job_exec)) - - self._poll_jobs_status(job_exec_ids) - - def _create_datasources(self, job): - def create(ds, name): - location = ds.get('source', None) - if not location: - location = utils.rand_name(ds['destination']) - if ds['type'] == 'swift': - url = self._create_swift_data(location) - if ds['type'] == 'hdfs': - url = self._create_hdfs_data(location, ds.get('hdfs_username', - 'oozie')) - if ds['type'] == 'maprfs': - url = location - return self.__create_datasource( - name=utils.rand_name(name), - description='', - data_source_type=ds['type'], url=url, - credential_user=self.credentials['os_username'], - credential_pass=self.credentials['os_password']) - - input_id, output_id = None, None - if job.get('input_datasource'): - ds = job['input_datasource'] - input_id = create(ds, 'input') - - if job.get('output_datasource'): 
- ds = job['output_datasource'] - output_id = create(ds, 'output') - - return input_id, output_id - - def _create_job_binaries(self, job): - main_libs = [] - additional_libs = [] - if job.get('main_lib'): - main_libs.append(self._create_job_binary(job['main_lib'])) - for add_lib in job.get('additional_libs', []): - lib_id = self._create_job_binary(add_lib) - additional_libs.append(lib_id) - - return main_libs, additional_libs - - def _create_job_binary(self, job_binary): - url = None - extra = {} - if job_binary['type'] == 'swift': - url = self._create_swift_data(job_binary['source']) - extra['user'] = self.credentials['os_username'] - extra['password'] = self.credentials['os_password'] - if job_binary['type'] == 'database': - url = self._create_internal_db_data(job_binary['source']) - - job_binary_name = '%s-%s' % ( - utils.rand_name('test'), os.path.basename(job_binary['source'])) - return self.__create_job_binary(job_binary_name, url, '', extra) - - def _create_job(self, type, mains, libs): - return self.__create_job(utils.rand_name('test'), type, mains, - libs, '') - - def _parse_job_configs(self, job): - configs = {} - if job.get('configs'): - configs['configs'] = {} - for param, value in six.iteritems(job['configs']): - configs['configs'][param] = str(value) - if job.get('args'): - configs['args'] = map(str, job['args']) - return configs - - def _run_job(self, job_id, input_id, output_id, configs): - return self.__run_job(job_id, self.cluster_id, input_id, output_id, - configs) - - def _poll_jobs_status(self, exec_ids): - try: - with fixtures.Timeout( - timeouts.Defaults.instance.timeout_poll_jobs_status, - gentle=True): - success = False - polling_ids = list(exec_ids) - while not success: - current_ids = list(polling_ids) - success = True - for exec_id in polling_ids: - status = self.sahara.get_job_status(exec_id) - if status not in ['FAILED', 'KILLED', 'DONEWITHERROR', - "SUCCEEDED"]: - success = False - else: - current_ids.remove(exec_id) - polling_ids = list(current_ids) - time.sleep(5) - finally: - report = [] - for exec_id in exec_ids: - status = self.sahara.get_job_status(exec_id) - if status != "SUCCEEDED": - info = self.sahara.get_job_info(exec_id) - report.append("Job with id={id}, name={name}, " - "type={type} has status " - "{status}".format(id=exec_id, - name=info.name, - type=info.type, - status=status)) - if report: - self.fail("\n".join(report)) - - def _create_swift_data(self, source=None): - container = self._get_swift_container() - path = utils.rand_name('test') - data = None - if source: - with open(source) as source_fd: - data = source_fd.read() - - self.__upload_to_container(container, path, data) - - return 'swift://%s.sahara/%s' % (container, path) - - def _create_hdfs_data(self, source, hdfs_username): - - def to_hex_present(string): - return "".join(map(lambda x: hex(ord(x)).replace("0x", "\\x"), - string)) - - if 'user' in source: - return source - hdfs_dir = utils.rand_name("/user/%s/data" % hdfs_username) - inst_ip = self._get_nodes_with_process()[0]["management_ip"] - self._run_command_on_node( - inst_ip, - "sudo su - -c \"hdfs dfs -mkdir -p %(path)s \" %(user)s" % { - "path": hdfs_dir, "user": hdfs_username}) - hdfs_filepath = utils.rand_name(hdfs_dir + "/file") - with open(source) as source_fd: - data = source_fd.read() - self._run_command_on_node( - inst_ip, - ("echo -e \"%(data)s\" | sudo su - -c \"hdfs dfs" - " -put - %(path)s\" %(user)s") % { - "data": to_hex_present(data), - "path": hdfs_filepath, - "user": hdfs_username}) - return hdfs_filepath - 
- def _create_internal_db_data(self, source): - with open(source) as source_fd: - data = source_fd.read() - id = self.__create_internal_db_data(utils.rand_name('test'), data) - return 'internal-db://%s' % id - - def _get_swift_container(self): - if not getattr(self, '__swift_container', None): - self.__swift_container = self.__create_container( - utils.rand_name('sahara-tests')) - return self.__swift_container - - @track_result("Cluster scaling", False) - def check_scale(self): - scale_ops = [] - ng_before_scale = self.sahara.get_cluster(self.cluster_id).node_groups - if self.testcase.get('scaling'): - scale_ops = self.testcase['scaling'] - else: - scale_path = os.path.join(self.template_path, 'scale.json') - if os.path.exists(scale_path): - with open(scale_path) as data: - scale_ops = json.load(data) - - body = {} - for op in scale_ops: - node_scale = op['node_group'] - if op['operation'] == 'add': - if 'add_node_groups' not in body: - body['add_node_groups'] = [] - body['add_node_groups'].append({ - 'node_group_template_id': - self.ng_id_map.get(node_scale, - self.sahara.get_node_group_template_id( - node_scale)), - 'count': op['size'], - 'name': utils.rand_name(node_scale) - }) - if op['operation'] == 'resize': - if 'resize_node_groups' not in body: - body['resize_node_groups'] = [] - body['resize_node_groups'].append({ - 'name': self.ng_name_map.get( - node_scale, - self.sahara.get_node_group_template_id(node_scale)), - 'count': op['size'] - }) - - if body: - self.sahara.scale_cluster(self.cluster_id, body) - self._poll_cluster_status(self.cluster_id) - ng_after_scale = self.sahara.get_cluster( - self.cluster_id).node_groups - self._validate_scaling(ng_after_scale, - self._get_expected_count_of_nodes( - ng_before_scale, body)) - - def _validate_scaling(self, after, expected_count): - for (key, value) in six.iteritems(expected_count): - ng = {} - for after_ng in after: - if after_ng['name'] == key: - ng = after_ng - break - self.assertEqual(value, ng.get('count', 0)) - - def _get_expected_count_of_nodes(self, before, body): - expected_mapper = {} - for ng in before: - expected_mapper[ng['name']] = ng['count'] - for ng in body.get('add_node_groups', []): - expected_mapper[ng['name']] = ng['count'] - for ng in body.get('resize_node_groups', []): - expected_mapper[ng['name']] = ng['count'] - return expected_mapper - - @track_result("Check cinder volumes") - def check_cinder(self): - if not self._get_node_list_with_volumes() or not self.cinder: - print("All tests for Cinder were skipped") - return - for node_with_volumes in self._get_node_list_with_volumes(): - volume_count_on_node = int(self._run_command_on_node( - node_with_volumes['node_ip'], - 'mount | grep %s | wc -l' % - node_with_volumes['volume_mount_prefix'] - )) - self.assertEqual( - node_with_volumes['volume_count'], volume_count_on_node, - 'Some volumes were not mounted to node.\n' - 'Expected count of mounted volumes to node is %s.\n' - 'Actual count of mounted volumes to node is %s.' 
- % (node_with_volumes['volume_count'], volume_count_on_node) - ) - - def _get_node_list_with_volumes(self): - node_groups = self.sahara.get_cluster(self.cluster_id).node_groups - node_list_with_volumes = [] - for node_group in node_groups: - if node_group['volumes_per_node'] != 0: - for instance in node_group['instances']: - node_list_with_volumes.append({ - 'node_ip': instance['management_ip'], - 'volume_count': node_group['volumes_per_node'], - 'volume_mount_prefix': - node_group['volume_mount_prefix'] - }) - return node_list_with_volumes - - @track_result("Create node group templates") - def _create_node_group_templates(self): - ng_id_map = {} - floating_ip_pool = None - if self.network['type'] == 'neutron': - floating_ip_pool = self.neutron.get_network_id( - self.network['public_network']) - elif not self.network['auto_assignment_floating_ip']: - floating_ip_pool = self.network['public_network'] - - node_groups = [] - if self.testcase.get('node_group_templates'): - for ng in self.testcase['node_group_templates']: - node_groups.append(ng) - else: - templates_path = os.path.join(self.template_path, - 'node_group_template_*.json') - for template_file in glob.glob(templates_path): - with open(template_file) as data: - node_groups.append(json.load(data)) - - check_indirect_access = False - for ng in node_groups: - if ng.get('is_proxy_gateway'): - check_indirect_access = True - - for ng in node_groups: - kwargs = dict(ng) - kwargs.update(self.plugin_opts) - kwargs['flavor_id'] = self._get_flavor_id(kwargs['flavor']) - del kwargs['flavor'] - kwargs['name'] = utils.rand_name(kwargs['name']) - if (not kwargs.get('is_proxy_gateway', - False)) and (check_indirect_access): - kwargs['floating_ip_pool'] = None - self.proxy_ng_name = kwargs['name'] - else: - kwargs['floating_ip_pool'] = floating_ip_pool - ng_id = self.__create_node_group_template(**kwargs) - ng_id_map[ng['name']] = ng_id - return ng_id_map - - @track_result("Set flavor") - def _get_flavor_id(self, flavor): - if isinstance(flavor, str): - return self.nova.get_flavor_id(flavor) - else: - flavor_id = self.nova.create_flavor(flavor).id - self.addCleanup(self.nova.delete_flavor, flavor_id) - return flavor_id - - @track_result("Create cluster template") - def _create_cluster_template(self): - self.ng_name_map = {} - template = None - if self.testcase.get('cluster_template'): - template = self.testcase['cluster_template'] - else: - template_path = os.path.join(self.template_path, - 'cluster_template.json') - with open(template_path) as data: - template = json.load(data) - - kwargs = dict(template) - ngs = kwargs['node_group_templates'] - del kwargs['node_group_templates'] - kwargs['node_groups'] = [] - for ng, count in ngs.items(): - ng_name = utils.rand_name(ng) - self.ng_name_map[ng] = ng_name - kwargs['node_groups'].append({ - 'name': ng_name, - 'node_group_template_id': self.ng_id_map[ng], - 'count': count}) - - kwargs.update(self.plugin_opts) - kwargs['name'] = utils.rand_name(kwargs['name']) - if self.network['type'] == 'neutron': - kwargs['net_id'] = self.neutron.get_network_id( - self.network['private_network']) - - return self.__create_cluster_template(**kwargs) - - @track_result("Check event logs") - def _check_event_logs(self, cluster): - invalid_steps = [] - if cluster.is_transient: - # skip event log testing - return - - for step in cluster.provision_progress: - if not step['successful']: - invalid_steps.append(step) - - if len(invalid_steps) > 0: - invalid_steps_info = "\n".join(six.text_type(e) - for e in invalid_steps) - 
steps_info = "\n".join(six.text_type(e) - for e in cluster.provision_progress) - raise exc.TempestException( - "Issues with event log work: " - "\n Incomplete steps: \n\n {invalid_steps}" - "\n All steps: \n\n {steps}".format( - steps=steps_info, - invalid_steps=invalid_steps_info)) - - @track_result("Create cluster") - def _create_cluster(self, cluster_template_id): - if self.testcase.get('cluster'): - kwargs = dict(self.testcase['cluster']) - else: - kwargs = {} # default template - - kwargs.update(self.plugin_opts) - kwargs['name'] = utils.rand_name(kwargs.get('name', 'test')) - kwargs['cluster_template_id'] = cluster_template_id - kwargs['default_image_id'] = self.nova.get_image_id( - self.testcase['image']) - kwargs['user_keypair_id'] = self.key_name - - return self.__create_cluster(**kwargs) - - @track_result("Check cluster state") - def _poll_cluster_status_tracked(self, cluster_id): - self._poll_cluster_status(cluster_id) - - def _poll_cluster_status(self, cluster_id): - with fixtures.Timeout( - timeouts.Defaults.instance.timeout_poll_cluster_status, - gentle=True): - while True: - status = self.sahara.get_cluster_status(cluster_id) - if status == CLUSTER_STATUS_ACTIVE: - break - if status == CLUSTER_STATUS_ERROR: - raise exc.TempestException("Cluster in %s state" % status) - time.sleep(3) - - def _run_command_on_node(self, node_ip, command): - host_ip = node_ip - if self.proxy: - host_ip = self.proxy - command = "ssh %s %s" % (node_ip, command) - ssh_session = connection.Client(host_ip, self.testcase['ssh_username'], - pkey=self.private_key) - return ssh_session.exec_command(command) - - def _get_nodes_with_process(self, process=None): - nodegroups = self.sahara.get_cluster(self.cluster_id).node_groups - nodes_with_process = [] - for nodegroup in nodegroups: - if not process or process in nodegroup['node_processes']: - nodes_with_process.extend(nodegroup['instances']) - return nodes_with_process - - # client ops - - def __create_node_group_template(self, *args, **kwargs): - id = self.sahara.create_node_group_template(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_node_group_template, id) - return id - - def __create_cluster_template(self, *args, **kwargs): - id = self.sahara.create_cluster_template(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_cluster_template, id) - return id - - def __create_cluster(self, *args, **kwargs): - id = self.sahara.create_cluster(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_cluster, id) - return id - - def __create_datasource(self, *args, **kwargs): - id = self.sahara.create_datasource(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_datasource, id) - return id - - def __create_internal_db_data(self, *args, **kwargs): - id = self.sahara.create_job_binary_internal(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_job_binary_internal, id) - return id - - def __create_job_binary(self, *args, **kwargs): - id = self.sahara.create_job_binary(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_job_binary, id) - return id - - def __create_job(self, *args, **kwargs): - id = self.sahara.create_job(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_job, id) - return id - - def __run_job(self, *args, **kwargs): - id = 
self.sahara.run_job(*args, **kwargs) - if not self.testcase['retain_resources']: - self.addCleanup(self.sahara.delete_job_execution, id) - return id - - def __create_container(self, container_name): - self.swift.create_container(container_name) - if not self.testcase['retain_resources']: - self.addCleanup(self.swift.delete_container, container_name) - return container_name - - def __upload_to_container(self, container_name, object_name, data=None): - if data: - self.swift.upload_data(container_name, object_name, data) - if not self.testcase['retain_resources']: - self.addCleanup(self.swift.delete_object, container_name, - object_name) - - def __create_keypair(self): - key = utils.rand_name('scenario_key') - self.nova.nova_client.keypairs.create(key, - public_key=self.public_key) - if not self.testcase['retain_resources']: - self.addCleanup(self.nova.delete_keypair, key) - return key - - def tearDown(self): - tbs = [] - table = prettytable.PrettyTable(["Check", "Status", "Duration, s"]) - table.align["Check"] = "l" - for check in self._results: - table.add_row( - [check['check_name'], check['status'], check['duration']]) - if check['status'] == CHECK_FAILED_STATUS: - tbs.extend(check['traceback']) - tbs.append("") - print("Results of testing plugin", self.plugin_opts['plugin_name'], - self.plugin_opts['hadoop_version']) - print(table) - print("\n".join(tbs), file=sys.stderr) - - super(BaseTestCase, self).tearDown() - - test_failed = any([c['status'] == CHECK_FAILED_STATUS - for c in self._results]) - if test_failed: - self.fail("Scenario tests failed") diff --git a/sahara/tests/scenario/clients.py b/sahara/tests/scenario/clients.py deleted file mode 100644 index a1e6e00c66..0000000000 --- a/sahara/tests/scenario/clients.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import time - -import fixtures -from keystoneclient.auth.identity import v3 as identity_v3 -from keystoneclient import session -from neutronclient.neutron import client as neutron_client -from novaclient import client as nova_client -from novaclient import exceptions as nova_exc -from oslo_utils import uuidutils -from saharaclient.api import base as saharaclient_base -from saharaclient import client as sahara_client -from swiftclient import client as swift_client -from swiftclient import exceptions as swift_exc -from tempest_lib import exceptions as exc - -from sahara.tests.scenario import utils - - -def get_session(auth_url=None, username=None, password=None, - project_name=None, verify=True, cert=None): - auth = identity_v3.Password(auth_url=auth_url.replace('/v2.0', '/v3'), - username=username, - password=password, - project_name=project_name, - user_domain_name='default', - project_domain_name='default') - return session.Session(auth=auth, verify=verify, cert=cert) - -from sahara.tests.scenario import timeouts - - -class Client(object): - def is_resource_deleted(self, method, *args, **kwargs): - raise NotImplementedError - - def delete_resource(self, method, *args, **kwargs): - with fixtures.Timeout( - timeouts.Defaults.instance.timeout_delete_resource, - gentle=True): - while True: - if self.is_resource_deleted(method, *args, **kwargs): - break - time.sleep(5) - - -class SaharaClient(Client): - def __init__(self, *args, **kwargs): - self.sahara_client = sahara_client.Client('1.1', *args, **kwargs) - - def create_node_group_template(self, *args, **kwargs): - data = self.sahara_client.node_group_templates.create(*args, **kwargs) - return data.id - - def delete_node_group_template(self, node_group_template_id): - return self.delete_resource( - self.sahara_client.node_group_templates.delete, - node_group_template_id) - - def create_cluster_template(self, *args, **kwargs): - data = self.sahara_client.cluster_templates.create(*args, **kwargs) - return data.id - - def delete_cluster_template(self, cluster_template_id): - return self.delete_resource( - self.sahara_client.cluster_templates.delete, - cluster_template_id) - - def create_cluster(self, *args, **kwargs): - data = self.sahara_client.clusters.create(*args, **kwargs) - return data.id - - def delete_cluster(self, cluster_id): - return self.delete_resource( - self.sahara_client.clusters.delete, - cluster_id) - - def scale_cluster(self, cluster_id, body): - return self.sahara_client.clusters.scale(cluster_id, body) - - def create_datasource(self, *args, **kwargs): - data = self.sahara_client.data_sources.create(*args, **kwargs) - return data.id - - def get_datasource(self, *args, **kwargs): - return self.sahara_client.data_sources.get(*args, **kwargs) - - def delete_datasource(self, datasource_id): - return self.delete_resource( - self.sahara_client.data_sources.delete, - datasource_id) - - def create_job_binary_internal(self, *args, **kwargs): - data = self.sahara_client.job_binary_internals.create(*args, **kwargs) - return data.id - - def delete_job_binary_internal(self, job_binary_internal_id): - return self.delete_resource( - self.sahara_client.job_binary_internals.delete, - job_binary_internal_id) - - def create_job_binary(self, *args, **kwargs): - data = self.sahara_client.job_binaries.create(*args, **kwargs) - return data.id - - def delete_job_binary(self, job_binary_id): - return self.delete_resource( - self.sahara_client.job_binaries.delete, - job_binary_id) - - def create_job(self, *args, **kwargs): - data = 
self.sahara_client.jobs.create(*args, **kwargs) - return data.id - - def delete_job(self, job_id): - return self.delete_resource( - self.sahara_client.jobs.delete, - job_id) - - def run_job(self, *args, **kwargs): - data = self.sahara_client.job_executions.create(*args, **kwargs) - return data.id - - def delete_job_execution(self, job_execution_id): - return self.delete_resource( - self.sahara_client.job_executions.delete, - job_execution_id) - - def get_cluster(self, cluster_id, show_progress=False): - return self.sahara_client.clusters.get(cluster_id, show_progress) - - def get_cluster_status(self, cluster_id): - data = self.sahara_client.clusters.get(cluster_id) - return str(data.status) - - def get_job_status(self, exec_id): - data = self.sahara_client.job_executions.get(exec_id) - return str(data.info['status']) - - def get_job_info(self, exec_id): - job_execution = self.sahara_client.job_executions.get(exec_id) - return self.sahara_client.jobs.get(job_execution.job_id) - - def get_cluster_id(self, name): - if uuidutils.is_uuid_like(name): - return name - for cluster in self.sahara_client.clusters.list(): - if cluster.name == name: - return cluster.id - - def get_node_group_template_id(self, name): - for nodegroup in self.sahara_client.node_group_templates.list(): - if nodegroup.name == name: - return nodegroup.id - - def is_resource_deleted(self, method, *args, **kwargs): - try: - method(*args, **kwargs) - except saharaclient_base.APIException as ex: - return ex.error_code == 404 - - return False - - -class NovaClient(Client): - def __init__(self, *args, **kwargs): - self.nova_client = nova_client.Client('2', *args, **kwargs) - - def get_image_id(self, image_name): - if uuidutils.is_uuid_like(image_name): - return image_name - for image in self.nova_client.images.list(): - if image.name == image_name: - return image.id - - raise exc.NotFound(image_name) - - def get_flavor_id(self, flavor_name): - if uuidutils.is_uuid_like(flavor_name) or flavor_name.isdigit(): - return flavor_name - for flavor in self.nova_client.flavors.list(): - if flavor.name == flavor_name: - return flavor.id - - raise exc.NotFound(flavor_name) - - def create_flavor(self, flavor_object): - return self.nova_client.flavors.create( - flavor_object.get('name', utils.rand_name('scenario')), - flavor_object.get('ram', 1), - flavor_object.get('vcpus', 1), - flavor_object.get('root_disk', 0), - ephemeral=flavor_object.get('ephemeral_disk', 0), - swap=flavor_object.get('swap_disk', 0), - flavorid=flavor_object.get('id', 'auto')) - - def delete_flavor(self, flavor_id): - return self.delete_resource(self.nova_client.flavors.delete, flavor_id) - - def delete_keypair(self, key_name): - return self.delete_resource( - self.nova_client.keypairs.delete, key_name) - - def is_resource_deleted(self, method, *args, **kwargs): - try: - method(*args, **kwargs) - except nova_exc.NotFound as ex: - return ex.code == 404 - - return False - - -class NeutronClient(Client): - def __init__(self, *args, **kwargs): - self.neutron_client = neutron_client.Client('2.0', *args, **kwargs) - - def get_network_id(self, network_name): - if uuidutils.is_uuid_like(network_name): - return network_name - networks = self.neutron_client.list_networks(name=network_name) - networks = networks['networks'] - if len(networks) < 1: - raise exc.NotFound(network_name) - return networks[0]['id'] - - -class SwiftClient(Client): - def __init__(self, *args, **kwargs): - self.swift_client = swift_client.Connection(auth_version='2.0', - *args, **kwargs) - - def 
create_container(self, container_name): - return self.swift_client.put_container(container_name) - - def delete_container(self, container_name): - return self.delete_resource( - self.swift_client.delete_container, - container_name) - - def upload_data(self, container_name, object_name, data): - return self.swift_client.put_object(container_name, object_name, data) - - def delete_object(self, container_name, object_name): - return self.delete_resource( - self.swift_client.delete_object, - container_name, - object_name) - - def is_resource_deleted(self, method, *args, **kwargs): - try: - method(*args, **kwargs) - except swift_exc.ClientException as ex: - return ex.http_status == 404 - - return False diff --git a/sahara/tests/scenario/custom_checks/__init__.py b/sahara/tests/scenario/custom_checks/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/tests/scenario/custom_checks/check_cinder.py b/sahara/tests/scenario/custom_checks/check_cinder.py deleted file mode 100644 index 22301c5519..0000000000 --- a/sahara/tests/scenario/custom_checks/check_cinder.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def check(self): - self.check_cinder() diff --git a/sahara/tests/scenario/custom_checks/check_kafka.py b/sahara/tests/scenario/custom_checks/check_kafka.py deleted file mode 100644 index de88d7c9b3..0000000000 --- a/sahara/tests/scenario/custom_checks/check_kafka.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from sahara.tests.scenario import base as base_scenario -from sahara.tests.scenario import utils - - -class CustomCheckKafka(object): - def __init__(self, base_class): - self.base = base_class - - def _run_command_on_node(self, *args, **kwargs): - return self.base._run_command_on_node(*args, **kwargs) - - def _get_nodes_with_process(self, *args, **kwargs): - return self.base._get_nodes_with_process(*args, **kwargs) - - def fail(self, *args, **kwargs): - return self.base.fail(*args, **kwargs) - - def _prepare_job_running(self, *args, **kwargs): - return self.base._prepare_job_running(*args, **kwargs) - - def _job_batching(self, *args, **kwargs): - return self.base._job_batching(*args, **kwargs) - - @property - def _results(self): - return self.base._results - - @_results.setter - def _results(self, value): - self.base._results = value - - @staticmethod - def _get_nodes_desc_list(nodes, node_domain, port): - data = [] - for node in nodes: - fqdn = "{0}.{1}".format( - node["instance_name"], node_domain) - data.append("{0}:{1}".format(fqdn, port)) - return ",".join(data) - - def _get_node_ip(self, process): - node = self._get_nodes_with_process(process)[0] - return node["management_ip"] - - def _search_file_on_node(self, ip, file): - file_path = self._run_command_on_node( - ip, 'find / -name "{file}" 2>/dev/null -print | head -n 1' - .format(file=file)) - if not file_path: - self.fail("Cannot find file: {file}".format(file)) - return file_path.rstrip() - - def _create_test_topic(self, broker, topic, zookeepers): - ip = self._get_node_ip(broker) - scr = self._search_file_on_node(ip, "kafka-topics.sh") - # TODO(vgridnev): Avoid hardcoded values in future - self._run_command_on_node( - ip, "{script} --create --zookeeper {zoo} --replication-factor " - "1 --partitions 1 --topic {topic}".format( - script=scr, zoo=zookeepers, topic=topic)) - - def _send_messages(self, broker, topic, broker_list): - ip = self._get_node_ip(broker) - - scr = self._search_file_on_node(ip, "kafka-console-producer.sh") - messages = ["< -class ${testcase['class_name']}TestCase(base.BaseTestCase): - @classmethod - def setUpClass(cls): - super(${testcase['class_name']}TestCase, cls).setUpClass() - cls.credentials = ${credentials} - cls.network = ${network} - cls.testcase = ${testcase} - - def test_plugin(self): - self.create_cluster() - % for check in testcase['scenario']: - from sahara.tests.scenario.custom_checks import check_${check} - check_${check}.check(self) - % endfor - diff --git a/sahara/tests/scenario/timeouts.py b/sahara/tests/scenario/timeouts.py deleted file mode 100644 index d9e7dbcecc..0000000000 --- a/sahara/tests/scenario/timeouts.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -class Defaults(object): - def __init__(self, config): - self.timeout_check_transient = config.get('timeout_check_transient', - 300) - self.timeout_delete_resource = config.get('timeout_delete_resource', - 300) - self.timeout_poll_cluster_status = config.get( - 'timeout_poll_cluster_status', 1800) - self.timeout_poll_jobs_status = config.get('timeout_poll_jobs_status', - 1800) - - @classmethod - def init_defaults(cls, config): - if not hasattr(cls, 'instance'): - cls.instance = Defaults(config) - return cls.instance diff --git a/sahara/tests/scenario/utils.py b/sahara/tests/scenario/utils.py deleted file mode 100644 index fcc2fb6c1f..0000000000 --- a/sahara/tests/scenario/utils.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import uuidutils - - -def rand_name(name=''): - rand_data = uuidutils.generate_uuid()[:8] - if name: - return '%s-%s' % (name, rand_data) - else: - return rand_data diff --git a/sahara/tests/scenario/validation.py b/sahara/tests/scenario/validation.py deleted file mode 100644 index 3c47ce099a..0000000000 --- a/sahara/tests/scenario/validation.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re - -import jsonschema -import rfc3986 - - -SCHEMA = { - "type": "object", - "properties": { - "concurrency": { - "type": "integer", - "minimum": 1 - }, - "credentials": { - "type": "object", - "properties": { - "os_username": { - "type": "string", - "minLength": 1 - }, - "os_password": { - "type": "string", - "minLength": 1 - }, - "os_tenant": { - "type": "string", - "minLength": 1 - }, - "os_auth_url": { - "type": "string", - "format": "uri" - }, - "sahara_service_type": { - "type": "string", - "minLength": 1 - }, - "sahara_url": { - "type": "string", - "format": "uri" - }, - "ssl_verify": { - "type": "boolean" - }, - "ssl_cert": { - "type": "string", - "minLength": 1 - } - }, - "additionalProperties": False - }, - "network": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["neutron", "nova-network"] - }, - "auto_assignment_floating_ip": { - "type": "boolean" - }, - "public_network": { - "type": "string", - "minLength": 1 - }, - "private_network": { - "type": "string", - "minLength": 1 - } - }, - "additionalProperties": False - }, - "clusters": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "existing_cluster": { - "type": "string", - "minLength": 1 - }, - "key_name": { - "type": "string", - "minLength": 1 - }, - "plugin_name": { - "type": "string", - "minLength": 1 - }, - "plugin_version": { - "type": "string", - "minLength": 1 - }, - "image": { - "type": "string", - "minLength": 1 - }, - "node_group_templates": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "minLength": 1, - "format": "valid_name" - }, - "node_processes": { - "type": "array", - "minItems": 1, - "items": { - "type": "string", - "minLength": 1 - } - }, - "flavor": { - "type": ["object", "string"], - "properties": { - "name": { - "type": "string", - "minLength": 1 - }, - "id": { - "type": "string", - "minLength": 1 - }, - "vcpus": { - "type": "integer", - "minimum": 1 - }, - "ram": { - "type": "integer", - "minimum": 1 - }, - "root_disk": { - "type": "integer", - "minimum": 0 - }, - "ephemeral_disk": { - "type": "integer", - "minimum": 0 - }, - "swap_disk": { - "type": "integer", - "minimum": 0 - }, - }, - "additionalProperties": True - }, - "description": { - "type": "string" - }, - "volumes_per_node": { - "type": "integer", - "minimum": 0 - }, - "volumes_size": { - "type": "integer", - "minimum": 0 - }, - "node_configs": { - "type": "object" - }, - "security_groups": { - "type": "array", - "items": { - "type": "string", - "minLength": 1 - } - }, - "auto_security_group": { - "type": "boolean" - }, - "availability_zone": { - "type": "string", - "minLength": 1 - }, - "volumes_availability_zone": { - "type": "string", - "minLength": 1 - }, - "volume_type": { - "type": "string", - "minLength": 1 - }, - "is_proxy_gateway": { - "type": "boolean" - } - }, - "required": ["name", "flavor", "node_processes"], - "additionalProperties": False - } - }, - "cluster_template": { - "type": "object", - "properties": { - "name": { - "type": "string", - "minLength": 1, - "format": "valid_name" - }, - "description": { - "type": "string" - }, - "cluster_configs": { - "type": "object" - }, - "node_group_templates": { - "type": "object", - "patternProperties": { - ".*": { - "type": "integer", - "minimum": 1 - } - } - }, - "anti_affinity": { - "type": "array", - "items": { - "type": "string", - "minLength": 1 - } - } - }, - "required": ["name", "node_group_templates"], - 
"additionalProperties": False - }, - "cluster": { - "type": "object", - "properties": { - "name": { - "type": "string", - "minLength": 1, - "format": "valid_name" - }, - "description": { - "type": "string" - }, - "is_transient": { - "type": "boolean" - } - }, - "required": ["name"], - "additionalProperties": False, - }, - "timeout_check_transient": { - "type": "integer", - "minimum": 1 - }, - "timeout_delete_resource": { - "type": "integer", - "minimum": 1 - }, - "timeout_poll_cluster_status": { - "type": "integer", - "minimum": 1 - }, - "timeout_poll_jobs_status": { - "type": "integer", - "minimum": 1 - }, - "custom_checks": { - "type": "object", - "properties": { - ".*": { - "type": "object", - } - } - }, - "scaling": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "operation": { - "type": "string", - "enum": ["add", "resize"] - }, - "node_group": { - "type": "string", - "minLength": 1 - }, - "size": { - "type": "integer", - "minimum": 0 - } - }, - "required": ["operation", "node_group", "size"], - "additionalProperties": False - } - }, - "scenario": { - "type": "array", - "items": { - "type": "string", - "minLength": 1 - } - }, - "edp_jobs_flow": { - "type": ["string", "array"] - }, - "retain_resources": { - "type": "boolean" - }, - "edp_batching": { - "type": "integer", - "minimum": 1 - } - }, - "required": ["plugin_name", "plugin_version", "image"], - "additionalProperties": False - } - }, - "edp_jobs_flow": { - "type": "object", - "patternProperties": { - ".*": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["Pig", "Java", "MapReduce", - "MapReduce.Streaming", "Hive", - "Spark"] - }, - "input_datasource": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["swift", "hdfs", "maprfs"] - }, - "source": { - "type": "string" - }, - "hdfs_username": { - "type": "string" - } - }, - "required": ["type", "source"], - "additionalProperties": False - }, - "output_datasource": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["swift", "hdfs", "maprfs"] - }, - "destination": { - "type": "string" - } - }, - "required": ["type", "destination"], - "additionalProperties": False - }, - "main_lib": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["swift", "database"] - }, - "source": { - "type": "string" - } - }, - "required": ["type", "source"], - "additionalProperties": False - }, - "additional_libs": { - "type": "array", - "items": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": ["swift", "database"] - }, - "source": { - "type": "string" - } - }, - "required": ["type", "source"], - "additionalProperties": False - } - }, - "configs": { - "type": "object" - }, - "args": { - "type": "array" - } - }, - "required": ["type"], - "additionalProperties": False - } - } - } - } - }, - "required": ["clusters"], - "additionalProperties": False -} - - -@jsonschema.FormatChecker.cls_checks("uri") -def validate_uri_format(entry): - return rfc3986.is_valid_uri(entry) - - -@jsonschema.FormatChecker.cls_checks('valid_name') -def validate_name_hostname_format(entry): - res = re.match(r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]" - r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]" - r"[A-Za-z0-9\-]*[A-Za-z0-9])$", entry) - return res is not None - - -class Validator(jsonschema.Draft4Validator): - def __init__(self, schema): - format_checker = jsonschema.FormatChecker() - 
super(Validator, self).__init__( - schema, format_checker=format_checker) - - -def validate(config): - return Validator(SCHEMA).validate(config) diff --git a/sahara/tests/scenario_unit/__init__.py b/sahara/tests/scenario_unit/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/tests/scenario_unit/templatevars_complete.ini b/sahara/tests/scenario_unit/templatevars_complete.ini deleted file mode 100644 index dfb54b3834..0000000000 --- a/sahara/tests/scenario_unit/templatevars_complete.ini +++ /dev/null @@ -1,10 +0,0 @@ -[DEFAULT] -OS_USERNAME: demo -OS_PASSWORD: demopwd -OS_TENANT_NAME: demo -OS_AUTH_URL: http://127.0.0.1:5000/v2 -network_type: neutron -network_private_name: private -network_public_name: public -vanilla_two_six_image: centos_sahara_vanilla_hadoop_2_6_latest -ci_flavor_id: '2' diff --git a/sahara/tests/scenario_unit/templatevars_incomplete.ini b/sahara/tests/scenario_unit/templatevars_incomplete.ini deleted file mode 100644 index b562e09bc0..0000000000 --- a/sahara/tests/scenario_unit/templatevars_incomplete.ini +++ /dev/null @@ -1,9 +0,0 @@ -[DEFAULT] -OS_USERNAME: demo -OS_PASSWORD: demopwd -OS_TENANT_NAME: demo -OS_AUTH_URL: http://127.0.0.1:5000/v2 -network_type: neutron -network_private_name: private -network_public_name: public -ci_flavor_id: '2' diff --git a/sahara/tests/scenario_unit/templatevars_nodefault.ini b/sahara/tests/scenario_unit/templatevars_nodefault.ini deleted file mode 100644 index 6a7a57b8b4..0000000000 --- a/sahara/tests/scenario_unit/templatevars_nodefault.ini +++ /dev/null @@ -1,9 +0,0 @@ -OS_USERNAME: demo -OS_PASSWORD: demopwd -OS_TENANT_NAME: demo -OS_AUTH_URL: http://127.0.0.1:5000/v2 -network_type: neutron -network_private_name: private -network_public_name: public -vanilla_two_six_image: centos_sahara_vanilla_hadoop_2_6_latest -ci_flavor_id: '2' diff --git a/sahara/tests/scenario_unit/test_base.py b/sahara/tests/scenario_unit/test_base.py deleted file mode 100644 index 1ad2f87e81..0000000000 --- a/sahara/tests/scenario_unit/test_base.py +++ /dev/null @@ -1,547 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from saharaclient.api import cluster_templates -from saharaclient.api import clusters -from saharaclient.api import data_sources -from saharaclient.api import images -from saharaclient.api import job_binaries -from saharaclient.api import job_binary_internals -from saharaclient.api import job_executions -from saharaclient.api import jobs -from saharaclient.api import node_group_templates -from saharaclient.api import plugins -from tempest_lib import exceptions as exc -import testtools - -from sahara.tests.scenario import base -from sahara.tests.scenario import timeouts - - -class FakeSaharaClient(object): - def __init__(self): - self.clusters = clusters.ClusterManager(None) - self.cluster_templates = cluster_templates.ClusterTemplateManager(None) - self.node_group_templates = (node_group_templates. 
- NodeGroupTemplateManager(None)) - self.plugins = plugins.PluginManager(None) - self.images = images.ImageManager(None) - - self.data_sources = data_sources.DataSourceManager(None) - self.jobs = jobs.JobsManager(None) - self.job_executions = job_executions.JobExecutionsManager(None) - self.job_binaries = job_binaries.JobBinariesManager(None) - self.job_binary_internals = ( - job_binary_internals.JobBinaryInternalsManager(None)) - - -class FakeCluster(object): - def __init__(self, is_transient, provision_progress): - self.is_transient = is_transient - self.provision_progress = provision_progress - - -class FakeResponse(object): - def __init__(self, set_id=None, set_status=None, node_groups=None, - url=None, job_id=None, name=None, job_type=None): - self.id = set_id - self.status = set_status - self.node_groups = node_groups - self.url = url - self.job_id = job_id - self.name = name - self.type = job_type - - -class TestBase(testtools.TestCase): - def setUp(self): - super(TestBase, self).setUp() - with mock.patch( - 'sahara.tests.scenario.base.BaseTestCase.__init__' - ) as mock_init: - mock_init.return_value = None - self.base_scenario = base.BaseTestCase() - self.base_scenario.credentials = {'os_username': 'admin', - 'os_password': 'nova', - 'os_tenant': 'admin', - 'os_auth_url': - 'http://localhost:5000/v2.0', - 'sahara_service_type': - 'data-processing-local', - 'sahara_url': - 'http://sahara_host:8386/v1.1', - 'ssl_cert': '/etc/tests/cert.crt', - 'ssl_verify': True} - self.base_scenario.plugin_opts = {'plugin_name': 'vanilla', - 'hadoop_version': '2.7.1'} - self.base_scenario.network = {'type': 'neutron', - 'private_network': 'changed_private', - 'public_network': 'changed_public', - 'auto_assignment_floating_ip': False} - self.base_scenario.testcase = { - 'node_group_templates': [ - { - 'name': 'master', - 'node_processes': ['namenode', 'oozie', 'resourcemanager'], - 'flavor': '2', - 'is_proxy_gateway': True - }, - { - 'name': 'worker', - 'node_processes': ['datanode', 'nodemanager'], - 'flavor': '2' - }], - 'cluster_template': - { - 'name': 'test_name_ct', - 'node_group_templates': - { - 'master': 1, - 'worker': 3 - } - }, - 'timeout_poll_cluster_status': 300, - 'timeout_delete_resource': 300, - 'timeout_poll_jobs_status': 2, - 'timeout_check_transient': 3, - 'retain_resources': True, - 'image': 'image_name', - 'edp_batching': 1, - "edp_jobs_flow": - { - "test_flow": - [{ - "type": "Pig", - "input_datasource": - { - "type": "swift", - "source": "etc/edp-examples/edp-pig/" - "top-todoers/data/input" - }, - "output_datasource": - { - "type": "hdfs", - "destination": "/user/hadoop/edp-output" - }, - "main_lib": - { - "type": "swift", - "source": "etc/edp-examples/edp-pig/" - "top-todoers/example.pig" - } - }], - }, - } - self.base_scenario.ng_id_map = {'worker': 'set_id', 'master': 'set_id'} - self.base_scenario.ng_name_map = {} - self.base_scenario.key_name = 'test_key' - self.base_scenario.key = 'key_from_yaml' - self.base_scenario.template_path = ('sahara/tests/scenario/templates/' - 'vanilla/2.7.1') - self.job = self.base_scenario.testcase["edp_jobs_flow"].get( - 'test_flow')[0] - self.base_scenario.cluster_id = 'some_id' - self.base_scenario.proxy_ng_name = False - self.base_scenario.proxy = False - self.base_scenario.setUpClass() - timeouts.Defaults.init_defaults(self.base_scenario.testcase) - - @mock.patch('keystoneclient.auth.identity.v3.Password') - @mock.patch('keystoneclient.session.Session') - @mock.patch('saharaclient.client.Client', return_value=None) - 
@mock.patch('novaclient.client.Client', return_value=None) - @mock.patch('neutronclient.neutron.client.Client', return_value=None) - @mock.patch('swiftclient.client.Connection', return_value=None) - def test__init_clients(self, swift, neutron, nova, sahara, m_session, - m_auth): - fake_session = mock.Mock() - fake_auth = mock.Mock() - m_session.return_value = fake_session - m_auth.return_value = fake_auth - - self.base_scenario._init_clients() - - sahara.assert_called_with('1.1', - session=fake_session, - service_type='data-processing-local', - sahara_url='http://sahara_host:8386/v1.1') - swift.assert_called_with(auth_version='2.0', - user='admin', - key='nova', - insecure=False, - cacert='/etc/tests/cert.crt', - tenant_name='admin', - authurl='http://localhost:5000/v2.0') - - nova.assert_called_with('2', session=fake_session) - neutron.assert_called_with('2.0', session=fake_session) - - m_auth.assert_called_with(auth_url='http://localhost:5000/v3', - username='admin', - password='nova', - project_name='admin', - user_domain_name='default', - project_domain_name='default') - m_session.assert_called_with(auth=fake_auth, - cert='/etc/tests/cert.crt', - verify=True) - - @mock.patch('sahara.tests.scenario.clients.NeutronClient.get_network_id', - return_value='mock_net') - @mock.patch('saharaclient.client.Client', - return_value=FakeSaharaClient()) - @mock.patch('saharaclient.api.node_group_templates.' - 'NodeGroupTemplateManager.create', - return_value=FakeResponse(set_id='id_ng')) - def test__create_node_group_template(self, mock_ng, mock_saharaclient, - mock_neutron): - self.base_scenario._init_clients() - self.assertEqual({'worker': 'id_ng', 'master': 'id_ng'}, - self.base_scenario._create_node_group_templates()) - - @mock.patch('sahara.tests.scenario.clients.NeutronClient.get_network_id', - return_value='mock_net') - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - @mock.patch('saharaclient.api.cluster_templates.' 
- 'ClusterTemplateManager.create', - return_value=FakeResponse(set_id='id_ct')) - def test__create_cluster_template(self, mock_ct, mock_saharaclient, - mock_neutron): - self.base_scenario._init_clients() - self.assertEqual('id_ct', - self.base_scenario._create_cluster_template()) - - @mock.patch('sahara.tests.scenario.clients.NovaClient.get_image_id', - return_value='mock_image') - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - @mock.patch('saharaclient.api.clusters.ClusterManager.create', - return_value=FakeResponse(set_id='id_cluster')) - def test__create_cluster(self, mock_cluster_manager, mock_saharaclient, - mock_nova): - self.base_scenario._init_clients() - self.assertEqual('id_cluster', - self.base_scenario._create_cluster('id_ct')) - - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - @mock.patch('sahara.tests.scenario.clients.NeutronClient.get_network_id', - return_value='mock_net') - @mock.patch('saharaclient.api.base.ResourceManager._get', - return_value=FakeResponse( - set_status=base.CLUSTER_STATUS_ACTIVE)) - @mock.patch('sahara.tests.scenario.base.BaseTestCase._check_event_logs') - def test__poll_cluster_status(self, mock_status, mock_neutron, - mock_saharaclient, mock_check_event_logs): - self.base_scenario._init_clients() - self.assertIsNone( - self.base_scenario._poll_cluster_status('id_cluster')) - - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - @mock.patch('saharaclient.api.base.ResourceManager._get') - def test_check_event_log_feature(self, mock_resp, mock_saharaclient): - self.base_scenario._init_clients() - - self.assertIsNone(self.base_scenario._check_event_logs( - FakeCluster(True, []))) - self.assertIsNone(self.base_scenario._check_event_logs( - FakeCluster(False, [{'successful': True}]))) - - with testtools.ExpectedException(exc.TempestException): - self.base_scenario._check_event_logs( - FakeCluster(False, [{'successful': False}])) - - with testtools.ExpectedException(exc.TempestException): - self.base_scenario._check_event_logs( - FakeCluster(False, [{'successful': None}])) - - @mock.patch('saharaclient.api.base.ResourceManager._update', - return_value=FakeResponse(set_id='id_internal_db_data')) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__create_internal_db_data(self, mock_saharaclient, mock_update): - self.base_scenario._init_clients() - self.assertEqual('internal-db://id_internal_db_data', - self.base_scenario._create_internal_db_data( - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml')) - - @mock.patch('swiftclient.client.Connection.put_container', - return_value=None) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__create_swift_data(self, mock_saharaclient, mock_swiftclient): - self.base_scenario._init_clients() - self.assertTrue('swift://sahara-tests-' in - self.base_scenario._create_swift_data()) - - @mock.patch('swiftclient.client.Connection.put_container', - return_value=None) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__get_swift_container(self, mock_saharaclient, - mock_swiftclient): - self.base_scenario._init_clients() - self.assertTrue('sahara-tests-' in - self.base_scenario._get_swift_container()) - - @mock.patch('saharaclient.api.base.ResourceManager._create', - return_value=FakeResponse(set_id='id_for_datasource')) - @mock.patch('swiftclient.client.Connection.put_container', - return_value=None) - 
@mock.patch('swiftclient.client.Connection.put_object', - return_value=None) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__create_datasources(self, mock_saharaclient, - mock_swiftcontainer, mock_swiftobject, - mock_create): - self.base_scenario._init_clients() - self.assertEqual(('id_for_datasource', 'id_for_datasource'), - self.base_scenario._create_datasources( - self.job)) - - @mock.patch('saharaclient.api.base.ResourceManager._create', - return_value=FakeResponse(set_id='id_for_job_binaries')) - @mock.patch('swiftclient.client.Connection.put_object', - return_value=None) - @mock.patch('swiftclient.client.Connection.put_container', - return_value=None) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__create_create_job_binaries(self, mock_saharaclient, - mock_swiftcontainer, - mock_swiftobject, - mock_sahara_create): - self.base_scenario._init_clients() - self.assertEqual((['id_for_job_binaries'], []), - self.base_scenario._create_job_binaries( - self.job)) - - @mock.patch('saharaclient.api.base.ResourceManager._create', - return_value=FakeResponse(set_id='id_for_job_binary')) - @mock.patch('swiftclient.client.Connection.put_object', - return_value=None) - @mock.patch('swiftclient.client.Connection.put_container', - return_value=None) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__create_create_job_binary(self, mock_saharaclient, - mock_swiftcontainer, mock_swiftobject, - mock_sahara_create): - self.base_scenario._init_clients() - self.assertEqual('id_for_job_binary', - self.base_scenario._create_job_binary(self.job.get( - 'input_datasource'))) - - @mock.patch('saharaclient.api.base.ResourceManager._create', - return_value=FakeResponse(set_id='id_for_job')) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test__create_job(self, mock_client, mock_sahara_client): - self.base_scenario._init_clients() - self.assertEqual('id_for_job', - self.base_scenario._create_job( - 'Pig', - ['id_for_job_binaries'], - [])) - - @mock.patch('sahara.tests.scenario.clients.SaharaClient.get_cluster_id', - return_value='cluster_id') - @mock.patch('sahara.tests.scenario.base.BaseTestCase.check_cinder', - return_value=None) - @mock.patch('sahara.tests.scenario.clients.SaharaClient.get_job_status', - return_value='KILLED') - @mock.patch('saharaclient.api.base.ResourceManager._get', - return_value=FakeResponse(set_id='id_for_run_job_get', - job_type='Java', - name='test_job')) - @mock.patch('saharaclient.api.base.ResourceManager._create', - return_value=FakeResponse(set_id='id_for_run_job_create')) - @mock.patch('sahara.tests.scenario.base.BaseTestCase.' - '_poll_cluster_status', - return_value=None) - @mock.patch('sahara.tests.scenario.base.BaseTestCase.' - '_create_node_group_templates', - return_value='id_node_group_template') - @mock.patch('sahara.tests.scenario.base.BaseTestCase.' 
- '_create_cluster_template', - return_value='id_cluster_template') - @mock.patch('sahara.tests.scenario.base.BaseTestCase._create_cluster', - return_value='id_cluster') - @mock.patch('sahara.tests.scenario.base.BaseTestCase._create_job', - return_value='id_for_job') - @mock.patch('sahara.tests.scenario.base.BaseTestCase._create_job_binaries', - return_value=(['id_for_job_binaries'], [])) - @mock.patch('sahara.tests.scenario.base.BaseTestCase._create_datasources', - return_value=('id_for_datasource', 'id_for_datasource')) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test_check_run_jobs(self, mock_saharaclient, mock_datasources, - mock_job_binaries, mock_job, - mock_node_group_template, mock_cluster_template, - mock_cluster, mock_cluster_status, mock_create, - mock_get, mock_client, mock_cinder, - mock_get_cluster_id): - self.base_scenario._init_clients() - self.base_scenario.create_cluster() - self.base_scenario.testcase["edp_jobs_flow"] = [ - { - "type": "Pig", - "input_datasource": { - "type": "swift", - "source": "etc/edp-examples/edp-pig/top-todoers/" - "data/input" - }, - "output_datasource": { - "type": "hdfs", - "destination": "/user/hadoop/edp-output" - }, - "main_lib": { - "type": "swift", - "source": "etc/edp-examples/edp-pig/top-todoers/" - "example.pig" - } - } - ] - with mock.patch('time.sleep'): - self.assertIsNone(self.base_scenario.check_run_jobs()) - self.assertIn("Job with id=id_for_run_job_create, name=test_job, " - "type=Java has status KILLED", - self.base_scenario._results[-1]['traceback'][-1]) - - @mock.patch('sahara.tests.scenario.base.BaseTestCase.' - '_poll_cluster_status', - return_value=None) - @mock.patch('saharaclient.api.base.ResourceManager._get', - return_value=FakeResponse(set_id='id_scale_get')) - @mock.patch('saharaclient.api.base.ResourceManager._update', - return_value=FakeResponse(set_id='id_scale_update')) - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - def test_check_scale(self, mock_saharaclient, mock_update, mock_get, - mock_poll): - self.base_scenario._init_clients() - self.base_scenario.ng_id_map = {'vanilla-worker': 'set_id-w', - 'vanilla-master': 'set_id-m'} - self.base_scenario.ng_name_map = {'vanilla-worker': 'worker-123', - 'vanilla-master': 'master-321'} - self.base_scenario.cluster_id = 'cluster_id' - self.assertIsNone(self.base_scenario.check_scale()) - - @mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient()) - @mock.patch('sahara.tests.scenario.clients.NeutronClient.get_network_id', - return_value='mock_net') - @mock.patch('saharaclient.api.base.ResourceManager._get', - return_value=FakeResponse(set_status='Error')) - def test_errormsg(self, mock_status, mock_neutron, mock_saharaclient): - self.base_scenario._init_clients() - with testtools.ExpectedException(exc.TempestException): - self.base_scenario._poll_cluster_status('id_cluster') - - @mock.patch('sahara.tests.scenario.clients.SaharaClient.__init__', - return_value=None) - def test_get_nodes_with_process(self, mock_init): - self.base_scenario._init_clients() - with mock.patch( - 'sahara.tests.scenario.clients.SaharaClient.get_cluster', - return_value=FakeResponse(node_groups=[ - { - 'node_processes': 'test', - 'instances': ['test_instance'] - } - ])): - self.assertEqual( - ['test_instance'], - self.base_scenario._get_nodes_with_process('test') - ) - - with mock.patch( - 'sahara.tests.scenario.clients.SaharaClient.get_cluster', - return_value=FakeResponse(node_groups=[ - { - 'node_processes': 
'test', - 'instances': [] - } - ])): - self.assertEqual( - [], self.base_scenario._get_nodes_with_process('test')) - - @mock.patch('keystoneclient.session.Session') - def test_get_node_list_with_volumes(self, mock_keystone): - self.base_scenario._init_clients() - with mock.patch( - 'sahara.tests.scenario.clients.SaharaClient.get_cluster', - return_value=FakeResponse(node_groups=[ - { - 'node_processes': 'test', - 'volumes_per_node': 2, - 'volume_mount_prefix': 2, - 'instances': [ - { - 'management_ip': 'test_ip' - } - ] - } - ])): - self.assertEqual( - [{ - 'node_ip': 'test_ip', - 'volume_count': 2, - 'volume_mount_prefix': 2 - }], self.base_scenario._get_node_list_with_volumes()) - - @mock.patch('sahara.tests.scenario.clients.SaharaClient.__init__', - return_value=None) - @mock.patch('sahara.tests.scenario.clients.SaharaClient.get_datasource') - def test_put_io_data_to_configs(self, get_datasources, sahara_mock): - self.base_scenario._init_clients() - get_datasources.side_effect = [ - mock.Mock(id='1', url="swift://cont/input"), - mock.Mock(id='2', url="hdfs://cont/output") - ] - configs = {'args': ['2', "{input_datasource}", - "{output_datasource}"]} - self.assertEqual({'args': ['2', 'swift://cont/input', - 'hdfs://cont/output']}, - self.base_scenario._put_io_data_to_configs( - configs, '1', '2')) - - @mock.patch('sahara.tests.scenario.base.BaseTestCase.addCleanup') - @mock.patch('novaclient.v2.flavors.FlavorManager.create', - return_value=FakeResponse(set_id='flavor_id')) - @mock.patch('keystoneclient.session.Session') - def test_get_flavor_id(self, mock_keystone, mock_create_flavor, mock_base): - self.base_scenario._init_clients() - self.assertEqual('flavor_id', - self.base_scenario._get_flavor_id({ - 'name': 'test-flavor', - "id": 'created_flavor_id', - "vcpus": 1, - "ram": 512, - "root_disk": 1, - "ephemeral_disk": 1, - "swap_disk": 1 - })) - - @mock.patch('sahara.tests.scenario.base.BaseTestCase._run_command_on_node') - @mock.patch('keystoneclient.session.Session') - def test_create_hdfs_data(self, mock_session, mock_ssh): - self.base_scenario._init_clients() - output_path = '/user/test/data/output' - self.assertEqual(output_path, - self.base_scenario._create_hdfs_data(output_path, - None)) - input_path = 'etc/edp-examples/edp-pig/trim-spaces/data/input' - with mock.patch( - 'sahara.tests.scenario.clients.SaharaClient.get_cluster', - return_value=FakeResponse(node_groups=[ - { - 'instances': [ - { - 'management_ip': 'test_ip' - }] - }])): - self.assertTrue('/user/test/data-' in ( - self.base_scenario._create_hdfs_data(input_path, 'test'))) diff --git a/sahara/tests/scenario_unit/test_runner.py b/sahara/tests/scenario_unit/test_runner.py deleted file mode 100644 index 3bf6214858..0000000000 --- a/sahara/tests/scenario_unit/test_runner.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys - -import mock -import testtools - -from sahara.tests.scenario import runner - - -class RunnerUnitTest(testtools.TestCase): - - def _isDictContainSubset(self, sub_dictionary, dictionary): - for key in sub_dictionary: - if sub_dictionary[key] != dictionary[key]: - return False - return True - - def test_set_defaults(self): - config_without_cred_net = { - "clusters": [ - { - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "image": "sahara-vanilla-2.7.1-ubuntu-14.04" - }], - } - - expected_default_credential = { - "credentials": { - "os_username": "admin", - "os_auth_url": "http://localhost:5000/v2.0", - "sahara_url": None, - "sahara_service_type": "data-processing", - "os_password": "nova", - "os_tenant": "admin", - "ssl_cert": None, - "ssl_verify": True - } - } - - expected_default_network = { - "network": { - "type": "neutron", - "private_network": "private", - "public_network": "public", - "auto_assignment_floating_ip": False - }, - } - - expected_default_cluster = { - "clusters": [ - { - "image": "sahara-vanilla-2.7.1-ubuntu-14.04", - "edp_jobs_flow": [], - "class_name": "vanilla2_7_1", - "plugin_name": "vanilla", - "scenario": ['run_jobs', 'scale', 'run_jobs'], - "plugin_version": "2.7.1", - "retain_resources": False, - }], - } - - runner.set_defaults(config_without_cred_net) - - self.assertTrue(self._isDictContainSubset( - expected_default_credential, config_without_cred_net)) - self.assertTrue(self._isDictContainSubset( - expected_default_network, config_without_cred_net)) - self.assertTrue(self._isDictContainSubset( - expected_default_cluster, config_without_cred_net)) - - config = { - "credentials": { - "os_username": "changed_admin", - "os_auth_url": "http://127.0.0.1:5000/v2.0", - "sahara_url": "http://127.0.0.1", - "os_password": "changed_nova", - "os_tenant": "changed_admin", - "ssl_cert": "/etc/tests/cert.crt" - }, - "network": { - "type": "neutron", - "private_network": "changed_private", - "public_network": "changed_public", - "auto_assignment_floating_ip": True, - }, - "clusters": [ - { - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "image": "sahara-vanilla-2.7.1-ubuntu-14.04", - "edp_jobs_flow": "test_flow", - "retain_resources": True - }], - "edp_jobs_flow": { - "test_flow": [ - { - "type": "Pig", - "input_datasource": { - "type": "swift", - "source": "etc/edp-examples/edp-pig/top-todoers/" - "data/input" - }, - "output_datasource": { - "type": "hdfs", - "destination": "/user/hadoop/edp-output" - }, - "main_lib": { - "type": "swift", - "source": "etc/edp-examples/edp-pig/top-todoers/" - "example.pig" - } - }, - { - "type": "Java", - "additional_libs": [ - { - "type": "database", - "source": "sahara/tests/integration/tests/" - "resources/" - }], - "configs": "edp.java.main_class: org.apache.hadoop." - "examples.QuasiMonteCarlo", - "args": [10, 10] - }, - ], - "test_flow2": [ - { - "type": "Java", - "additional_libs": [ - { - "type": "database", - "source": "sahara/tests/integration/tests/" - "resources/" - }], - "configs": "edp.java.main_class: org.apache.hadoop." 
- "examples.QuasiMonteCarlo", - "args": [20, 20] - }, - ], - }, - } - - expected_credential = { - "credentials": { - "os_username": "changed_admin", - "os_auth_url": "http://127.0.0.1:5000/v2.0", - "sahara_url": "http://127.0.0.1", - "os_password": "changed_nova", - "os_tenant": "changed_admin", - "sahara_service_type": "data-processing", - "ssl_cert": "/etc/tests/cert.crt", - "ssl_verify": True - }, - } - - expected_network = { - "network": { - "type": "neutron", - "private_network": "changed_private", - "public_network": "changed_public", - "auto_assignment_floating_ip": True, - }, - } - - expected_cluster = { - "clusters": [ - { - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "image": "sahara-vanilla-2.7.1-ubuntu-14.04", - "retain_resources": True, - 'edp_jobs_flow': [ - { - 'main_lib': { - 'source': 'etc/edp-examples/edp-pig/' - 'top-todoers/example.pig', - 'type': 'swift' - }, - 'type': 'Pig', - 'input_datasource': { - 'source': 'etc/edp-examples/edp-pig/' - 'top-todoers/data/input', - 'type': 'swift' - }, - 'output_datasource': { - 'type': 'hdfs', - 'destination': '/user/hadoop/edp-output' - } - }, - { - 'args': [10, 10], - 'configs': 'edp.java.main_class: org.apache.' - 'hadoop.examples.QuasiMonteCarlo', - 'type': 'Java', - 'additional_libs': [ - { - 'source': 'sahara/tests/integration/' - 'tests/resources/', - 'type': 'database' - }] - } - ], - "scenario": ['run_jobs', 'scale', 'run_jobs'], - "class_name": "vanilla2_7_1" - }], - } - - runner.set_defaults(config) - - self.assertTrue(self._isDictContainSubset( - expected_credential, config)) - self.assertTrue(self._isDictContainSubset( - expected_network, config)) - self.assertTrue(self._isDictContainSubset( - expected_cluster, config)) - - @mock.patch('sys.exit', return_value=None) - @mock.patch('subprocess.call', return_value=None) - def test_runner_main(self, mock_sub, mock_sys): - sys.argv = ['sahara/tests/scenario/runner.py', - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml'] - runner.main() - - @mock.patch('sys.exit', return_value=None) - @mock.patch('subprocess.call', return_value=None) - def test_runner_template_missing_varfile(self, mock_sub, mock_sys): - sys.argv = ['sahara/tests/scenario/runner.py', - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako'] - self.assertRaises(NameError, runner.main) - - @mock.patch('sys.exit', return_value=None) - @mock.patch('subprocess.call', return_value=None) - def test_runner_template_wrong_varfile(self, mock_sub, mock_sys): - sys.argv = ['sahara/tests/scenario/runner.py', - '-V', - 'sahara/tests/scenario_unit/templatevars_nodefault.ini', - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako'] - self.assertRaises(NameError, runner.main) - - @mock.patch('sys.exit', return_value=None) - @mock.patch('subprocess.call', return_value=None) - def test_runner_template_incomplete_varfile(self, mock_sub, mock_sys): - sys.argv = ['sahara/tests/scenario/runner.py', - '-V', - 'sahara/tests/scenario_unit/templatevars_incomplete.ini', - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako'] - self.assertRaises(NameError, runner.main) - - @mock.patch('sys.exit', return_value=None) - @mock.patch('subprocess.call', return_value=None) - def test_runner_template_working(self, mock_sub, mock_sys): - sys.argv = ['sahara/tests/scenario/runner.py', - '-V', - 'sahara/tests/scenario_unit/templatevars_complete.ini', - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako'] - runner.main() - - @mock.patch('sys.exit', return_value=None) - def test_runner_validate(self, mock_sys): - sys.argv = 
['sahara/tests/scenario/runner.py', - '--validate', - '-V', - 'sahara/tests/scenario_unit/templatevars_complete.ini', - 'sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako'] - runner.main() diff --git a/sahara/tests/scenario_unit/test_validation.py b/sahara/tests/scenario_unit/test_validation.py deleted file mode 100644 index 977af92ccf..0000000000 --- a/sahara/tests/scenario_unit/test_validation.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import testtools -import yaml - -from sahara.tests.scenario import validation - - -class TestValidation(testtools.TestCase): - def test_validation(self): - with open("sahara/tests/scenario_unit/vanilla2_7_1.yaml", - "r") as yaml_file: - config = yaml.load(yaml_file) - self.assertIsNone(validation.validate(config)) diff --git a/sahara/tests/scenario_unit/vanilla2_7_1.yaml b/sahara/tests/scenario_unit/vanilla2_7_1.yaml deleted file mode 100644 index 72a59bc6eb..0000000000 --- a/sahara/tests/scenario_unit/vanilla2_7_1.yaml +++ /dev/null @@ -1,108 +0,0 @@ -concurrency: 1 - -credentials: - os_username: admin - os_password: nova - os_tenant: admin - os_auth_url: http://127.0.0.1:5000/v2.0 - ssl_cert: /etc/test/cert.crt - ssl_verify: True - -network: - private_network: private - public_network: public - -clusters: - - plugin_name: vanilla - plugin_version: 2.7.1 - image: sahara-vanilla-2.7.1-ubuntu-14.04 - node_group_templates: - - name: master - node_processes: - - namenode - - resourcemanager - - hiveserver - - oozie - - historyserver - - secondarynamenode - flavor: '2' - - name: worker - node_processes: - - datanode - - nodemanager - flavor: - name: test-flavor - id: test-id - vcpus: 1 - ram: 512 - root_disk: 1 - cluster_template: - name: vanilla - node_group_templates: - master: 1 - worker: 3 - anti_affinity: - - namenode - - datanode - scenario: - - run_jobs - - scale - - run_jobs - edp_jobs_flow: test_flow - edp_batching: 1 - retain_resources: true - -edp_jobs_flow: - test_flow: - - type: Pig - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - main_lib: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/example.pig - - type: Java - additional_libs: - - type: database - source: etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.7.1.jar - configs: - edp.java.main_class: org.apache.hadoop.examples.QuasiMonteCarlo - args: - - 10 - - 10 - - type: MapReduce - configs: - mapred.mapper.class: org.apache.oozie.example.SampleMapper - mapred.reducer.class: org.apache.oozie.example.SampleReducer - additional_libs: - - type: database - source: etc/edp-examples/edp-java/edp-java.jar - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - - type: MapReduce.Streaming - configs: - edp.streaming.mapper: /bin/cat - edp.streaming.reducer: /usr/bin/wc - 
input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - - type: Hive - input_datasource: - type: swift - source: etc/edp-examples/edp-hive/input.csv - output_datasource: - type: hdfs - destination: /user/hadoop/edp-hive/ - main_lib: - type: swift - source: etc/edp-examples/edp-hive/script.q diff --git a/sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako b/sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako deleted file mode 100644 index 3aebd76147..0000000000 --- a/sahara/tests/scenario_unit/vanilla2_7_1.yaml.mako +++ /dev/null @@ -1,122 +0,0 @@ -concurrency: 1 - -credentials: - os_username: ${OS_USERNAME} - os_password: ${OS_PASSWORD} - os_tenant: ${OS_TENANT_NAME} - os_auth_url: ${OS_AUTH_URL} - ssl_cert: /etc/tests/cert.crt - ssl_verify: True - -network: - type: ${network_type} - private_network: ${network_private_name} - public_network: ${network_public_name} - -clusters: - - plugin_name: vanilla - plugin_version: 2.7.1 - image: ${vanilla_two_six_image} - node_group_templates: - - name: master - node_processes: - - namenode - - resourcemanager - - hiveserver - - oozie - - historyserver - - secondarynamenode - flavor: ${ci_flavor_id} - - name: worker - node_processes: - - datanode - - nodemanager - flavor: - name: test-flavor - id: test-id - vcpus: 1 - ram: 512 - root_disk: 1 - cluster_template: - name: vanilla - node_group_templates: - master: 1 - worker: 3 - scenario: - - run_jobs - - scale - - run_jobs - edp_jobs_flow: test_flow - edp_batching: 1 - retain_resources: true - -edp_jobs_flow: - test_flow: - - type: Pig - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - main_lib: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/example.pig - - type: Java - additional_libs: - - type: database - source: etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.7.1.jar - configs: - edp.java.main_class: org.apache.hadoop.examples.QuasiMonteCarlo - args: - - 10 - - 10 - - type: MapReduce - configs: - mapred.mapper.class: org.apache.oozie.example.SampleMapper - mapred.reducer.class: org.apache.oozie.example.SampleReducer - additional_libs: - - type: database - source: etc/edp-examples/edp-java/edp-java.jar - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - - type: MapReduce.Streaming - configs: - edp.streaming.mapper: /bin/cat - edp.streaming.reducer: /usr/bin/wc - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - - type: Hive - input_datasource: - type: swift - source: etc/edp-examples/edp-hive/input.csv - output_datasource: - type: hdfs - destination: /user/hadoop/edp-hive/ - main_lib: - type: swift - source: etc/edp-examples/edp-hive/script.q - - type: MapReduce - configs: - mapred.mapper.class: org.apache.oozie.example.SampleMapper - mapred.reducer.class: org.apache.oozie.example.SampleReducer - additional_libs: - - type: database - source: etc/edp-examples/edp-java/edp-java.jar - input_datasource: - type: swift - source: etc/edp-examples/edp-pig/top-todoers/data/input - output_datasource: - type: hdfs - destination: /user/hadoop/edp-output - args: - - {input_datasource} - - {output_datasource} diff --git 
a/tools/gate/scenario/README.rst b/tools/gate/scenario/README.rst deleted file mode 100644 index 56746a5abb..0000000000 --- a/tools/gate/scenario/README.rst +++ /dev/null @@ -1,3 +0,0 @@ -======================================== -Scripts and conf for scenario tests gate -======================================== diff --git a/tools/gate/scenario/commons b/tools/gate/scenario/commons deleted file mode 100644 index 6d726614e4..0000000000 --- a/tools/gate/scenario/commons +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -export NETWORK=${1:-neutron} -export ENGINE=${2:-heat} - -# Normalize network name -if [[ $NETWORK == nova* ]]; then - export NETWORK="nova-network" -fi - -source settings - -export DEST=${DEST:-$BASE/new} -export DEVSTACK_DIR=${DEVSTACK_DIR:-$DEST/devstack} -export SAHARA_DIR=${SAHARA_DIR:-$DEST/sahara} -export SAHARA_TESTS_DIR=${SAHARA_TESTS_DIR:-$DEST/sahara-scenario} - - -export LOCALRC_PATH=$DEVSTACK_DIR/localrc -export LOCALCONF_PATH=$DEVSTACK_DIR/local.conf - -function sahara_register_fake_plugin_image { - local props="--public" - props+=" --property _sahara_tag_0.1=True" - props+=" --property _sahara_tag_fake=True" - props+=" --property _sahara_username=$SAHARA_FAKE_PLUGIN_IMAGE_USERNAME" - - openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT --os-image-api-version 1 image set \ - $SAHARA_FAKE_PLUGIN_IMAGE_NAME $props -} - -function sahara_register_flavor { - nova flavor-create $SAHARA_FLAVOR_NAME $SAHARA_FLAVOR_ID $SAHARA_FLAVOR_RAM \ - $SAHARA_FLAVOR_DISK $SAHARA_FLAVOR_VCPUS -} diff --git a/tools/gate/scenario/post_test_hook.sh b/tools/gate/scenario/post_test_hook.sh deleted file mode 100755 index c1b43c3a99..0000000000 --- a/tools/gate/scenario/post_test_hook.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script is executed inside post_test_hook function in devstack gate. 
- -set -ex - -source commons $@ - -set +x -source $DEVSTACK_DIR/stackrc -source $DEVSTACK_DIR/openrc admin admin -set -x - -# Make public and register in Sahara as admin -sahara_register_fake_plugin_image - -# Register sahara specific flavor for gate -sahara_register_flavor - -# Use demo user for running scenario tests -set +x -source $DEVSTACK_DIR/openrc demo demo -set -x - -sudo -E chown -R jenkins:stack $SAHARA_TESTS_DIR -cd $SAHARA_TESTS_DIR - -echo "Generating scenario tests config file" -sudo -E -u jenkins tee template_vars.ini <> $LOCALRC_PATH - -# Here we can set some configurations for local.conf -# for example, to pass some config options directly to sahara.conf file -# echo -e '[[post-config|$SAHARA_CONF]]\n[DEFAULT]\n' >> $LOCALCONF_PATH -# echo -e 'infrastructure_engine=true\n' >> $LOCALCONF_PATH - -if [ "$NETWORK" == "nova-network" ]; then - echo -e '[[post-config|$SAHARA_CONF_FILE]]\n[DEFAULT]\n' >> $LOCALCONF_PATH - echo -e 'heat_enable_wait_condition=false\n' >> $LOCALCONF_PATH -fi diff --git a/tools/gate/scenario/settings b/tools/gate/scenario/settings deleted file mode 100644 index 58f9944d57..0000000000 --- a/tools/gate/scenario/settings +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#TODO(slukjanov): replace with special image for fake plugin (cloud ubuntu?) -export SAHARA_FAKE_PLUGIN_IMAGE=http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2 -export SAHARA_FAKE_PLUGIN_IMAGE_NAME=$(basename $SAHARA_FAKE_PLUGIN_IMAGE .qcow2) -export SAHARA_FAKE_PLUGIN_IMAGE_USERNAME=fedora -export SAHARA_FLAVOR_NAME=sahara-flavor -export SAHARA_FLAVOR_ID=20 -export SAHARA_FLAVOR_RAM=512 -export SAHARA_FLAVOR_DISK=4 -export SAHARA_FLAVOR_VCPUS=1 diff --git a/tox.ini b/tox.ini index cb07fc0d53..4c499fa63f 100644 --- a/tox.ini +++ b/tox.ini @@ -16,16 +16,6 @@ commands = bash tools/pretty_tox.sh '{posargs}' whitelist_externals = bash passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY -[testenv:scenario] -setenv = VIRTUALENV={envdir} -commands = python {toxinidir}/sahara/tests/scenario/runner.py {posargs} - -[testenv:py27-scenario-unit] -setenv = - VIRTUAL_ENV={envdir} - DISCOVER_DIRECTORY=sahara/tests/scenario_unit -commands = bash tools/pretty_tox.sh '{posargs}' - [testenv:cover] commands = {toxinidir}/tools/cover.sh {posargs}