Reformat job flows

The scenario framework now supports several EDP job flows,
so it is better to reference explicit jobs in the plugin
flow definitions.

Change-Id: If0f6a182fbf34ed7b2283f2d8bbf61b32064d4bf
Vitaly Gridnev, 2015-10-07 15:34:47 +03:00 (committed by Michael McCune)
parent f8e6907299
commit 08b9396676
11 changed files with 47 additions and 88 deletions
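
In practice, each cluster template stops referencing a single named flow
and lists the jobs to run explicitly. A minimal before/after sketch of the
edp_jobs_flow key, as it appears in the diffs below:

    # before
    edp_jobs_flow: hadoop_2

    # after
    edp_jobs_flow:
      - pig_job
      - mapreduce_job
      - mapreduce_streaming_job
      - java_job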

@@ -42,4 +42,8 @@ clusters:
       name: ${cluster_name}
     scenario:
       - run_jobs
-    edp_jobs_flow: hadoop_2
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job

@@ -68,4 +68,8 @@ clusters:
     scenario:
       - run_jobs
       - sentry
-    edp_jobs_flow: hadoop_2
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job

@@ -73,4 +73,9 @@ clusters:
     scenario:
       - run_jobs
       - sentry
-    edp_jobs_flow: cdh_flow
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job
+      - spark_wordcount

@@ -1,9 +1,8 @@
 edp_jobs_flow:
-  hadoop_2:
+  pig_job:
     - type: Pig
       input_datasource:
-        type: hdfs
-        hdfs_username: hadoop
+        type: swift
         source: etc/edp-examples/edp-pig/trim-spaces/data/input
       output_datasource:
         type: hdfs
@@ -14,6 +13,7 @@ edp_jobs_flow:
       additional_libs:
         - type: swift
           source: etc/edp-examples/edp-pig/trim-spaces/udf.jar
+  mapreduce_job:
     - type: MapReduce
       input_datasource:
         type: swift
@@ -27,6 +27,7 @@ edp_jobs_flow:
       configs:
         mapred.mapper.class: org.apache.oozie.example.SampleMapper
         mapred.reducer.class: org.apache.oozie.example.SampleReducer
+  mapreduce_streaming_job:
     - type: MapReduce.Streaming
       input_datasource:
         type: swift
@@ -37,6 +38,7 @@ edp_jobs_flow:
       configs:
         edp.streaming.mapper: /bin/cat
         edp.streaming.reducer: /usr/bin/wc
+  java_job:
     - type: Java
       additional_libs:
         - type: database
@@ -46,7 +48,7 @@ edp_jobs_flow:
       args:
         - 10
         - 10
-  spark_edp:
+  spark_pi:
     - type: Spark
       main_lib:
         type: database
@@ -55,6 +57,7 @@ edp_jobs_flow:
         edp.java.main_class: org.apache.spark.examples.SparkPi
       args:
         - 4
+  spark_wordcount:
     - type: Spark
       input_datasource:
         type: swift
@@ -69,20 +72,6 @@ edp_jobs_flow:
         fs.swift.service.sahara.password: ${OS_PASSWORD}
       args:
         - '{input_datasource}'
-  transient:
-    - type: Pig
-      input_datasource:
-        type: swift
-        source: etc/edp-examples/edp-pig/trim-spaces/data/input
-      output_datasource:
-        type: hdfs
-        destination: /user/hadoop/edp-output
-      main_lib:
-        type: swift
-        source: etc/edp-examples/edp-pig/trim-spaces/example.pig
-      additional_libs:
-        - type: swift
-          source: etc/edp-examples/edp-pig/trim-spaces/udf.jar
   mapr:
     - type: Pig
       input_datasource:
@@ -129,63 +118,3 @@ edp_jobs_flow:
       args:
         - 10
         - 10
-  cdh_flow:
-    - type: Pig
-      input_datasource:
-        type: swift
-        source: etc/edp-examples/edp-pig/trim-spaces/data/input
-      output_datasource:
-        type: hdfs
-        destination: /user/hadoop/edp-output
-      main_lib:
-        type: swift
-        source: etc/edp-examples/edp-pig/trim-spaces/example.pig
-      additional_libs:
-        - type: swift
-          source: etc/edp-examples/edp-pig/trim-spaces/udf.jar
-    - type: MapReduce
-      input_datasource:
-        type: swift
-        source: etc/edp-examples/edp-pig/trim-spaces/data/input
-      output_datasource:
-        type: hdfs
-        destination: /user/hadoop/edp-output
-      additional_libs:
-        - type: database
-          source: etc/edp-examples/edp-mapreduce/edp-mapreduce.jar
-      configs:
-        mapred.mapper.class: org.apache.oozie.example.SampleMapper
-        mapred.reducer.class: org.apache.oozie.example.SampleReducer
-    - type: MapReduce.Streaming
-      input_datasource:
-        type: swift
-        source: etc/edp-examples/edp-pig/trim-spaces/data/input
-      output_datasource:
-        type: hdfs
-        destination: /user/hadoop/edp-output
-      configs:
-        edp.streaming.mapper: /bin/cat
-        edp.streaming.reducer: /usr/bin/wc
-    - type: Java
-      additional_libs:
-        - type: database
-          source: etc/edp-examples/hadoop2/edp-java/hadoop-mapreduce-examples-2.6.0.jar
-      configs:
-        edp.java.main_class: org.apache.hadoop.examples.QuasiMonteCarlo
-      args:
-        - 10
-        - 10
-    - type: Spark
-      input_datasource:
-        type: swift
-        source: etc/edp-examples/edp-spark/sample_input.txt
-      main_lib:
-        type: database
-        source: etc/edp-examples/edp-spark/spark-wordcount.jar
-      configs:
-        edp.java.main_class: sahara.edp.spark.SparkWordCount
-        edp.spark.adapt_for_swift: true
-        fs.swift.service.sahara.username: ${OS_USERNAME}
-        fs.swift.service.sahara.password: ${OS_PASSWORD}
-      args:
-        - '{input_datasource}'

@@ -29,4 +29,4 @@ clusters:
       - operation: add
         node_group: worker
         size: 1
-    edp_jobs_flow: transient
+    edp_jobs_flow: pig_job

@@ -45,4 +45,8 @@ clusters:
       - operation: add
         node_group: worker
         size: 1
-    edp_jobs_flow: hadoop_2
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job

@@ -29,4 +29,6 @@ clusters:
       - operation: add
         node_group: worker
         size: 1
-    edp_jobs_flow: spark_edp
+    edp_jobs_flow:
+      - spark_pi
+      - spark_wordcount

@@ -30,4 +30,6 @@ clusters:
      - operation: add
         node_group: worker
         size: 1
-    edp_jobs_flow: spark_edp
+    edp_jobs_flow:
+      - spark_pi
+      - spark_wordcount

@@ -48,4 +48,5 @@ clusters:
     scenario:
       - run_jobs
       - transient
-    edp_jobs_flow: transient
+    edp_jobs_flow: pig_job

@@ -83,4 +83,8 @@ clusters:
       - operation: add
         node_group: worker-nm
         size: 1
-    edp_jobs_flow: hadoop_2
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job

@@ -69,4 +69,8 @@ clusters:
       - operation: add
         node_group: worker-nm
         size: 2
-    edp_jobs_flow: hadoop_2
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job