Updating workload logic to handle spark worker resource allocation

commit fc0ae79f26 (parent a9aa13feef)
Author: GregBestland
Date: 2016-03-23 17:38:20 -05:00


@@ -261,6 +261,9 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[]):
         CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
         if CASSANDRA_VERSION >= '3.0':
             CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
+        if 'spark' in workloads:
+            config_options = {"initial_spark_worker_resources": 0.1}
+            CCM_CLUSTER.set_dse_configuration_options(config_options)
         common.switch_cluster(path, cluster_name)
         CCM_CLUSTER.populate(nodes, ipformat=ipformat)
     try:
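
For context, a minimal sketch of how this change might be exercised from a test. The cluster name and node count below are hypothetical; only use_cluster() and its workloads parameter come from the hunk above:

# Hypothetical usage: start a single-node DSE cluster with the Spark workload.
# With 'spark' in workloads, use_cluster() now sets
# initial_spark_worker_resources to 0.1, so the Spark worker claims only a
# small fraction of host resources and leaves headroom for the test harness.
use_cluster('spark_test_cluster', [1], workloads=['spark'])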