deb-sahara/etc/savanna/savanna.conf.sample-basic

[DEFAULT]
# Hostname or IP address that will be used to listen on
# (string value)
#host=
# Port that will be used to listen on (integer value)
#port=8386
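# Example (illustrative): listen on all interfaces on the default
# port; adjust for your deployment.
# host=0.0.0.0
# port=8386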
# Address and credentials that will be used to check auth tokens
#os_auth_host=127.0.0.1
#os_auth_port=5000
#os_admin_username=admin
#os_admin_password=nova
#os_admin_tenant_name=admin
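# Example (illustrative values only; substitute the address of your
# Keystone service and real admin credentials):
# os_auth_host=192.168.0.5
# os_auth_port=5000
# os_admin_username=savanna
# os_admin_password=secret
# os_admin_tenant_name=services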
# If set to True, Savanna will use floating IPs to communicate
# with instances. To make sure that all instances have
# floating IPs assigned in Nova Network set
# "auto_assign_floating_ip=True" in nova.conf.If Neutron is
# used for networking, make sure that all Node Groups have
# "floating_ip_pool" parameter defined. (boolean value)
#use_floating_ips=true
# Use Neutron or Nova Network (boolean value)
#use_neutron=false
# Use network namespaces for communication (only valid in
# conjunction with use_neutron=True) (boolean value)
#use_namespaces=false
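# Example (illustrative): a Neutron-based deployment that reaches
# instances over floating IPs. As noted above, each Node Group must
# also have a "floating_ip_pool" defined.
# use_floating_ips=true
# use_neutron=true
# use_namespaces=true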
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120
# Postfix for storing jobs in hdfs. Will be added to
# /user/hadoop/ (string value)
#job_workflow_postfix=
# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=false
# Enable periodic tasks (boolean value)
#periodic_enable=true
# Enables data locality for the Hadoop cluster.
# Also enables data locality for the Swift storage used by Hadoop.
# If enabled, the 'compute_topology_file' and 'swift_topology_file'
# configuration parameters should point to the OpenStack and Swift
# topology files respectively. (boolean value)
#enable_data_locality=false
# File with the Nova compute topology. It should
# contain a mapping between Nova compute hosts and racks.
# File format:
# compute1 /rack1
# compute2 /rack2
# compute3 /rack2
# (string value)
#compute_topology_file=etc/savanna/compute.topology
# File with the Swift topology. It should contain a mapping
# between Swift nodes and racks. File format:
# node1 /rack1
# node2 /rack2
# node3 /rack2
# (string value)
#swift_topology_file=etc/savanna/swift.topology
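# Example (illustrative): enable data locality using the default
# topology file locations shown above; both files must exist and
# list your actual hosts.
# enable_data_locality=true
# compute_topology_file=etc/savanna/compute.topology
# swift_topology_file=etc/savanna/swift.topology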
# Log request/response exchange details: environ, headers and
# bodies (boolean value)
#log_exchange=false
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
#log_dir=<None>
# Use syslog for logging. (boolean value)
#use_syslog=false
# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
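# Example (illustrative): log INFO-level output to a file instead of
# stdout. The directory must exist and be writable by the service.
# verbose=true
# log_file=savanna-api.log
# log_dir=/var/log/savanna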
# List of plugins to be loaded. Savanna preserves the order of
# the list when returning it. (list value)
#plugins=vanilla,hdp,idh

[database]
#connection=sqlite:////savanna/openstack/common/db/$sqlite_db
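# Example (illustrative): point Savanna at a MySQL database instead
# of the default SQLite file; the database and user must be created
# beforehand.
# connection=mysql://savanna:SAVANNA_DBPASS@127.0.0.1/savanna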