Merge "Completely remove etc/savanna dir"
@@ -1,431 +0,0 @@
[DEFAULT]

#
# Options defined in sahara.config
#

# Hostname or IP address that will be used to listen on.
# (string value)
#host=

# Port that will be used to listen on. (integer value)
#port=8386
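
# For illustration only (example values, not additional defaults):
# to make the API listen on all interfaces on the default port, one
# could uncomment and set:
# host=0.0.0.0
# port=8386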

# Log request/response exchange details: environ, headers and
# bodies. (boolean value)
#log_exchange=false

# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120

# If set to True, Savanna will use floating IPs to communicate
# with instances. To make sure that all instances have
# floating IPs assigned in Nova Network set
# "auto_assign_floating_ip=True" in nova.conf. If Neutron is
# used for networking, make sure that all Node Groups have
# "floating_ip_pool" parameter defined. (boolean value)
#use_floating_ips=true

# The suffix of the node's FQDN. In nova-network that is the
# dhcp_domain config parameter. (string value)
#node_domain=novalocal

# Use Neutron Networking (False indicates the use of Nova
# networking). (boolean value)
#use_neutron=false

# Use network namespaces for communication (only valid to use
# in conjunction with use_neutron=True). (boolean value)
#use_namespaces=false
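
# For illustration only (example values, assuming a Neutron-based
# deployment where every Node Group has a floating_ip_pool defined):
# use_floating_ips=true
# use_neutron=true
# use_namespaces=true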


#
# Options defined in sahara.main
#

# Protocol used to access OpenStack Identity service. (string
# value)
#os_auth_protocol=http

# IP or hostname of machine on which OpenStack Identity
# service is located. (string value)
#os_auth_host=127.0.0.1

# Port of OpenStack Identity service. (string value)
#os_auth_port=5000

# This OpenStack user is used to verify provided tokens. The
# user must have admin role in <os_admin_tenant_name> tenant.
# (string value)
#os_admin_username=admin

# Password of the admin user. (string value)
#os_admin_password=nova

# Name of tenant where the user is admin. (string value)
#os_admin_tenant_name=admin
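
# For illustration only (hypothetical endpoint and credentials; use
# the values that match your own Identity service):
# os_auth_protocol=http
# os_auth_host=192.168.0.5
# os_auth_port=5000
# os_admin_username=sahara
# os_admin_password=secret
# os_admin_tenant_name=services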

# An engine which will be used to provision infrastructure for
# a Hadoop cluster. (string value)
#infrastructure_engine=direct

# A method for Savanna to execute commands on VMs. (string
# value)
#remote=ssh


#
# Options defined in sahara.db.base
#

# Driver to use for database access. (string value)
#db_driver=sahara.db


#
# Options defined in sahara.openstack.common.db.sqlalchemy.session
#

# The file name to use with SQLite (string value)
#sqlite_db=savanna.sqlite

# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true


#
# Options defined in sahara.openstack.common.eventlet_backdoor
#

# Enable eventlet backdoor. Acceptable values are 0, <port>,
# and <start>:<end>, where 0 results in listening on a random
# tcp port number; <port> results in listening on the
# specified port number (and not enabling backdoor if that
# port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range
# of port numbers. The chosen port is displayed in the
# service's log file. (string value)
#backdoor_port=<None>
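
# For illustration only, the three accepted forms described above
# (port numbers are examples):
#   backdoor_port=0          (random free TCP port)
#   backdoor_port=3000       (this exact port, if it is free)
#   backdoor_port=3000:3100  (smallest free port in the range)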


#
# Options defined in sahara.openstack.common.lockutils
#

# Whether to disable inter-process locks (boolean value)
#disable_process_locking=false

# Directory to use for lock files. (string value)
#lock_path=<None>


#
# Options defined in sahara.openstack.common.log
#

# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false

# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false

# Log output to standard error (boolean value)
#use_stderr=true

# Format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s

# Format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# Data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d

# Prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s

# List of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,qpid.messaging=INFO,stevedore=INFO,eventlet.wsgi.server=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,paramiko=WARN,requests=WARN,iso8601=WARN

# Publish error events (boolean value)
#publish_errors=false

# Make deprecations fatal (boolean value)
#fatal_deprecations=false

# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s] "

# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "

# The name of the logging configuration file. It does not
# disable existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>

# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>

# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S

# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file=<None>

# (Optional) The base directory used for relative --log-file
# paths (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir=<None>
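
# For illustration only (hypothetical paths): to write logs to
# /var/log/sahara/sahara-api.log one could set:
# log_file=sahara-api.log
# log_dir=/var/log/sahara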

# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and then will be changed in J to honor RFC5424
# (boolean value)
#use_syslog=false

# (Optional) Use syslog rfc5424 format for logging. If
# enabled, will add APP-NAME (RFC5424) before the MSG part of
# the syslog message. The old format without APP-NAME is
# deprecated in I, and will be removed in J. (boolean value)
#use_syslog_rfc_format=false

# Syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER


#
# Options defined in sahara.openstack.common.periodic_task
#

# Some periodic tasks can be run in a separate process. Should
# we run them here? (boolean value)
#run_external_periodic_tasks=true


#
# Options defined in sahara.plugins.base
#

# List of plugins to be loaded. Savanna preserves the order of
# the list when returning it. (list value)
#plugins=vanilla,hdp,idh


#
# Options defined in sahara.service.edp.job_manager
#

# Postfix for storing jobs in HDFS. Will be added to
# /user/hadoop/. (string value)
#job_workflow_postfix=


#
# Options defined in sahara.service.periodic
#

# Enable periodic tasks. (boolean value)
#periodic_enable=true

# Range in seconds to randomly delay when starting the
# periodic task scheduler to reduce stampeding. (Disable by
# setting to 0). (integer value)
#periodic_fuzzy_delay=60

# Max interval size between periodic task executions in
# seconds (integer value)
#periodic_interval_max=60

# Minimum "lifetime" in seconds for a transient cluster. The
# cluster is guaranteed to be "alive" within this time period.
# (integer value)
#min_transient_cluster_active_time=0


#
# Options defined in sahara.topology.topology_helper
#

# Enables data locality for Hadoop clusters.
# Also enables data locality for Swift used by Hadoop.
# If enabled, 'compute_topology' and 'swift_topology'
# configuration parameters should point to OpenStack and Swift
# topology correspondingly. (boolean value)
#enable_data_locality=false

# Enables four-level topology for data locality.
# Works only if the corresponding plugin supports such a mode.
# (boolean value)
#enable_hypervisor_awareness=true

# File with nova compute topology. It should contain a mapping
# between nova compute nodes and racks.
# File format:
#   compute1 /rack1
#   compute2 /rack2
#   compute3 /rack2
# (string value)
#compute_topology_file=etc/sahara/compute.topology

# File with Swift topology. It should contain a mapping
# between Swift nodes and racks.
# File format:
#   node1 /rack1
#   node2 /rack2
#   node3 /rack2
# (string value)
#swift_topology_file=etc/sahara/swift.topology


#
# Options defined in sahara.utils.openstack.keystone
#

# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=true


#
# Options defined in sahara.utils.remote
#

# A server to which a guest agent running on a VM should
# connect. The parameter is needed only if agent remote is
# enabled. (string value)
#rpc_server_host=<None>

# Maximum number of remote operations that will be running at
# the same time. Note that each remote operation requires its
# own process to run. (integer value)
#global_remote_threshold=100

# The same as global_remote_threshold, but for a single
# cluster. (integer value)
#cluster_remote_threshold=70


[conductor]

#
# Options defined in sahara.conductor.api
#

# Perform savanna-conductor operations locally. (boolean
# value)
#use_local=true


[database]

#
# Options defined in sahara.openstack.common.db.api
#

# The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy


#
# Options defined in sahara.openstack.common.db.sqlalchemy.session
#

# The SQLAlchemy connection string used to connect to the
# database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection=sqlite:////sahara/openstack/common/db/$sqlite_db
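
# For illustration only (hypothetical host, database name and
# credentials): a MySQL-backed deployment would typically use a
# SQLAlchemy URL of the form:
# connection=mysql://sahara:<password>@127.0.0.1/sahara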

# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=

# Timeout before idle sql connections are reaped (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout=3600

# Minimum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size=1

# Maximum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size=<None>

# Maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries=10

# Interval between retries of opening a sql connection
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval=10

# If set, use this value for max_overflow with sqlalchemy
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow=<None>

# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug=0

# Add python stack traces to SQL as comment strings (boolean
# value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace=false

# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout=<None>


[ssl]

#
# Options defined in sahara.openstack.common.sslutils
#

# CA certificate file to use to verify connecting clients
# (string value)
#ca_file=<None>

# Certificate file to use when starting the server securely
# (string value)
#cert_file=<None>

# Private key file to use when starting the server securely
# (string value)
#key_file=<None>