Merge "Standardize config sample locations"

This commit is contained in:
Jenkins 2014-02-18 13:49:48 +00:00 committed by Gerrit Code Review
commit e481dd35ba
10 changed files with 484 additions and 878 deletions

View File

@@ -65,7 +65,7 @@ On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):
.. sourcecode:: console
-$ cp ./etc/savanna/savanna.conf.sample ./etc/savanna/savanna.conf
+$ cp ./etc/savanna/savanna.conf.sample-basic ./etc/savanna/savanna.conf
5. Look through savanna.conf and change any parameters whose default values do
not suit you. Set ``os_auth_host`` to the address of the OpenStack Keystone service.
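
For example, a minimal edit (a sketch; substitute the address and credentials
for your environment) might set just the authentication options:

.. sourcecode:: cfg

    [DEFAULT]
    os_auth_host=192.168.0.5
    os_auth_port=5000
    os_admin_username=admin
    os_admin_password=nova
    os_admin_tenant_name=admin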

View File

@@ -104,13 +104,13 @@ To install into a virtual environment
We recommend browsing `<http://tarballs.openstack.org/savanna/>`_ and selecting the latest stable release.
4. After installation you should create a configuration file. The sample config file location
-depends on your OS. For Ubuntu it is ``/usr/local/share/savanna/savanna.conf.sample``,
-for Red Hat - ``/usr/share/savanna/savanna.conf.sample``. Below is an example for Ubuntu:
+depends on your OS. For Ubuntu it is ``/usr/local/share/savanna/savanna.conf.sample-basic``,
+for Red Hat - ``/usr/share/savanna/savanna.conf.sample-basic``. Below is an example for Ubuntu:
.. sourcecode:: console
$ mkdir savanna-venv/etc
-$ cp savanna-venv/share/savanna/savanna.conf.sample savanna-venv/etc/savanna.conf
+$ cp savanna-venv/share/savanna/savanna.conf.sample-basic savanna-venv/etc/savanna.conf
..
check each option in savanna-venv/etc/savanna.conf, and make any necessary changes
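
Once the options are set, you can start the service against that file. A
sketch, assuming the API entry point installed into the venv is named
``savanna-api``:

.. sourcecode:: console

    $ savanna-venv/bin/python savanna-venv/bin/savanna-api \
        --config-file savanna-venv/etc/savanna.conf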

View File

@@ -1,5 +1,9 @@
[DEFAULT]
#
# Options defined in savanna.config
#
# Hostname or IP address that will be used to listen on
# (string value)
#host=
@@ -7,16 +11,13 @@
# Port that will be used to listen on (integer value)
#port=8386
# Log request/response exchange details: environ, headers and
# bodies (boolean value)
#log_exchange=false
# Address and credentials that will be used to check auth tokens
#os_auth_host=127.0.0.1
#os_auth_port=5000
#os_admin_username=admin
#os_admin_password=nova
#os_admin_tenant_name=admin
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120
# If set to True, Savanna will use floating IPs to communicate
# with instances. To make sure that all instances have
@@ -26,61 +27,102 @@
# "floating_ip_pool" parameter defined. (boolean value)
#use_floating_ips=true
# Use Neutron or Nova Network (boolean value)
# The suffix of the node's FQDN. In nova-network that is the
# dhcp_domain config parameter (string value)
#node_domain=novalocal
# Use Neutron Networking (False indicates the use of Nova
# networking) (boolean value)
#use_neutron=false
# Use network namespaces for communication (only valid to use in conjunction
# with use_neutron=True)
# Use network namespaces for communication (only valid to use
# in conjunction with use_neutron=True) (boolean value)
#use_namespaces=false
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120
#
# Options defined in savanna.main
#
# Postfix for storing jobs in hdfs. Will be added to
# /user/hadoop/ (string value)
#job_workflow_postfix=
# Protocol used to access OpenStack Identity service (string
# value)
#os_auth_protocol=http
# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=false
# IP or hostname of machine on which OpenStack Identity
# service is located (string value)
#os_auth_host=127.0.0.1
# enable periodic tasks (boolean value)
#periodic_enable=true
# Port of OpenStack Identity service (string value)
#os_auth_port=5000
# Enables data locality for hadoop cluster.
# Also enables data locality for Swift used by hadoop.
# If enabled, 'compute_topology' and 'swift_topology'
# configuration parameters should point to OpenStack and Swift
# topology correspondingly. (boolean value)
#enable_data_locality=false
# File with nova compute topology. It should
# contain mapping between nova computes and racks.
# File format:
# compute1 /rack1
# compute2 /rack2
# compute3 /rack2
# This OpenStack user is used to verify provided tokens. The
# user must have admin role in <os_admin_tenant_name> tenant
# (string value)
#compute_topology_file=etc/savanna/compute.topology
#os_admin_username=admin
# File with Swift topology. It should contain mapping
# between Swift nodes and racks. File format:
# node1 /rack1
# node2 /rack2
# node3 /rack2
# (string value)
#swift_topology_file=etc/savanna/swift.topology
# Password of the admin user (string value)
#os_admin_password=nova
# Name of tenant where the user is admin (string value)
#os_admin_tenant_name=admin
# An engine which will be used to provision infrastructure for
# Hadoop cluster. (string value)
#infrastructure_engine=savanna
# A method for Savanna to execute commands on VMs. (string
# value)
#remote=ssh
#
# Options defined in savanna.db.base
#
# Log request/response exchange details: environ, headers and
# bodies (boolean value)
#log_exchange=false
# Driver to use for database access (string value)
#db_driver=savanna.db
#
# Options defined in savanna.openstack.common.db.sqlalchemy.session
#
# The file name to use with SQLite (string value)
#sqlite_db=savanna.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous=true
#
# Options defined in savanna.openstack.common.eventlet_backdoor
#
# Enable eventlet backdoor. Acceptable values are 0, <port>,
# and <start>:<end>, where 0 results in listening on a random
# tcp port number; <port> results in listening on the
# specified port number (and not enabling backdoor if that
# port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range
# of port numbers. The chosen port is displayed in the
# service's log file. (string value)
#backdoor_port=<None>
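# For illustration, the range form could be used like this (hypothetical
# value, not part of the generated defaults); it would listen on the
# smallest unused port between 8000 and 9000:
#backdoor_port=8000:9000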
#
# Options defined in savanna.openstack.common.lockutils
#
# Whether to disable inter-process locks (boolean value)
#disable_process_locking=false
# Directory to use for lock files. (string value)
#lock_path=<None>
#
# Options defined in savanna.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
@@ -93,24 +135,286 @@
# Log output to standard error (boolean value)
#use_stderr=true
# Format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,qpid.messaging=INFO,stevedore=INFO,eventlet.wsgi.server=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,paramiko=WARN,requests=WARN,iso8601=WARN
# Publish error events (boolean value)
#publish_errors=false
# Make deprecations fatal (boolean value)
#fatal_deprecations=false
# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s] "
# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir=<None>
# Use syslog for logging. (boolean value)
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and then will be changed in J to honor RFC5424
# (boolean value)
#use_syslog=false
# syslog facility to receive log lines (string value)
# (Optional) Use syslog rfc5424 format for logging. If
# enabled, will add APP-NAME (RFC5424) before the MSG part of
# the syslog message. The old format without APP-NAME is
# deprecated in I, and will be removed in J. (boolean value)
#use_syslog_rfc_format=false
# Syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
#
# Options defined in savanna.openstack.common.periodic_task
#
# Some periodic tasks can be run in a separate process. Should
# we run them here? (boolean value)
#run_external_periodic_tasks=true
#
# Options defined in savanna.plugins.base
#
# List of plugins to be loaded. Savanna preserves the order of
# the list when returning it. (list value)
#plugins=vanilla,hdp
#
# Options defined in savanna.service.edp.job_manager
#
# Postfix for storing jobs in hdfs. Will be added to
# /user/hadoop/ (string value)
#job_workflow_postfix=
#
# Options defined in savanna.service.periodic
#
# enable periodic tasks (boolean value)
#periodic_enable=true
# range of seconds to randomly delay when starting the
# periodic task scheduler to reduce stampeding. (Disable by
# setting to 0) (integer value)
#periodic_fuzzy_delay=60
# Max interval size between periodic tasks execution in
# seconds (integer value)
#periodic_interval_max=60
#
# Options defined in savanna.topology.topology_helper
#
# Enables data locality for hadoop cluster.
# Also enables data locality for Swift used by hadoop.
# If enabled, 'compute_topology' and 'swift_topology'
# configuration parameters should point to OpenStack and Swift
# topology correspondingly. (boolean value)
#enable_data_locality=false
# Enables four-level topology for data locality.
# Works only if corresponding plugin supports such mode.
# (boolean value)
#enable_hypervisor_awareness=true
# File with nova compute topology. It should
# contain mapping between nova computes and racks.
# File format: compute1 /rack1
# compute2 /rack2 compute3 /rack2 (string
# value)
#compute_topology_file=etc/savanna/compute.topology
# File with Swift topology. It should contain
# mapping between Swift nodes and racks. File
# format: node1 /rack1 node2
# /rack2 node3 /rack2 (string value)
#swift_topology_file=etc/savanna/swift.topology
#
# Options defined in savanna.utils.openstack.keystone
#
# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=false
#
# Options defined in savanna.utils.ssh_remote
#
# Maximum number of remote operations that will be running at
# the same time. Note that each remote operation requires its
# own process to run. (integer value)
#global_remote_threshold=100
# The same as global_remote_threshold, but for a single
# cluster. (integer value)
#cluster_remote_threshold=70
[conductor]
#
# Options defined in savanna.conductor.api
#
# Perform savanna-conductor operations locally (boolean value)
#use_local=true
[database]
#
# Options defined in savanna.openstack.common.db.api
#
# The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy
#
# Options defined in savanna.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the
# database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection=sqlite:////savanna/openstack/common/db/$sqlite_db
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=
# Timeout before idle sql connections are reaped (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size=1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size=<None>
# Maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries=10
# Interval between retries of opening a sql connection
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow=<None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug=0
# Add python stack traces to SQL as comment strings (boolean
# value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout=<None>
[ssl]
#
# Options defined in savanna.openstack.common.sslutils
#
# CA certificate file to use to verify connecting clients
# (string value)
#ca_file=<None>
# Certificate file to use when starting the server securely
# (string value)
#cert_file=<None>
# Private key file to use when starting the server securely
# (string value)
#key_file=<None>

View File

@@ -0,0 +1,116 @@
[DEFAULT]
# Hostname or IP address that will be used to listen on
# (string value)
#host=
# Port that will be used to listen on (integer value)
#port=8386
# Address and credentials that will be used to check auth tokens
#os_auth_host=127.0.0.1
#os_auth_port=5000
#os_admin_username=admin
#os_admin_password=nova
#os_admin_tenant_name=admin
# If set to True, Savanna will use floating IPs to communicate
# with instances. To make sure that all instances have
# floating IPs assigned in Nova Network set
# "auto_assign_floating_ip=True" in nova.conf.If Neutron is
# used for networking, make sure that all Node Groups have
# "floating_ip_pool" parameter defined. (boolean value)
#use_floating_ips=true
# Use Neutron or Nova Network (boolean value)
#use_neutron=false
# Use network namespaces for communication (only valid to use in conjunction
# with use_neutron=True)
#use_namespaces=false
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120
# Postfix for storing jobs in hdfs. Will be added to
# /user/hadoop/ (string value)
#job_workflow_postfix=
# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=false
# enable periodic tasks (boolean value)
#periodic_enable=true
# Enables data locality for hadoop cluster.
# Also enables data locality for Swift used by hadoop.
# If enabled, 'compute_topology' and 'swift_topology'
# configuration parameters should point to OpenStack and Swift
# topology correspondingly. (boolean value)
#enable_data_locality=false
# File with nova compute topology. It should
# contain mapping between nova computes and racks.
# File format:
# compute1 /rack1
# compute2 /rack2
# compute3 /rack2
# (string value)
#compute_topology_file=etc/savanna/compute.topology
# File with Swift topology. It should contain mapping
# between Swift nodes and racks. File format:
# node1 /rack1
# node2 /rack2
# node3 /rack2
# (string value)
#swift_topology_file=etc/savanna/swift.topology
# Log request/response exchange details: environ, headers and
# bodies (boolean value)
#log_exchange=false
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
#log_dir=<None>
# Use syslog for logging. (boolean value)
#use_syslog=false
# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
# List of plugins to be loaded. Savanna preserves the order of
# the list when returning it. (list value)
#plugins=vanilla,hdp
[database]
#connection=sqlite:////savanna/openstack/common/db/$sqlite_db
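# For a real deployment you would typically uncomment "connection" and point
# it at an external database rather than the bundled SQLite default; a
# hypothetical MySQL example (host, user, and password are placeholders):
#connection=mysql://savanna:savanna@127.0.0.1/savanna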

View File

@@ -1,336 +0,0 @@
[DEFAULT]
#
# Options defined in savanna.config
#
# Hostname or IP address that will be used to listen on
# (string value)
#host=
# Port that will be used to listen on (integer value)
#port=8386
# Log request/response exchange details: environ, headers and
# bodies (boolean value)
#log_exchange=false
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120
# If set to True, Savanna will use floating IPs to communicate
# with instances. To make sure that all instances have
# floating IPs assigned in Nova Network set
# "auto_assign_floating_ip=True" in nova.conf.If Neutron is
# used for networking, make sure thatall Node Groups have
# "floating_ip_pool" parameterdefined. (boolean value)
#use_floating_ips=true
# The suffix of the node's FQDN. In nova-network that is the
# dhcp_domain config parameter (string value)
#node_domain=novalocal
# Use Neutron Networking (False indicates the use of Nova
# networking) (boolean value)
#use_neutron=false
# Use network namespaces for communication (only valid to use
# in conjunction with use_neutron=True) (boolean value)
#use_namespaces=false
#
# Options defined in savanna.main
#
# Protocol used to access OpenStack Identity service (string
# value)
#os_auth_protocol=http
# IP or hostname of machine on which OpenStack Identity
# service is located (string value)
#os_auth_host=127.0.0.1
# Port of OpenStack Identity service (string value)
#os_auth_port=5000
# This OpenStack user is used to verify provided tokens. The
# user must have admin role in <os_admin_tenant_name> tenant
# (string value)
#os_admin_username=admin
# Password of the admin user (string value)
#os_admin_password=nova
# Name of tenant where the user is admin (string value)
#os_admin_tenant_name=admin
# An engine which will be used to provision infrastructure for
# Hadoop cluster. (string value)
#infrastructure_engine=savanna
# A method for Savanna to execute commands on VMs. (string
# value)
#remote=ssh
#
# Options defined in savanna.db.base
#
# Driver to use for database access (string value)
#db_driver=savanna.db
#
# Options defined in savanna.openstack.common.db.sqlalchemy.session
#
# the filename to use with sqlite (string value)
#sqlite_db=savanna.sqlite
# If true, use synchronous mode for sqlite (boolean value)
#sqlite_synchronous=true
#
# Options defined in savanna.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,qpid.messaging=INFO,stevedore=INFO,eventlet.wsgi.server=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,paramiko=WARN,requests=WARN,iso8601=WARN
# publish error events (boolean value)
#publish_errors=false
# make deprecations fatal (boolean value)
#fatal_deprecations=false
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
#log_dir=<None>
# Use syslog for logging. (boolean value)
#use_syslog=false
# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
#
# Options defined in savanna.plugins.base
#
# List of plugins to be loaded. Savanna preserves the order of
# the list when returning it. (list value)
#plugins=vanilla,hdp
#
# Options defined in savanna.service.edp.job_manager
#
# Postfix for storing jobs in hdfs. Will be added to
# /user/hadoop/ (string value)
#job_workflow_postfix=
#
# Options defined in savanna.service.periodic
#
# enable periodic tasks (boolean value)
#periodic_enable=true
# range of seconds to randomly delay when starting the
# periodic task scheduler to reduce stampeding. (Disable by
# setting to 0) (integer value)
#periodic_fuzzy_delay=60
# Max interval size between periodic tasks execution in
# seconds (integer value)
#periodic_interval_max=60
#
# Options defined in savanna.topology.topology_helper
#
# Enables data locality for hadoop cluster.
# Also enables data locality for Swift used by hadoop.
# If enabled, 'compute_topology' and 'swift_topology'
# configuration parameters should point to OpenStack and Swift
# topology correspondingly. (boolean value)
#enable_data_locality=false
# Enables four-level topology for data locality.
# Works only if corresponding plugin supports such mode.
# (boolean value)
#enable_hypervisor_awareness=true
# File with nova compute topology. It should
# contain mapping between nova computes and racks.
# File format: compute1 /rack1
# compute2 /rack2 compute3 /rack2 (string
# value)
#compute_topology_file=etc/savanna/compute.topology
# File with Swift topology. It should contain
# mapping between Swift nodes and racks. File
# format: node1 /rack1 node2
# /rack2 node3 /rack2 (string value)
#swift_topology_file=etc/savanna/swift.topology
#
# Options defined in savanna.utils.openstack.keystone
#
# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=false
#
# Options defined in savanna.utils.ssh_remote
#
# Maximum number of remote operations that will be running at
# the same time. Note that each remote operation requires its
# own process to run. (integer value)
#global_remote_threshold=100
# The same as global_remote_threshold, but for a single
# cluster. (integer value)
#cluster_remote_threshold=70
[conductor]
#
# Options defined in savanna.conductor.api
#
# Perform savanna-conductor operations locally (boolean value)
#use_local=true
[database]
#
# Options defined in savanna.db.migration.cli
#
# URL to database (string value)
#connection=
#
# Options defined in savanna.openstack.common.db.api
#
# The backend to use for db (string value)
#backend=sqlalchemy
#
# Options defined in savanna.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the
# database (string value)
#connection=sqlite:////savanna/openstack/common/db/$sqlite_db
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=
# timeout before idle sql connections are reaped (integer
# value)
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
#min_pool_size=1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
#max_pool_size=<None>
# maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
#max_retries=10
# interval between retries of opening a sql connection
# (integer value)
#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
#max_overflow=<None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
#connection_debug=0
# Add python stack traces to SQL as comment strings (boolean
# value)
#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
#pool_timeout=<None>
# Total option count: 62

View File

@@ -1,9 +0,0 @@
This generate_sample.sh tool is used to generate etc/savanna/savanna.conf.sample-full.
Run it from the top-level working directory, i.e.
$> ./tools/conf/generate_sample.sh
Watch out for warnings about modules like libvirt, qpid and zmq not
being found; these warnings are significant because they result
in options not appearing in the generated config file.
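
Under the hood the script invokes the option extractor twice, once with a
whitelist and once with a blacklist. The whitelist pass looks roughly like
this (reconstructed from generate_sample.sh below):
$> PYTHONPATH=./:${PYTHONPATH} python tools/conf/extract_opts.py \
       --whitelist-file tools/conf/whitelist.txt \
       $(find savanna -type f -name "*.py" ! -path "savanna/tests/*") \
       > etc/savanna/savanna.conf.sample.raw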

View File

@@ -1,63 +0,0 @@
DEFAULT.backdoor_port
DEFAULT.disable_process_locking
DEFAULT.lock_path
DEFAULT.notification_driver
DEFAULT.default_notification_level
DEFAULT.default_publisher_id
DEFAULT.notification_topics
DEFAULT.run_external_periodic_tasks
DEFAULT.rpc_backend
DEFAULT.rpc_thread_pool_size
DEFAULT.rpc_conn_pool_size
DEFAULT.rpc_response_timeout
DEFAULT.rpc_cast_timeout
DEFAULT.allowed_rpc_exception_modules
DEFAULT.fake_rabbit
DEFAULT.control_exchange
DEFAULT.amqp_durable_queues
DEFAULT.amqp_auto_delete
DEFAULT.kombu_ssl_version
DEFAULT.kombu_ssl_keyfile
DEFAULT.kombu_ssl_certfile
DEFAULT.kombu_ssl_ca_certs
DEFAULT.rabbit_host
DEFAULT.rabbit_port
DEFAULT.rabbit_hosts
DEFAULT.rabbit_use_ssl
DEFAULT.rabbit_userid
DEFAULT.rabbit_password
DEFAULT.rabbit_virtual_host
DEFAULT.rabbit_retry_interval
DEFAULT.rabbit_retry_backoff
DEFAULT.rabbit_max_retries
DEFAULT.rabbit_ha_queues
DEFAULT.qpid_hostname
DEFAULT.qpid_port
DEFAULT.qpid_hosts
DEFAULT.qpid_username
DEFAULT.qpid_password
DEFAULT.qpid_sasl_mechanisms
DEFAULT.qpid_heartbeat
DEFAULT.qpid_protocol
DEFAULT.qpid_tcp_nodelay
DEFAULT.qpid_topology_version
DEFAULT.rpc_zmq_bind_address
DEFAULT.rpc_zmq_matchmaker
DEFAULT.rpc_zmq_port
DEFAULT.rpc_zmq_contexts
DEFAULT.rpc_zmq_topic_backlog
DEFAULT.rpc_zmq_ipc_dir
DEFAULT.rpc_zmq_host
DEFAULT.matchmaker_heartbeat_freq
DEFAULT.matchmaker_heartbeat_ttl
rpc_notifier2.topics
matchmaker_redis.host
matchmaker_redis.port
matchmaker_redis.password
ssl.ca_file
ssl.cert_file
ssl.key_file
matchmaker_ring.ringfile
DEFAULT.instance_format
DEFAULT.instance_uuid_format

View File

@@ -1,333 +0,0 @@
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, SINA Corporation.
#
# stolen from the OpenStack Nova
"""Extracts OpenStack config option info from module(s)."""
import imp
import optparse
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
#from nova.openstack.common import importutils
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    MULTISTROPT: 'multi valued',
}
OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT,
                                              MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
WORDWRAP_WIDTH = 60
def main(srcfiles, whitelist_file, blacklist_file):
    mods_by_pkg = dict()
    for filepath in srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
    pkg_names.sort()
    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
    ext_names.sort()
    pkg_names.extend(ext_names)
    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}
    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                continue
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))
    opts_by_group = _filter_opts(opts_by_group, whitelist_file, blacklist_file)
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group, opts in opts_by_group.items():
        print_group_opts(group, opts)
    print "# Total option count: %d" % OPTION_COUNT
def _readlst(filename):
    if not filename:
        return None
    result = []
    with open(filename) as fl:
        for line in fl.readlines():
            if line.strip() and not line.startswith('#'):
                result.append(line.strip())
    return result
def _filter_opts(opts_by_group, whitelist_file, blacklist_file):
    whitelist = _readlst(whitelist_file)
    blacklist = _readlst(blacklist_file)
    if (not whitelist) and (not blacklist):
        return opts_by_group
    filtered = {}
    for group, opts_by_module in opts_by_group.items():
        new_opts_by_module = []
        for mod, opts in opts_by_module:
            new_opts = []
            for opt in opts:
                opt_name = '%s.%s' % (str(group), opt.dest)
                if whitelist:
                    if opt_name in whitelist:
                        new_opts.append(opt)
                elif blacklist:
                    if not opt_name in blacklist:
                        new_opts.append(opt)
            if new_opts:
                new_opts_by_module.append((mod, new_opts))
        if new_opts_by_module:
            filtered[group] = new_opts_by_module
    return filtered
def _import_module(mod_str):
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            __import__(mod_str)
            return sys.modules[mod_str]
        #return importutils.import_module(mod_str)
    except (ValueError, AttributeError), err:
        return None
    except ImportError, ie:
        sys.stderr.write("%s\n" % str(ie))
        return None
    except Exception, e:
        return None
def _guess_groups(opt, mod_obj):
    groups = []
    # is it in the DEFAULT group?
    if (opt.dest in cfg.CONF and
            not isinstance(cfg.CONF[opt.dest], cfg.CONF.GroupAttr)):
        groups.append('DEFAULT')
    # what other groups is it in?
    for key, value in cfg.CONF.items():
        if not isinstance(value, cfg.CONF.GroupAttr):
            continue
        if opt.dest not in value:
            continue
        groups.append(key)
    if len(groups) == 1:
        return groups[0]
    group = None
    for g in groups:
        if g in mod_obj.__name__:
            group = g
            break
    if group is None and 'DEFAULT' in groups:
        sys.stderr.write("Guessing that " + opt.dest +
                         " in " + mod_obj.__name__ +
                         " is in DEFAULT group out of " +
                         ','.join(groups) + "\n")
        return 'DEFAULT'
    if group is None:
        sys.stderr.write("Unable to guess what group " + opt.dest +
                         " in " + mod_obj.__name__ +
                         " is in out of " + ','.join(groups) + "\n")
        return None
    sys.stderr.write("Guessing that " + opt.dest +
                     " in " + mod_obj.__name__ +
                     " is in the " + group +
                     " group out of " + ','.join(groups) + "\n")
    return group
def _list_opts(obj):
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))
    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)
    ret = {}
    for opt in opts:
        groups = _guess_groups(opt, obj)
        if groups:
            ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()
def print_group_opts(group, opts_by_module):
    print "[%s]" % group
    print
    global OPTION_COUNT
    for mod, opts in opts_by_module:
        OPTION_COUNT += len(opts)
        print '#'
        print '# Options defined in %s' % mod
        print '#'
        print
        for opt in opts:
            _print_opt(opt)
        print
def _get_my_ip():
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return None
def _sanitize_default(s):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if s.startswith(BASEDIR):
        return s.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in s:
        return s.replace(BASEDIR, '')
    elif s == _get_my_ip():
        return '10.0.0.1'
    elif s == socket.getfqdn():
        return 'savanna'
    elif s.strip() != s:
        return '"%s"' % s
    return s
def _print_opt(opt):
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
    opt_type = None
    try:
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError), err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
    print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
    try:
        if opt_default is None:
            print '#%s=<None>' % opt_name
        elif opt_type == STROPT:
            assert(isinstance(opt_default, basestring))
            print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, str(opt_default).lower())
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print '#%s=%s' % (opt_name, ','.join(opt_default))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print '#%s=%s' % (opt_name, default)
        print
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
if __name__ == '__main__':
    usage = "Usage: %prog [options] [srcfile1] [srcfile2] [srcfile3] ..."
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-w", "--whitelist-file", dest="whitelist_file",
                      help="Use file FILE as a whitelist", metavar="FILE",
                      default=None)
    parser.add_option("-b", "--blacklist-file", dest="blacklist_file",
                      help="Use file FILE as a blacklist", metavar="FILE",
                      default=None)
    (options, args) = parser.parse_args()
    main(args, options.whitelist_file, options.blacklist_file)

View File

@@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Copyright 2012 SINA Corporation
# All Rights Reserved.
# Author: Zhongyue Luo <lzyeval@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# stolen from the OpenStack Nova
FILES=$(find savanna -type f -name "*.py" ! -path "savanna/tests/*" -exec \
    grep -l "Opt(" {} \; | sort -u)
BINS=$(echo bin/savanna-*)
PYTHONPATH=./:${PYTHONPATH} \
    tools/with_venv python $(dirname "$0")/extract_opts.py \
    --whitelist-file tools/conf/whitelist.txt ${FILES} ${BINS} > \
    etc/savanna/savanna.conf.sample.raw
PYTHONPATH=./:${PYTHONPATH} \
    tools/with_venv python $(dirname "$0")/extract_opts.py \
    --blacklist-file tools/conf/blacklist.txt ${FILES} ${BINS} > \
    etc/savanna/savanna.conf.sample-full
# Remove compiled files created by imp.load_source()
for bin in ${BINS}; do
    [ -f ${bin}c ] && rm ${bin}c
done

View File

@@ -1,35 +0,0 @@
DEFAULT.host
DEFAULT.port
DEFAULT.log_exchange
DEFAULT.job_binary_max_KB
DEFAULT.use_floating_ips
DEFAULT.use_neutron
DEFAULT.use_namespaces
DEFAULT.os_auth_protocol
DEFAULT.os_auth_host
DEFAULT.os_auth_port
DEFAULT.os_admin_username
DEFAULT.os_admin_password
DEFAULT.os_admin_tenant_name
DEFAULT.debug
DEFAULT.verbose
DEFAULT.use_stderr
DEFAULT.log_file
DEFAULT.log_dir
DEFAULT.use_syslog
DEFAULT.syslog_log_facility
DEFAULT.plugins
DEFAULT.job_workflow_postfix
DEFAULT.periodic_enable
DEFAULT.periodic_fuzzy_delay
DEFAULT.periodic_interval_max
DEFAULT.enable_data_locality
DEFAULT.compute_topology_file
DEFAULT.swift_topology_file
DEFAULT.use_identity_api_v3
database.connection