Use oslo's config generator to generate sample

This patch adds oslo's config generator to the source tree and uses it
to generate marconi's sample configs. It also adds a check to pep8 that
verifies the config file is up-to-date.

Change-Id: Iec7defa244dc8649a5c832bb81b9ec6f30f0ee37
Flavio Percoco 2014-03-11 15:09:55 +01:00
parent c3f6e4f79a
commit a4ed60feed
6 changed files with 396 additions and 126 deletions
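
With this patch the sample is generated from the options registered in the code rather than maintained by hand. A rough sketch of the resulting local workflow, assuming the scripts are run from the repository root:
# Regenerate etc/marconi.conf.sample from the code
$> tools/config/generate_sample.sh -b . -p marconi -o etc
# Verify the committed sample still matches the code
# (the same check is hooked into the pep8 tox environment)
$> tools/config/check_uptodate.sh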

@@ -1,126 +0,0 @@
# By default, this file should live in one of:
# ~/.marconi/marconi.conf
# /etc/marconi/marconi.conf
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
;verbose = False
# Show debugging output in logs (sets DEBUG log level output)
;debug = False
# Log to this file
log_file = /var/log/marconi/queues.log
;auth_strategy =
# Set to True to enable sharding across multiple storage backends
;sharding = False
# Set to True to activate endpoints to manage the shard registry
;admin_mode = False
# ======================================================================
# Syslog
# ======================================================================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
;use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
;syslog_log_facility = LOG_LOCAL0
# ======================================================================
# Drivers
# ======================================================================
[drivers]
# Transport driver module (e.g., wsgi, zmq)
transport = wsgi
# Storage driver module (e.g., mongodb, sqlalchemy)
# sqlite has been deprecated, though it still works given the backward
# compatibility support.
storage = mongodb
# ======================================================================
# General storage options
# ======================================================================
[storage]
# Pipeline for operations on queue resources
;queue_pipeline =
# Pipeline for operations on message resources
;message_pipeline =
# Pipeline for operations on claim resources
;claim_pipeline =
# ======================================================================
# General transport options
# ======================================================================
[transport]
# Maximum number of queue records that may be requested per page,
# when listing queues.
;max_queues_per_page = 20
# Maximum number of messages per page when listing messages. Also,
# determines the max number of messages that can be requested or
# deleted by ID.
;max_messages_per_page = 20
# Maximum number of messages that can be claimed at a time.
;max_messages_per_claim = 20
# Maximum lifetimes, in seconds. The minimum allowed value for each is 60 seconds.
;max_message_ttl = 1209600
;max_claim_ttl = 43200
;max_claim_grace = 43200
# Maximum size, in bytes, allowed for queue metadata and bulk/single
# message post bodies. Includes whitespace and envelope fields, if any.
;max_queue_metadata = 65536
;max_message_size = 262144
# ======================================================================
# Driver-specific transport options
# ======================================================================
[drivers:transport:wsgi]
;bind = 0.0.0.0
;port = 8888
;[drivers:transport:zmq]
;port = 9999
# ======================================================================
# Driver-specific storage options
# ======================================================================
[drivers:storage:mongodb]
uri = mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test&ssl=true&w=majority
database = marconi
# Number of databases across which to partition message data,
# in order to reduce writer lock %. DO NOT change this setting
# after initial deployment. It MUST remain static. Also,
# you should not need a large number of partitions to improve
# performance, esp. if deploying MongoDB on SSD storage.
;partitions = 2
# Maximum number of times to retry a failed operation. Currently
# only used for retrying a message post.
;max_attempts = 1000
# Maximum sleep interval between retries in seconds (actual sleep time
# increases linearly according to number of attempts performed).
;max_retry_sleep = 0.1
# Maximum jitter interval, to be added to the sleep interval, in
# order to decrease probability that parallel requests will retry
# at the same instant.
;max_retry_jitter = 0.005
[drivers:storage:sqlalchemy]
;uri = sqlite:///:memory:

etc/marconi.conf.sample Normal file

@@ -0,0 +1,203 @@
[DEFAULT]
#
# Options defined in marconi.transport.base
#
# (string value)
#auth_strategy=
#
# Options defined in marconi.bootstrap
#
# Enable sharding across multiple storage backends. If
# sharding is enabled, the storage driver configuration is
# used to determine where the catalogue/control plane data
# is kept. (boolean value)
#sharding=false
# Activate endpoints to manage shard registry. (boolean value)
#admin_mode=false
#
# Options defined in marconi.openstack.common.lockutils
#
# Whether to disable inter-process locks (boolean value)
#disable_process_locking=false
# Directory to use for lock files. (string value)
#lock_path=<None>
#
# Options defined in marconi.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# Format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs (list value)
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
# Publish error events (boolean value)
#publish_errors=false
# Make deprecations fatal (boolean value)
#fatal_deprecations=false
# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s] "
# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir=<None>
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and then will be changed in J to honor RFC5424
# (boolean value)
#use_syslog=false
# (Optional) Use syslog rfc5424 format for logging. If
# enabled, will add APP-NAME (RFC5424) before the MSG part of
# the syslog message. The old format without APP-NAME is
# deprecated in I, and will be removed in J. (boolean value)
#use_syslog_rfc_format=false
# Syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
[drivers]
#
# Options defined in marconi.bootstrap
#
# Transport driver to use. (string value)
#transport=wsgi
# Storage driver to use. (string value)
#storage=sqlite
[drivers:storage:mongodb]
#
# Options defined in marconi.storage.mongodb
#
# Mongodb Connection URI. (string value)
#uri=<None>
# Database name. (string value)
#database=marconi
# Number of databases across which to partition message data,
# in order to reduce writer lock %. DO NOT change this setting
# after initial deployment. It MUST remain static. Also, you
# should not need a large number of partitions to improve
# performance, esp. if deploying MongoDB on SSD storage.
# (integer value)
#partitions=2
# Maximum number of times to retry a failed operation.
# Currently only used for retrying a message post. (integer
# value)
#max_attempts=1000
# Maximum sleep interval between retries (actual sleep time
# increases linearly according to number of attempts
# performed). (floating point value)
#max_retry_sleep=0.1
# Maximum jitter interval, to be added to the sleep interval,
# in order to decrease probability that parallel requests will
# retry at the same instant. (floating point value)
#max_retry_jitter=0.005
# Maximum number of times to retry an operation that failed
# due to a primary node failover. (integer value)
#max_reconnect_attempts=10
# Base sleep interval between attempts to reconnect after a
# primary node failover. The actual sleep time increases
# exponentially (power of 2) each time the operation is
# retried. (floating point value)
#reconnect_sleep=0.02
[drivers:storage:sqlalchemy]
#
# Options defined in marconi.storage.sqlalchemy
#
# An sqlalchemy URL (string value)
#uri=sqlite:///:memory:
[sharding:catalog]
#
# Options defined in marconi.storage.sharding
#
# Catalog storage driver. (integer value)
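
Every option in the generated sample above is commented out with its default shown. A minimal sketch of putting it into service, assuming the lookup paths from the old hand-written header (~/.marconi/marconi.conf or /etc/marconi/marconi.conf) still apply:
$> mkdir -p ~/.marconi
$> cp etc/marconi.conf.sample ~/.marconi/marconi.conf
# then uncomment and override what you need, e.g. storage=mongodb under [drivers]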

tools/config/README Normal file

@@ -0,0 +1,38 @@
This generate_sample.sh tool is used to generate sample config files
from OpenStack project source trees.
Run it by passing the base directory and package name, e.g.:
$> generate_sample.sh --base-dir /opt/stack/nova --package-name nova \
--output-dir /opt/stack/nova/etc
$> generate_sample.sh -b /opt/stack/neutron -p neutron -o /opt/stack/neutron/etc
Optionally, include libraries that register entry points for option
discovery, such as oslo.messaging:
$> generate_sample.sh -b /opt/stack/ceilometer -p ceilometer \
-o /opt/stack/ceilometer/etc -l oslo.messaging
Watch out for warnings about modules like libvirt, qpid and zmq not
being found - these warnings are significant because they result
in options not appearing in the generated config file.
This check_uptodate.sh tool is used to ensure that the generated sample
config file in the OpenStack project source tree is continually kept up
to date with the code itself.
This can be done by adding a hook to tox.ini. For example, if a project
already had flake8 enabled in a section like this:
[testenv:pep8]
commands =
flake8 {posargs}
This section would be changed to:
[testenv:pep8]
commands =
flake8 {posargs}
{toxinidir}/tools/config/check_uptodate.sh
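
For marconi itself the extra libraries never need to be passed by hand: generate_sample.sh sources tools/config/oslo.config.generator.rc (added below), which lists them. A sketch of the two commands a contributor would actually run, assuming the tox hook above is in place:
$> tox -e pep8                                              # flake8 plus check_uptodate.sh
$> tools/config/generate_sample.sh -b . -p marconi -o etc   # refresh the sample after adding options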

tools/config/check_uptodate.sh Executable file

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
PROJECT_NAME=${PROJECT_NAME:-marconi}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample
if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e etc/${CFGFILE_NAME} ]; then
CFGFILE=etc/${CFGFILE_NAME}
else
echo "${0##*/}: can not find config file"
exit 1
fi
TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
exit 1
fi
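
When the committed sample has drifted, the script prints the diff followed by the two hints above and exits non-zero, which fails the pep8 job. Roughly what a failing run looks like:
$> tools/config/check_uptodate.sh
<unified diff between the freshly generated sample and etc/marconi.conf.sample>
check_uptodate.sh: marconi.conf.sample is not up to date.
check_uptodate.sh: Please run tools/config/generate_sample.sh.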

tools/config/generate_sample.sh Executable file

@@ -0,0 +1,119 @@
#!/usr/bin/env bash
print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
--long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
eval set -- "$PARSED_OPTIONS"
while true; do
case "$1" in
-h|--help)
echo "${0##*/} [options]"
echo ""
echo "options:"
echo "-h, --help show brief help"
echo "-b, --base-dir=DIR project base directory"
echo "-p, --package-name=NAME project package name"
echo "-o, --output-dir=DIR file output directory"
echo "-m, --module=MOD extra python module to interrogate for options"
echo "-l, --library=LIB extra library that registers options for discovery"
exit 0
;;
-b|--base-dir)
shift
BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-p|--package-name)
shift
PACKAGENAME=`echo $1`
shift
;;
-o|--output-dir)
shift
OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-m|--module)
shift
MODULES="$MODULES -m $1"
shift
;;
-l|--library)
shift
LIBRARIES="$LIBRARIES -l $1"
shift
;;
--)
break
;;
esac
done
BASEDIR=${BASEDIR:-`pwd`}
if ! [ -d $BASEDIR ]
then
echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
elif [[ $BASEDIR != /* ]]
then
BASEDIR=$(cd "$BASEDIR" && pwd)
fi
PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
fi
OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
# NOTE(bnemec): Some projects put their sample config in etc/,
# some in etc/$PACKAGENAME/
if [ -d $OUTPUTDIR/$PACKAGENAME ]
then
OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
elif ! [ -d $OUTPUTDIR ]
then
echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
exit 1
fi
BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
-exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
RC_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
source "$RC_FILE"
fi
for mod in ${OSLO_CONFIG_GENERATOR_EXTRA_MODULES}; do
MODULES="$MODULES -m $mod"
done
for lib in ${OSLO_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
LIBRARIES="$LIBRARIES -l $lib"
done
export EVENTLET_NO_GREENDNS=yes
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
DEFAULT_MODULEPATH=marconi.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
for CONCAT_FILE in $CONCAT_FILES; do
cat $CONCAT_FILE >> $OUTPUTFILE
done
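
The trailing loop is a hook for hand-written additions: any *.conf.sample file placed in tools/config/ is appended verbatim to the generated output. A sketch using a hypothetical extra-opts.conf.sample:
$> echo "# site-specific notes appended to the sample" > tools/config/extra-opts.conf.sample
$> tools/config/generate_sample.sh -b . -p marconi -o etc
$> tail -n 1 etc/marconi.conf.sample
# site-specific notes appended to the sample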

tools/config/oslo.config.generator.rc Normal file

@@ -0,0 +1,11 @@
export OSLO_CONFIG_GENERATOR_EXTRA_MODULES=""
export OSLO_CONFIG_GENERATOR_EXTRA_LIBRARIES="marconi.bootstrap
marconi.storage.base
marconi.storage.pipeline
marconi.storage.sharding
marconi.storage.mongodb
marconi.storage.sqlalchemy
marconi.transport.wsgi
marconi.transport.base
marconi.transport.validation"
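
Combined with generate_sample.sh above, these variables expand into repeated -l flags (no -m flags, since the modules list is empty), so the command the script ultimately assembles for marconi is roughly:
EVENTLET_NO_GREENDNS=yes python -m marconi.openstack.common.config.generator \
    -l marconi.bootstrap -l marconi.storage.base -l marconi.storage.pipeline \
    -l marconi.storage.sharding -l marconi.storage.mongodb -l marconi.storage.sqlalchemy \
    -l marconi.transport.wsgi -l marconi.transport.base -l marconi.transport.validation \
    <every *.py under marconi/ that matches "Opt(" and is not in a tests/ directory> \
    > etc/marconi.conf.sample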