Switched from home-grown config handling to oslo.config

The basic functionality already works, but it needs further testing and refactoring.
Memo:
fix the failing test
fix pep8 errors
decide what to do with the copyright headers in the tools/conf/* files copied from nova
rename 'host' and 'port' to 'listen_address' and 'listen_port'
rephrase the help strings for the config options; the current wording is rough
Author: Dmitry Mescheryakov
Date: 2013-03-18 02:05:48 +04:00
Parent: 6b1d3f9be4
Commit: a08eaa2bea
14 changed files with 902 additions and 105 deletions

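For orientation before the diff: a minimal sketch of the oslo.config pattern this commit adopts (declare options, register them on the global CONF, parse the command line and any config files once at startup, then read values as attributes of CONF). The option names below are illustrative, not the project's full set.

from oslo.config import cfg

_opts = [
    cfg.IntOpt('port', default=8080, help='port to listen on'),
    cfg.BoolOpt('dev', default=False, help='enable dev mode'),
]

CONF = cfg.CONF
CONF.register_cli_opts(_opts)  # CLI options can also be set from a config file

# Command-line values override config-file values, which override the defaults.
# In a real entry point this would be CONF(sys.argv[1:], project='eho');
# project='eho' also makes oslo.config look for default config files
# such as /etc/eho/eho.conf.
CONF(['--port', '9090'], project='eho')
print CONF.port, CONF.dev  # -> 9090 False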

@@ -15,54 +15,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import eho.server.main as server
from eventlet import wsgi
import eventlet
from oslo.config.cfg import CONF
from eho import config
import eho.server.main as server
def main():
parser = argparse.ArgumentParser(description='EHO server')
parser.add_argument(
'--dev', action='store_true', dest='dev', help='enable dev mode'
)
parser.add_argument(
'--host', default='', dest='host', help='set host (default: \'\' all)'
)
parser.add_argument(
'-p', '--port', type=int, default=8080, dest='port',
help='specify http port (default: 8080)'
)
parser.add_argument(
'--log-level, --log_level', default='WARN', dest='log_level',
help='set logging level (default: WARN)'
)
parser.add_argument(
'--reset-db, --reset_db', action='store_true', dest='reset_db',
help='reset db'
)
parser.add_argument(
'--stub-data, --stub_data', action='store_true', dest='stub_data',
help='put stub templates and clusters into db'
)
parser.add_argument(
'--allow-cluster-ops, --allow_cluster_ops', action='store_true',
dest='allow_cluster_ops', help='allow cluster ops (default: False)'
)
config.parse_args(sys.argv[1:])
args = parser.parse_args()
opts = dict()
opts['DEBUG'] = args.dev
opts['LOG_LEVEL'] = args.log_level
opts['RESET_DB'] = args.reset_db
opts['STUB_DATA'] = args.stub_data
opts['ALLOW_CLUSTER_OPS'] = args.allow_cluster_ops
app = server.make_app()
app = server.make_app(**opts)
wsgi.server(eventlet.listen((args.host, args.port), backlog=500), app,
debug=args.dev)
wsgi.server(eventlet.listen((CONF.host, CONF.port), backlog=500), app,
debug=CONF.dev)
if __name__ == '__main__':

eho/config.py (new file)

@@ -0,0 +1,34 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
cli_opts = [
cfg.BoolOpt('dev', default=False, help='enable dev mode'),
cfg.StrOpt('host', default='', help='set host'),
cfg.IntOpt('port', default=8080, help='set port'),
cfg.StrOpt('log-level', default='WARN', help='set application log level'),
cfg.BoolOpt('reset-db', default=False, help='resets DB'),
cfg.BoolOpt('stub-data', default=False, help='populates DB with stub data'),
cfg.BoolOpt('allow-cluster-ops', default=False, help='without that option' \
' the application operates in dry run mode and does not ' \
' send any requests to the OpenStack cluster')
]
CONF=cfg.CONF
CONF.register_cli_opts(cli_opts)
def parse_args(argv):
CONF(argv, project='eho')
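The rest of the diff consumes these options through oslo.config's cross-module mechanism: a module declares the options it depends on with CONF.import_opt(), which imports the declaring module as a side effect and verifies the option is registered, and then reads values as attributes of CONF; options registered in a group are read as CONF.<group>.<option>. A sketch of that pattern, using option names from this commit (describe_config is a hypothetical helper):

from oslo.config import cfg

CONF = cfg.CONF
# Fails fast if the declaring module does not register the option.
CONF.import_opt('allow_cluster_ops', 'eho.config')
CONF.import_opt('database_uri', 'eho.server.main', group='sqlalchemy')

def describe_config():
    # The dashed CLI name --allow-cluster-ops becomes an underscored attribute.
    mode = 'real' if CONF.allow_cluster_ops else 'dry-run'
    # Grouped options live under CONF.<group>.
    return mode, CONF.sqlalchemy.database_uri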

eho/server/main.py

@@ -18,56 +18,85 @@ import logging
from eventlet import monkey_patch
from flask import Flask
from keystoneclient.middleware.auth_token import filter_factory as auth_token
from oslo.config import cfg
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from eho.server.middleware.auth_valid import filter_factory as auth_valid
from eho.server.scheduler import setup_scheduler
from eho.server.service.api import setup_api
from eho.server.storage.defaults import setup_defaults
from eho.server.utils.api import render
from eho.server.api import v02 as api_v02
from eho.server.storage.storage import setup_storage
from eho.server.service.cluster_ops import setup_ops
monkey_patch(os=True, select=True, socket=True, thread=True, time=True)
opts = [
cfg.StrOpt('os_auth_protocol',
default='http',
help='Protocol used to access OpenStack Identity service'),
cfg.StrOpt('os_auth_host',
default='openstack',
help='IP or hostname of machine on which OpenStack Identity' \
' service is located'),
cfg.StrOpt('os_auth_port',
default='35357',
help='Port of OpenStack Identity service'),
cfg.StrOpt('os_admin_username',
default='admin',
help='This OpenStack user is used to verify provided tokens.' \
' The user must have admin role in <os_admin_tenant_name>' \
' tenant'),
cfg.StrOpt('os_admin_password',
default='nova',
help='Password of the admin user'),
cfg.StrOpt('os_admin_tenant_name',
default='admin',
help='Name of tenant where the user is admin'),
cfg.StrOpt('nova_internal_net_name',
default='novanetwork',
help='Name of network which IPs are given to the VMs')
]
def make_app(**local_conf):
sqlalchemy_opts = [
cfg.StrOpt('database_uri',
default='sqlite:////tmp/eho-server.db',
help='URL for sqlalchemy database'),
cfg.BoolOpt('echo',
default=False,
help='Sqlalchemy echo')
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.register_opts(sqlalchemy_opts, group='sqlalchemy')
CONF.import_opt('log_level', 'eho.config')
def make_app():
"""
Entry point for Elastic Hadoop on OpenStack REST API server
"""
app = Flask('eho.api')
# reading defaults
app.config.from_pyfile('etc/default.cfg', silent=True)
app.config.from_pyfile('../etc/default.cfg', silent=True)
# read local conf
app.config.from_pyfile('etc/local.cfg', silent=True)
app.config.from_pyfile('../etc/local.cfg', silent=True)
app.config.from_envvar('EHO_API_CFG', silent=True)
app.config.update(**local_conf)
app.config['SQLALCHEMY_DATABASE_URI'] = CONF.sqlalchemy.database_uri
app.config['SQLALCHEMY_ECHO'] = CONF.sqlalchemy.echo
root_logger = logging.getLogger()
ll = app.config.pop('LOG_LEVEL', 'WARN')
if ll:
root_logger.setLevel(ll)
root_logger.setLevel(CONF.log_level)
app.register_blueprint(api_v02.rest, url_prefix='/v0.2')
if app.config['DEBUG']:
if CONF.log_level == 'DEBUG':
print 'Configuration:'
for k in app.config:
print '\t%s = %s' % (k, app.config[k])
for key in CONF:
print "%s = %s" %(key, CONF[key])
setup_storage(app)
setup_defaults(app)
setup_scheduler(app)
setup_ops(app)
setup_api(app)
def make_json_error(ex):
status_code = (ex.code
@@ -86,12 +115,12 @@ def make_app(**local_conf):
app.wsgi_app = auth_token(
app.config,
auth_host=app.config['OS_AUTH_HOST'],
auth_port=app.config['OS_AUTH_PORT'],
auth_protocol=app.config['OS_AUTH_PROTOCOL'],
admin_user=app.config['OS_ADMIN_USER'],
admin_password=app.config['OS_ADMIN_PASSWORD'],
admin_tenant=['OS_ADMIN_TENANT']
auth_host=CONF.os_auth_host,
auth_port=CONF.os_auth_port,
auth_protocol=CONF.os_auth_protocol,
admin_user=CONF.os_admin_username,
admin_password=CONF.os_admin_password,
admin_tenant=CONF.os_admin_tenant_name
)(app.wsgi_app)
return app

eho/server/service/api.py

@@ -17,6 +17,8 @@ import logging
import eventlet
from oslo.config import cfg
from eho.server.storage.models import NodeTemplate, NodeType, NodeProcess, \
NodeTemplateConfig, Cluster, ClusterNodeCount
from eho.server.storage.storage import DB
@@ -25,12 +27,8 @@ from eho.server.service import cluster_ops
from flask import request
ALLOW_CLUSTER_OPS = False
def setup_api(app):
global ALLOW_CLUSTER_OPS
ALLOW_CLUSTER_OPS = app.config['ALLOW_CLUSTER_OPS']
CONF = cfg.CONF
CONF.import_opt('allow_cluster_ops', 'eho.config')
def _clean_nones(obj):
@@ -217,7 +215,7 @@ def _cluster_creation_job(headers, cluster_id):
logging.debug("Starting cluster '%s' creation: %s", cluster_id,
_cluster(cluster).dict)
if ALLOW_CLUSTER_OPS:
if CONF.allow_cluster_ops:
cluster_ops.launch_cluster(headers, cluster)
else:
logging.info("Cluster ops are disabled, use --allow-cluster-ops flag")
@@ -244,7 +242,7 @@ def _cluster_termination_job(headers, cluster_id):
logging.debug("Stoping cluster '%s' creation: %s", cluster_id,
_cluster(cluster).dict)
if ALLOW_CLUSTER_OPS:
if CONF.allow_cluster_ops:
cluster_ops.stop_cluster(headers, cluster)
else:
logging.info("Cluster ops are disabled, use --allow-cluster-ops flag")

eho/server/service/cluster_ops.py

@@ -15,25 +15,31 @@
import logging
import time
from eho.server.utils.openstack.nova import novaclient
from jinja2 import Environment
from jinja2 import PackageLoader
from paramiko import SSHClient, AutoAddPolicy
from oslo.config import cfg
from eho.server.storage.models import Node, ServiceUrl
from eho.server.storage.storage import DB
from eho.server.utils.openstack.nova import novaclient
NODE_CONF = {}
CONF = cfg.CONF
cluster_node_opts = [
cfg.StrOpt('username',
default='root',
help='An existing user on Hadoop image'),
cfg.StrOpt('password',
default='swordfish',
help='User\'s password')
]
def setup_ops(app):
NODE_CONF['user'] = app.config.get('NODE_USER')
NODE_CONF['password'] = app.config.get('NODE_PASSWORD')
NODE_CONF['vm_internal_net'] = \
app.config.get('NODE_INTERNAL_NET')
CONF.register_opts(cluster_node_opts, 'cluster_node')
#CONF.import_opt('nova_internal_net_name', 'eho.server.main')
def _find_by_id(lst, id):
@@ -67,8 +73,8 @@ def _setup_ssh_connection(host, ssh):
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(
host,
username=NODE_CONF['user'],
password=NODE_CONF['password']
username=CONF.cluster_node.username,
password=CONF.cluster_node.password
)
@@ -176,11 +182,11 @@ def _check_if_up(nova, node):
srv = _find_by_id(nova.servers.list(), node['id'])
nets = srv.networks
if not NODE_CONF['vm_internal_net'] in nets:
if not CONF.nova_internal_net_name in nets:
# VM's networking is not configured yet
return
ips = nets[NODE_CONF['vm_internal_net']]
ips = nets[CONF.nova_internal_net_name]
if len(ips) < 2:
# public IP is not assigned yet
return

eho/server/storage/defaults.py

@@ -14,11 +14,19 @@
# limitations under the License.
import logging
from oslo.config import cfg
from eho.server.storage.models import NodeProcess, NodeProcessProperty, \
NodeType, NodeTemplate, NodeTemplateConfig, Cluster, ClusterNodeCount
from eho.server.storage.storage import DB
CONF = cfg.CONF
CONF.import_opt('reset_db', 'eho.config')
CONF.import_opt('stub_data', 'eho.config')
def create_node_process(name, properties):
"""
Creates new node process and node process properties
@@ -107,7 +115,7 @@ def setup_defaults(app):
nt_nn = None
nt_tt_dn = None
if app.config.get('RESET_DB', False):
if CONF.reset_db:
# setup default processes
p_jt = create_node_process('job_tracker', [('heap_size', True, None)])
p_nn = create_node_process('name_node', [('heap_size', True, None)])
@@ -129,7 +137,7 @@ def setup_defaults(app):
nt.id, nt.name,
[p.name.__str__() for p in nt.processes])
if app.config.get('STUB_DATA', False):
if CONF.stub_data:
_setup_stub_data(nt_jt_nn, nt_jt, nt_nn, nt_tt_dn)

eho/server/storage/storage.py

@@ -14,15 +14,19 @@
# limitations under the License.
from flask.ext.sqlalchemy import SQLAlchemy
from oslo.config import cfg
DB = SQLAlchemy()
CONF = cfg.CONF
CONF.import_opt('reset_db', 'eho.config')
def setup_storage(app):
DB.app = app
DB.init_app(app)
if app.config.get('RESET_DB', False):
if CONF.reset_db:
DB.drop_all()
DB.create_all()


@@ -21,6 +21,7 @@ import uuid
import os
import eventlet
from oslo.config import cfg
from eho.server import scheduler
from eho.server.main import make_app
@@ -78,6 +79,15 @@ def _stub_auth_valid(*args, **kwargs):
return _filter
CONF = cfg.CONF
CONF.import_opt('reset_db', 'eho.config')
CONF.import_opt('stub_data', 'eho.config')
CONF.import_opt('log_level', 'eho.config')
CONF.import_opt('allow_cluster_ops', 'eho.config')
CONF.import_opt('database_uri', 'eho.server.main', group='sqlalchemy')
CONF.import_opt('echo', 'eho.server.main', group='sqlalchemy')
class TestApi(unittest.TestCase):
def setUp(self):
self.db_fd, self.db_path = tempfile.mkstemp()
@@ -95,21 +105,15 @@ class TestApi(unittest.TestCase):
api.cluster_ops.launch_cluster = _stub_launch_cluster
api.cluster_ops.stop_cluster = _stub_stop_cluster
app = make_app(
TESTING=True,
RESET_DB=True,
STUB_DATA=True,
LOG_LEVEL="DEBUG",
ALLOW_CLUSTER_OPS=True,
SQLALCHEMY_DATABASE_URI='sqlite:///' + self.db_path,
SQLALCHEMY_ECHO=False,
OS_AUTH_HOST='localhost',
OS_AUTH_PORT='12345',
OS_AUTH_PROTOCOL='http',
OS_ADMIN_USER='admin',
OS_ADMIN_PASSWORD='admin',
OS_ADMIN_TENANT='admin'
)
CONF.set_override('reset_db', True)
CONF.set_override('stub_data', True)
CONF.set_override('log_level', 'DEBUG')
CONF.set_override('allow_cluster_ops', False)
CONF.set_override('database_uri', 'sqlite:///' + self.db_path, group='sqlalchemy')
CONF.set_override('echo', False, group='sqlalchemy')
app = make_app()
logging.debug('Test db path: %s', self.db_path)
logging.debug('Test app.config: %s', app.config)
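One note on the set_override() calls above: CONF is a process-wide singleton, so overrides set in one test stay visible to later tests unless they are cleared. A sketch of the usual cleanup, assuming the import_opt() declarations shown earlier in this test module (clear_override() is part of the oslo.config API; the class name is illustrative):

import unittest

from oslo.config import cfg

CONF = cfg.CONF

class ConfigOverrideTestCase(unittest.TestCase):
    def setUp(self):
        CONF.set_override('reset_db', True)
        CONF.set_override('database_uri', 'sqlite://', group='sqlalchemy')

    def tearDown(self):
        # Put the global CONF back the way we found it.
        CONF.clear_override('reset_db')
        CONF.clear_override('database_uri', group='sqlalchemy')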

etc/eho/eho.conf (new file)

@@ -0,0 +1,171 @@
[DEFAULT]
#
# Options defined in eho.config
#
# enable dev mode (boolean value)
#dev=false
# set host (string value)
#host=
# set port (integer value)
#port=8080
# set application log level (string value)
#log_level=WARN
# resets DB (boolean value)
#reset_db=false
# populates DB with stub data (boolean value)
#stub_data=false
# without that option the application operates in dry run mode
# and does not send any requests to the OpenStack cluster
# (boolean value)
#allow_cluster_ops=false
#
# Options defined in eho.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# Default file mode used when creating log files (string
# value)
#logfile_mode=0644
# format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
# format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
# publish error events (boolean value)
#publish_errors=false
# make deprecations fatal (boolean value)
#fatal_deprecations=false
# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s] "
# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# If this option is specified, the logging configuration file
# specified is used and overrides any other logging options
# specified. Please see the Python logging module
# documentation for details on logging configuration files.
# (string value)
#log_config=<None>
# A logging.Formatter log message format string which may use
# any of the available logging.LogRecord attributes. Default:
# %(default)s (string value)
#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If not set,
# logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The directory to keep log files in (will be
# prepended to --log-file) (string value)
#log_dir=<None>
# Use syslog for logging. (boolean value)
#use_syslog=false
# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
#
# Options defined in eho.server.main
#
# Protocol used to access OpenStack Identity service (string
# value)
#os_auth_protocol=http
# IP or hostname of machine on which OpenStack Identity
# service is located (string value)
os_auth_host=172.18.79.139
# Port of OpenStack Identity service (string value)
#os_auth_port=35357
# This OpenStack user is used to verify provided tokens. The
# user must have admin role in <os_admin_tenant_name> tenant
# (string value)
#os_admin_username=admin
# Password of the admin user (string value)
#os_admin_password=nova
# Name of tenant where the user is admin (string value)
#os_admin_tenant_name=admin
# Name of network which IPs are given to the VMs (string
# value)
#nova_internal_net_name=novanetwork
[cluster_node]
#
# Options defined in eho.server.service.cluster_ops
#
# An existing user on Hadoop image (string value)
#username=root
# User's password (string value)
#password=swordfish
[sqlalchemy]
#
# Options defined in eho.server.main
#
# URL for sqlalchemy database (string value)
#database_uri=sqlite:////tmp/eho-server.db
# Sqlalchemy echo (boolean value)
#echo=false
# Total option count: 38

etc/eho/eho.conf.sample (new file)

@@ -0,0 +1,171 @@
[DEFAULT]
#
# Options defined in eho.config
#
# enable dev mode (boolean value)
#dev=false
# set host (string value)
#host=openstack
# set port (integer value)
#port=8080
# set application log level (string value)
#log_level=WARN
# resets DB (boolean value)
#reset_db=false
# populates DB with stub data (boolean value)
#stub_data=false
# without that option the application operates in dry run mode
# and does not send any requests to the OpenStack cluster
# (boolean value)
#allow_cluster_ops=false
#
# Options defined in eho.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# Default file mode used when creating log files (string
# value)
#logfile_mode=0644
# format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
# format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
# publish error events (boolean value)
#publish_errors=false
# make deprecations fatal (boolean value)
#fatal_deprecations=false
# If an instance is passed with the log message, format it
# like this (string value)
#instance_format="[instance: %(uuid)s] "
# If an instance UUID is passed with the log message, format
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# If this option is specified, the logging configuration file
# specified is used and overrides any other logging options
# specified. Please see the Python logging module
# documentation for details on logging configuration files.
# (string value)
#log_config=<None>
# A logging.Formatter log message format string which may use
# any of the available logging.LogRecord attributes. Default:
# %(default)s (string value)
#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If not set,
# logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The directory to keep log files in (will be
# prepended to --log-file) (string value)
#log_dir=<None>
# Use syslog for logging. (boolean value)
#use_syslog=false
# syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
#
# Options defined in eho.server.main
#
# Protocol used to access OpenStack Identity service (string
# value)
#os_auth_protocol=http
# IP or hostname of machine on which OpenStack Identity
# service is located (string value)
#os_auth_host=openstack
# Port of OpenStack Identity service (string value)
#os_auth_port=35357
# This OpenStack user is used to verify provided tokens. The
# user must have admin role in <os_admin_tenant_name> tenant
# (string value)
#os_admin_username=admin
# Password of the admin user (string value)
#os_admin_password=nova
# Name of tenant where the user is admin (string value)
#os_admin_tenant_name=admin
# Name of network which IPs are given to the VMs (string
# value)
#nova_internal_net_name=novanetwork
[cluster_node]
#
# Options defined in eho.server.service.cluster_ops
#
# An existing user on Hadoop image (string value)
#username=root
# User's password (string value)
#password=swordfish
[sqlalchemy]
#
# Options defined in eho.server.main
#
# URL for sqlalchemy database (string value)
#database_uri=sqlite:////tmp/eho-server.db
# Sqlalchemy echo (boolean value)
#echo=false
# Total option count: 38

tools/conf/README (new file)

@@ -0,0 +1,20 @@
This generate_sample.sh tool is used to generate etc/nova/nova.conf.sample
Run it from the top-level working directory i.e.
$> ./tools/conf/generate_sample.sh
Watch out for warnings about modules like libvirt, qpid and zmq not
being found - these warnings are significant because they result
in options not appearing in the generated config file.
The analyze_opts.py tool is used to find options which appear in
/etc/nova/nova.conf but not in etc/nova/nova.conf.sample
This helps identify options in the nova.conf file which are not used by nova.
The tool also identifies any options which are set to the default value.
Run it from the top-level working directory i.e.
$> ./tools/conf/analyze_opts.py
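Since this commit generates etc/eho/eho.conf.sample rather than nova's file, the analyze_opts.py example above presumably needs the eho paths passed explicitly via the -c/-s flags the script defines, e.g.:
$> ./tools/conf/analyze_opts.py -c etc/eho/eho.conf -s etc/eho/eho.conf.sample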

tools/conf/analyze_opts.py (new executable file)

@@ -0,0 +1,80 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
find_unused_options.py
Compare the nova.conf file with the nova.conf.sample file to find any unused
options or default values in nova.conf
'''
import argparse
import os
import sys
sys.path.append(os.getcwd())
from oslo.config import iniparser
class PropertyCollecter(iniparser.BaseParser):
def __init__(self):
super(PropertyCollecter, self).__init__()
self.key_value_pairs = {}
def assignment(self, key, value):
self.key_value_pairs[key] = value
def new_section(self, section):
pass
@classmethod
def collect_properties(cls, lineiter, sample_format=False):
def clean_sample(f):
for line in f:
if line.startswith("# ") and line != '# nova.conf sample #\n':
line = line[2:]
yield line
pc = cls()
if sample_format:
lineiter = clean_sample(lineiter)
pc.parse(lineiter)
return pc.key_value_pairs
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''Compare the nova.conf
file with the nova.conf.sample file to find any unused options or
default values in nova.conf''')
parser.add_argument('-c', action='store',
default='/etc/nova/nova.conf',
help='path to nova.conf\
(defaults to /etc/nova/nova.conf)')
parser.add_argument('-s', default='./etc/nova/nova.conf.sample',
help='path to nova.conf.sample\
(defaults to ./etc/nova/nova.conf.sample')
options = parser.parse_args()
conf_file_options = PropertyCollecter.collect_properties(open(options.c))
sample_conf_file_options = PropertyCollecter.collect_properties(
open(options.s), sample_format=True)
for k, v in sorted(conf_file_options.items()):
if k not in sample_conf_file_options:
print "Unused:", k
for k, v in sorted(conf_file_options.items()):
if k in sample_conf_file_options and v == sample_conf_file_options[k]:
print "Default valued:", k

tools/conf/extract_opts.py (new file)

@@ -0,0 +1,271 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, SINA Corporation.
#
"""Extracts OpenStack config option info from module(s)."""
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
#from nova.openstack.common import importutils
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
MULTISTROPT: 'multi valued',
}
OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
WORDWRAP_WIDTH = 60
def main(srcfiles):
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
pkg_names.sort()
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
continue
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group, opts in opts_by_group.items():
print_group_opts(group, opts)
print "# Total option count: %d" % OPTION_COUNT
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
__import__(mod_str)
return sys.modules[mod_str]
#return importutils.import_module(mod_str)
except (ValueError, AttributeError), err:
return None
except ImportError, ie:
sys.stderr.write("%s\n" % str(ie))
return None
except Exception, e:
return None
def _guess_groups(opt, mod_obj):
groups = []
# is it in the DEFAULT group?
if (opt.dest in cfg.CONF and
not isinstance(cfg.CONF[opt.dest], cfg.CONF.GroupAttr)):
groups.append('DEFAULT')
# what other groups is it in?
for key, value in cfg.CONF.items():
if not isinstance(value, cfg.CONF.GroupAttr):
continue
if opt.dest not in value:
continue
groups.append(key)
if len(groups) == 1:
return groups[0]
group = None
for g in groups:
if g in mod_obj.__name__:
group = g
break
if group is None and 'DEFAULT' in groups:
sys.stderr.write("Guessing that " + opt.dest +
" in " + mod_obj.__name__ +
" is in DEFAULT group out of " +
','.join(groups) + "\n")
return 'DEFAULT'
if group is None:
sys.stderr.write("Unable to guess what group " + opt.dest +
" in " + mod_obj.__name__ +
" is in out of " + ','.join(groups) + "\n")
sys.exit(1)
sys.stderr.write("Guessing that " + opt.dest +
" in " + mod_obj.__name__ +
" is in the " + group +
" group out of " + ','.join(groups) + "\n")
return group
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print "[%s]" % group
print
global OPTION_COUNT
for mod, opts in opts_by_module:
OPTION_COUNT += len(opts)
print '#'
print '# Options defined in %s' % mod
print '#'
print
for opt in opts:
_print_opt(opt)
print
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(s):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if s.startswith(BASEDIR):
return s.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in s:
return s.replace(BASEDIR, '')
elif s == _get_my_ip():
return '10.0.0.1'
elif s == socket.getfqdn():
return 'nova'
elif s.strip() != s:
return '"%s"' % s
return s
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError), err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
try:
if opt_default is None:
print '#%s=<None>' % opt_name
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print '#%s=%s' % (opt_name, str(opt_default).lower())
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print '#%s=%s' % (opt_name, opt_default)
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print '#%s=%s' % (opt_name, opt_default)
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print '#%s=%s' % (opt_name, ','.join(opt_default))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print '#%s=%s' % (opt_name, default)
print
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) < 2:
print "usage: python %s [srcfile]...\n" % sys.argv[0]
sys.exit(0)
main(sys.argv[1:])

tools/conf/generate_sample.sh (new executable file)

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
# Author: Zhongyue Luo <lzyeval@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
FILES=$(find eho -type f -name "*.py" ! -path "eho/tests/*" -exec \
grep -l "Opt(" {} \; | sort -u)
BINS=$(echo bin/eho-*)
PYTHONPATH=./:${PYTHONPATH} \
tools/with_venv python $(dirname "$0")/extract_opts.py ${FILES} ${BINS} > \
etc/eho/eho.conf.sample
# Remove compiled files created by imp.import_source()
for bin in ${BINS}; do
[ -f ${bin}c ] && rm ${bin}c
done