Centralize Barbican API Config

This centralizes all config options into config.py,
eliminating a race condition in which config options from
some modules might not yet have been registered by the time
the config file was parsed. Plugins are now provided a
separate config object, to which they can add their own
individual options before parsing the config file themselves.

Change-Id: I737bfe092ff71a1275f27de5b9328e9d23032a24
Closes-Bug: #1459448
This commit is contained in:
Kevin Bishop 2015-06-01 14:13:17 -05:00
parent 0bf1268d77
commit 5687cd9b93
37 changed files with 256 additions and 245 deletions

View File

@ -18,10 +18,10 @@ API handler for Cloudkeep's Barbican
"""
import pkgutil
from oslo_config import cfg
from oslo_policy import policy
import pecan
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
@ -29,14 +29,7 @@ from barbican.openstack.common import jsonutils as json
LOG = utils.getLogger(__name__)
MAX_BYTES_REQUEST_INPUT_ACCEPTED = 15000
common_opts = [
cfg.IntOpt('max_allowed_request_size_in_bytes',
default=MAX_BYTES_REQUEST_INPUT_ACCEPTED),
]
CONF = cfg.CONF
CONF.register_opts(common_opts)
CONF = config.CONF
class ApiResource(object):

View File

@ -24,7 +24,6 @@ try:
except ImportError:
newrelic_loaded = False
from oslo_config import cfg
from oslo_log import log
from barbican.api.controllers import cas
@ -39,6 +38,8 @@ from barbican import i18n as u
from barbican.model import repositories
from barbican import queue
CONF = config.CONF
if newrelic_loaded:
newrelic.agent.initialize('/etc/newrelic/newrelic.ini')
@ -76,11 +77,9 @@ def create_main_app(global_config, **local_conf):
"""uWSGI factory method for the Barbican-API application."""
# Queuing initialization
CONF = cfg.CONF
queue.init(CONF, is_server_side=False)
# Configure oslo logging and configuration services.
config.parse_args()
log.setup(CONF, 'barbican')
config.setup_remote_pydev_debug()
@ -102,7 +101,6 @@ def create_main_app(global_config, **local_conf):
def create_admin_app(global_config, **local_conf):
config.parse_args()
wsgi_app = pecan.make_app(versions.VersionController())
return wsgi_app

View File

@ -14,30 +14,16 @@
# under the License.
import uuid
from oslo_config import cfg
import webob.exc
from barbican.api import middleware as mw
from barbican.common import config
from barbican.common import utils
import barbican.context
from barbican import i18n as u
LOG = utils.getLogger(__name__)
# TODO(jwood) Need to figure out why config is ignored in this module.
context_opts = [
cfg.StrOpt('admin_role', default='admin',
help=u._('Role used to identify an authenticated user as '
'administrator.')),
cfg.BoolOpt('allow_anonymous_access', default=False,
help=u._('Allow unauthenticated users to access the API with '
'read-only privileges. This only applies when using '
'ContextMiddleware.')),
]
CONF = cfg.CONF
CONF.register_opts(context_opts)
CONF = config.CONF
class BaseContextMiddleware(mw.Middleware):

View File

@ -18,13 +18,12 @@ A filter middleware that just outputs to logs, for instructive/sample
purposes only.
"""
from oslo_config import cfg
from barbican.api import middleware
from barbican.common import config
from barbican.common import utils
LOG = utils.getLogger(__name__)
CONF = cfg.CONF
CONF = config.CONF
class SimpleFilter(middleware.Middleware):

View File

@ -23,24 +23,181 @@ import os
from oslo_config import cfg
from oslo_log import log
from barbican import i18n as u
import barbican.version
CONF = cfg.CONF
log.register_options(CONF)
MAX_BYTES_REQUEST_INPUT_ACCEPTED = 15000
DEFAULT_MAX_SECRET_BYTES = 10000
KS_NOTIFICATIONS_GRP_NAME = 'keystone_notifications'
LOG = logging.getLogger(__name__)
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
def parse_args(args=None, usage=None, default_config_files=None):
CONF(args=args,
context_opts = [
cfg.StrOpt('admin_role', default='admin',
help=u._('Role used to identify an authenticated user as '
'administrator.')),
cfg.BoolOpt('allow_anonymous_access', default=False,
help=u._('Allow unauthenticated users to access the API with '
'read-only privileges. This only applies when using '
'ContextMiddleware.')),
]
common_opts = [
cfg.IntOpt('max_allowed_request_size_in_bytes',
default=MAX_BYTES_REQUEST_INPUT_ACCEPTED),
cfg.IntOpt('max_allowed_secret_in_bytes',
default=DEFAULT_MAX_SECRET_BYTES),
]
host_opts = [
cfg.StrOpt('host_href', default='http://localhost:9311'),
]
db_opts = [
cfg.StrOpt('sql_connection'),
cfg.IntOpt('sql_idle_timeout', default=3600),
cfg.IntOpt('sql_max_retries', default=60),
cfg.IntOpt('sql_retry_interval', default=1),
cfg.BoolOpt('db_auto_create', default=True),
cfg.IntOpt('max_limit_paging', default=100),
cfg.IntOpt('default_limit_paging', default=10),
cfg.StrOpt('sql_pool_class', default=None),
cfg.BoolOpt('sql_pool_logging', default=False),
cfg.IntOpt('sql_pool_size', default=None),
cfg.IntOpt('sql_pool_max_overflow', default=None),
]
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
retry_opt_group = cfg.OptGroup(name='retry_scheduler',
title='Retry/Scheduler Options')
retry_opts = [
cfg.FloatOpt(
'initial_delay_seconds', default=10.0,
help=u._('Seconds (float) to wait before starting retry scheduler')),
cfg.FloatOpt(
'periodic_interval_max_seconds', default=10.0,
help=u._('Seconds (float) to wait between periodic schedule events')),
]
queue_opt_group = cfg.OptGroup(name='queue',
title='Queue Application Options')
queue_opts = [
cfg.BoolOpt('enable', default=False,
help=u._('True enables queuing, False invokes '
'workers synchronously')),
cfg.StrOpt('namespace', default='barbican',
help=u._('Queue namespace')),
cfg.StrOpt('topic', default='barbican.workers',
help=u._('Queue topic name')),
cfg.StrOpt('version', default='1.1',
help=u._('Version of tasks invoked via queue')),
cfg.StrOpt('server_name', default='barbican.queue',
help=u._('Server name for RPC task processing server')),
]
ks_queue_opt_group = cfg.OptGroup(name=KS_NOTIFICATIONS_GRP_NAME,
title='Keystone Notification Options')
ks_queue_opts = [
cfg.BoolOpt('enable', default=False,
help=u._('True enables keystone notification listener '
' functionality.')),
cfg.StrOpt('control_exchange', default='openstack',
help=u._('The default exchange under which topics are scoped. '
'May be overridden by an exchange name specified in '
'the transport_url option.')),
cfg.StrOpt('topic', default='notifications',
help=u._("Keystone notification queue topic name. This name "
"needs to match one of values mentioned in Keystone "
"deployment's 'notification_topics' configuration "
"e.g."
" notification_topics=notifications, "
" barbican_notifications"
"Multiple servers may listen on a topic and messages "
"will be dispatched to one of the servers in a "
"round-robin fashion. That's why Barbican service "
"should have its own dedicated notification queue so "
"that it receives all of Keystone notifications.")),
cfg.BoolOpt('allow_requeue', default=False,
help=u._('True enables requeue feature in case of notification'
' processing error. Enable this only when underlying '
'transport supports this feature.')),
cfg.StrOpt('version', default='1.0',
help=u._('Version of tasks invoked via notifications')),
cfg.IntOpt('thread_pool_size', default=10,
help=u._('Define the number of max threads to be used for '
'notification server processing functionality.')),
]
def parse_args(conf, args=None, usage=None, default_config_files=None):
conf(args=args if args else [],
project='barbican',
prog='barbican-api',
version=barbican.version.__version__,
usage=usage,
default_config_files=default_config_files)
CONF.pydev_debug_host = os.environ.get('PYDEV_DEBUG_HOST')
CONF.pydev_debug_port = os.environ.get('PYDEV_DEBUG_PORT')
conf.pydev_debug_host = os.environ.get('PYDEV_DEBUG_HOST')
conf.pydev_debug_port = os.environ.get('PYDEV_DEBUG_PORT')
def new_config():
conf = cfg.ConfigOpts()
log.register_options(conf)
conf.register_opts(context_opts)
conf.register_opts(common_opts)
conf.register_opts(host_opts)
conf.register_opts(db_opts)
conf.register_opts(eventlet_backdoor_opts)
conf.register_opts(periodic_opts)
conf.register_opts(ssl_opts, "ssl")
conf.register_group(retry_opt_group)
conf.register_opts(retry_opts, group=retry_opt_group)
conf.register_group(queue_opt_group)
conf.register_opts(queue_opts, group=queue_opt_group)
conf.register_group(ks_queue_opt_group)
conf.register_opts(ks_queue_opts, group=ks_queue_opt_group)
return conf
def setup_remote_pydev_debug():
@ -63,3 +220,7 @@ def setup_remote_pydev_debug():
'listening on debug-host \'%s\' debug-port \'%s\'.',
CONF.pydev_debug_host, CONF.pydev_debug_port)
raise
CONF = new_config()
LOG = logging.getLogger(__name__)
parse_args(CONF)

View File

@ -21,19 +21,14 @@ import importlib
import mimetypes
import uuid
from oslo_config import cfg
from oslo_log import log
import pecan
from barbican.common import config
from barbican import i18n as u
host_opts = [
cfg.StrOpt('host_href', default='http://localhost:9311'),
]
CONF = cfg.CONF
CONF.register_opts(host_opts)
CONF = config.CONF
# Current API version

View File

@ -19,10 +19,10 @@ import base64
import jsonschema as schema
import ldap
from OpenSSL import crypto
from oslo_config import cfg
import six
from barbican.api import controllers
from barbican.common import config
from barbican.common import exception
from barbican.common import hrefs
from barbican.common import utils
@ -34,15 +34,9 @@ from barbican.plugin.interface import secret_store
from barbican.plugin.util import mime_types
DEFAULT_MAX_SECRET_BYTES = config.DEFAULT_MAX_SECRET_BYTES
LOG = utils.getLogger(__name__)
DEFAULT_MAX_SECRET_BYTES = 10000
common_opts = [
cfg.IntOpt('max_allowed_secret_in_bytes',
default=DEFAULT_MAX_SECRET_BYTES),
]
CONF = cfg.CONF
CONF.register_opts(common_opts)
CONF = config.CONF
MYSQL_SMALL_INT_MAX = 32767

View File

@ -13,12 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import oslo_context
from oslo_policy import policy
from barbican.common import config
CONF = cfg.CONF
CONF = config.CONF
class RequestContext(oslo_context.context.RequestContext):

View File

@ -25,19 +25,14 @@ import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from oslo_config import cfg
from barbican.common import config
from barbican.common import utils
LOG = utils.getLogger(__name__)
db_opts = [
cfg.StrOpt('sql_connection'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF = config.CONF
def init_config(sql_url=None):

View File

@ -24,13 +24,12 @@ import logging
import time
import uuid
from oslo_config import cfg
from oslo_log import log
import sqlalchemy
from sqlalchemy import func as sa_func
from sqlalchemy import or_
import sqlalchemy.orm as sa_orm
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
@ -68,23 +67,8 @@ _SECRET_META_REPOSITORY = None
_SECRET_REPOSITORY = None
_TRANSPORT_KEY_REPOSITORY = None
db_opts = [
cfg.IntOpt('sql_idle_timeout', default=3600),
cfg.IntOpt('sql_max_retries', default=60),
cfg.IntOpt('sql_retry_interval', default=1),
cfg.BoolOpt('db_auto_create', default=True),
cfg.StrOpt('sql_connection'),
cfg.IntOpt('max_limit_paging', default=100),
cfg.IntOpt('default_limit_paging', default=10),
cfg.StrOpt('sql_pool_class', default=None),
cfg.BoolOpt('sql_pool_logging', default=False),
cfg.IntOpt('sql_pool_size', default=None),
cfg.IntOpt('sql_pool_max_overflow', default=None),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
log.register_options(CONF)
CONF = config.CONF
def hard_reset():

View File

@ -28,8 +28,8 @@ import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from barbican.common import config
from barbican.openstack.common._i18n import _LI
help_for_backdoor_port = (
@ -39,20 +39,15 @@ help_for_backdoor_port = (
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
CONF = config.CONF
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo.config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
return [(None, copy.deepcopy(config.eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):

View File

@ -16,21 +16,13 @@ import logging
import random
import time
from oslo_config import cfg
import six
from barbican.common import config
from barbican.openstack.common._i18n import _, _LE, _LI
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
CONF = config.CONF
LOG = logging.getLogger(__name__)

View File

@ -35,15 +35,15 @@ except ImportError:
import eventlet
from eventlet import event
from oslo_config import cfg
from barbican.common import config
from barbican.openstack.common import eventlet_backdoor
from barbican.openstack.common._i18n import _LE, _LI, _LW
from barbican.openstack.common import systemd
from barbican.openstack.common import threadgroup
CONF = cfg.CONF
CONF = config.CONF
LOG = logging.getLogger(__name__)
@ -138,7 +138,7 @@ class Launcher(object):
:returns: None
"""
cfg.CONF.reload_config_files()
config.CONF.reload_config_files()
self.services.restart()

View File

@ -17,29 +17,11 @@
import os
import ssl
from oslo_config import cfg
from barbican.common import config
from barbican.openstack.common.gettextutils import _ # noqa
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
CONF = config.CONF
def is_enabled():

View File

@ -14,6 +14,7 @@
from oslo_config import cfg
from stevedore import named
from barbican.common import config
from barbican.common import utils
from barbican import i18n as u
from barbican.plugin.crypto import crypto
@ -23,7 +24,7 @@ from barbican.plugin.util import utils as plugin_utils
_PLUGIN_MANAGER = None
CONF = cfg.CONF
CONF = config.new_config()
DEFAULT_PLUGIN_NAMESPACE = 'barbican.crypto.plugin'
DEFAULT_PLUGINS = ['simple_crypto']
@ -42,6 +43,7 @@ crypto_opts = [
]
CONF.register_group(crypto_opt_group)
CONF.register_opts(crypto_opts, group=crypto_opt_group)
config.parse_args(CONF)
class _CryptoPluginManager(named.NamedExtensionManager):

View File

@ -14,13 +14,14 @@ import base64
from oslo_config import cfg
from barbican.common import config
from barbican.common import utils
from barbican import i18n as u
from barbican.openstack.common import jsonutils as json
from barbican.plugin.crypto import crypto as plugin
from barbican.plugin.crypto import pkcs11
CONF = cfg.CONF
CONF = config.new_config()
LOG = utils.getLogger(__name__)
p11_crypto_plugin_group = cfg.OptGroup(name='p11_crypto_plugin',
@ -42,6 +43,7 @@ p11_crypto_plugin_opts = [
]
CONF.register_group(p11_crypto_plugin_group)
CONF.register_opts(p11_crypto_plugin_opts, group=p11_crypto_plugin_group)
config.parse_args(CONF)
class P11CryptoPlugin(plugin.CryptoPluginBase):
@ -53,7 +55,7 @@ class P11CryptoPlugin(plugin.CryptoPluginBase):
outside the HSM.
"""
def __init__(self, conf=cfg.CONF, ffi=None):
def __init__(self, conf=CONF, ffi=None):
self.conf = conf
if conf.p11_crypto_plugin.library_path is None:
raise ValueError(u._("library_path is required"))

View File

@ -19,11 +19,12 @@ from cryptography import fernet
from oslo_config import cfg
import six
from barbican.common import config
from barbican import i18n as u
from barbican.plugin.crypto import crypto as c
CONF = cfg.CONF
CONF = config.new_config()
simple_crypto_plugin_group = cfg.OptGroup(name='simple_crypto_plugin',
title="Simple Crypto Plugin Options")
@ -35,6 +36,7 @@ simple_crypto_plugin_opts = [
]
CONF.register_group(simple_crypto_plugin_group)
CONF.register_opts(simple_crypto_plugin_opts, group=simple_crypto_plugin_group)
config.parse_args(CONF)
class SimpleCryptoPlugin(c.CryptoPluginBase):

View File

@ -30,13 +30,14 @@ import pki.kra
import pki.profile
from requests import exceptions as request_exceptions
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
import barbican.plugin.interface.certificate_manager as cm
import barbican.plugin.interface.secret_store as sstore
CONF = cfg.CONF
CONF = config.new_config()
LOG = utils.getLogger(__name__)
dogtag_plugin_group = cfg.OptGroup(name='dogtag_plugin',
@ -65,6 +66,7 @@ dogtag_plugin_opts = [
CONF.register_group(dogtag_plugin_group)
CONF.register_opts(dogtag_plugin_opts, group=dogtag_plugin_group)
config.parse_args(CONF)
CERT_HEADER = "-----BEGIN CERTIFICATE-----"
CERT_FOOTER = "-----END CERTIFICATE-----"

View File

@ -27,6 +27,7 @@ from oslo_config import cfg
import six
from stevedore import named
from barbican.common import config
from barbican.common import exception
import barbican.common.utils as utils
from barbican import i18n as u
@ -35,7 +36,7 @@ from barbican.model import repositories as repos
from barbican.plugin.util import utils as plugin_utils
CONF = cfg.CONF
CONF = config.new_config()
# Configuration for certificate processing plugins:
DEFAULT_PLUGIN_NAMESPACE = 'barbican.certificate.plugin'
@ -55,6 +56,7 @@ cert_opts = [
]
CONF.register_group(cert_opt_group)
CONF.register_opts(cert_opts, group=cert_opt_group)
config.parse_args(CONF)
# Configuration for certificate eventing plugins:

View File

@ -19,6 +19,7 @@ from oslo_config import cfg
import six
from stevedore import named
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
@ -27,7 +28,7 @@ from barbican.plugin.util import utils as plugin_utils
_SECRET_STORE = None
CONF = cfg.CONF
CONF = config.new_config()
DEFAULT_PLUGIN_NAMESPACE = 'barbican.secretstore.plugin'
DEFAULT_PLUGINS = ['store_crypto']
@ -45,6 +46,7 @@ store_opts = [
]
CONF.register_group(store_opt_group)
CONF.register_opts(store_opts, group=store_opt_group)
config.parse_args(CONF)
class SecretStorePluginNotFound(exception.BarbicanHTTPException):

View File

@ -32,13 +32,14 @@ from kmip.core import objects as kmip_objects
from oslo_config import cfg
from oslo_log import log
from barbican.common import config
from barbican import i18n as u # noqa
from barbican.plugin.interface import secret_store as ss
from barbican.plugin.util import translations
LOG = log.getLogger(__name__)
CONF = cfg.CONF
CONF = config.new_config()
kmip_opt_group = cfg.OptGroup(name='kmip_plugin', title='KMIP Plugin')
kmip_opts = [
@ -78,6 +79,7 @@ kmip_opts = [
]
CONF.register_group(kmip_opt_group)
CONF.register_opts(kmip_opts, group=kmip_opt_group)
config.parse_args(CONF)
attribute_debug_msg = "Created attribute type %s with value %s"

View File

@ -20,11 +20,12 @@ import uuid
from OpenSSL import crypto
from oslo_config import cfg
from barbican.common import config
from barbican.common import utils
from barbican.openstack.common import gettextutils as u
import barbican.plugin.interface.certificate_manager as cert_manager
CONF = cfg.CONF
CONF = config.new_config()
LOG = utils.getLogger(__name__)
@ -40,6 +41,7 @@ snakeoil_ca_plugin_opts = [
CONF.register_group(snakeoil_ca_plugin_group)
CONF.register_opts(snakeoil_ca_plugin_opts, group=snakeoil_ca_plugin_group)
config.parse_args(CONF)
class SnakeoilCA(object):

View File

@ -13,8 +13,7 @@
import base64
from oslo_config import cfg
from barbican.common import config
from barbican.common import utils
from barbican.model import models
from barbican.model import repositories
@ -22,7 +21,8 @@ from barbican.plugin.crypto import crypto
from barbican.plugin.crypto import manager
from barbican.plugin.interface import secret_store as sstore
CONF = cfg.CONF
CONF = config.new_config()
config.parse_args(CONF)
class StoreCryptoContext(object):

View File

@ -21,10 +21,11 @@ from requests import exceptions as request_exceptions
from symantecssl.core import Symantec
from symantecssl import exceptions as symantec_exceptions
from barbican.common import config
from barbican import i18n as u
from barbican.plugin.interface import certificate_manager as cert
CONF = cfg.CONF
CONF = config.new_config()
symantec_plugin_group = cfg.OptGroup(name='symantec_plugin',
title='Symantec Plugin Options')
@ -40,6 +41,7 @@ symantec_plugin_opts = [
CONF.register_group(symantec_plugin_group)
CONF.register_opts(symantec_plugin_opts, group=symantec_plugin_group)
config.parse_args(CONF)
class SymantecCertificatePlugin(cert.CertificatePluginBase):

View File

@ -16,78 +16,21 @@
"""
Queue objects for Cloudkeep's Barbican
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging.notify import dispatcher as notfiy_dispatcher
from oslo_messaging import server as msg_server
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
LOG = utils.getLogger(__name__)
queue_opt_group = cfg.OptGroup(name='queue',
title='Queue Application Options')
# Constant at one place if this needs to be changed later
KS_NOTIFICATIONS_GRP_NAME = config.KS_NOTIFICATIONS_GRP_NAME
queue_opts = [
cfg.BoolOpt('enable', default=False,
help=u._('True enables queuing, False invokes '
'workers synchronously')),
cfg.StrOpt('namespace', default='barbican',
help=u._('Queue namespace')),
cfg.StrOpt('topic', default='barbican.workers',
help=u._('Queue topic name')),
cfg.StrOpt('version', default='1.1',
help=u._('Version of tasks invoked via queue')),
cfg.StrOpt('server_name', default='barbican.queue',
help=u._('Server name for RPC task processing server')),
]
# constant at one place if this needs to be changed later
KS_NOTIFICATIONS_GRP_NAME = 'keystone_notifications'
ks_queue_opt_group = cfg.OptGroup(name=KS_NOTIFICATIONS_GRP_NAME,
title='Keystone Notification Options')
ks_queue_opts = [
cfg.BoolOpt('enable', default=False,
help=u._('True enables keystone notification listener '
' functionality.')),
cfg.StrOpt('control_exchange', default='openstack',
help=u._('The default exchange under which topics are scoped. '
'May be overridden by an exchange name specified in '
' the transport_url option.')),
cfg.StrOpt('topic', default='notifications',
help=u._("Keystone notification queue topic name. This name "
"needs to match one of values mentioned in Keystone "
"deployment\'s 'notification_topics' configuration "
"e.g."
" notification_topics=notifications, "
" barbican_notifications"
"Multiple servers may listen on a topic and messages "
" will be dispatched to one of the servers in a "
"round-robin fashion. That's why Barbican service "
" should have its own dedicated notification queue so "
" that it receives all of Keystone notifications.")),
cfg.BoolOpt('allow_requeue', default=False,
help=u._('True enables requeue feature in case of notification'
' processing error. Enable this only when underlying '
'transport supports this feature.')),
cfg.StrOpt('version', default='1.0',
help=u._('Version of tasks invoked via notifications')),
cfg.IntOpt('thread_pool_size', default=10,
help=u._('Define the number of max threads to be used for '
'notification server processing functionality.')),
]
CONF = cfg.CONF
CONF.register_group(queue_opt_group)
CONF.register_opts(queue_opts, group=queue_opt_group)
CONF.register_group(ks_queue_opt_group)
CONF.register_opts(ks_queue_opts, group=ks_queue_opt_group)
CONF = config.CONF
TRANSPORT = None
IS_SERVER_SIDE = True

View File

@ -19,8 +19,7 @@ Retry/scheduler classes and logic.
import datetime
import random
from oslo_config import cfg
from barbican.common import config
from barbican.common import utils
from barbican import i18n as u
from barbican.model import models
@ -31,21 +30,7 @@ from barbican.queue import client as async_client
LOG = utils.getLogger(__name__)
retry_opt_group = cfg.OptGroup(name='retry_scheduler',
title='Retry/Scheduler Options')
retry_opts = [
cfg.FloatOpt(
'initial_delay_seconds', default=10.0,
help=u._('Seconds (float) to wait before starting retry scheduler')),
cfg.FloatOpt(
'periodic_interval_max_seconds', default=10.0,
help=u._('Seconds (float) to wait between periodic schedule events')),
]
CONF = cfg.CONF
CONF.register_group(retry_opt_group)
CONF.register_opts(retry_opts, group=retry_opt_group)
CONF = config.CONF
def _compute_next_periodic_interval():

View File

@ -26,8 +26,7 @@ try:
except ImportError:
newrelic_loaded = False
from oslo_config import cfg
from barbican.common import config
from barbican.common import utils
from barbican import i18n as u
from barbican.model import models
@ -42,7 +41,7 @@ if newrelic_loaded:
LOG = utils.getLogger(__name__)
CONF = cfg.CONF
CONF = config.CONF
# Maps the common/shared RetryTasks (returned from lower-level business logic

View File

@ -20,7 +20,6 @@ For typical-flow business logic tests of these classes, see the
import os
import mock
from oslo_config import cfg
from oslo_policy import policy
from webob import exc
@ -29,17 +28,18 @@ from barbican.api.controllers import containers
from barbican.api.controllers import orders
from barbican.api.controllers import secrets
from barbican.api.controllers import versions
from barbican.common import config
from barbican import context
from barbican.model import models
from barbican.tests import utils
CONF = cfg.CONF
# Point to the policy.json file located in source control.
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../../etc', 'barbican'))
CONF = config.new_config()
ENFORCER = policy.Enforcer(CONF)

View File

@ -11,9 +11,9 @@
# limitations under the License.
import mock
from oslo_config import cfg
import sqlalchemy
from barbican.common import config
from barbican.common import exception
from barbican.model import models
from barbican.model import repositories
@ -25,7 +25,7 @@ class WhenCleaningRepositoryPagingParameters(utils.BaseTestCase):
def setUp(self):
super(WhenCleaningRepositoryPagingParameters, self).setUp()
self.CONF = cfg.CONF
self.CONF = config.CONF
self.default_limit = self.CONF.default_limit_paging
def test_parameters_not_assigned(self):
@ -92,7 +92,7 @@ class WhenInvokingExceptionMethods(utils.BaseTestCase):
def setUp(self):
super(WhenInvokingExceptionMethods, self).setUp()
self.CONF = cfg.CONF
self.CONF = config.CONF
self.entity_id = '123456'
self.entity_name = 'test_entity'

View File

@ -15,8 +15,7 @@
import datetime
import time
from oslo_config import cfg
from barbican.common import config
from barbican.common import exception
from barbican.model import models
from barbican.model import repositories
@ -103,7 +102,7 @@ class WhenTestingOrderRetryTaskRepository(database_utils.RepositoryTestCase):
self.assertEqual([], entities)
self.assertEqual(0, offset)
self.assertEqual(cfg.CONF.default_limit_paging, limit)
self.assertEqual(config.CONF.default_limit_paging, limit)
self.assertEqual(0, total)
def test_should_raise_no_result_found_with_exceptions(self):

View File

@ -10,8 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from barbican.common import config
from barbican.common import exception
from barbican.model import models
from barbican.model import repositories
@ -34,7 +33,7 @@ class WhenTestingOrderRepository(database_utils.RepositoryTestCase):
self.assertEqual([], entities)
self.assertEqual(0, offset)
self.assertEqual(cfg.CONF.default_limit_paging, limit)
self.assertEqual(config.CONF.default_limit_paging, limit)
self.assertEqual(0, total)
def test_should_raise_no_result_found_with_exceptions(self):

View File

@ -31,7 +31,6 @@ from kmip.core import objects
from kmip.core import secrets
from kmip.services import kmip_client as proxy
from kmip.services import results
from oslo_config import cfg
from barbican.plugin import kmip_secret_store as kss
@ -89,7 +88,7 @@ class WhenTestingKMIPSecretStore(utils.BaseTestCase):
self.expected_username = "sample_username"
self.expected_password = "sample_password"
CONF = cfg.CONF
CONF = kss.CONF
CONF.kmip_plugin.username = self.expected_username
CONF.kmip_plugin.password = self.expected_password
CONF.kmip_plugin.keyfile = None
@ -624,7 +623,7 @@ class WhenTestingKMIPSecretStore(utils.BaseTestCase):
actual_credential.credential_value.password.value)
def test_credential_None(self):
CONF = cfg.CONF
CONF = kss.CONF
CONF.kmip_plugin.username = None
CONF.kmip_plugin.password = None
CONF.kmip_plugin.keyfile = None
@ -696,7 +695,7 @@ class WhenTestingKMIPSecretStore(utils.BaseTestCase):
"KMIPSecretStore._validate_keyfile_permissions")
with mock.patch(func, **config) as m:
CONF = cfg.CONF
CONF = kss.CONF
CONF.kmip_plugin.keyfile = '/some/path'
kss.KMIPSecretStore(CONF)
self.assertEqual(len(m.mock_calls), 1)

View File

@ -15,10 +15,10 @@
import uuid
import mock
from oslo_config import cfg
import oslo_messaging
import six
from barbican.common import config
from barbican.openstack.common import service
from barbican import queue
from barbican.queue import keystone_listener
@ -30,7 +30,7 @@ class UtilMixin(object):
def __init__(self, *args, **kwargs):
super(UtilMixin, self).__init__(*args, **kwargs)
self.conf = cfg.CONF
self.conf = config.CONF
# dict which has item as {property: (value, group_name)}
self.overrides = {}

View File

@ -6,8 +6,8 @@ import argparse
sys.path.insert(0, os.getcwd())
from barbican.common import config
from barbican.model.migration import commands
from oslo_config import cfg
from oslo_log import log
@ -124,8 +124,7 @@ def _exception_is_successfull_exit(thrown_exception):
def main():
# Import and configure logging.
CONF = cfg.CONF
log.register_options(CONF)
CONF = config.CONF
log.setup(CONF, 'barbican-db-manage')
LOG = log.getLogger(__name__)
LOG.debug("Performing database schema migration...")

View File

@ -55,7 +55,6 @@ def fail(returncode, e):
if __name__ == '__main__':
try:
config.parse_args()
config.setup_remote_pydev_debug()
# Import and configure logging.
log.setup('barbican')
@ -64,7 +63,7 @@ if __name__ == '__main__':
LOG.info("Booting up Barbican Keystone listener node...")
# Queuing initialization
CONF = cfg.CONF
CONF = config.CONF
queue.init(CONF)
if getattr(getattr(CONF, queue.KS_NOTIFICATIONS_GRP_NAME), 'enable'):

View File

@ -52,8 +52,7 @@ def fail(returncode, e):
if __name__ == '__main__':
try:
config.parse_args()
CONF = cfg.CONF
CONF = config.CONF
# Import and configure logging.
log.setup(CONF, 'barbican-retry-scheduler')

View File

@ -41,7 +41,6 @@ from barbican.openstack.common import service
from barbican import queue
from barbican.queue import server
from oslo_config import cfg
from oslo_log import log
@ -52,8 +51,7 @@ def fail(returncode, e):
if __name__ == '__main__':
try:
config.parse_args()
CONF = cfg.CONF
CONF = config.CONF
# Import and configure logging.
log.setup(CONF, 'barbican')