Use consistent [database] options

Currently Barbican does not use oslo.db to set up its database
connection; it has its own implementation built directly on SQLAlchemy.
Because of this, the database parameters were never updated and are
still based on option names from a quite old version of the oslo.db
library. This change renames the database options so that their names
become consistent with oslo.db, which will make it easier to replace
the current implementation with oslo.db in the future.

Change-Id: I36926e62842780068f7e66564233c121c37565d0
commit 12aa8a9339
parent c8e3dc14e6
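For deployers, the net effect is that the database options move from the [DEFAULT] section of barbican.conf to a new [database] section under their oslo.db names. A minimal before/after sketch, using the sqlite default from this patch as a stand-in for a real connection URL:

-[DEFAULT]
-sql_connection = sqlite:///barbican.sqlite
-sql_idle_timeout = 3600
-sql_max_retries = 60
-sql_retry_interval = 1
-sql_pool_size = 5
-sql_pool_max_overflow = 10
+[database]
+connection = sqlite:///barbican.sqlite
+connection_recycle_time = 3600
+max_retries = 60
+retry_interval = 1
+max_pool_size = 5
+max_overflow = 10

The old names keep working for now; each renamed option declares its predecessor through cfg.DeprecatedOpt, as the config.py hunks below show.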
@@ -75,7 +75,7 @@ class DbCommands(object):
               do_soft_delete_expired_secrets=None):
         """Clean soft deletions in the database"""
         if dburl is None:
-            dburl = CONF.sql_connection
+            dburl = CONF.database.connection
         if log_file is None:
             log_file = CONF.log_file
 
@@ -99,7 +99,7 @@ class DbCommands(object):
         """Process the 'revision' Alembic command."""
         if dburl is None:
             commands.generate(autogenerate=autogen, message=str(message),
-                              sql_url=CONF.sql_connection)
+                              sql_url=CONF.database.connection)
         else:
             commands.generate(autogenerate=autogen, message=str(message),
                               sql_url=str(dburl))
@@ -115,7 +115,7 @@ class DbCommands(object):
         """Process the 'upgrade' Alembic command."""
         if dburl is None:
             commands.upgrade(to_version=str(version),
-                             sql_url=CONF.sql_connection)
+                             sql_url=CONF.database.connection)
         else:
             commands.upgrade(to_version=str(version), sql_url=str(dburl))
 
@@ -127,7 +127,7 @@ class DbCommands(object):
           default=False, help='Show full information about the revisions.')
     def history(self, conf, dburl=None, verbose=None):
         if dburl is None:
-            commands.history(verbose, sql_url=CONF.sql_connection)
+            commands.history(verbose, sql_url=CONF.database.connection)
         else:
             commands.history(verbose, sql_url=str(dburl))
 
@@ -139,7 +139,7 @@ class DbCommands(object):
           default=False, help='Show full information about the revisions.')
     def current(self, conf, dburl=None, verbose=None):
         if dburl is None:
-            commands.current(verbose, sql_url=CONF.sql_connection)
+            commands.current(verbose, sql_url=CONF.database.connection)
         else:
             commands.current(verbose, sql_url=str(dburl))
     sync_secret_stores_description = ("Sync secret_stores with " # nosec
@@ -157,7 +157,7 @@ class DbCommands(object):
                            log_file=None):
         """Sync secret_stores table with barbican.conf"""
         if dburl is None:
-            dburl = CONF.sql_connection
+            dburl = CONF.database.connection
         if log_file is None:
             log_file = CONF.log_file
 
@@ -54,7 +54,8 @@ class DatabaseManager(object):
     def get_main_parser(self):
         """Create top-level parser and arguments."""
         parser = argparse.ArgumentParser(description='Barbican DB manager.')
-        parser.add_argument('--dburl', '-d', default=self.conf.sql_connection,
+        parser.add_argument('--dburl', '-d',
+                            default=self.conf.database.connection,
                             help='URL to the database.')
 
         return parser
@@ -42,7 +42,7 @@ class DBManageTestCase(base.TestCase):
         self.sbehaviors = secret_behaviors.SecretBehaviors(self.client)
         self.cbehaviors = container_behaviors.ContainerBehaviors(self.client)
 
-        db_url = BCONF.sql_connection
+        db_url = BCONF.database.connection
 
         time.sleep(5)
         # Setup session for tests to query DB
@@ -33,7 +33,7 @@ class KekRewrap(object):
 
     def __init__(self, conf):
         self.dry_run = False
-        self.db_engine = session.create_engine(conf.sql_connection)
+        self.db_engine = session.create_engine(conf.database.connection)
         self._session_creator = scoping.scoped_session(
             orm.sessionmaker(
                 bind=self.db_engine,
@@ -159,7 +159,7 @@ def main():
     args = parser.parse_args()
 
     migrator = KekSignatureMigrator(
-        db_connection=CONF.sql_connection,
+        db_connection=CONF.database.connection,
         library_path=CONF.p11_crypto_plugin.library_path,
         login=CONF.p11_crypto_plugin.login,
         slot_id=CONF.p11_crypto_plugin.slot_id
@@ -64,10 +64,12 @@ host_opts = [
                "'http://localhost:9311'")),
 ]
 
-db_opts = [
-    cfg.StrOpt('sql_connection',
+core_db_opts = [
+    cfg.StrOpt('connection',
                default="sqlite:///barbican.sqlite",
                secret=True,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
+                                                  group='DEFAULT')],
                help=u._("SQLAlchemy connection string for the reference "
                         "implementation registry server. Any valid "
                         "SQLAlchemy connection string is fine. See: "
@@ -75,7 +77,9 @@ db_opts = [
                         "sqlalchemy/connections.html#sqlalchemy."
                         "create_engine. Note: For absolute addresses, use "
                         "'////' slashes after 'sqlite:'.")),
-    cfg.IntOpt('sql_idle_timeout', default=3600,
+    cfg.IntOpt('connection_recycle_time', default=3600,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
+                                                  group='DEFAULT')],
                help=u._("Period in seconds after which SQLAlchemy should "
                         "reestablish its connection to the database. MySQL "
                         "uses a default `wait_timeout` of 8 hours, after "
@@ -83,13 +87,46 @@ db_opts = [
                         "in 'MySQL Gone Away' exceptions. If you notice this, "
                         "you can lower this value to ensure that SQLAlchemy "
                         "reconnects before MySQL can drop the connection.")),
-    cfg.IntOpt('sql_max_retries', default=60,
+    cfg.IntOpt('max_retries', default=60,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
+                                                  group='DEFAULT')],
                help=u._("Maximum number of database connection retries "
                         "during startup. Set to -1 to specify an infinite "
                         "retry count.")),
-    cfg.IntOpt('sql_retry_interval', default=1,
+    cfg.IntOpt('retry_interval', default=1,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
+                                                  group='DEFAULT')],
                help=u._("Interval between retries of opening a SQL "
                         "connection.")),
+    cfg.IntOpt('max_pool_size', default=5,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_pool_size',
+                                                  group='DEFAULT')],
+               help=u._("Size of pool used by SQLAlchemy. This is the largest "
+                        "number of connections that will be kept persistently "
+                        "in the pool. Can be set to 0 to indicate no size "
+                        "limit. To disable pooling, use a NullPool with "
+                        "sql_pool_class instead. Comment out to allow "
+                        "SQLAlchemy to select the default.")),
+    cfg.IntOpt('max_overflow', default=10,
+               deprecated_opts=[cfg.DeprecatedOpt('sql_pool_max_overflow',
+                                                  group='DEFAULT')],
+               help=u._("# The maximum overflow size of the pool used by "
+                        "SQLAlchemy. When the number of checked-out "
+                        "connections reaches the size set in max_pool_size, "
+                        "additional connections will be returned up to this "
+                        "limit. It follows then that the total number of "
+                        "simultaneous connections the pool will allow is "
+                        "max_pool_size + max_overflow. Can be set to -1 to "
+                        "indicate no overflow limit, so no limit will be "
+                        "placed on the total number of concurrent "
+                        "connections. Comment out to allow SQLAlchemy to "
+                        "select the default.")),
+]
+
+db_opt_group = cfg.OptGroup(name='database',
+                            title='Database Options')
+
+db_opts = [
     cfg.BoolOpt('db_auto_create', default=False,
                 help=u._("Create the Barbican database on service startup.")),
     cfg.IntOpt('max_limit_paging', default=100,
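The deprecated_opts wiring above is what keeps existing deployments working across the rename. A self-contained sketch of the mechanism, assuming only oslo.config and trimming the list to the connection option for brevity:

from oslo_config import cfg

# New-style option in the 'database' group, aliased to the legacy
# [DEFAULT] sql_connection name.
core_db_opts = [
    cfg.StrOpt('connection',
               default='sqlite:///barbican.sqlite',
               secret=True,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                  group='DEFAULT')]),
]

conf = cfg.ConfigOpts()
conf.register_group(cfg.OptGroup(name='database'))
conf.register_opts(core_db_opts, group='database')
conf([])  # no config files here, so defaults apply

# The value is always read through the new name. If a config file
# still sets [DEFAULT] sql_connection, oslo.config resolves it through
# the alias and logs a deprecation warning.
print(conf.database.connection)  # sqlite:///barbican.sqlite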
@@ -110,25 +147,6 @@ db_opts = [
     cfg.BoolOpt('sql_pool_logging', default=False,
                 help=u._("Show SQLAlchemy pool-related debugging output in "
                          "logs (sets DEBUG log level output) if specified.")),
-    cfg.IntOpt('sql_pool_size', default=5,
-               help=u._("Size of pool used by SQLAlchemy. This is the largest "
-                        "number of connections that will be kept persistently "
-                        "in the pool. Can be set to 0 to indicate no size "
-                        "limit. To disable pooling, use a NullPool with "
-                        "sql_pool_class instead. Comment out to allow "
-                        "SQLAlchemy to select the default.")),
-    cfg.IntOpt('sql_pool_max_overflow', default=10,
-               help=u._("# The maximum overflow size of the pool used by "
-                        "SQLAlchemy. When the number of checked-out "
-                        "connections reaches the size set in sql_pool_size, "
-                        "additional connections will be returned up to this "
-                        "limit. It follows then that the total number of "
-                        "simultaneous connections the pool will allow is "
-                        "sql_pool_size + sql_pool_max_overflow. Can be set "
-                        "to -1 to indicate no overflow limit, so no limit "
-                        "will be placed on the total number of concurrent "
-                        "connections. Comment out to allow SQLAlchemy to "
-                        "select the default.")),
 ]
 
 retry_opt_group = cfg.OptGroup(name='retry_scheduler',
@@ -239,6 +257,7 @@ def list_opts():
     yield None, host_opts
     yield None, db_opts
     yield None, _options.eventlet_backdoor_opts
+    yield db_opt_group, core_db_opts
     yield retry_opt_group, retry_opts
     yield queue_opt_group, queue_opts
     yield ks_queue_opt_group, ks_queue_opts
@@ -280,6 +299,9 @@ def new_config():
 
     conf.register_opts(_options.ssl_opts, "ssl")
 
+    conf.register_group(db_opt_group)
+    conf.register_opts(core_db_opts, group=db_opt_group)
+
     conf.register_group(retry_opt_group)
     conf.register_opts(retry_opts, group=retry_opt_group)
 
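After this, new_config() registers both flavors side by side: the options left in db_opts (db_auto_create, max_limit_paging, and so on) stay in [DEFAULT] and are read as CONF.db_auto_create, while the renamed core_db_opts live in the new group and are read as CONF.database.connection and friends.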
@@ -335,7 +335,7 @@ def clean_command(sql_url, min_num_days, do_clean_unassociated_projects,
     stop_watch.start()
     try:
         if sql_url:
-            CONF.set_override('sql_connection', sql_url)
+            CONF.set_override('connection', sql_url, 'database')
         repo.setup_database_engine_and_factory()
 
         if do_clean_unassociated_projects:
@@ -370,7 +370,7 @@ def clean_command(sql_url, min_num_days, do_clean_unassociated_projects,
         repo.clear()
 
         if sql_url:
-            CONF.clear_override('sql_connection')
+            CONF.clear_override('connection', 'database')
 
         log.setup(CONF, 'barbican') # reset the overrides
 
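Note the new third argument in these calls: oslo.config's set_override(name, override, group=None) and clear_override(name, group=None) take the option group, so passing 'database' scopes the override to the renamed options. A minimal sketch, assuming only oslo.config and a placeholder URL:

from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_group(cfg.OptGroup(name='database'))
conf.register_opts([cfg.StrOpt('connection')], group='database')
conf([])

# Override the [database] connection option, as clean_command() now does.
conf.set_override('connection', 'sqlite:///:memory:', 'database')
assert conf.database.connection == 'sqlite:///:memory:'

# And clear it again when the command finishes.
conf.clear_override('connection', 'database')
assert conf.database.connection is None  # back to the (unset) default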
@@ -37,7 +37,7 @@ CONF = config.CONF
 
 def init_config(sql_url=None):
     """Initialize and return the Alembic configuration."""
-    sqlalchemy_url = sql_url or CONF.sql_connection
+    sqlalchemy_url = sql_url or CONF.database.connection
     if not sqlalchemy_url:
         raise RuntimeError("Please specify a SQLAlchemy-friendly URL to "
                            "connect to the proper database, either through "
@@ -167,7 +167,7 @@ def get_session():
 
 def _get_engine(engine):
     if not engine:
-        connection = CONF.sql_connection
+        connection = CONF.database.connection
         if not connection:
             raise exception.BarbicanException(
                 u._('No SQL connection configured'))
@@ -176,20 +176,21 @@ def _get_engine(engine):
         # connection_dict = sqlalchemy.engine.url.make_url(_CONNECTION)
 
         engine_args = {
-            'connection_recycle_time': CONF.sql_idle_timeout,
+            'connection_recycle_time': CONF.database.connection_recycle_time
         }
-        if CONF.sql_pool_size:
-            engine_args['max_pool_size'] = CONF.sql_pool_size
-        if CONF.sql_pool_max_overflow:
-            engine_args['max_overflow'] = CONF.sql_pool_max_overflow
+        if CONF.database.max_pool_size:
+            engine_args['max_pool_size'] = CONF.database.max_pool_size
+        if CONF.database.max_overflow:
+            engine_args['max_overflow'] = CONF.database.max_overflow
 
         db_connection = None
         try:
             engine = _create_engine(connection, **engine_args)
             db_connection = engine.connect()
         except Exception as err:
-            msg = u._("Error configuring registry database with supplied "
-                      "sql_connection. Got error: {error}").format(error=err)
+            msg = u._(
+                "Error configuring registry database with supplied "
+                "database connection. Got error: {error}").format(error=err)
             LOG.exception(msg)
             raise exception.BarbicanException(msg)
         finally:
@@ -277,12 +278,12 @@ def wrap_db_error(f):
             if not is_db_connection_error(e.args[0]):
                 raise
 
-            remaining_attempts = CONF.sql_max_retries
+            remaining_attempts = CONF.database.max_retries
             while True:
                 LOG.warning('SQL connection failed. %d attempts left.',
                             remaining_attempts)
                 remaining_attempts -= 1
-                time.sleep(CONF.sql_retry_interval)
+                time.sleep(CONF.database.retry_interval)
                 try:
                     return f(*args, **kwargs)
                 except sqlalchemy.exc.OperationalError as e:
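With the defaults carried over by this change (max_retries=60, retry_interval=1), this loop retries a failed connection roughly once per second for about a minute before the OperationalError finally propagates; per the option help above, max_retries=-1 requests an infinite retry count.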
@@ -43,7 +43,7 @@ def sync_secret_stores(sql_url, verbose, log_file):
 
     try:
         if sql_url:
-            CONF.set_override('sql_connection', sql_url)
+            CONF.set_override('connection', sql_url, 'database')
         repo.setup_database_engine_and_factory(
             initialize_secret_stores=True)
         repo.commit()
@@ -60,6 +60,6 @@ def sync_secret_stores(sql_url, verbose, log_file):
         repo.clear()
 
         if sql_url:
-            CONF.clear_override('sql_connection')
+            CONF.clear_override('connection', 'database')
 
         log.setup(CONF, 'barbican') # reset the overrides
@@ -30,7 +30,7 @@ class TestBarbicanManageBase(utils.BaseTestCase):
         clear_conf()
         self.addCleanup(clear_conf)
 
-        manager.CONF.set_override('sql_connection', 'mockdburl')
+        manager.CONF.set_override('connection', 'mockdburl', 'database')
 
     def _main_test_helper(self, argv, func_name=None, *exp_args, **exp_kwargs):
         self.useFixture(fixtures.MonkeyPatch('sys.argv', argv))
@@ -363,11 +363,11 @@ class WhenTestingDBCleanUpCommand(utils.RepositoryTestCase):
                                test_log_file)
         set_calls = [mock.call('debug', True),
                      mock.call('log_file', test_log_file),
-                     mock.call('sql_connection', test_sql_url)]
+                     mock.call('connection', test_sql_url, 'database')]
         mock_conf.set_override.assert_has_calls(set_calls)
 
         clear_calls = [mock.call('debug'), mock.call('log_file'),
-                       mock.call('sql_connection')]
+                       mock.call('connection', 'database')]
         mock_conf.clear_override.assert_has_calls(clear_calls)
         self.assertTrue(mock_repo.setup_database_engine_and_factory.called)
         self.assertTrue(mock_repo.commit.called)
@@ -36,7 +36,8 @@ def set_foreign_key_constraint(dbapi_connection, connection_record):
 
 def setup_in_memory_db():
     # Ensure we are using in-memory SQLite database, and creating tables.
-    repositories.CONF.set_override("sql_connection", "sqlite:///:memory:")
+    repositories.CONF.set_override("connection", "sqlite:///:memory:",
+                                   "database")
     repositories.CONF.set_override("db_auto_create", True)
     repositories.CONF.set_override("debug", True)
 
@@ -198,8 +198,8 @@ class WhenTestingWrapDbError(utils.BaseTestCase):
 
     def setUp(self):
         super(WhenTestingWrapDbError, self).setUp()
-        repositories.CONF.set_override("sql_max_retries", 0)
-        repositories.CONF.set_override("sql_retry_interval", 0)
+        repositories.CONF.set_override("max_retries", 0, "database")
+        repositories.CONF.set_override("retry_interval", 0, "database")
 
     @mock.patch('barbican.model.repositories.is_db_connection_error')
     def test_should_raise_operational_error_is_connection_error(
@@ -221,7 +221,8 @@ class WhenTestingGetEnginePrivate(utils.BaseTestCase):
     def setUp(self):
         super(WhenTestingGetEnginePrivate, self).setUp()
 
-        repositories.CONF.set_override("sql_connection", "connection")
+        repositories.CONF.set_override("connection", "connection",
+                                       "database")
 
     @mock.patch('barbican.model.repositories._create_engine')
     def test_should_raise_value_exception_engine_create_failure(
@@ -237,7 +238,7 @@ class WhenTestingGetEnginePrivate(utils.BaseTestCase):
 
         self.assertEqual(
             'Error configuring registry database with supplied '
-            'sql_connection. Got error: Abort!',
+            'database connection. Got error: Abort!',
             str(exception_result))
 
     @mock.patch('barbican.model.repositories._create_engine')
@@ -255,8 +256,8 @@ class WhenTestingGetEnginePrivate(utils.BaseTestCase):
         mock_create_engine.assert_called_once_with(
             'connection',
             connection_recycle_time=3600,
-            max_pool_size=repositories.CONF.sql_pool_size,
-            max_overflow=repositories.CONF.sql_pool_max_overflow
+            max_pool_size=repositories.CONF.database.max_pool_size,
+            max_overflow=repositories.CONF.database.max_overflow
         )
 
     @mock.patch('barbican.model.repositories._create_engine')
@@ -266,8 +267,8 @@ class WhenTestingGetEnginePrivate(utils.BaseTestCase):
         repositories.CONF.set_override("db_auto_create", False)
         repositories.CONF.set_override(
             "sql_pool_class", "QueuePool")
-        repositories.CONF.set_override("sql_pool_size", 22)
-        repositories.CONF.set_override("sql_pool_max_overflow", 11)
+        repositories.CONF.set_override("max_pool_size", 22, 'database')
+        repositories.CONF.set_override("max_overflow", 11, 'database')
 
         engine = mock.MagicMock()
         mock_create_engine.return_value = engine
@@ -325,7 +326,7 @@ class WhenTestingMigrations(utils.BaseTestCase):
 
     def setUp(self):
         super(WhenTestingMigrations, self).setUp()
-        repositories.CONF.set_override("sql_connection", "connection")
+        repositories.CONF.set_override("connection", "connection", "database")
         self.alembic_config = migration.init_config()
         self.alembic_config.barbican_config = cfg.CONF
 
releasenotes/notes/rename-db-opts-547a9114abde2e88.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+---
+deprecations:
+  - |
+    The following database options in the ``[DEFAULT]`` section were renamed
+    and moved to the ``[database]`` section.
+
+    - ``[DEFAULT] sql_connection`` was renamed to ``[database] connection``
+    - ``[DEFAULT] sql_idle_timeout`` was renamed to
+      ``[database] connection_recycle_time``
+    - ``[DEFAULT] sql_max_retries`` was renamed to ``[database] max_retries``
+    - ``[DEFAULT] sql_retry_interval`` was renamed to
+      ``[database] retry_interval``
+    - ``[DEFAULT] sql_pool_size`` was renamed to ``[database] max_pool_size``
+    - ``[DEFAULT] sql_pool_max_overflow`` was renamed to
+      ``[database] max_overflow``