Add support to oslo.db to be used as a DB driver. The DB driver will be used with API v2. When it's completely implemented, API V1 will be deprecated and removed by the end of the cycle. Freezer-api will keep supporting V2 with Elasticsearch, Sqlalchemy drivers. This patch implements the following: * Abstract Base DB driver to be implemented by each driver * Base driver; will return only access to the db engine, session * SqlAlchemy driver; * ElasticSearch driver; * Implement both drivers in freezer-manage Partially-Implements: blueprint oslo-db Depends-On: I81e417155da48f46dd2113e5745fb3c21c96499f Depends-On: I2e5724b1f1a75121952e2beb3844d2c489e4df68 Depends-On: Idb4ac050652d1d0107bf3fcd447d7cbedd811809 Depends-On: I81d46c89859752c0cbc21ef02de90db7f19f942c Change-Id: I93ed1b909f538728a1a9bd5c8b07baf7aeddb705changes/77/539077/27
parent
1cde8eef34
commit
d8e0dc21e0
@ -0,0 +1,54 @@
|
||||
"""
|
||||
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
|
||||
(C) Copyright 2016-2018 Hewlett Packard Enterprise Development Company LP
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
import abc
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
import six
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class DBDriver(object):
    """Abstract base class for freezer-api database drivers.

    Concrete drivers (e.g. SQLAlchemy, ElasticSearch) subclass this and
    provide access to the backend's engine/session.
    """

    # Generic connection options, registered under the backend's own
    # config group (the group name is the ``backend`` constructor arg).
    _OPTS = [
        cfg.StrOpt('host',
                   required=True,
                   help="Database host"),
        cfg.StrOpt("username",
                   help="Database username"),
        cfg.StrOpt("password",
                   help="Database Password")
    ]

    def __init__(self, backend, is_created=False):
        """Register the backend option group and load its config.

        :param backend: name of the config group holding the driver options
        :param is_created: skip group/option registration when the group was
                           already registered (avoids duplicate registration)
        """
        if not is_created:
            grp = cfg.OptGroup(backend)
            CONF.register_group(grp)
            CONF.register_opts(self._OPTS, grp)
        self.conf = CONF.get(backend)
        self.backend = backend

    def connect(self):
        # No-op in the base class; drivers override to establish a
        # connection to their backend.
        pass

    @abc.abstractproperty
    def name(self):
        """Name of the database driver"""
        pass

    def get_instance(self):
        # No-op in the base class; drivers override to return a usable
        # db instance.
        pass
|
@ -0,0 +1,45 @@
|
||||
"""
|
||||
(C) Copyright 2016-2018 Hewlett Packard Enterprise Development Company LP
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import importutils
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# storage backend options to be registered
# NOTE(review): these options belong to the [storage] section read by
# get_db() below, but they are not registered in this module — confirm they
# are registered elsewhere (e.g. in the service's config setup).
_OPTS = [
    cfg.StrOpt("backend",
               help="Database backend section name. This section "
                    "will be loaded by the proper driver to connect to "
                    "the database."
               ),
    cfg.StrOpt('driver',
               default='freezer_api.storage.elastic.ElasticSearchEngine',
               help="Database driver to be used."
               )
]
|
||||
|
||||
|
||||
def get_db(driver=None):
    """Load and return an instance of the configured database driver.

    :param driver: optional dotted path to the driver class; when omitted,
                   the ``driver`` option of the ``[storage]`` section is used
    :return: instantiated driver object
    """
    storage_conf = CONF.get('storage')
    driver_name = driver if driver else storage_conf['driver']
    return importutils.import_object(driver_name,
                                     backend=storage_conf['backend'])
|
@ -0,0 +1,119 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from freezer_api.common import db_mappings
|
||||
from freezer_api.db import base as db_base
|
||||
from freezer_api.db.elasticsearch import es_manager
|
||||
from freezer_api.storage import elasticv2 as db_session
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
DEFAULT_INDEX = 'freezer'
DEFAULT_REPLICAS = 0

# NOTE(review): _BACKEND_MAPPING points at the sqlalchemy api module but is
# never referenced in this elasticsearch driver module — looks like a
# copy-paste leftover from the sqlalchemy driver; confirm and remove.
_BACKEND_MAPPING = {'sqlalchemy': 'freezer_api.db.sqlalchemy.api'}
|
||||
|
||||
|
||||
class ElasticSearchDB(db_base.DBDriver):
    """ElasticSearch database driver.

    Registers the elasticsearch-specific options under the backend's config
    group and exposes the engine accessor plus the db_sync/db_remove/db_show
    management operations used by freezer-manage.
    """

    _ES_OPTS = [
        cfg.ListOpt('hosts',
                    default=['http://127.0.0.1:9200'],
                    help='specify the storage hosts'),
        cfg.StrOpt('index',
                   default='freezer',
                   help='specify the name of the elasticsearch index'),
        cfg.IntOpt('timeout',
                   default=60,
                   help='specify the connection timeout'),
        cfg.IntOpt('retries',
                   default=20,
                   help='number of retries to allow before raising and error'),
        cfg.BoolOpt('use_ssl',
                    default=False,
                    help='explicitly turn on SSL'),
        cfg.BoolOpt('verify_certs',
                    default=False,
                    help='turn on SSL certs verification'),
        cfg.StrOpt('ca_certs',
                   help='path to CA certs on disk'),
        cfg.IntOpt('number_of_replicas',
                   default=0,
                   help='Number of replicas for elk cluster. Default is 0. '
                        'Use 0 for no replicas. This should be set to (number '
                        'of node in the ES cluter -1).'),
        cfg.StrOpt('mapping',
                   dest='select_mapping',
                   default='',
                   help='Specific mapping to upload. Valid choices: {0}'
                   .format(','.join(db_mappings.get_mappings()))),
        cfg.BoolOpt('erase',
                    dest='erase',
                    default=False,
                    help='Enable index deletion in case mapping update fails '
                         'due to incompatible changes'
                    ),
        # BUG FIX: this was a StrOpt with a boolean default; test_only is
        # consumed as a flag, so it must be a BoolOpt.
        cfg.BoolOpt('test-only',
                    dest='test_only',
                    default=False,
                    help='Test the validity of the mappings, but take no '
                         'action'
                    )
    ]

    def __init__(self, backend):
        """:param backend: name of the config group for this driver."""
        super(ElasticSearchDB, self).__init__(backend)
        grp = cfg.OptGroup(backend)
        CONF.register_group(grp)
        CONF.register_opts(self._ES_OPTS, group=backend)

        self.conf = CONF.get(backend)
        # Fall back to the module default if the option is empty.
        self.index = self.conf.index or DEFAULT_INDEX
        self._engine = None
        self._manage_engine = None

    def get_engine(self):
        """Return the v2 ElasticSearch engine, creating it on first use."""
        if not self._engine:
            self._engine = db_session.ElasticSearchEngineV2(self.backend)
        return self._engine

    def get_api(self):
        """The API object for this driver is the engine itself."""
        return self.get_engine()

    def get_manage_engine(self):
        """Build an ElasticSearchManager from this backend's options."""
        opts = dict(self.conf.items())
        self._manage_engine = es_manager.ElasticSearchManager(**opts)
        return self._manage_engine

    def db_sync(self):
        """Create or update the index mappings."""
        if not self._manage_engine:
            self._manage_engine = self.get_manage_engine()
        self._manage_engine.update_mappings()

    def db_remove(self):
        """Delete the index mappings."""
        if not self._manage_engine:
            self._manage_engine = self.get_manage_engine()
        self._manage_engine.remove_mappings()

    def db_show(self):
        """Return the current index mappings."""
        if not self._manage_engine:
            self._manage_engine = self.get_manage_engine()
        return self._manage_engine.show_mappings()

    def name(self):
        """Name of the database driver."""
        return "ElasticSearch"
|
@ -0,0 +1,231 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import elasticsearch
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
import six
|
||||
|
||||
from freezer_api.common import db_mappings
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
DEFAULT_INDEX = 'freezer'
|
||||
DEFAULT_REPLICAS = 0
|
||||
|
||||
|
||||
class ElasticSearchManager(object):
    """Managing ElasticSearch mappings operations

    Sync: create mappings
    Update: Update mappings
    remove: deletes the mappings
    show: print out all the mappings
    """

    def __init__(self, **options):
        """:param options: backend options (hosts, index, select_mapping,
        erase, number_of_replicas, ...) also forwarded to the ES client.
        """
        self.mappings = db_mappings.get_mappings().copy()
        self.conf = options.copy()
        self.index = self.conf['index']

        self.elk = elasticsearch.Elasticsearch(**options)
        # Fail fast if the cluster cannot be reached.
        if not self.elk.ping():
            raise Exception('ElasticSearch cluster is not available. '
                            'Cannot ping it')
        # Clear the index cache; a failure here is best-effort only.
        try:
            self.elk.indices.clear_cache(index=self.conf['index'])
        except Exception as e:
            LOG.warning(e)

    def _check_index_exists(self, index):
        """Return True if the given index exists."""
        LOG.info('check if index: {0} exists or not'.format(index))
        return self.elk.indices.exists(index=index)

    def _check_mapping_exists(self, mappings):
        """Return True if the given doc type exists in the index."""
        LOG.info('check if mappings: {0} exists or not'.format(mappings))
        return self.elk.indices.exists_type(index=self.index,
                                            doc_type=mappings)

    def get_required_mappings(self):
        """Return the mappings to operate on.

        If the user selected a specific mapping (-m) return only that one;
        otherwise return all known mappings.

        :return: dict of doc_type -> mapping body
        :raises Exception: if the selected mapping is unknown
        """
        mappings = {}
        selected = self.conf['select_mapping']
        if selected:
            if selected not in self.mappings.keys():
                raise Exception(
                    'Selected mappings {0} does not exists. Please, choose '
                    'one of {1}'.format(selected, self.mappings.keys())
                )
            mappings[selected] = self.mappings.get(selected)
        else:
            mappings = self.mappings
        return mappings

    def db_sync(self):
        """Create or update elasticsearch db mappings.

        Steps:
          1) remove mappings first when 'erase' was requested
          2) create the index if it does not exist
          3) create/update each required mapping one by one, prompting
             before an update unless 'yes' was set
        :return:
        """
        if self.conf.get('erase'):
            self.remove_mappings()

        if not self._check_index_exists(self.index):
            self._create_index()

        _mappings = self.get_required_mappings()
        for doc_type, body in _mappings.items():
            check = self.create_one_mapping(doc_type, body)
            if check:
                print("Creating or Updating {0} is {1}".format(
                    doc_type, check.get('acknowledged')))
            else:
                print("Couldn't update {0}. Request returned {1}".format(
                    doc_type, check.get('acknowledged')))

    def _create_index(self):
        """Create the index that will allow us to put the mappings under it

        :return: {u'acknowledged': True} if success or None if index exists
        """
        if not self._check_index_exists(index=self.index):
            body = {
                'number_of_replicas':
                    self.conf['number_of_replicas'] or DEFAULT_REPLICAS
            }
            return self.elk.indices.create(index=self.index, body=body)

    def delete_index(self):
        """Delete the whole index (and with it all the mappings)."""
        return self.elk.indices.delete(index=self.index)

    def create_one_mapping(self, doc_type, body):
        """Create one document type and update its mappings

        :param doc_type: the document type to be created jobs, clients, backups
        :param body: the structure of the document
        :return: dict
        """
        if self._check_mapping_exists(doc_type):
            do_update = self.prompt(
                '[[[ {0} ]]] already exists in index => {1}'
                ' <= Do you want to update it ? (y/n) '.format(doc_type,
                                                               self.index)
            )
            if not do_update:
                return {'acknowledged': False}
        # Call elasticsearch library and put the mappings
        return self.elk.indices.put_mapping(doc_type=doc_type, body=body,
                                            index=self.index)

    def remove_one_mapping(self, doc_type):
        """Removes one mapping at a time

        :param doc_type: document type to be removed
        :return: dict
        """
        LOG.info('Removing mapping {0} from index {1}'.format(doc_type,
                                                              self.index))
        return self.elk.indices.delete_mapping(self.index,
                                               doc_type=doc_type)

    def remove_mappings(self):
        """Remove all mappings by deleting the index itself.

        :return: dict
        """
        if not self._check_index_exists(index=self.index):
            print("Index {0} doesn't exists.".format(self.index))
            return
        self.delete_index()

    def update_mappings(self):
        """Update mappings non-interactively (implies 'yes').

        :return: dict
        """
        self.conf['yes'] = True
        return self.db_sync()

    def show_mappings(self):
        """Return existing mappings in an index.

        :return: dict, or None when the index does not exist
        """
        if not self._check_index_exists(index=self.index):
            LOG.debug("Index {0} doesn't exists.".format(self.index))
            return
        return self.elk.indices.get_mapping(index=self.index)

    def update_settings(self):
        """Update number of replicas

        :return: dict
        """
        body = {
            'number_of_replicas':
                self.conf['number_of_replicas'] or DEFAULT_REPLICAS
        }
        return self.elk.indices.put_settings(body=body, index=self.index)

    def prompt(self, message):
        """Ask the user for a y/n confirmation.

        :param message: Message to be printed (To ask the user to confirm ...)
        :return: True or False
        """
        # BUG FIX: 'yes' is not a registered option, so it may be absent
        # from the options dict; a plain lookup raised KeyError.
        if self.conf.get('yes'):
            return self.conf['yes']
        while True:
            # BUG FIX: six has no 'input' attribute; the py2/py3-compatible
            # input function lives in six.moves.
            ans = six.moves.input(message)
            if ans.lower() == 'y':
                return True
            elif ans.lower() == 'n':
                return False
|
@ -0,0 +1,65 @@
|
||||
"""
|
||||
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
|
||||
(C) Copyright 2016-2018 Hewlett Packard Enterprise Development Company LP
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
from oslo_log import log
|
||||
from oslo_utils import importutils
|
||||
from stevedore import driver
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
_DB_DRIVER_NAMESPACE = "freezer.db.backends"
|
||||
|
||||
|
||||
def _load_class_by_alias_or_classname(namespace, name):
    """Load a class using a stevedore alias or its fully qualified name.

    :param namespace: entry-point namespace where the alias is defined
    :param name: alias or dotted class name of the class to be loaded
    :returns: the class, if it can be loaded
    :raises ImportError: if the class cannot be loaded by either mechanism
    """

    if not name:
        LOG.error("Alias or class name is not set")
        raise ImportError("Class not found.")
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(
            namespace, name, warn_on_missing_entrypoint=False)
        class_to_load = mgr.driver
    except RuntimeError:
        # Keep the alias-resolution failure so it can be logged if the
        # class-name fallback also fails.
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            LOG.error("Error loading class by alias",
                      exc_info=e1_info)
            LOG.error("Error loading class by class name",
                      exc_info=True)
            raise ImportError("Class not found.")
    return class_to_load
|
||||
|
||||
|
||||
def get_db_driver(name, backend):
    """Load and instantiate a database driver.

    :param name: stevedore alias or class name of the database driver
    :param backend: config group name handed to the driver constructor
    :return: Instance of the driver class
    """
    driver_class = _load_class_by_alias_or_classname(
        _DB_DRIVER_NAMESPACE, name)
    return driver_class(backend=backend)
|
@ -0,0 +1,88 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db.sqlalchemy import enginefacade
|
||||
from oslo_log import log
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
main_context_manager = enginefacade.transaction_context()
|
||||
api_context_manager = enginefacade.transaction_context()
|
||||
|
||||
|
||||
def _get_db_conf(conf_group, connection=None):
    """Translate an oslo.config database option group into keyword
    arguments for enginefacade configuration.

    :param conf_group: the [database] option group
    :param connection: optional connection string overriding the group's own
    :return: dict of keyword arguments
    """
    return {
        'connection': connection or conf_group.connection,
        'slave_connection': conf_group.slave_connection,
        'sqlite_fk': False,
        '__autocommit': True,
        'expire_on_commit': False,
        'mysql_sql_mode': conf_group.mysql_sql_mode,
        'connection_recycle_time': conf_group.connection_recycle_time,
        'connection_debug': conf_group.connection_debug,
        'max_pool_size': conf_group.max_pool_size,
        'max_overflow': conf_group.max_overflow,
        'pool_timeout': conf_group.pool_timeout,
        'sqlite_synchronous': conf_group.sqlite_synchronous,
        'connection_trace': conf_group.connection_trace,
        'max_retries': conf_group.max_retries,
        'retry_interval': conf_group.retry_interval,
    }
|
||||
|
||||
|
||||
def get_backend():
    """Return this module itself as the DB backend implementation."""
    this_module = sys.modules[__name__]
    return this_module
|
||||
|
||||
|
||||
def create_context_manager(connection=None):
    """Create a database context manager object.

    :param connection: optional database connection string; when omitted,
                       the [database] group's configured connection is used
    :return: a configured enginefacade transaction context
    """
    ctxt_mgr = enginefacade.transaction_context()
    ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
    return ctxt_mgr
|
||||
|
||||
|
||||
def _context_manager_from_context(context):
    """Return the context manager attached to a request context, if any.

    :param context: request context (may be None)
    :return: the context's ``db_connection`` attribute, or None when the
             context is falsy or carries no such attribute
    """
    if not context:
        return None
    return getattr(context, 'db_connection', None)
|
||||
|
||||
|
||||
def get_context_manager(context):
    """Get a database context manager object.

    :param context: request context that may carry its own context manager
    :return: the context's manager when present, the module-level main
             context manager otherwise
    """
    attached = _context_manager_from_context(context)
    if attached:
        return attached
    return main_context_manager
|
||||
|
||||
|
||||
def get_engine(use_slave=False, context=None):
    """Get a database engine object.

    :param use_slave: Whether to use the slave connection
    :param context: The request context that can contain a context manager
    :return: a SQLAlchemy engine from the selected context manager's facade
    """
    facade = get_context_manager(context).get_legacy_facade()
    return facade.get_engine(use_slave=use_slave)
|
||||
|
||||
|
||||
def get_api_engine():
    """Return the engine of the API-database context manager."""
    facade = api_context_manager.get_legacy_facade()
    return facade.get_engine()
|
@ -0,0 +1,61 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import api as db_api
|
||||
from oslo_log import log
|
||||
|
||||
from freezer_api.db import base as db_base
|
||||
from freezer_api.db.sqlalchemy import api as db_session
|
||||
from freezer_api.db.sqlalchemy import models
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
_BACKEND_MAPPING = {'sqlalchemy': 'freezer_api.db.sqlalchemy.api'}
|
||||
|
||||
|
||||
class SQLDriver(db_base.DBDriver):
|
||||
|
||||
    def __init__(self, backend):
        """:param backend: name of the config group holding the driver opts.
        """
        super(SQLDriver, self).__init__(backend)
        # oslo.db DBAPI proxy dispatching to freezer_api.db.sqlalchemy.api
        self.IMPL = db_api.DBAPI.from_config(CONF, _BACKEND_MAPPING)
        # Engine is created lazily by get_engine().
        self._engine = None
|
||||
|
||||
def get_engine(self):
|
||||
if not self._engine:
|
||||
self._engine = db_session.get_engine()
|
||||
return self._engine
|
||||
|
||||
def get_api(self):
|
||||
return self.get_engine()
|
||||
|
||||
def db_sync(self):
|
||||
if not self._engine:
|
||||
self._engine = self.get_engine()
|
||||
models.register_models(self._engine)
|
||||
|
||||
def db_show(self):
|
||||
if not self._engine:
|
||||
self._engine = self.get_engine()
|
||||
return models.get_tables(self._engine)
|
||||
|
||||
def db_remove(self):
|
||||
if not self._engine:
|
||||
self._engine = self.get_engine()
|
||||
models.unregister_models(self._engine)
|
||||
|
||||