Part 1: Implement SQLAlchemy driver for freezer-api

Add support for oslo.db as a DB driver. The driver will be used with
API v2. Once it is fully implemented, API v1 will be deprecated and
removed by the end of the cycle. freezer-api will keep supporting v2
with both the Elasticsearch and SQLAlchemy drivers.

This patch implements the following (see the usage sketch below):
    * Abstract base DB driver, to be implemented by each driver
    * Base driver, which only exposes access to the DB engine and session
    * SQLAlchemy driver
    * ElasticSearch driver
    * Wiring of both drivers into freezer-manage
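
A minimal usage sketch of the new plumbing (the driver name and backend
section are illustrative values; the function names come from this
patch):

    from freezer_api.db import manager

    # resolve the driver through the "freezer.db.backends" entry points
    db_driver = manager.get_db_driver('sqlalchemy', backend='sqlalchemy')
    db_driver.db_sync()        # create the tables (or the ES mappings)
    db = db_driver.get_api()   # engine handed to the API layer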

Partially-Implements: blueprint oslo-db

Depends-On: I81e417155da48f46dd2113e5745fb3c21c96499f
Depends-On: I2e5724b1f1a75121952e2beb3844d2c489e4df68
Depends-On: Idb4ac050652d1d0107bf3fcd447d7cbedd811809
Depends-On: I81d46c89859752c0cbc21ef02de90db7f19f942c
Change-Id: I93ed1b909f538728a1a9bd5c8b07baf7aeddb705
Saad Zaher 2018-01-30 02:58:38 +00:00
parent 1cde8eef34
commit d8e0dc21e0
21 changed files with 899 additions and 446 deletions

.gitignore

@ -11,6 +11,7 @@ coverage.xml
*.sw?
.tox
*.egg
*.eggs/*
*.egg-info
*.py[co]
.DS_Store

etc/config-generator.conf

@ -5,3 +5,4 @@ namespace = "freezer-api"
namespace = oslo.log
namespace = oslo.policy
namespace = oslo.middleware
namespace = oslo.db

devstack/plugin.sh

@ -112,6 +112,8 @@ function configure_freezer_api {
#set elasticsearch configuration
iniset $FREEZER_API_CONF 'storage' backend elasticsearch
iniset $FREEZER_API_CONF 'storage' driver elasticsearch
iniset $FREEZER_API_CONF 'elasticsearch' index freezer
iniset $FREEZER_API_CONF 'elasticsearch' number_of_replicas 0
iniset $FREEZER_API_CONF 'elasticsearch' hosts http://$SERVICE_HOST:9200

etc/freezer-api.conf.sample

@ -100,6 +100,10 @@
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Use JSON formatting for logging. This option is ignored if log_config_append
# is set. (boolean value)
#use_json = false
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = false
@ -124,7 +128,7 @@
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@ -182,65 +186,110 @@
#allow_headers =
[cors.subdomain]
[database]
#
# From oslo.middleware
# From oslo.db
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
# slash. Example: https://horizon.example.com (list value)
#allowed_origin = <None>
# If True, SQLite uses synchronous mode. (boolean value)
#sqlite_synchronous = true
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend = sqlalchemy
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers =
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# The SQLAlchemy connection string to use to connect to the slave database.
# (string value)
#slave_connection = <None>
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH
# The SQL mode to be used for MySQL sessions. This option, including the
# default, overrides any server-set SQL mode. To use whatever SQL mode is set
# by the server configuration, set this to no value. Example: mysql_sql_mode=
# (string value)
#mysql_sql_mode = TRADITIONAL
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers =
# If True, transparently enables support for handling MySQL Cluster (NDB).
# (boolean value)
#mysql_enable_ndb = false
# Connections which have been present in the connection pool longer than this
# number of seconds will be replaced with a new one the next time they are
# checked out from the pool. (integer value)
# Deprecated group/name - [DATABASE]/idle_timeout
# Deprecated group/name - [database]/idle_timeout
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#connection_recycle_time = 3600
[elasticsearch]
# Minimum number of SQL connections to keep open in a pool. (integer value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
#
# From freezer-api
#
# Maximum number of SQL connections to keep open in a pool. Setting a value of
# 0 indicates no limit. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = 5
# specify the storage hosts (list value)
#hosts = http://127.0.0.1:9200
# Maximum number of database connection retries during startup. Set to -1 to
# specify an infinite retry count. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# specify the name of the elasticsearch index (string value)
#index = freezer
# Interval between retries of opening a SQL connection. (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# specify the connection timeout (integer value)
#timeout = 60
# If set, use this value for max_overflow with SQLAlchemy. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = 50
# number of retries to allow before raising an error (integer value)
#retries = 20
# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
# value)
# Minimum value: 0
# Maximum value: 100
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# explicitly turn on SSL (boolean value)
#use_ssl = false
# Add Python stack traces to SQL as comment strings. (boolean value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = false
# turn on SSL certs verification (boolean value)
#verify_certs = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# path to CA certs on disk (string value)
#ca_certs = <None>
# Enable the experimental use of database reconnect on connection lost.
# (boolean value)
#use_db_reconnect = false
# Number of replicas for the ES cluster. Default is 0. Use 0 for no replicas.
# This should be set to (number of nodes in the ES cluster - 1). (integer
# value)
#number_of_replicas = 0
# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1
# If True, increases the interval between retries of a database operation up to
# db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between retries of a
# database operation. (integer value)
#db_max_retry_interval = 10
# Maximum retries in case of connection error or deadlock error before error is
# raised. Set to -1 to specify an infinite retry count. (integer value)
#db_max_retries = 20
[healthcheck]
@ -280,11 +329,27 @@
# Complete "public" Identity API endpoint. This endpoint should not be an
# "admin" endpoint, as it should be accessible by all end users.
# Unauthenticated clients are redirected to this endpoint to authenticate.
# Although this endpoint should ideally be unversioned, client support in the
# wild varies. If you're using a versioned v2 endpoint here, then this should
# *not* be the same endpoint the service user utilizes for validating tokens,
# because normal end users may not be able to reach that endpoint. (string
# value)
# Deprecated group/name - [keystone_authtoken]/auth_uri
#www_authenticate_uri = <None>
# DEPRECATED: Complete "public" Identity API endpoint. This endpoint should not
# be an "admin" endpoint, as it should be accessible by all end users.
# Unauthenticated clients are redirected to this endpoint to authenticate.
# Although this endpoint should ideally be unversioned, client support in the
# wild varies. If you're using a versioned v2 endpoint here, then this should
# *not* be the same endpoint the service user utilizes for validating tokens,
# because normal end users may not be able to reach that endpoint. This option
# is deprecated in favor of www_authenticate_uri and will be removed in the S
# release. (string value)
# This option is deprecated for removal since Queens.
# Its value may be silently ignored in the future.
# Reason: The auth_uri option is deprecated in favor of www_authenticate_uri
# and will be removed in the S release.
#auth_uri = <None>
# API version of the admin Identity API endpoint. (string value)
@ -357,7 +422,10 @@
# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
# cache. If the value is not one of these options or empty, auth_token will
# raise an exception on initialization. (string value)
# Allowed values: None, MAC, ENCRYPT
# Possible values:
# None - <No description provided>
# MAC - <No description provided>
# ENCRYPT - <No description provided>
#memcache_security_strategy = None
# (Optional, mandatory if memcache_security_strategy is defined) This string is
@ -451,7 +519,9 @@
# Protocol of the admin Identity API endpoint. Deprecated, use identity_uri.
# (string value)
# Allowed values: http, https
# Possible values:
# http - <No description provided>
# https - <No description provided>
#auth_protocol = https
# Complete admin Identity API endpoint. This should specify the unversioned
@ -511,12 +581,18 @@
# From oslo.policy
#
# This option controls whether or not to enforce scope when evaluating
# policies. If ``True``, the scope of the token used in the request is compared
# to the ``scope_types`` of the policy being enforced. If the scopes do not
# match, an ``InvalidScope`` exception will be raised. If ``False``, a message
# will be logged informing operators that policies are being invoked with
# mismatching scope. (boolean value)
#enforce_scope = false
# The file that defines policies. (string value)
# Deprecated group/name - [DEFAULT]/policy_file
#policy_file = policy.json
# Default rule. Enforced when a requested rule is not found. (string value)
# Deprecated group/name - [DEFAULT]/policy_default_rule
#policy_default_rule = default
# Directories where policy configuration files are stored. They can be relative
@ -524,9 +600,27 @@
# absolute paths. The file defined by policy_file must exist for these
# directories to be searched. Missing or empty directories are ignored. (multi
# valued)
# Deprecated group/name - [DEFAULT]/policy_dirs
#policy_dirs = policy.d
# Content Type to send and receive data for REST based policy check (string
# value)
# Possible values:
# application/x-www-form-urlencoded - <No description provided>
# application/json - <No description provided>
#remote_content_type = application/x-www-form-urlencoded
# server identity verification for REST based policy check (boolean value)
#remote_ssl_verify_server_crt = false
# Absolute path to ca cert file for REST based policy check (string value)
#remote_ssl_ca_crt_file = <None>
# Absolute path to client cert for REST based policy check (string value)
#remote_ssl_client_crt_file = <None>
# Absolute path client key file REST based policy check (string value)
#remote_ssl_client_key_file = <None>
[paste_deploy]
@ -550,4 +644,4 @@
#backend = <None>
# Database driver to be used. (string value)
#driver = freezer_api.storage.elastic.ElasticSearchEngine
#driver = elasticsearch

freezer_api/cmd/api.py

@ -33,8 +33,8 @@ from freezer_api.api import v2
from freezer_api.common import _i18n
from freezer_api.common import config
from freezer_api.common import exceptions as freezer_api_exc
from freezer_api.db import manager
from freezer_api import policy
from freezer_api.storage import driver
CONF = cfg.CONF
@ -48,10 +48,9 @@ def configure_app(app, db=None):
:param db: Database engine (ElasticSearch or SQLAlchemy)
:return:
"""
if not db:
db = driver.get_db(
driver='freezer_api.storage.elastic.ElasticSearchEngine'
)
db_driver = manager.get_db_driver(CONF.storage.driver,
backend=CONF.storage.backend)
db = db_driver.get_api()
# setup freezer policy
policy.setup_policy(CONF)
@ -129,7 +128,9 @@ def build_app_v2():
middleware_list.append(middleware.JSONTranslator())
app = falcon.API(middleware=middleware_list)
db = driver.get_db()
db_driver = manager.get_db_driver(CONF.storage.driver,
backend=CONF.storage.backend)
db = db_driver.get_api()
# setup freezer policy
policy.setup_policy(CONF)

freezer_api/cmd/manage.py

@ -14,23 +14,22 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import json
import sys
import elasticsearch
from oslo_config import cfg
from oslo_log import log
import six
from freezer_api import __version__ as FREEZER_API_VERSION
from freezer_api.common import config
from freezer_api.common import db_mappings
from freezer_api.storage import driver
from freezer_api.db import manager
CONF = cfg.CONF
LOG = log.getLogger(__name__)
DEFAULT_INDEX = 'freezer'
DEFAULT_REPLICAS = 0
@ -44,51 +43,19 @@ def add_db_opts(subparser):
)
def parse_config(mapping_choices):
def parse_config():
DB_INIT = [
cfg.SubCommandOpt('db',
dest='db',
title='DB Options',
handler=add_db_opts
),
cfg.ListOpt('hosts',
default=['http://127.0.0.1:9200'],
help='specify the storage hosts'),
cfg.StrOpt('mapping',
dest='select_mapping',
default='',
short='m',
help='Specific mapping to upload. Valid choices: {0}'
.format(','.join(mapping_choices))),
cfg.StrOpt('index',
dest='index',
short='i',
default=DEFAULT_INDEX,
help='The DB index (default "{0}")'.format(DEFAULT_INDEX)
),
cfg.BoolOpt('yes',
short='y',
dest='yes',
default=False,
help='Automatic confirmation to update mappings and '
'number-of-replicas.'),
cfg.BoolOpt('erase',
short='e',
dest='erase',
default=False,
help='Enable index deletion in case mapping update fails '
'due to incompatible changes'
),
cfg.StrOpt('test-only',
short='t',
dest='test_only',
default=False,
help='Test the validity of the mappings, but take no action'
)
)
]
driver.register_storage_opts()
# register database backend drivers
config.register_db_drivers_opt()
# register database cli options
CONF.register_cli_opts(DB_INIT)
# register logging opts
log.register_options(CONF)
default_config_files = cfg.find_config_files('freezer', 'freezer-api')
CONF(args=sys.argv[1:],
@ -98,234 +65,8 @@ def parse_config(mapping_choices):
)
class ElasticSearchManager(object):
"""
Managing ElasticSearch mappings operations
Sync: create mappings
Update: Update mappings
remove: deletes the mappings
show: print out all the mappings
"""
def __init__(self, mappings):
self.mappings = mappings.copy()
grp = cfg.OptGroup(CONF.storage.backend)
CONF.register_group(grp)
backend_opts = driver._get_elastic_opts(backend=CONF.storage.backend)
CONF.register_opts(backend_opts[CONF.storage.backend],
group=CONF.storage.backend)
self.conf = CONF.get(CONF.storage.backend)
self.index = self.conf.index or DEFAULT_INDEX
# initialize elk
opts = dict(self.conf.items())
self.elk = elasticsearch.Elasticsearch(**opts)
# check if the cluster is up or not !
if not self.elk.ping():
raise Exception('ElasticSearch cluster is not available. '
'Cannot ping it')
# clear the index cache
try:
self.elk.indices.clear_cache(index=self.index)
except Exception as e:
LOG.warning(e)
def _check_index_exists(self, index):
LOG.info('check if index: {0} exists or not'.format(index))
try:
return self.elk.indices.exists(index=index)
except elasticsearch.TransportError:
raise
def _check_mapping_exists(self, mappings):
LOG.info('check if mappings: {0} exists or not'.format(mappings))
return self.elk.indices.exists_type(index=self.index,
doc_type=mappings)
def get_required_mappings(self):
"""
This function checks if the user chooses a certain mappings or not.
If the user has chosen a certain mappings it will return these mappings
only If not it will return all mappings to be updated
:return:
"""
# check if the user asked to update only one mapping ( -m is provided )
mappings = {}
if CONF.select_mapping:
if CONF.select_mapping not in self.mappings.keys():
raise Exception(
'Selected mappings {0} does not exists. Please, choose '
'one of {1}'.format(CONF.select_mapping,
self.mappings.keys()
)
)
mappings[CONF.select_mapping] = \
self.mappings.get(CONF.select_mapping)
else:
mappings = self.mappings
return mappings
def db_sync(self):
"""
Create or update elasticsearch db mappings
steps:
1) check if mappings exists
2) remove mapping if erase is passed
3) update mappings if - y is passed
4) if update failed ask for permission to remove old mappings
5) try to update again
6) if update succeeded exit :)
:return:
"""
# check if erase provided remove mappings first
if CONF.erase:
self.remove_mappings()
# check if index does not exists create it
if not self._check_index_exists(self.index):
self._create_index()
_mappings = self.get_required_mappings()
# create/update one by one
for doc_type, body in _mappings.items():
check = self.create_one_mapping(doc_type, body)
if check:
print("Creating or Updating {0} is {1}".format(
doc_type, check.get('acknowledged')))
else:
print("Couldn't update {0}. Request returned {1}".format(
doc_type, check.get('acknowledged')))
def _create_index(self):
"""
Create the index that will allow us to put the mappings under it
:return: {u'acknowledged': True} if success or None if index exists
"""
if not self._check_index_exists(index=self.index):
body = {
'number_of_replicas':
self.conf.number_of_replicas or DEFAULT_REPLICAS
}
return self.elk.indices.create(index=self.index, body=body)
def delete_index(self):
return self.elk.indices.delete(index=self.index)
def create_one_mapping(self, doc_type, body):
"""
Create one document type and update its mappings
:param doc_type: the document type to be created jobs, clients, backups
:param body: the structure of the document
:return: dict
"""
# check if doc_type exists or not
if self._check_mapping_exists(doc_type):
do_update = self.prompt(
'[[[ {0} ]]] already exists in index => {1}'
' <= Do you want to update it ? (y/n) '.format(doc_type,
self.index)
)
if do_update:
# Call elasticsearch library and put the mappings
return self.elk.indices.put_mapping(doc_type=doc_type,
body=body,
index=self.index
)
else:
return {'acknowledged': False}
return self.elk.indices.put_mapping(doc_type=doc_type, body=body,
index=self.index)
def remove_one_mapping(self, doc_type):
"""
Removes one mapping at a time
:param doc_type: document type to be removed
:return: dict
"""
LOG.info('Removing mapping {0} from index {1}'.format(doc_type,
self.index))
try:
return self.elk.indices.delete_mapping(self.index,
doc_type=doc_type)
except Exception:
raise
def remove_mappings(self):
"""
Remove mappings from elasticsearch
:return: dict
"""
# check if index doesn't exist return
if not self._check_index_exists(index=self.index):
print("Index {0} doesn't exists.".format(self.index))
return
# remove mappings
_mappings = self.get_required_mappings()
for doc_type, body in _mappings.items():
check = self.remove_one_mapping(doc_type)
if not check:
print("Deleting {0} is failed".format(doc_type))
elif check:
print("Deleting {0} is {1}".format(
doc_type, check.get('acknowledged')))
else:
print("Couldn't delete {0}. Request returned {1}".format(
doc_type, check.get('acknowledged')))
del_index = self.prompt('Do you want to remove index as well ? (y/n) ')
if del_index:
self.delete_index()
def update_mappings(self):
"""
Update mappings
:return: dict
"""
CONF.yes = True
return self.db_sync()
def show_mappings(self):
"""
Print existing mappings in an index
:return: dict
"""
# check if index doesn't exist return
if not self._check_index_exists(index=self.index):
print("Index {0} doesn't exists.".format(self.index))
return
print(json.dumps(self.elk.indices.get_mapping(index=self.index)))
def update_settings(self):
"""
Update number of replicas
:return: dict
"""
body = {
'number_of_replicas':
self.conf.number_of_replicas or DEFAULT_REPLICAS
}
return self.elk.indices.put_settings(body=body, index=self.index)
def prompt(self, message):
"""
Helper function that is being used to ask the user for confirmation
:param message: Message to be printed (To ask the user to confirm ...)
:return: True or False
"""
if CONF.yes:
return CONF.yes
while True:
ans = six.input(message)
if ans.lower() == 'y':
return True
elif ans.lower() == 'n':
return False
def main():
mappings = db_mappings.get_mappings()
parse_config(mapping_choices=mappings.keys())
parse_config()
config.setup_logging()
if not CONF.db:
@ -333,17 +74,20 @@ def main():
sys.exit(0)
try:
elk = ElasticSearchManager(mappings=mappings)
db_driver = manager.get_db_driver(CONF.storage.driver,
backend=CONF.storage.backend)
if CONF.db.options.lower() == 'sync':
elk.db_sync()
db_driver.db_sync()
elif CONF.db.options.lower() == 'update':
elk.update_mappings()
db_driver.db_sync()
elif CONF.db.options.lower() == 'remove':
elk.remove_mappings()
db_driver.db_remove()
elif CONF.db.options.lower() == 'show':
elk.show_mappings()
elif CONF.db.options.lower() == 'update-settings':
elk.update_settings()
db_tables = db_driver.db_show()
if db_tables:
print(json.dumps(db_tables))
else:
print ("No Tables/Mappings found!")
else:
raise Exception('Option {0} not found !'.format(CONF.db.options))
except Exception as e:

freezer_api/common/config.py

@ -22,7 +22,6 @@ from oslo_log import log
from oslo_policy import policy
from freezer_api import __version__ as FREEZER_API_VERSION
from freezer_api.storage import driver
CONF = cfg.CONF
@ -34,6 +33,19 @@ paste_deploy = [
'the available pipelines.'),
]
_DB_DRIVERS = [
cfg.StrOpt("backend",
help="Database backend section name. This section will "
"be loaded by the proper driver to connect to "
"the database."
),
cfg.StrOpt('driver',
# default='freezer_api.storage.elastic.ElasticSearchEngine',
default='elasticsearch',
help="Database driver to be used."
)
]
def api_common_opts():
@ -83,9 +95,19 @@ requests on registered endpoints conforming to the v2 OpenStack Freezer api.
return _COMMON
def register_db_drivers_opt():
"""Register storage configuration options"""
# storage backend options to be registered
opt_group = cfg.OptGroup(name='storage',
title='Freezer Database drivers')
CONF.register_group(opt_group)
CONF.register_opts(_DB_DRIVERS, group=opt_group)
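
For illustration, once register_db_drivers_opt() has run, the options
resolve as follows (the override call is only an example):

    from oslo_config import cfg

    CONF = cfg.CONF
    register_db_drivers_opt()
    print(CONF.storage.driver)    # 'elasticsearch' (the default above)
    print(CONF.storage.backend)   # None until set in freezer-api.conf
    CONF.set_override('driver', 'sqlalchemy', group='storage')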
def parse_args(args=[]):
CONF.register_cli_opts(api_common_opts())
driver.register_storage_opts()
register_db_drivers_opt()
# register paste configuration
paste_grp = cfg.OptGroup('paste_deploy',
'Paste Configuration')
@ -151,5 +173,5 @@ def list_opts():
AUTH_GROUP: AUTH_OPTS
}
# update the current list of opts with db backend drivers opts
_OPTS.update(driver.get_storage_opts())
_OPTS.update({"storage": _DB_DRIVERS})
return _OPTS.items()

freezer_api/common/utils.py

@ -21,6 +21,9 @@ import jsonschema
from freezer_api.common import exceptions as freezer_api_exc
from freezer_api.common import json_schemas
from oslo_log import log
LOG = log.getLogger(__name__)
class BackupMetadataDoc(object):
@ -169,6 +172,7 @@ class SessionDoc(object):
@staticmethod
def validate(doc):
LOG.debug("Debugging Session validate: {0}".format(doc))
try:
SessionDoc.session_doc_validator.validate(doc)
except Exception as e:
@ -190,7 +194,7 @@ class SessionDoc(object):
return doc
@staticmethod
def create(doc, user_id, hold_off=30, project_id=None):
def create(doc, user_id, project_id, hold_off=30):
doc.update({
'user_id': user_id,
'project_id': project_id,

freezer_api/db/base.py (new file)

@ -0,0 +1,54 @@
"""
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
(C) Copyright 2016-2018 Hewlett Packard Enterprise Development Company LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
from oslo_config import cfg
from oslo_log import log
import six
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class DBDriver(object):
_OPTS = [
cfg.StrOpt('host',
required=True,
help="Database host"),
cfg.StrOpt("username",
help="Database username"),
cfg.StrOpt("password",
help="Database Password")
]
def __init__(self, backend, is_created=False):
if not is_created:
grp = cfg.OptGroup(backend)
CONF.register_group(grp)
CONF.register_opts(self._OPTS, grp)
self.conf = CONF.get(backend)
self.backend = backend
def connect(self):
pass
@abc.abstractproperty
def name(self):
"""Name of the database driver"""
pass
def get_instance(self):
pass
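
To make the contract concrete, a hypothetical minimal driver could look
like this (the class is illustrative and not part of this patch):

    class DummyDriver(DBDriver):
        """Illustrative subclass only."""

        @property
        def name(self):
            return "dummy"

        def get_api(self):
            # a real driver returns its engine/session access object here
            return None

    # registers the host/username/password opts under the [dummy] group
    drv = DummyDriver('dummy')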

freezer_api/db/common.py (new file)

@ -0,0 +1,45 @@
"""
(C) Copyright 2016-2018 Hewlett Packard Enterprise Development Company LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# storage backend options to be registered
_OPTS = [
cfg.StrOpt("backend",
help="Database backend section name. This section "
"will be loaded by the proper driver to connect to "
"the database."
),
cfg.StrOpt('driver',
default='freezer_api.storage.elastic.ElasticSearchEngine',
help="Database driver to be used."
)
]
def get_db(driver=None):
"""Automatically loads the database driver to be used."""
storage = CONF.get('storage')
if not driver:
driver = storage['driver']
driver_instance = importutils.import_object(
driver,
backend=storage['backend']
)
return driver_instance
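
Assuming the [storage] options above are registered and populated, usage
would look like:

    # load the driver class named in CONF.storage.driver (the default
    # above points at the ElasticSearch engine)
    db = get_db()

    # or force a specific implementation by dotted path
    db = get_db(driver='freezer_api.storage.elastic.ElasticSearchEngine')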


freezer_api/db/elasticsearch/driver.py (new file)

@ -0,0 +1,119 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from freezer_api.common import db_mappings
from freezer_api.db import base as db_base
from freezer_api.db.elasticsearch import es_manager
from freezer_api.storage import elasticv2 as db_session
CONF = cfg.CONF
LOG = log.getLogger(__name__)
DEFAULT_INDEX = 'freezer'
DEFAULT_REPLICAS = 0
_BACKEND_MAPPING = {'sqlalchemy': 'freezer_api.db.sqlalchemy.api'}
class ElasticSearchDB(db_base.DBDriver):
_ES_OPTS = [
cfg.ListOpt('hosts',
default=['http://127.0.0.1:9200'],
help='specify the storage hosts'),
cfg.StrOpt('index',
default='freezer',
help='specify the name of the elasticsearch index'),
cfg.IntOpt('timeout',
default=60,
help='specify the connection timeout'),
cfg.IntOpt('retries',
default=20,
help='number of retries to allow before raising an error'),
cfg.BoolOpt('use_ssl',
default=False,
help='explicitly turn on SSL'),
cfg.BoolOpt('verify_certs',
default=False,
help='turn on SSL certs verification'),
cfg.StrOpt('ca_certs',
help='path to CA certs on disk'),
cfg.IntOpt('number_of_replicas',
default=0,
help='Number of replicas for the ES cluster. Default is 0. '
'Use 0 for no replicas. This should be set to (number '
'of nodes in the ES cluster - 1).'),
cfg.StrOpt('mapping',
dest='select_mapping',
default='',
help='Specific mapping to upload. Valid choices: {0}'
.format(','.join(db_mappings.get_mappings()))),
cfg.BoolOpt('erase',
dest='erase',
default=False,
help='Enable index deletion in case mapping update fails '
'due to incompatible changes'
),
cfg.BoolOpt('test-only',
dest='test_only',
default=False,
help='Test the validity of the mappings, but take no action'
)
]
def __init__(self, backend):
super(ElasticSearchDB, self).__init__(backend)
grp = cfg.OptGroup(backend)
CONF.register_group(grp)
CONF.register_opts(self._ES_OPTS, group=backend)
# CONF.register_cli_opts(self._ES_CLI_OPTS)
self.conf = CONF.get(backend)
self.index = self.conf.index or DEFAULT_INDEX
self._engine = None
self._manage_engine = None
def get_engine(self):
if not self._engine:
self._engine = db_session.ElasticSearchEngineV2(self.backend)
return self._engine
def get_api(self):
return self.get_engine()
def get_manage_engine(self):
opts = dict(self.conf.items())
self._manage_engine = es_manager.ElasticSearchManager(**opts)
return self._manage_engine
def db_sync(self):
if not self._manage_engine:
self._manage_engine = self.get_manage_engine()
self._manage_engine.update_mappings()
def db_remove(self):
if not self._manage_engine:
self._manage_engine = self.get_manage_engine()
self._manage_engine.remove_mappings()
def db_show(self):
if not self._manage_engine:
self._manage_engine = self.get_manage_engine()
return self._manage_engine.show_mappings()
def name(self):
return "ElasticSearch"

freezer_api/db/elasticsearch/es_manager.py (new file)

@ -0,0 +1,231 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import elasticsearch
from oslo_config import cfg
from oslo_log import log
import six
from freezer_api.common import db_mappings
CONF = cfg.CONF
LOG = log.getLogger(__name__)
DEFAULT_INDEX = 'freezer'
DEFAULT_REPLICAS = 0
class ElasticSearchManager(object):
"""
Manage ElasticSearch mapping operations:
sync: create the mappings
update: update the mappings
remove: delete the mappings
show: print out all the mappings
"""
def __init__(self, **options):
self.mappings = db_mappings.get_mappings().copy()
self.conf = options.copy()
self.index = self.conf['index']
self.elk = elasticsearch.Elasticsearch(**options)
# check that the cluster is reachable
if not self.elk.ping():
raise Exception('ElasticSearch cluster is not available. '
'Cannot ping it')
# clear the index cache
try:
self.elk.indices.clear_cache(index=self.conf['index'])
except Exception as e:
LOG.warning(e)
def _check_index_exists(self, index):
LOG.info('Checking whether index {0} exists'.format(index))
return self.elk.indices.exists(index=index)
def _check_mapping_exists(self, mappings):
LOG.info('check if mappings: {0} exists or not'.format(mappings))
return self.elk.indices.exists_type(index=self.index,
doc_type=mappings)
def get_required_mappings(self):
"""
This function checks whether the user selected a specific mapping.
If so, it returns only that mapping; otherwise it returns all
mappings to be updated.
:return:
"""
# check if the user asked to update only one mapping ( -m is provided )
mappings = {}
if self.conf['select_mapping']:
if self.conf['select_mapping'] not in self.mappings.keys():
raise Exception(
'Selected mapping {0} does not exist. Please choose '
'one of {1}'.format(self.conf['select_mapping'],
self.mappings.keys()
)
)
mappings[self.conf['select_mapping']] = \
self.mappings.get(self.conf['select_mapping'])
else:
mappings = self.mappings
return mappings
def db_sync(self):
"""
Create or update elasticsearch db mappings
steps:
1) check if the mappings exist
2) remove the mappings if erase is passed
3) update the mappings if -y is passed
4) if the update failed, ask for permission to remove the old mappings
5) try to update again
6) if the update succeeded, exit
:return:
"""
# check if erase provided remove mappings first
if self.conf.get('erase'):
self.remove_mappings()
# check if index does not exists create it
if not self._check_index_exists(self.index):
self._create_index()
_mappings = self.get_required_mappings()
# create/update one by one
for doc_type, body in _mappings.items():
check = self.create_one_mapping(doc_type, body)
if check:
print("Creating or Updating {0} is {1}".format(
doc_type, check.get('acknowledged')))
else:
print("Couldn't update {0}. Request returned {1}".format(
doc_type, check))
def _create_index(self):
"""
Create the index that will allow us to put the mappings under it
:return: {u'acknowledged': True} if success or None if index exists
"""
if not self._check_index_exists(index=self.index):
body = {
'number_of_replicas':
self.conf['number_of_replicas'] or DEFAULT_REPLICAS
}
return self.elk.indices.create(index=self.index, body=body)
def delete_index(self):
return self.elk.indices.delete(index=self.index)
def create_one_mapping(self, doc_type, body):
"""
Create one document type and update its mappings
:param doc_type: the document type to be created jobs, clients, backups
:param body: the structure of the document
:return: dict
"""
# check if doc_type exists or not
if self._check_mapping_exists(doc_type):
do_update = self.prompt(
'[[[ {0} ]]] already exists in index => {1}'
' <= Do you want to update it ? (y/n) '.format(doc_type,
self.index)
)
if do_update:
# Call elasticsearch library and put the mappings
return self.elk.indices.put_mapping(doc_type=doc_type,
body=body,
index=self.index
)
else:
return {'acknowledged': False}
return self.elk.indices.put_mapping(doc_type=doc_type, body=body,
index=self.index)
def remove_one_mapping(self, doc_type):
"""
Removes one mapping at a time
:param doc_type: document type to be removed
:return: dict
"""
LOG.info('Removing mapping {0} from index {1}'.format(doc_type,
self.index))
return self.elk.indices.delete_mapping(self.index,
doc_type=doc_type)
def remove_mappings(self):
"""
Remove mappings from elasticsearch
:return: dict
"""
# check if index doesn't exist return
if not self._check_index_exists(index=self.index):
print("Index {0} doesn't exists.".format(self.index))
return
# remove mappings
self.delete_index()
def update_mappings(self):
"""
Update mappings
:return: dict
"""
self.conf['yes'] = True
return self.db_sync()
def show_mappings(self):
"""
Print existing mappings in an index
:return: dict
"""
# check if index doesn't exist return
if not self._check_index_exists(index=self.index):
LOG.debug("Index {0} doesn't exists.".format(self.index))
return
return self.elk.indices.get_mapping(index=self.index)
def update_settings(self):
"""
Update number of replicas
:return: dict
"""
body = {
'number_of_replicas':
self.conf['number_of_replicas'] or DEFAULT_REPLICAS
}
return self.elk.indices.put_settings(body=body, index=self.index)
def prompt(self, message):
"""
Helper function that is being used to ask the user for confirmation
:param message: Message to be printed (To ask the user to confirm ...)
:return: True or False
"""
if self.conf.get('yes'):
return True
while True:
ans = six.input(message)
if ans.lower() == 'y':
return True
elif ans.lower() == 'n':
return False
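
A standalone sketch of driving the manager (the option values are
illustrative; in this patch they come from the flattened backend config
section, and unknown keys are passed through to the elasticsearch
client):

    opts = {
        'hosts': ['http://127.0.0.1:9200'],
        'index': 'freezer',
        'number_of_replicas': 0,
        'select_mapping': '',   # empty string selects all mappings
        'erase': False,
        'yes': True,            # skip the interactive confirmation
    }
    mgr = ElasticSearchManager(**opts)
    mgr.db_sync()               # create the index and put all mappings
    print(mgr.show_mappings())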

freezer_api/db/manager.py (new file)

@ -0,0 +1,65 @@
"""
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
(C) Copyright 2016-2018 Hewlett Packard Enterprise Development Company LP
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from oslo_log import log
from oslo_utils import importutils
from stevedore import driver
LOG = log.getLogger(__name__)
_DB_DRIVER_NAMESPACE = "freezer.db.backends"
def _load_class_by_alias_or_classname(namespace, name):
"""Load class using stevedore alias or the class name
:param namespace: namespace where the alias is defined
:param name: alias or class name of the class to be loaded
:returns: the class, if it can be loaded
:raises: ImportError if the class cannot be loaded
"""
if not name:
LOG.error("Alias or class name is not set")
raise ImportError("Class not found.")
try:
# Try to resolve class by alias
mgr = driver.DriverManager(
namespace, name, warn_on_missing_entrypoint=False)
class_to_load = mgr.driver
except RuntimeError:
e1_info = sys.exc_info()
# Fallback to class name
try:
class_to_load = importutils.import_class(name)
except (ImportError, ValueError):
LOG.error("Error loading class by alias",
exc_info=e1_info)
LOG.error("Error loading class by class name",
exc_info=True)
raise ImportError("Class not found.")
return class_to_load
def get_db_driver(name, backend):
"""
Load a database driver.
:param name: name (alias or class path) of the database driver.
:param backend: name of the config section holding the backend options.
:return: Instance of the driver class
"""
driver_class = _load_class_by_alias_or_classname(_DB_DRIVER_NAMESPACE,
name)
return driver_class(backend=backend)
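
Resolution accepts either an entry-point alias (see the
freezer.db.backends section added to setup.cfg below) or a full class
path, for example:

    # by stevedore alias
    drv = get_db_driver('sqlalchemy', backend='sqlalchemy')

    # fallback: by importable class name
    drv = get_db_driver(
        'freezer_api.db.elasticsearch.driver.ElasticSearchDB',
        backend='elasticsearch')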

freezer_api/db/sqlalchemy/api.py (new file)

@ -0,0 +1,88 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
main_context_manager = enginefacade.transaction_context()
api_context_manager = enginefacade.transaction_context()
def _get_db_conf(conf_group, connection=None):
kw = dict(
connection=connection or conf_group.connection,
slave_connection=conf_group.slave_connection,
sqlite_fk=False,
__autocommit=True,
expire_on_commit=False,
mysql_sql_mode=conf_group.mysql_sql_mode,
connection_recycle_time=conf_group.connection_recycle_time,
connection_debug=conf_group.connection_debug,
max_pool_size=conf_group.max_pool_size,
max_overflow=conf_group.max_overflow,
pool_timeout=conf_group.pool_timeout,
sqlite_synchronous=conf_group.sqlite_synchronous,
connection_trace=conf_group.connection_trace,
max_retries=conf_group.max_retries,
retry_interval=conf_group.retry_interval)
return kw
def get_backend():
return sys.modules[__name__]
def create_context_manager(connection=None):
"""Create a database context manager object.
:param connection: The database connection string
"""
ctxt_mgr = enginefacade.transaction_context()
ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
return ctxt_mgr
def _context_manager_from_context(context):
if context:
try:
return context.db_connection
except AttributeError:
pass
def get_context_manager(context):
"""Get a database context manager object.
:param context: The request context that can contain a context manager
"""
return _context_manager_from_context(context) or main_context_manager
def get_engine(use_slave=False, context=None):
"""Get a database engine object.
:param use_slave: Whether to use the slave connection
:param context: The request context that can contain a context manager
"""
ctxt_mgr = get_context_manager(context)
return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_slave)
def get_api_engine():
return api_context_manager.get_legacy_facade().get_engine()
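
A sketch of obtaining an engine (the SQLite URL is illustrative; the
module-level helpers otherwise assume oslo.db's [database] options,
notably connection, are registered and set):

    # explicit URL: build a dedicated transaction context
    ctxt_mgr = create_context_manager('sqlite://')
    engine = ctxt_mgr.get_legacy_facade().get_engine()

    # or use the module-level context driven by [database]/connection
    engine = get_engine()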

freezer_api/db/sqlalchemy/driver.py (new file)

@ -0,0 +1,61 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import api as db_api
from oslo_log import log
from freezer_api.db import base as db_base
from freezer_api.db.sqlalchemy import api as db_session
from freezer_api.db.sqlalchemy import models
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_BACKEND_MAPPING = {'sqlalchemy': 'freezer_api.db.sqlalchemy.api'}
class SQLDriver(db_base.DBDriver):
def __init__(self, backend):
super(SQLDriver, self).__init__(backend)
self.IMPL = db_api.DBAPI.from_config(CONF, _BACKEND_MAPPING)
self._engine = None
def get_engine(self):
if not self._engine:
self._engine = db_session.get_engine()
return self._engine
def get_api(self):
return self.get_engine()
def db_sync(self):
if not self._engine:
self._engine = self.get_engine()
models.register_models(self._engine)
def db_show(self):
if not self._engine:
self._engine = self.get_engine()
return models.get_tables(self._engine)
def db_remove(self):
if not self._engine:
self._engine = self.get_engine()
models.unregister_models(self._engine)
def name(self):
return "sqlalchemy"

freezer_api/db/sqlalchemy/models.py

@ -36,7 +36,7 @@ class FreezerBase(models.TimestampMixin,
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
metadata = None
backup_metadata = None
@staticmethod
def delete_values():
@ -128,7 +128,7 @@ class ActionReport(BASE, FreezerBase):
project_id = Column(String(36), nullable=False)
result = Column(String(255))
time_elapsed = Column(String(255))
metadata = Column(Text)
backup_metadata = Column(Text)
report_date = Column(TIMESTAMP)
log = Column(BLOB)
action = relationship(Action, backref='action_reports',
@ -172,3 +172,24 @@ class JobAttachment(BASE, FreezerBase):
primaryjoin='and_('
'JobAttachment.session_id == Session.id,'
'JobAttachment.deleted == False)')
def register_models(engine):
_models = (Client, Action, Job, Session,
ActionAttachment, ActionReport, JobAttachment)
for _model in _models:
_model.metadata.create_all(engine)
def unregister_models(engine):
_models = (Client, Action, Job, Session,
ActionAttachment, ActionReport, JobAttachment)
for _model in _models:
_model.metadata.drop_all(engine)
def get_tables(engine):
from sqlalchemy import MetaData
_meta = MetaData()
_meta.reflect(engine)
return _meta.tables.keys()
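
A self-contained sketch of these helpers against an in-memory SQLite
engine (illustrative only; real deployments go through the SQLAlchemy
driver above):

    from sqlalchemy import create_engine

    engine = create_engine('sqlite://')   # in-memory database
    register_models(engine)               # CREATE TABLE for all models
    print(sorted(get_tables(engine)))
    unregister_models(engine)             # DROP TABLE for all models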

freezer_api/storage/elasticv2.py

@ -319,34 +319,6 @@ class SessionTypeManagerV2(TypeManagerV2):
class ElasticSearchEngineV2(object):
_OPTS = [
cfg.ListOpt('hosts',
default=['http://127.0.0.1:9200'],
help='specify the storage hosts'),
cfg.StrOpt('index',
default='freezer',
help='specify the name of the elasticsearch index'),
cfg.IntOpt('timeout',
default=60,
help='specify the connection timeout'),
cfg.IntOpt('retries',
default=20,
help='number of retries to allow before raising and error'),
cfg.BoolOpt('use_ssl',
default=False,
help='explicitly turn on SSL'),
cfg.BoolOpt('verify_certs',
default=False,
help='turn on SSL certs verification'),
cfg.StrOpt('ca_certs',
help='path to CA certs on disk'),
cfg.IntOpt('number_of_replicas',
default=0,
help='Number of replicas for elk cluster. Default is 0. '
'Use 0 for no replicas. This should be set to (number '
'of node in the ES cluter -1).')
]
def __init__(self, backend):
"""backend: name of the section in the config file to load
elasticsearch opts
@ -358,8 +330,6 @@ class ElasticSearchEngineV2(object):
self.job_manager = None
self.action_manager = None
self.session_manager = None
# register elasticsearch opts
CONF.register_opts(self._OPTS, group=backend)
self.conf = dict(CONF.get(backend))
self.backend = backend
self._validate_opts()

freezer_api/tests/unit/test_driver.py (deleted)

@ -1,74 +0,0 @@
"""Freezer swift.py related tests
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import mock
from mock import patch
from freezer_api.storage import driver
from freezer_api.tests.unit import common
class TestStorageDriver(common.FreezerBaseTestCase):
@patch('freezer_api.storage.driver.LOG')
def test_get_db_raises_when_db_not_supported(self, mock_LOG):
mock_CONF = mock.Mock()
mock_CONF.storage.db = 'nodb'
driver.CONF = mock_CONF
self.assertRaises(Exception, driver.get_db)
@patch('freezer_api.storage.driver.elastic')
@patch('freezer_api.storage.driver.LOG')
@patch('freezer_api.storage.driver.get_db')
def test_get_db_elastic(self, mock_LOG, mock_elastic, mock_get_db):
mock_get_db.return_value = object()
driver.register_storage_opts()
driver.get_db()
self.assertTrue(mock_elastic.ElasticSearchEngine)
@patch('freezer_api.storage.driver.elastic')
@patch('freezer_api.storage.driver.LOG')
def test_get_db_elastic_raises_Exception_when_cert_file_not_found(
self, mock_LOG, mock_elastic):
mock_CONF = mock.Mock()
mock_CONF.storage.backend = 'elasticsearch'
mock_CONF.storage.driver = 'freezer_api.storage.elastic.' \
'ElasticSearchEngine'
mock_CONF.elasticsearch.hosts = 'es_server'
mock_CONF.elasticsearch.verify_certs = 'False'
mock_CONF.elasticsearch.ca_certs = 'not_existant'
mock_CONF.elasticsearch.use_ssl = False
mock_CONF.elasticsearch.timeout = 43
mock_CONF.elasticsearch.retries = 37
driver.CONF = mock_CONF
self.assertRaises(Exception, driver.get_db)
@patch('freezer_api.storage.driver.elastic')
@patch('freezer_api.storage.driver.LOG')
def test_get_db_elastic_raises_Exception_when_hosts_not_defined(
self, mock_LOG, mock_elastic):
mock_CONF = mock.Mock()
mock_CONF.storage.backend = 'elasticsearch'
mock_CONF.elasticsearch.hosts = ''
mock_CONF.elasticsearch.endpoint = ''
mock_CONF.elasticsearch.verify_certs = 'False'
mock_CONF.elasticsearch.ca_certs = ''
mock_CONF.elasticsearch.use_ssl = False
mock_CONF.elasticsearch.timeout = 43
mock_CONF.elasticsearch.retries = 37
driver.CONF = mock_CONF
self.assertRaises(Exception, driver.get_db)

freezer_api/tests/unit/test_service.py

@ -31,7 +31,7 @@ class TestService(common.FreezerBaseTestCase):
super(TestService, self).setUp()
@patch('freezer_api.cmd.api.v1')
@patch('freezer_api.cmd.api.driver')
@patch('freezer_api.cmd.api.manager')
@patch('freezer_api.cmd.api.falcon')
def test_on_old_falcon_builds_v0(self, mock_falcon, mock_driver, mock_v1):
"""Test that falcon versions that should use old middleware syntax do so
@ -63,7 +63,7 @@ class TestService(common.FreezerBaseTestCase):
self.assertNotIn('middleware', named_args)
@patch('freezer_api.cmd.api.v1')
@patch('freezer_api.cmd.api.driver')
@patch('freezer_api.cmd.api.manager')
@patch('freezer_api.cmd.api.falcon')
def test_on_new_falcon_builds_v1(self, mock_falcon, mock_driver, mock_v1):
"""Test that falcon versions that should use new middleware syntax do so
@ -97,7 +97,7 @@ class TestService(common.FreezerBaseTestCase):
self.assertIn('middleware', named_args)
@patch('freezer_api.cmd.api.v2')
@patch('freezer_api.cmd.api.driver')
@patch('freezer_api.cmd.api.manager')
@patch('freezer_api.cmd.api.falcon')
def test_on_old_falcon_builds_v2(self, mock_falcon, mock_driver, mock_v2):
"""Test that falcon versions that should use old middleware syntax do so

setup.cfg

@ -57,6 +57,10 @@ paste.app_factory =
wsgi_scripts =
freezer-api-wsgi = freezer_api.service:initialize_app
freezer.db.backends =
sqlalchemy = freezer_api.db.sqlalchemy.driver:SQLDriver
elasticsearch = freezer_api.db.elasticsearch.driver:ElasticSearchDB
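
These aliases are what freezer_api.db.manager resolves through
stevedore, e.g.:

    from stevedore import driver

    mgr = driver.DriverManager('freezer.db.backends', 'sqlalchemy')
    driver_cls = mgr.driver   # freezer_api.db.sqlalchemy.driver:SQLDriver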
[pytests]
where=tests
verbosity=2