Glare Code
This commit is contained in:
parent a15b8b1b9a
commit b4bcc61991
8 .testr.conf Normal file
@@ -0,0 +1,8 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
             ${PYTHON:-python} -m subunit.run discover -t ./ ./glare/tests $LISTOPT $IDOPTION

test_id_option=--load-list $IDFILE
test_list_option=--list
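
For readers unfamiliar with testr templates, ${VAR:-default} falls back to the default when the variable is unset. A rough Python rendering of what test_command expands to (a sketch only; it assumes python-subunit is installed and is not part of this commit):

import os
import subprocess

# Mirror the ${VAR:-default} fallbacks from .testr.conf.
env = dict(os.environ)
env.setdefault('OS_STDOUT_CAPTURE', '1')
env.setdefault('OS_STDERR_CAPTURE', '1')
env.setdefault('OS_TEST_TIMEOUT', '160')

python = env.get('PYTHON', 'python')
# Discover and run the Glare test suite through subunit, as testr would
# (minus the $LISTOPT/$IDOPTION placeholders testr fills in itself).
subprocess.call(
    [python, '-m', 'subunit.run', 'discover', '-t', './', './glare/tests'],
    env=env,
)
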
0 README.rst Normal file
245 bandit.yaml Normal file
@@ -0,0 +1,245 @@
# optional: after how many files to update progress
#show_progress_every: 100

# optional: plugins directory name
#plugins_dir: 'plugins'

# optional: plugins discovery name pattern
plugin_name_pattern: '*.py'

# optional: terminal escape sequences to display colors
#output_colors:
#    DEFAULT: '\033[0m'
#    HEADER: '\033[95m'
#    LOW: '\033[94m'
#    MEDIUM: '\033[93m'
#    HIGH: '\033[91m'

# optional: log format string
#log_format: "[%(module)s]\t%(levelname)s\t%(message)s"

# globs of files which should be analyzed
include:
    - '*.py'
    - '*.pyw'

# a list of strings, which if found in the path will cause files to be excluded
# for example /tests/ - to remove all files in the tests directory
exclude_dirs:
    - '/tests/'

profiles:
    gate:
        include:

            - any_other_function_with_shell_equals_true
            - assert_used
            - blacklist_calls
            - blacklist_import_func

            # One of the blacklisted imports is the subprocess module. Keystone
            # has to import the subprocess module in a single module for
            # eventlet support so in most cases bandit won't be able to detect
            # that subprocess is even being imported. Also, Bandit's
            # recommendation is just to check that the use is safe without any
            # documentation on what safe or unsafe usage is. So this test is
            # skipped.
            # - blacklist_imports

            - exec_used

            - execute_with_run_as_root_equals_true

            # - hardcoded_bind_all_interfaces  # TODO: enable this test

            # Not working because wordlist/default-passwords file not bundled,
            # see https://bugs.launchpad.net/bandit/+bug/1451575 :
            # - hardcoded_password

            # Not used because it's prone to false positives:
            # - hardcoded_sql_expressions

            # - hardcoded_tmp_directory  # TODO: enable this test

            - jinja2_autoescape_false

            - linux_commands_wildcard_injection

            - paramiko_calls

            - password_config_option_not_marked_secret
            - request_with_no_cert_validation
            - set_bad_file_permissions
            - subprocess_popen_with_shell_equals_true
            # - subprocess_without_shell_equals_true  # TODO: enable this test
            - start_process_with_a_shell
            # - start_process_with_no_shell  # TODO: enable this test
            - start_process_with_partial_path
            - ssl_with_bad_defaults
            - ssl_with_bad_version
            - ssl_with_no_version
            # - try_except_pass  # TODO: enable this test

            - use_of_mako_templates

blacklist_calls:
    bad_name_sets:
        # - pickle:
        #     qualnames: [pickle.loads, pickle.load, pickle.Unpickler,
        #                 cPickle.loads, cPickle.load, cPickle.Unpickler]
        #     message: "Pickle library appears to be in use, possible security issue."
        # TODO: enable this test
        - marshal:
            qualnames: [marshal.load, marshal.loads]
            message: "Deserialization with the marshal module is possibly dangerous."
        # - md5:
        #     qualnames: [hashlib.md5, Crypto.Hash.MD2.new, Crypto.Hash.MD4.new, Crypto.Hash.MD5.new, cryptography.hazmat.primitives.hashes.MD5]
        #     message: "Use of insecure MD2, MD4, or MD5 hash function."
        # TODO: enable this test
        - mktemp_q:
            qualnames: [tempfile.mktemp]
            message: "Use of insecure and deprecated function (mktemp)."
        - eval:
            qualnames: [eval]
            message: "Use of possibly insecure function - consider using safer ast.literal_eval."
        - mark_safe:
            names: [mark_safe]
            message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed."
        - httpsconnection:
            qualnames: [httplib.HTTPSConnection]
            message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033"
        - yaml_load:
            qualnames: [yaml.load]
            message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()."
        - urllib_urlopen:
            qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request]
            message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected."
        - random:
            qualnames: [random.random, random.randrange, random.randint, random.choice, random.uniform, random.triangular]
            message: "Standard pseudo-random generators are not suitable for security/cryptographic purposes."
            level: "LOW"

        # Most of this is based off of Christian Heimes' work on defusedxml:
        #   https://pypi.python.org/pypi/defusedxml/#defusedxml-sax

        # TODO(jaegerandi): Enable once defusedxml is in global requirements.
        #- xml_bad_cElementTree:
        #    qualnames: [xml.etree.cElementTree.parse,
        #                xml.etree.cElementTree.iterparse,
        #                xml.etree.cElementTree.fromstring,
        #                xml.etree.cElementTree.XMLParser]
        #    message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        #- xml_bad_ElementTree:
        #    qualnames: [xml.etree.ElementTree.parse,
        #                xml.etree.ElementTree.iterparse,
        #                xml.etree.ElementTree.fromstring,
        #                xml.etree.ElementTree.XMLParser]
        #    message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        - xml_bad_expatreader:
            qualnames: [xml.sax.expatreader.create_parser]
            message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        - xml_bad_expatbuilder:
            qualnames: [xml.dom.expatbuilder.parse,
                        xml.dom.expatbuilder.parseString]
            message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        - xml_bad_sax:
            qualnames: [xml.sax.parse,
                        xml.sax.parseString,
                        xml.sax.make_parser]
            message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        - xml_bad_minidom:
            qualnames: [xml.dom.minidom.parse,
                        xml.dom.minidom.parseString]
            message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        - xml_bad_pulldom:
            qualnames: [xml.dom.pulldom.parse,
                        xml.dom.pulldom.parseString]
            message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."
        - xml_bad_etree:
            qualnames: [lxml.etree.parse,
                        lxml.etree.fromstring,
                        lxml.etree.RestrictedElement,
                        lxml.etree.GlobalParserTLS,
                        lxml.etree.getDefaultParser,
                        lxml.etree.check_docinfo]
            message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with its defusedxml equivalent function."


shell_injection:
    # Start a process using the subprocess module, or one of its wrappers.
    subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call,
                 subprocess.check_output, utils.execute, utils.execute_with_timeout]
    # Start a process with a function vulnerable to shell injection.
    shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4,
            popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3,
            popen2.Popen4, commands.getoutput, commands.getstatusoutput]
    # Start a process with a function that is not vulnerable to shell injection.
    no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve,
               os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp,
               os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe,
               os.startfile]

blacklist_imports:
    bad_import_sets:
        - telnet:
            imports: [telnetlib]
            level: HIGH
            message: "Telnet is considered insecure. Use SSH or some other encrypted protocol."
        - info_libs:
            imports: [pickle, cPickle, subprocess, Crypto]
            level: LOW
            message: "Consider possible security implications associated with {module} module."

        # Most of this is based off of Christian Heimes' work on defusedxml:
        #   https://pypi.python.org/pypi/defusedxml/#defusedxml-sax

        - xml_libs:
            imports: [xml.etree.cElementTree,
                      xml.etree.ElementTree,
                      xml.sax.expatreader,
                      xml.sax,
                      xml.dom.expatbuilder,
                      xml.dom.minidom,
                      xml.dom.pulldom,
                      lxml.etree,
                      lxml]
            message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {module} with the equivalent defusedxml package."
            level: LOW
        - xml_libs_high:
            imports: [xmlrpclib]
            message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() function to monkey-patch xmlrpclib and mitigate XML vulnerabilities."
            level: HIGH

hardcoded_tmp_directory:
    tmp_dirs: ['/tmp', '/var/tmp', '/dev/shm']

hardcoded_password:
    # Support for full path, relative path and special "%(site_data_dir)s"
    # substitution (/usr/{local}/share)
    word_list: "%(site_data_dir)s/wordlist/default-passwords"

ssl_with_bad_version:
    bad_protocol_versions:
        - 'PROTOCOL_SSLv2'
        - 'SSLv2_METHOD'
        - 'SSLv23_METHOD'
        - 'PROTOCOL_SSLv3'  # strict option
        - 'PROTOCOL_TLSv1'  # strict option
        - 'SSLv3_METHOD'    # strict option
        - 'TLSv1_METHOD'    # strict option

password_config_option_not_marked_secret:
    function_names:
        - oslo.config.cfg.StrOpt
        - oslo_config.cfg.StrOpt

execute_with_run_as_root_equals_true:
    function_names:
        - ceilometer.utils.execute
        - cinder.utils.execute
        - neutron.agent.linux.utils.execute
        - nova.utils.execute
        - nova.utils.trycmd

try_except_pass:
    check_typed_exception: True
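
To make the blacklist_calls entries concrete, here is a small, hypothetical snippet of the patterns this profile flags, next to the safer alternatives its messages recommend (illustrative only, assuming PyYAML is installed; not part of the commit):

import os
import tempfile

import yaml  # PyYAML, assumed installed

# Flagged by the yaml_load entry: yaml.load can instantiate arbitrary
# Python objects from untrusted input.
# data = yaml.load(document)        # unsafe, reported by bandit
data = yaml.safe_load("a: 1")       # recommended replacement

# Flagged by the mktemp_q entry: tempfile.mktemp is racy and deprecated.
# path = tempfile.mktemp()          # unsafe, reported by bandit
tmp = tempfile.NamedTemporaryFile(delete=False)  # safe replacement
tmp.close()
os.unlink(tmp.name)
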
38 etc/glare-paste.ini Normal file
@@ -0,0 +1,38 @@
# Use this pipeline for no auth - DEFAULT
[pipeline:glare-api]
pipeline = cors faultwrapper healthcheck versionnegotiation osprofiler unauthenticated-context glarev1api

# Use this pipeline for keystone auth
[pipeline:glare-api-keystone]
pipeline = cors faultwrapper healthcheck versionnegotiation osprofiler authtoken context glarev1api

[app:glarev1api]
paste.app_factory = glare.api.v1.router:API.factory

[filter:healthcheck]
paste.filter_factory = oslo_middleware:Healthcheck.factory
backends = disable_by_file
disable_by_file_path = /etc/glare/healthcheck_disable

[filter:versionnegotiation]
paste.filter_factory = glare.api.middleware.version_negotiation:GlareVersionNegotiationFilter.factory

[filter:faultwrapper]
paste.filter_factory = glare.api.middleware.fault:GlareFaultWrapperFilter.factory

[filter:context]
paste.filter_factory = glare.api.middleware.glare_context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory = glare.api.middleware.glare_context:UnauthenticatedContextMiddleware.factory

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
delay_auth_decision = true

[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory

[filter:cors]
use = egg:oslo.middleware#cors
oslo_config_project = glare
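
Every paste.app_factory / paste.filter_factory line above points at a callable with the PasteDeploy factory signature. A minimal sketch of the two shapes (the class names here are hypothetical stand-ins, not Glare code):

class MyApp(object):
    """Stand-in WSGI application (hypothetical)."""
    def __init__(self, **conf):
        self.conf = conf


class MyMiddleware(object):
    """Stand-in WSGI middleware (hypothetical)."""
    def __init__(self, app, **conf):
        self.app = app
        self.conf = conf


def app_factory(global_config, **local_config):
    # PasteDeploy calls this with the [DEFAULT] options plus the section's
    # own options; it must return a WSGI application.
    return MyApp(**local_config)


def filter_factory(global_config, **local_config):
    # A filter factory returns a function that wraps the next app in the
    # pipeline, which is how the middleware chains above are built.
    def _filter(app):
        return MyMiddleware(app, **local_config)
    return _filter
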
25 etc/glare-swift.conf.sample Normal file
@@ -0,0 +1,25 @@
# glare-swift.conf.sample
#
# This file is an example config file when
# multiple swift accounts/backing stores are enabled.
#
# Specify the reference name in []
# For each section, specify the auth_address, user and key.
#
# WARNING:
# * If any of auth_address, user or key is not specified,
#   Glare's swift store will fail to configure

[ref1]
user = tenant:user1
key = key1
auth_version = 2
auth_address = http://localhost:5000/v2.0

[ref2]
user = project_name:user_name2
key = key2
user_domain_id = default
project_domain_id = default
auth_version = 3
auth_address = http://localhost:5000/v3
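
The reference sections are ordinary INI, so a configparser sketch is enough to show the shape of the data (illustrative only; this is not Glare's actual swift-store parsing code):

try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2

parser = configparser.RawConfigParser()
parser.read('glare-swift.conf')

for ref in parser.sections():
    # Each section ([ref1], [ref2], ...) is one swift account reference.
    user = parser.get(ref, 'user')
    key = parser.get(ref, 'key')
    auth_address = parser.get(ref, 'auth_address')
    print(ref, user, auth_address)
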
9 etc/oslo-config-generator/glare.conf Normal file
@@ -0,0 +1,9 @@
[DEFAULT]
output_file = etc/glare.conf.sample
namespace = glare
namespace = glance.store
namespace = oslo.db
namespace = oslo.db.concurrency
namespace = keystonemiddleware.auth_token
namespace = oslo.log
namespace = oslo.middleware.cors
0 glare/__init__.py Normal file
0 glare/api/__init__.py Normal file
0 glare/api/middleware/__init__.py Normal file
130 glare/api/middleware/fault.py Normal file
@@ -0,0 +1,130 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A middleware that turns exceptions into parsable strings.

Inspired by Cinder's and Heat's FaultWrapper middleware.
"""

import sys
import traceback

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import reflection
import six
import webob

from glare.common import exception
from glare.common import wsgi


LOG = logging.getLogger(__name__)


class Fault(object):

    def __init__(self, error):
        self.error = error

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        serializer = wsgi.JSONResponseSerializer()
        resp = webob.Response(request=req)
        default_webob_exc = webob.exc.HTTPInternalServerError()
        resp.status_code = self.error.get('code', default_webob_exc.code)
        serializer.default(resp, self.error)
        return resp


class GlareFaultWrapperFilter(wsgi.Middleware):
    """Replace error body with something the client can parse."""
    error_map = {
        'BadRequest': webob.exc.HTTPBadRequest,
        'Unauthorized': webob.exc.HTTPUnauthorized,
        'Forbidden': webob.exc.HTTPForbidden,
        'NotFound': webob.exc.HTTPNotFound,
        'RequestTimeout': webob.exc.HTTPRequestTimeout,
        'Conflict': webob.exc.HTTPConflict,
        'Gone': webob.exc.HTTPGone,
        'PreconditionFailed': webob.exc.HTTPPreconditionFailed,
        'RequestEntityTooLarge': webob.exc.HTTPRequestEntityTooLarge,
        'UnsupportedMediaType': webob.exc.HTTPUnsupportedMediaType,
        'RequestRangeNotSatisfiable': webob.exc.HTTPRequestRangeNotSatisfiable,
        'Locked': webob.exc.HTTPLocked,
        'FailedDependency': webob.exc.HTTPFailedDependency,
        'NotAcceptable': webob.exc.HTTPNotAcceptable,
    }

    def _map_exception_to_error(self, class_exception):
        if class_exception == exception.GlareException:
            return webob.exc.HTTPInternalServerError

        if class_exception.__name__ not in self.error_map:
            return self._map_exception_to_error(class_exception.__base__)

        return self.error_map[class_exception.__name__]

    def _error(self, ex):

        trace = None
        traceback_marker = 'Traceback (most recent call last)'
        webob_exc = None

        ex_type = reflection.get_class_name(ex, fully_qualified=False)

        full_message = six.text_type(ex)
        if traceback_marker in full_message:
            message, msg_trace = full_message.split(traceback_marker, 1)
            message = message.rstrip('\n')
            msg_trace = traceback_marker + msg_trace
        else:
            msg_trace = 'None\n'
            if sys.exc_info() != (None, None, None):
                msg_trace = traceback.format_exc()
            message = full_message

        if isinstance(ex, exception.GlareException):
            message = ex.message

        if cfg.CONF.debug and not trace:
            trace = msg_trace

        if not webob_exc:
            webob_exc = self._map_exception_to_error(ex.__class__)

        error = {
            'code': webob_exc.code,
            'title': webob_exc.title,
            'explanation': webob_exc.explanation,
            'error': {
                'message': message,
                'type': ex_type,
                'traceback': trace,
            }
        }

        # add microversion bounds if the requested version is not acceptable
        if isinstance(ex, exception.InvalidGlobalAPIVersion):
            error['min_version'] = ex.kwargs['min_ver']
            error['max_version'] = ex.kwargs['max_ver']

        return error

    def process_request(self, req):
        try:
            return req.get_response(self.application)
        except Exception as exc:
            LOG.exception(exc)
            return req.get_response(Fault(self._error(exc)))
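
The recursive lookup in _map_exception_to_error means a Glare exception subclass inherits the HTTP class of its nearest mapped ancestor. A standalone sketch with a toy exception tree (assuming webob is installed; the names here are illustrative, not Glare's real hierarchy):

import webob.exc

ERROR_MAP = {'NotFound': webob.exc.HTTPNotFound}


class GlareException(Exception):
    pass


class NotFound(GlareException):
    pass


class ArtifactNotFound(NotFound):
    # No entry of its own in ERROR_MAP.
    pass


def map_exception_to_error(cls):
    if cls is GlareException:
        # Reached the root of the tree: give up and return a 500.
        return webob.exc.HTTPInternalServerError
    if cls.__name__ not in ERROR_MAP:
        # Climb one level so subclasses inherit the parent's mapping.
        return map_exception_to_error(cls.__base__)
    return ERROR_MAP[cls.__name__]


assert map_exception_to_error(ArtifactNotFound) is webob.exc.HTTPNotFound
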
131 glare/api/middleware/glare_context.py Normal file
@@ -0,0 +1,131 @@
# Copyright 2011-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob

from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
from oslo_middleware import request_id
from oslo_serialization import jsonutils

from glare.common import policy
from glare.common import wsgi
from glare.i18n import _

context_opts = [
    cfg.BoolOpt('allow_anonymous_access', default=False,
                help=_('Allow unauthenticated users to access the API with '
                       'read-only privileges. This only applies when using '
                       'ContextMiddleware.'))
]

CONF = cfg.CONF
CONF.register_opts(context_opts)


LOG = logging.getLogger(__name__)


class RequestContext(context.RequestContext):
    """Stores information about the security context for Glare.

    Stores how the user accesses the system, as well as additional request
    information.
    """

    def __init__(self, service_catalog=None, **kwargs):
        super(RequestContext, self).__init__(**kwargs)
        self.service_catalog = service_catalog
        # check if user is admin using policy file
        if kwargs.get('is_admin') is None:
            self.is_admin = policy.check_is_admin(self)

    def to_dict(self):
        d = super(RequestContext, self).to_dict()
        d.update({
            'service_catalog': self.service_catalog,
        })
        return d

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    def to_policy_values(self):
        values = super(RequestContext, self).to_policy_values()
        values['is_admin'] = self.is_admin
        values['read_only'] = self.read_only
        return values


class ContextMiddleware(wsgi.Middleware):

    def __init__(self, app):
        super(ContextMiddleware, self).__init__(app)

    def process_request(self, req):
        """Convert authentication information into a request context.

        Generate a RequestContext object from the available
        authentication headers and store it on the 'context' attribute
        of the req object.

        :param req: wsgi request object that will be given the context object
        :raises: webob.exc.HTTPUnauthorized: when the value of the
                                             X-Identity-Status header is not
                                             'Confirmed' and anonymous access
                                             is disallowed
        """
        if req.headers.get('X-Identity-Status') == 'Confirmed':
            req.context = self._get_authenticated_context(req)
        elif CONF.allow_anonymous_access:
            req.context = self._get_anonymous_context()
        else:
            raise webob.exc.HTTPUnauthorized()

    @staticmethod
    def _get_anonymous_context():
        """An anonymous user has only read-only grants."""
        return RequestContext(read_only=True, is_admin=False)

    @staticmethod
    def _get_authenticated_context(req):
        headers = req.headers
        service_catalog = None
        if headers.get('X-Service-Catalog') is not None:
            catalog_header = headers.get('X-Service-Catalog')
            try:
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))
        kwargs = {
            'service_catalog': service_catalog,
            'request_id': req.environ.get(request_id.ENV_REQUEST_ID),
        }
        return RequestContext.from_environ(req.environ, **kwargs)


class UnauthenticatedContextMiddleware(wsgi.Middleware):
    """Process requests and responses when auth is turned off entirely."""

    def process_request(self, req):
        """Create a context without an authorized user.

        When Glare is deployed as a public repo, everybody is an admin
        without any credentials.
        """
        req.context = RequestContext(is_admin=True)
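
To illustrate ContextMiddleware's three-way branch on X-Identity-Status, here is a tiny standalone sketch (assuming webob is installed; RequestContext creation is elided and the return strings are just labels):

import webob
import webob.exc


def classify_request(req, allow_anonymous_access=False):
    # Mirrors the decision in ContextMiddleware.process_request.
    if req.headers.get('X-Identity-Status') == 'Confirmed':
        return 'authenticated context'
    elif allow_anonymous_access:
        return 'anonymous read-only context'
    else:
        raise webob.exc.HTTPUnauthorized()


req = webob.Request.blank('/artifacts/images')
req.headers['X-Identity-Status'] = 'Confirmed'
print(classify_request(req))  # authenticated context
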
134 glare/api/middleware/version_negotiation.py Normal file
@@ -0,0 +1,134 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A filter middleware that inspects the requested URI for a version string
and/or Accept headers and attempts to negotiate an API controller to
return.
"""

import microversion_parse
from oslo_config import cfg
from oslo_log import log as logging

from glare.api.v1 import api_version_request as api_version
from glare.api import versions as artifacts_versions
from glare.common import exception
from glare.common import wsgi

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


def get_version_from_accept(accept_header, vnd_mime_type):
    """Try to parse the Accept header to extract the api version.

    :param accept_header: Accept header value
    :param vnd_mime_type: vendor media-type prefix to look for
    :return: version string in the request or None if not specified
    """
    accept = str(accept_header)
    if accept.startswith(vnd_mime_type):
        LOG.debug("Using media-type versioning")
        token_loc = len(vnd_mime_type)
        return accept[token_loc:]
    else:
        return None


class GlareVersionNegotiationFilter(wsgi.Middleware):
    """Middleware that defines the API version in the request and redirects
    it to the correct Router.
    """

    SERVICE_TYPE = 'artifact'

    def __init__(self, app):
        super(GlareVersionNegotiationFilter, self).__init__(app)
        self.vnd_mime_type = 'application/vnd.openstack.artifacts-'

    def process_request(self, req):
        """Process an api request:

        1. Determine whether this is a request for the available versions.
        2. If it is not a version request, extract the requested version.
        3. Validate the version and add version info to the request.
        """
        args = {'method': req.method, 'path': req.path, 'accept': req.accept}
        LOG.debug("Determining version of request: %(method)s %(path)s "
                  "Accept: %(accept)s", args)

        # determine if this is a request for versions
        if req.path_info in ('/versions', '/'):
            is_multi = req.path_info == '/'
            return artifacts_versions.Controller.index(
                req, is_multi=is_multi)

        # determine api version from request
        req_version = get_version_from_accept(req.accept, self.vnd_mime_type)
        if req_version is None:
            # determine api version for v0.1 from url
            if req.path_info_peek() == 'v0.1':
                req_version = 'v0.1'
            else:
                # determine api version from microversion header
                LOG.debug("Determine version from microversion header.")
                req_version = microversion_parse.get_version(
                    req.headers, service_type=self.SERVICE_TYPE)

        # validate versions and add version info to request
        if req_version == 'v0.1':
            req.environ['api.version'] = 0.1
        else:
            # validate microversions header
            req.api_version_request = self._get_api_version_request(
                req_version)
            req_version = req.api_version_request.get_string()

        LOG.debug("Matched version: %s", req_version)
        LOG.debug('new path %s', req.path_info)

    @staticmethod
    def _get_api_version_request(req_version):
        """Set API version for request based on the version header string."""
        if req_version is None:
            LOG.debug("No API version in request header. Use default version.")
            cur_ver = api_version.APIVersionRequest.default_version()
        elif req_version == 'latest':
            # 'latest' is a special keyword which is equivalent to
            # requesting the maximum version of the API supported
            cur_ver = api_version.APIVersionRequest.max_version()
        else:
            cur_ver = api_version.APIVersionRequest(req_version)

        # Check that the version requested is within the global
        # minimum/maximum of supported API versions
        if not cur_ver.matches(cur_ver.min_version(), cur_ver.max_version()):
            raise exception.InvalidGlobalAPIVersion(
                req_ver=cur_ver.get_string(),
                min_ver=cur_ver.min_version().get_string(),
                max_ver=cur_ver.max_version().get_string())
        return cur_ver

    def process_response(self, response):
        if hasattr(response, 'headers'):
            request = response.request
            if hasattr(request, 'api_version_request'):
                api_header_name = microversion_parse.STANDARD_HEADER
                response.headers[api_header_name] = (
                    self.SERVICE_TYPE + ' ' +
                    request.api_version_request.get_string())
                response.headers.add('Vary', api_header_name)

        return response
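
get_version_from_accept is a plain prefix check; extracted on its own it behaves like this (pure Python, runnable as-is):

# Standalone version of get_version_from_accept's prefix check.
VND = 'application/vnd.openstack.artifacts-'


def version_from_accept(accept):
    accept = str(accept)
    if accept.startswith(VND):
        # Everything after the vendor prefix is the version token.
        return accept[len(VND):]
    return None


assert version_from_accept('application/vnd.openstack.artifacts-1.0') == '1.0'
assert version_from_accept('application/json') is None
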
0 glare/api/v1/__init__.py Normal file
123 glare/api/v1/api_version_request.py Normal file
@@ -0,0 +1,123 @@
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from glare.common import exception
from glare.i18n import _


REST_API_VERSION_HISTORY = """REST API Version History:

* 1.0 - First stable API version that supports microversions. If the API
        version is not specified in a request, then API v1.0 is used as the
        default API version.
"""


class APIVersionRequest(object):
    """This class represents an API Version Request with convenience
    methods for the manipulation and comparison of version
    numbers that we need to do to implement microversions.
    """

    _MIN_API_VERSION = "1.0"
    _MAX_API_VERSION = "1.0"
    _DEFAULT_API_VERSION = "1.0"

    def __init__(self, version_string):
        """Create an API version request object.

        :param version_string: String representation of APIVersionRequest.
            Correct format is 'X.Y', where 'X' and 'Y' are int values.
        """
        match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string)
        if match:
            self.ver_major = int(match.group(1))
            self.ver_minor = int(match.group(2))
        else:
            msg = _("API version string %s is not valid. "
                    "Cannot determine API version.") % version_string
            raise exception.BadRequest(msg)

    def __str__(self):
        """Debug/Logging representation of object."""
        return ("API Version Request Major: %s, Minor: %s"
                % (self.ver_major, self.ver_minor))

    def _format_type_error(self, other):
        return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") %
                         {"other": other, "cls": self.__class__})

    def __lt__(self, other):
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)

        return ((self.ver_major, self.ver_minor) <
                (other.ver_major, other.ver_minor))

    def __eq__(self, other):
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)

        return ((self.ver_major, self.ver_minor) ==
                (other.ver_major, other.ver_minor))

    def __gt__(self, other):
        if not isinstance(other, APIVersionRequest):
            raise self._format_type_error(other)

        return ((self.ver_major, self.ver_minor) >
                (other.ver_major, other.ver_minor))

    def __le__(self, other):
        return self < other or self == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __ge__(self, other):
        return self > other or self == other

    def matches(self, min_version, max_version):
        """Returns whether the version object represents a version
        greater than or equal to the minimum version and less than
        or equal to the maximum version.

        :param min_version: Minimum acceptable version.
        :param max_version: Maximum acceptable version.
        :returns: boolean
        """
        return min_version <= self <= max_version

    def get_string(self):
        """Converts the object to a string representation which, if used to
        create a new APIVersionRequest object, results in the same version
        request.
        """
        return "%s.%s" % (self.ver_major, self.ver_minor)

    @classmethod
    def min_version(cls):
        """Minimal allowed api version"""
        return APIVersionRequest(cls._MIN_API_VERSION)

    @classmethod
    def max_version(cls):
        """Maximal allowed api version"""
        return APIVersionRequest(cls._MAX_API_VERSION)

    @classmethod
    def default_version(cls):
        """Default api version if no version in request"""
        return APIVersionRequest(cls._DEFAULT_API_VERSION)
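
A note on the validation regex above: the major version must start at 1, so the legacy '0.1' string never parses — which is exactly why the version negotiation middleware special-cases the 'v0.1' URL prefix instead of feeding it to this class. A quick demonstration (pure Python, runnable as-is):

import re

# Same pattern as APIVersionRequest.__init__.
pattern = re.compile(r"^([1-9]\d*)\.([1-9]\d*|0)$")

for candidate in ("1.0", "2.15", "0.1", "1", "1.01"):
    verdict = "valid" if pattern.match(candidate) else "rejected"
    print("%-5s -> %s" % (candidate, verdict))
# 1.0 and 2.15 are valid; 0.1, 1, and 1.01 are rejected.
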
169 glare/api/v1/api_versioning.py Normal file
@@ -0,0 +1,169 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from glare.api.v1 import api_version_request as api_version
from glare.common import exception as exc
from glare.i18n import _


class VersionedMethod(object):

    def __init__(self, name, start_version, end_version, func):
        """Versioning information for a single method.

        :param name: Name of the method
        :param start_version: Minimum acceptable version
        :param end_version: Maximum acceptable version
        :param func: Method to call

        Minimums and maximums are inclusive.
        """
        self.name = name
        self.start_version = start_version
        self.end_version = end_version
        self.func = func

    def __str__(self):
        return ("Version Method %s: min: %s, max: %s"
                % (self.name, self.start_version, self.end_version))


class VersionedResource(object):
    """Versioned mixin that provides the ability to define versioned methods
    and return the appropriate method based on the user request.
    """

    # prefix for all versioned methods in class
    VER_METHODS_ATTR_PREFIX = 'versioned_methods_'

    @staticmethod
    def check_for_versions_intersection(func_list):
        """Determines whether the function list contains version interval
        intersections or not. General algorithm:
        https://en.wikipedia.org/wiki/Intersection_algorithm

        :param func_list: list of VersionedMethod objects
        :return: boolean
        """
        pairs = []
        counter = 0
        for f in func_list:
            pairs.append((f.start_version, 1, f))
            pairs.append((f.end_version, -1, f))

        def compare(x):
            return x[0]

        pairs.sort(key=compare)
        for p in pairs:
            counter += p[1]
            if counter > 1:
                return True
        return False

    @classmethod
    def supported_versions(cls, min_ver, max_ver=None):
        """Decorator for versioning api methods.

        Add the decorator to any method which takes a request object
        as the first parameter and belongs to a class which inherits from
        wsgi.Controller. The implementation is inspired by Nova.

        :param min_ver: string representing minimum version
        :param max_ver: optional string representing maximum version
        """

        def decorator(f):
            obj_min_ver = api_version.APIVersionRequest(min_ver)
            if max_ver:
                obj_max_ver = api_version.APIVersionRequest(max_ver)
            else:
                obj_max_ver = api_version.APIVersionRequest.max_version()

            # Add to the list of registered versioned methods
            func_name = f.__name__
            new_func = VersionedMethod(func_name, obj_min_ver, obj_max_ver, f)

            versioned_attr = cls.VER_METHODS_ATTR_PREFIX + cls.__name__
            func_dict = getattr(cls, versioned_attr, {})
            if not func_dict:
                setattr(cls, versioned_attr, func_dict)

            func_list = func_dict.get(func_name, [])
            if not func_list:
                func_dict[func_name] = func_list
            func_list.append(new_func)

            # Ensure the list is sorted by minimum version (reversed)
            # so later when we work through the list in order we find
            # the method which has the latest version which supports
            # the version requested.
            is_intersect = cls.check_for_versions_intersection(
                func_list)

            if is_intersect:
                raise exc.ApiVersionsIntersect(
                    name=new_func.name,
                    min_ver=new_func.start_version,
                    max_ver=new_func.end_version,
                )

            func_list.sort(key=lambda vf: vf.start_version, reverse=True)

            return f

        return decorator

    def __getattribute__(self, key):
        def version_select(*args, **kwargs):
            """Look for the method which matches the name supplied and version
            constraints and call it with the supplied arguments.

            :returns: the result of the method called
            :raises: VersionNotFoundForAPIMethod if there is no method which
                 matches the name and version constraints
            """
            # versioning is used in 3 classes: request deserializer and
            # controller have request as first argument
            # response serializer has response as first argument
            # we must respect all three cases
            if hasattr(args[0], 'api_version_request'):
                ver = args[0].api_version_request
            elif hasattr(args[0], 'request'):
                ver = args[0].request.api_version_request
            else:
                raise exc.VersionNotFoundForAPIMethod(
                    message=_("Api version not found in the request."))

            func_list = self.versioned_methods[key]
            for func in func_list:
                if ver.matches(func.start_version, func.end_version):
                    # Update the version_select wrapper function so
                    # other decorator attributes like wsgi.response
                    # are still respected.
                    functools.update_wrapper(version_select, func.func)
                    return func.func(self, *args, **kwargs)

            # No version match
            raise exc.VersionNotFoundForAPIMethod(version=ver)

        class_obj = object.__getattribute__(self, '__class__')
        prefix = object.__getattribute__(self, 'VER_METHODS_ATTR_PREFIX')
        attr_name = prefix + object.__getattribute__(class_obj, '__name__')
        try:
            if key in object.__getattribute__(self, attr_name):
                return version_select
        except AttributeError:
            # No versioning on this class
            pass

        return object.__getattribute__(self, key)
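
The sweep in check_for_versions_intersection can be demonstrated standalone; the sketch below uses plain floats where the real code uses APIVersionRequest objects (an assumption for brevity — any ordered type works):

import collections

Method = collections.namedtuple('Method', 'start_version end_version')


def has_intersection(funcs):
    # +1 at every interval start, -1 at every end; a running count
    # above 1 means two version ranges overlap.
    events = []
    for f in funcs:
        events.append((f.start_version, 1))
        events.append((f.end_version, -1))
    events.sort(key=lambda e: e[0])
    counter = 0
    for _version, delta in events:
        counter += delta
        if counter > 1:
            return True
    return False


print(has_intersection([Method(1.0, 1.5), Method(2.0, 2.5)]))  # False
print(has_intersection([Method(1.0, 2.0), Method(1.5, 2.5)]))  # True
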
484 glare/api/v1/resource.py Normal file
@ -0,0 +1,484 @@
|
||||
# Copyright (c) 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""WSGI Resource definition for Glare. Defines Glare API and serialization/
|
||||
deserialization of incoming requests."""
|
||||
|
||||
import json
|
||||
import jsonpatch
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from six.moves import http_client
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from glare.api.v1 import api_versioning
|
||||
from glare.common import exception as exc
|
||||
from glare.common import wsgi
|
||||
from glare import engine
|
||||
from glare.i18n import _, _LI
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
list_configs = [
|
||||
cfg.IntOpt('default_api_limit', default=25,
|
||||
help=_('Default value for the number of items returned by a '
|
||||
'request if not specified explicitly in the request')),
|
||||
cfg.IntOpt('max_api_limit', default=1000,
|
||||
help=_('Maximum permissible number of items that could be '
|
||||
'returned by a request')),
|
||||
]
|
||||
|
||||
CONF.register_opts(list_configs)
|
||||
|
||||
supported_versions = api_versioning.VersionedResource.supported_versions
|
||||
|
||||
|
||||
class RequestDeserializer(api_versioning.VersionedResource,
|
||||
wsgi.JSONRequestDeserializer):
|
||||
"""Glare deserializer for incoming webop Requests.
|
||||
Deserializer converts incoming request into bunch of python primitives.
|
||||
So other components doesn't work with requests at all. Deserializer also
|
||||
executes primary API validation without any knowledge about Artifact
|
||||
structure.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _get_content_type(req, expected=None):
|
||||
"""Determine content type of the request body."""
|
||||
if "Content-Type" not in req.headers:
|
||||
msg = _("Content-Type must be specified.")
|
||||
LOG.error(msg)
|
||||
raise exc.BadRequest(msg)
|
||||
|
||||
content_type = req.content_type
|
||||
if expected is not None and content_type not in expected:
|
||||
msg = (_('Invalid content type: %(ct)s. Expected: %(exp)s') %
|
||||
{'ct': content_type, 'exp': ', '.join(expected)})
|
||||
raise exc.UnsupportedMediaType(message=msg)
|
||||
|
||||
return content_type
|
||||
|
||||
def _get_request_body(self, req):
|
||||
return self.from_json(req.body)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
def create(self, req):
|
||||
self._get_content_type(req, expected=['application/json'])
|
||||
body = self._get_request_body(req)
|
||||
if not isinstance(body, dict):
|
||||
msg = _("Dictionary expected as body value. Got %s.") % type(body)
|
||||
raise exc.BadRequest(msg)
|
||||
return {'values': body}
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
def list(self, req):
|
||||
params = req.params.copy()
|
||||
marker = params.pop('marker', None)
|
||||
query_params = {}
|
||||
# step 1 - apply marker to query if exists
|
||||
if marker is not None:
|
||||
query_params['marker'] = marker
|
||||
|
||||
# step 2 - apply limit (if exists OR setup default limit)
|
||||
limit = params.pop('limit', CONF.default_api_limit)
|
||||
try:
|
||||
limit = int(limit)
|
||||
except ValueError:
|
||||
msg = _("Limit param must be an integer.")
|
||||
raise exc.BadRequest(message=msg)
|
||||
if limit < 0:
|
||||
msg = _("Limit param must be positive.")
|
||||
raise exc.BadRequest(message=msg)
|
||||
query_params['limit'] = min(CONF.max_api_limit, limit)
|
||||
|
||||
# step 3 - parse sort parameters
|
||||
if 'sort' in params:
|
||||
sort = []
|
||||
for sort_param in params.pop('sort').strip().split(','):
|
||||
key, _sep, direction = sort_param.partition(':')
|
||||
if direction and direction not in ('asc', 'desc'):
|
||||
raise exc.BadRequest('Sort direction must be one of '
|
||||
'["asc", "desc"]. Got %s direction'
|
||||
% direction)
|
||||
sort.append((key, direction or 'desc'))
|
||||
query_params['sort'] = sort
|
||||
|
||||
query_params['filters'] = params
|
||||
return query_params
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
def update(self, req):
|
||||
self._get_content_type(
|
||||
req, expected=['application/json-patch+json'])
|
||||
body = self._get_request_body(req)
|
||||
patch = jsonpatch.JsonPatch(body)
|
||||
try:
|
||||
# Initially patch object doesn't validate input. It's only checked
|
||||
# we call get operation on each method
|
||||
map(patch._get_operation, patch.patch)
|
||||
except (jsonpatch.InvalidJsonPatch, TypeError):
|
||||
msg = _("Json Patch body is malformed")
|
||||
raise exc.BadRequest(msg)
|
||||
for patch_item in body:
|
||||
if patch_item['path'] == '/tags':
|
||||
msg = _("Cannot modify artifact tags with PATCH "
|
||||
"request. Use special Tag API for that.")
|
||||
raise exc.BadRequest(msg)
|
||||
return {'patch': patch}
|
||||
|
||||
def _deserialize_blob(self, req):
|
||||
content_type = self._get_content_type(req)
|
||||
if content_type == ('application/vnd+openstack.glare-custom-location'
|
||||
'+json'):
|
||||
data = self._get_request_body(req)['url']
|
||||
else:
|
||||
data = req.body_file
|
||||
return {'data': data, 'content_type': content_type}
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
def upload_blob(self, req):
|
||||
return self._deserialize_blob(req)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
def upload_blob_dict(self, req):
|
||||
return self._deserialize_blob(req)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
def set_tags(self, req):
|
||||
self._get_content_type(req, expected=['application/json'])
|
||||
body = self._get_request_body(req)
|
||||
|
||||
if 'tags' not in body:
|
||||
msg = _("Tag list must be in the body of request.")
|
||||
raise exc.BadRequest(msg)
|
||||
|
||||
return {'tag_list': body['tags']}
|
||||
|
||||
|
||||
def log_request_progress(f):
|
||||
def log_decorator(self, req, *args, **kwargs):
|
||||
LOG.debug("Request %(request_id)s for %(api_method)s successfully "
|
||||
"deserialized. Pass request parameters to Engine",
|
||||
{'request_id': req.context.request_id,
|
||||
'api_method': f.__name__})
|
||||
result = f(self, req, *args, **kwargs)
|
||||
LOG.info(_LI(
|
||||
"Request %(request_id)s for artifact %(api_method)s "
|
||||
"successfully executed."), {'request_id': req.context.request_id,
|
||||
'api_method': f.__name__})
|
||||
return result
|
||||
return log_decorator
|
||||
|
||||
|
||||
class ArtifactsController(api_versioning.VersionedResource):
|
||||
"""API controller for Glare Artifacts.
|
||||
Artifact Controller prepares incoming data for Glare Engine and redirects
|
||||
data to appropriate engine method (so only controller is working with
|
||||
Engine. Once the data returned from Engine Controller returns data
|
||||
in appropriate format for Response Serializer.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.engine = engine.Engine()
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def list_type_schemas(self, req):
|
||||
type_schemas = self.engine.list_type_schemas(req.context)
|
||||
return type_schemas
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def show_type_schema(self, req, type_name):
|
||||
type_schema = self.engine.show_type_schema(req.context, type_name)
|
||||
return {type_name: type_schema}
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def create(self, req, type_name, values):
|
||||
"""Create artifact record in Glare.
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param values: dict with artifact fields {field_name: field_value}
|
||||
:return definition of created artifact
|
||||
"""
|
||||
return self.engine.create(req.context, type_name, values)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def update(self, req, type_name, artifact_id, patch):
|
||||
"""Update artifact record in Glare.
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param artifact_id: id of artifact to update
|
||||
:param patch: json patch with artifact changes
|
||||
:return definition of updated artifact
|
||||
"""
|
||||
return self.engine.update(req.context, type_name, artifact_id, patch)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def delete(self, req, type_name, artifact_id):
|
||||
"""Delete artifact from Glare
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param artifact_id: id of artifact to delete
|
||||
"""
|
||||
return self.engine.delete(req.context, type_name, artifact_id)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def show(self, req, type_name, artifact_id):
|
||||
"""Show detailed artifact info
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param artifact_id: id of artifact to show
|
||||
:return: definition of requested artifact
|
||||
"""
|
||||
return self.engine.get(req.context, type_name, artifact_id)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def list(self, req, type_name, filters, marker=None, limit=None,
|
||||
sort=None):
|
||||
"""List available artifacts
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param filters: filters that need to be applied to artifact
|
||||
:param marker: the artifact that considered as begin of the list
|
||||
so all artifacts before marker (including marker itself) will not be
|
||||
added to artifact list
|
||||
:param limit: maximum number of items in list
|
||||
:param sort: sorting options
|
||||
:return: list of artifacts
|
||||
"""
|
||||
artifacts = self.engine.list(req.context, type_name, filters, marker,
|
||||
limit, sort)
|
||||
result = {'artifacts': artifacts,
|
||||
'type_name': type_name}
|
||||
if len(artifacts) != 0 and len(artifacts) == limit:
|
||||
result['next_marker'] = artifacts[-1]['id']
|
||||
return result
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def upload_blob(self, req, type_name, artifact_id, field_name, data,
|
||||
content_type):
|
||||
"""Upload blob into Glare repo
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param artifact_id: id of Artifact to reactivate
|
||||
:param field_name: name of blob field in artifact
|
||||
:param data: Artifact payload
|
||||
:param content_type: data content-type
|
||||
"""
|
||||
if content_type == ('application/vnd+openstack.glare-custom-location'
|
||||
'+json'):
|
||||
return self.engine.add_blob_location(
|
||||
req.context, type_name, artifact_id, field_name, data)
|
||||
else:
|
||||
return self.engine.upload_blob(req.context, type_name, artifact_id,
|
||||
field_name, data, content_type)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def upload_blob_dict(self, req, type_name, artifact_id, field_name, data,
|
||||
blob_key, content_type):
|
||||
"""Upload blob into Glare repo
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
:param artifact_id: id of Artifact to reactivate
|
||||
:param field_name: name of blob field in artifact
|
||||
:param data: Artifact payload
|
||||
:param content_type: data content-type
|
||||
:param blob_key: blob key in dict
|
||||
"""
|
||||
if content_type == ('application/vnd+openstack.glare-custom-location'
|
||||
'+json'):
|
||||
return self.engine.add_blob_dict_location(
|
||||
req.context, type_name, artifact_id,
|
||||
field_name, blob_key, str(data))
|
||||
else:
|
||||
return self.engine.upload_blob_dict(
|
||||
req.context, type_name, artifact_id,
|
||||
field_name, blob_key, data, content_type)
|
||||
|
||||
@supported_versions(min_ver='1.0')
|
||||
@log_request_progress
|
||||
def download_blob(self, req, type_name, artifact_id, field_name):
|
||||
"""Download blob data from Artifact
|
||||
|
||||
:param req: User request
|
||||
:param type_name: Artifact type name
|
||||
        :param artifact_id: id of Artifact to download blob from
        :param field_name: name of blob field in artifact
        :return: iterator that returns blob data
        """
        data, meta = self.engine.download_blob(req.context, type_name,
                                               artifact_id, field_name)
        result = {'data': data, 'meta': meta}
        return result

    @supported_versions(min_ver='1.0')
    @log_request_progress
    def download_blob_dict(self, req, type_name, artifact_id,
                           field_name, blob_key):
        """Download blob data from Artifact

        :param req: User request
        :param type_name: Artifact type name
        :param artifact_id: id of Artifact to download blob from
        :param field_name: name of blob dict field in artifact
        :param blob_key: key of the blob in the blob dictionary
        :return: iterator that returns blob data
        """
        data, meta = self.engine.download_blob_dict(
            req.context, type_name, artifact_id, field_name, blob_key)
        result = {'data': data, 'meta': meta}
        return result

    @staticmethod
    def _tag_body_resp(af):
        return {'tags': af['tags']}

    @supported_versions(min_ver='1.0')
    @log_request_progress
    def get_tags(self, req, type_name, artifact_id):
        return self._tag_body_resp(self.engine.get(
            req.context, type_name, artifact_id))

    @supported_versions(min_ver='1.0')
    @log_request_progress
    def set_tags(self, req, type_name, artifact_id, tag_list):
        patch = [{'op': 'replace', 'path': '/tags', 'value': tag_list}]
        patch = jsonpatch.JsonPatch(patch)
        return self._tag_body_resp(self.engine.update(
            req.context, type_name, artifact_id, patch))

    @supported_versions(min_ver='1.0')
    @log_request_progress
    def delete_tags(self, req, type_name, artifact_id):
        patch = [{'op': 'replace', 'path': '/tags', 'value': []}]
        patch = jsonpatch.JsonPatch(patch)
        self.engine.update(req.context, type_name, artifact_id, patch)


class ResponseSerializer(api_versioning.VersionedResource,
                         wsgi.JSONResponseSerializer):
    """Glare Response Serializer converts data received from Glare Engine
    (it consists of plain data types - dict, int, string, file descriptors,
    etc.) to WSGI responses. It also sets the proper response status and
    content type as specified by the API design.
    """

    @staticmethod
    def _prepare_json_response(response, result,
                               content_type='application/json'):
        body = json.dumps(result, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = content_type

    def list_type_schemas(self, response, type_schemas):
        self._prepare_json_response(response,
                                    {'schemas': type_schemas},
                                    content_type='application/schema+json')

    def show_type_schema(self, response, type_schema):
        self._prepare_json_response(response,
                                    {'schemas': type_schema},
                                    content_type='application/schema+json')

    @supported_versions(min_ver='1.0')
    def list_schemas(self, response, type_list):
        self._prepare_json_response(response, {'types': type_list})

    @supported_versions(min_ver='1.0')
    def create(self, response, artifact):
        self._prepare_json_response(response, artifact)
        response.status_int = http_client.CREATED

    @supported_versions(min_ver='1.0')
    def show(self, response, artifact):
        self._prepare_json_response(response, artifact)

    @supported_versions(min_ver='1.0')
    def update(self, response, artifact):
        self._prepare_json_response(response, artifact)

    @supported_versions(min_ver='1.0')
    def list(self, response, af_list):
        params = dict(response.request.params)
        params.pop('marker', None)
        query = urlparse.urlencode(params)
        type_name = af_list['type_name']
        body = {
            type_name: af_list['artifacts'],
            'first': '/artifacts/%s' % type_name,
            'schema': '/schemas/%s' % type_name,
        }
        if query:
            body['first'] = '%s?%s' % (body['first'], query)
        if 'next_marker' in af_list:
            params['marker'] = af_list['next_marker']
            next_query = urlparse.urlencode(params)
            body['next'] = '/artifacts/%s?%s' % (type_name, next_query)
        response.unicode_body = six.text_type(json.dumps(body,
                                                         ensure_ascii=False))
        response.content_type = 'application/json'

    @supported_versions(min_ver='1.0')
    def delete(self, response, result):
        response.status_int = http_client.NO_CONTENT

    @supported_versions(min_ver='1.0')
    def upload_blob(self, response, artifact):
        self._prepare_json_response(response, artifact)

    @staticmethod
    def _serialize_blob(response, result):
        data, meta = result['data'], result['meta']
        response.headers['Content-Type'] = meta['content_type']
        response.headers['Content-MD5'] = meta['checksum']
        response.headers['Content-Length'] = str(meta['size'])
        response.app_iter = iter(data)

    @supported_versions(min_ver='1.0')
    def download_blob(self, response, result):
        self._serialize_blob(response, result)

    @supported_versions(min_ver='1.0')
    def download_blob_dict(self, response, result):
        self._serialize_blob(response, result)

    @supported_versions(min_ver='1.0')
    def delete_tags(self, response, result):
        response.status_int = http_client.NO_CONTENT


def create_resource():
    """Artifact resource factory method"""
    deserializer = RequestDeserializer()
    serializer = ResponseSerializer()
    controller = ArtifactsController()
    return wsgi.Resource(controller, deserializer, serializer)
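For clarity, here is a minimal sketch of what _serialize_blob produces for a downloaded blob. It is illustrative only and not part of the commit; the result dict values are hypothetical placeholders shaped like the controller's download_blob output.

import webob

from glare.api.v1.resource import ResponseSerializer

result = {'data': iter([b'payload']),
          'meta': {'content_type': 'application/octet-stream',
                   'checksum': 'md5-of-payload',  # hypothetical value
                   'size': 7}}
response = webob.Response()
ResponseSerializer._serialize_blob(response, result)
assert response.headers['Content-Length'] == '7'
assert response.headers['Content-Type'] == 'application/octet-stream'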
138
glare/api/v1/router.py
Normal file
@ -0,0 +1,138 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from glare.api.v1 import resource
from glare.common import wsgi


class API(wsgi.Router):
    """WSGI router for Glare v1 API requests.

    API Router redirects incoming requests to the appropriate WSGI resource
    method.
    """

    def __init__(self, mapper):
        glare_resource = resource.create_resource()
        reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())

        # ---schemas---
        mapper.connect('/schemas',
                       controller=glare_resource,
                       action='list_type_schemas',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')

        mapper.connect('/schemas/{type_name}',
                       controller=glare_resource,
                       action='show_type_schema',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/{type_name}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')

        # ---artifacts---
        mapper.connect('/artifacts/{type_name}',
                       controller=glare_resource,
                       action='list',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/artifacts/{type_name}',
                       controller=glare_resource,
                       action='create',
                       conditions={'method': ['POST']})
        mapper.connect('/artifacts/{type_name}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, POST')

        mapper.connect('/artifacts/{type_name}/{artifact_id}',
                       controller=glare_resource,
                       action='update',
                       conditions={'method': ['PATCH']})
        mapper.connect('/artifacts/{type_name}/{artifact_id}',
                       controller=glare_resource,
                       action='show',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/artifacts/{type_name}/{artifact_id}',
                       controller=glare_resource,
                       action='delete',
                       conditions={'method': ['DELETE']},
                       body_reject=True)
        mapper.connect('/artifacts/{type_name}/{artifact_id}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PATCH, DELETE')

        # ---tags---
        mapper.connect('/artifacts/{type_name}/{artifact_id}/tags',
                       controller=glare_resource,
                       action='set_tags',
                       conditions={'method': ['PUT']})
        mapper.connect('/artifacts/{type_name}/{artifact_id}/tags',
                       controller=glare_resource,
                       action='get_tags',
                       conditions={'method': ['GET']})
        mapper.connect('/artifacts/{type_name}/{artifact_id}/tags',
                       controller=glare_resource,
                       action='delete_tags',
                       conditions={'method': ['DELETE']},
                       body_reject=True)
        mapper.connect('/artifacts/{type_name}/{artifact_id}/tags',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PUT, DELETE')

        # ---blobs---
        mapper.connect('/artifacts/{type_name}/{artifact_id}/{field_name}',
                       controller=glare_resource,
                       action='download_blob',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/artifacts/{type_name}/{artifact_id}/{field_name}',
                       controller=glare_resource,
                       action='upload_blob',
                       conditions={'method': ['PUT']})
        mapper.connect('/artifacts/{type_name}/{artifact_id}/{field_name}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PUT')

        # ---blob dicts---

        mapper.connect('/artifacts/{type_name}/{artifact_id}/{field_name}/'
                       '{blob_key}',
                       controller=glare_resource,
                       action='download_blob_dict',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/artifacts/{type_name}/{artifact_id}/{field_name}/'
                       '{blob_key}',
                       controller=glare_resource,
                       action='upload_blob_dict',
                       conditions={'method': ['PUT']})
        mapper.connect('/artifacts/{type_name}/{artifact_id}/{field_name}/'
                       '{blob_key}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PUT')

        super(API, self).__init__(mapper)
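A minimal sketch of how this router is typically instantiated and what the route table buys you (the construction shown here assumes the Routes library and is illustrative, not part of the commit):

import routes

from glare.api.v1 import router

# The paste pipeline ultimately builds the WSGI app like this; a request
# such as GET /artifacts/images/<id>/tags is dispatched to
# ArtifactsController.get_tags, while an unsupported method on the same
# path hits RejectMethodController and returns 405 with the
# allowed_methods listed above.
api = router.API(routes.Mapper())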
96
glare/api/versions.py
Normal file
@ -0,0 +1,96 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
import webob.dec

from glare.api.v1 import api_version_request
from glare.i18n import _


versions_opts = [
    cfg.StrOpt('public_endpoint',
               help=_("""
Public url endpoint to use for Glance/Glare versions response.

This is the public url endpoint that will appear in the Glance/Glare
"versions" response. If no value is specified, the endpoint that is
displayed in the version's response is that of the host running the
API service. Change the endpoint to represent the proxy URL if the
API service is running behind a proxy. If the service is running
behind a load balancer, add the load balancer's URL for this value.

Services which consume this:
    * glare

Possible values:
    * None
    * Proxy URL
    * Load balancer URL

Related options:
    * None

""")),
]


CONF = cfg.CONF
CONF.register_opts(versions_opts)


class Controller(object):

    """A controller that reports which API versions are supported."""

    @staticmethod
    def index(req, is_multi):
        """Respond to a request for all OpenStack API versions.

        :param is_multi: defines whether the response status should be
            300 Multiple Choices instead of 200 OK
        :param req: user request object
        :return: response object with the list of supported API versions
        """
        def build_version_object(max_version, min_version, status, path=None):
            url = CONF.public_endpoint or req.host_url
            return {
                'id': 'v%s' % max_version,
                'links': [
                    {
                        'rel': 'self',
                        'href': '%s/%s/' % (url, path) if path else
                                '%s/' % url,
                    },
                ],
                'status': status,
                'min_version': min_version,
                'version': max_version
            }

        microv_max = api_version_request.APIVersionRequest.max_version()
        microv_min = api_version_request.APIVersionRequest.min_version()
        version_objs = [build_version_object(0.1, 0.1, 'EXPERIMENTAL', 'v0.1'),
                        build_version_object(microv_max.get_string(),
                                             microv_min.get_string(),
                                             'EXPERIMENTAL')]
        return_status = (http_client.MULTIPLE_CHOICES if is_multi else
                         http_client.OK)
        response = webob.Response(request=req,
                                  status=return_status,
                                  content_type='application/json')
        response.body = jsonutils.dump_as_bytes(dict(versions=version_objs))
        return response
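For illustration, the body built above looks roughly like this, assuming a microversion range of 1.0 to 1.0 and a hypothetical public endpoint:

{"versions": [
    {"id": "v0.1", "status": "EXPERIMENTAL",
     "links": [{"rel": "self", "href": "http://glare.example.com/v0.1/"}],
     "min_version": 0.1, "version": 0.1},
    {"id": "v1.0", "status": "EXPERIMENTAL",
     "links": [{"rel": "self", "href": "http://glare.example.com/"}],
     "min_version": "1.0", "version": "1.0"}
]}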
53
glare/cmd/__init__.py
Normal file
@ -0,0 +1,53 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys

import oslo_utils.strutils as strutils

from glare import i18n

try:
    import dns  # noqa
except ImportError:
    dnspython_installed = False
else:
    dnspython_installed = True


def fix_greendns_ipv6():
    if dnspython_installed:
        # All of this is because if dnspython is present in your environment
        # then eventlet monkeypatches socket.getaddrinfo() with an
        # implementation which doesn't work for IPv6. What we're checking
        # here is that the magic environment variable was set when the
        # import happened.
        nogreendns = 'EVENTLET_NO_GREENDNS'
        flag = os.environ.get(nogreendns, '')
        if 'eventlet' in sys.modules and not strutils.bool_from_string(flag):
            msg = i18n._("It appears that the eventlet module has been "
                         "imported prior to setting %s='yes'. It is currently "
                         "necessary to disable eventlet.greendns "
                         "if using ipv6 since eventlet.greendns currently "
                         "breaks with ipv6 addresses. Please ensure that "
                         "eventlet is not imported prior to this being set.")
            raise ImportError(msg % nogreendns)

        os.environ[nogreendns] = 'yes'


i18n.enable_lazy()
fix_greendns_ipv6()
84
glare/cmd/api.py
Executable file
@ -0,0 +1,84 @@
#!/usr/bin/env python

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
Glare (Glance Artifact Repository) API service
"""

import sys

import eventlet
from oslo_utils import encodeutils

eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
                              select=True, thread=True, os=True)

import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
import osprofiler.notifier
import osprofiler.web

from glare.common import config
from glare.common import exception
from glare.common import wsgi
from glare import notification


CONF = cfg.CONF
CONF.import_group("profiler", "glare.common.wsgi")
logging.register_options(CONF)

KNOWN_EXCEPTIONS = (RuntimeError,
                    exception.WorkerCreationFailure,
                    glance_store.exceptions.BadStoreConfiguration)


def fail(e):
    global KNOWN_EXCEPTIONS
    return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1
    sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e))
    sys.exit(return_code)


def main():
    try:
        config.parse_args()
        wsgi.set_eventlet_hub()
        logging.setup(CONF, 'glare')

        if cfg.CONF.profiler.enabled:
            _notifier = osprofiler.notifier.create(
                "Messaging", oslo_messaging, {}, notification.get_transport(),
                "glare", "artifacts", cfg.CONF.bind_host)
            osprofiler.notifier.set(_notifier)
        else:
            osprofiler.web.disable()

        server = wsgi.Server(initialize_glance_store=True)
        server.start(config.load_paste_app('glare-api'), default_port=9494)
        server.wait()
    except KNOWN_EXCEPTIONS as e:
        fail(e)


if __name__ == '__main__':
    main()
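main() loads a paste pipeline named glare-api from a *-paste.ini file found next to the service config (see glare/common/config.py below). A minimal sketch of such a file follows; the pipeline and app section layout mirrors other OpenStack services, but the exact filter names are placeholders, not part of this commit:

[pipeline:glare-api]
pipeline = versionnegotiation context glarev1api

[app:glarev1api]
paste.app_factory = glare.api.v1.router:API.factory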
0
glare/common/__init__.py
Normal file
168
glare/common/config.py
Normal file
@ -0,0 +1,168 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Routines for configuring Glare
"""

import logging
import logging.config
import logging.handlers
import os

from oslo_config import cfg
from oslo_middleware import cors
from oslo_policy import policy
from paste import deploy

from glare.i18n import _

paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste '
                      'configuration file with the service name removed. '
                      'For example, if your paste section name is '
                      '[pipeline:glare-keystone] use the value '
                      '"keystone"')),
    cfg.StrOpt('config_file',
               help=_('Name of the paste configuration file.')),
]

CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
policy.Enforcer(CONF)


def parse_args(args=None, usage=None, default_config_files=None):
    CONF(args=args,
         project='glare',
         usage=usage,
         default_config_files=default_config_files)


def _get_deployment_flavor(flavor=None):
    """
    Retrieve the paste_deploy.flavor config item, formatted appropriately
    for appending to the application name.

    :param flavor: if specified, use this setting rather than the
                   paste_deploy.flavor configuration setting
    """
    if not flavor:
        flavor = CONF.paste_deploy.flavor
    return '' if not flavor else ('-' + flavor)


def _get_paste_config_path():
    paste_suffix = '-paste.ini'
    conf_suffix = '.conf'
    if CONF.config_file:
        # Assume paste config is in a paste.ini file corresponding
        # to the last config file
        path = CONF.config_file[-1].replace(conf_suffix, paste_suffix)
    else:
        path = CONF.prog + paste_suffix
    return CONF.find_file(os.path.basename(path))


def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname.
    """
    path = CONF.paste_deploy.config_file
    if not path:
        path = _get_paste_config_path()
    if not path:
        msg = _("Unable to locate paste config file for %s.") % CONF.prog
        raise RuntimeError(msg)
    return os.path.abspath(path)


def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file

    :raises: RuntimeError when config file cannot be located or application
             cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    logger = logging.getLogger(__name__)
    try:
        logger.debug("Loading %(app_name)s from %(conf_file)s",
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)


def set_config_defaults():
    """This method updates all configuration default values."""
    set_cors_middleware_defaults()


def set_cors_middleware_defaults():
    """Update default configuration options for oslo.middleware."""
    # CORS Defaults
    # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
    cfg.set_defaults(cors.CORS_OPTS,
                     allow_headers=['Content-MD5',
                                    'X-Image-Meta-Checksum',
                                    'X-Storage-Token',
                                    'Accept-Encoding',
                                    'X-Auth-Token',
                                    'X-Identity-Status',
                                    'X-Roles',
                                    'X-Service-Catalog',
                                    'X-User-Id',
                                    'X-Tenant-Id',
                                    'X-OpenStack-Request-ID'],
                     expose_headers=['X-Image-Meta-Checksum',
                                     'X-Auth-Token',
                                     'X-Subject-Token',
                                     'X-Service-Token',
                                     'X-OpenStack-Request-ID'],
                     allow_methods=['GET',
                                    'PUT',
                                    'POST',
                                    'DELETE',
                                    'PATCH']
                     )
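As a concrete illustration of the flavor mechanics (config values hypothetical):

# Sketch: how the flavor decorates the app name before paste lookup.
# With [paste_deploy] flavor = keystone in glare.conf:
#   _get_deployment_flavor()    -> '-keystone'
#   load_paste_app('glare-api') -> deploy.loadapp(..., name='glare-api-keystone')
# i.e. the [pipeline:glare-api-keystone] section of the paste file is used.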
155
glare/common/exception.py
Normal file
@ -0,0 +1,155 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from oslo_log import log as logging
import six

from glare.i18n import _

LOG = logging.getLogger(__name__)


class GlareException(Exception):
    """
    Base Glare Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred")

    def __init__(self, message=None, **kwargs):
        if message:
            self.message = message
        self.kwargs = kwargs
        if self.kwargs:
            self.message = self.message % kwargs
        LOG.error(self.message)
        super(GlareException, self).__init__(self.message)

    def __unicode__(self):
        return six.text_type(self.message)


class BadRequest(GlareException):
    message = _("Bad request")


class InvalidStatusTransition(BadRequest):
    message = _("Transition status from %(orig)s to %(new)s was not valid")


class NotAcceptable(GlareException):
    message = _("Not acceptable")


class InvalidGlobalAPIVersion(NotAcceptable):
    message = _("Version %(req_ver)s is not supported by the API. Minimum "
                "is %(min_ver)s and maximum is %(max_ver)s.")


class VersionNotFoundForAPIMethod(GlareException):
    message = _("API version %(version)s is not supported on this method.")


class ApiVersionsIntersect(GlareException):
    message = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects "
                "with other versions.")


class Unauthorized(GlareException):
    message = _('You are not authenticated')


class Forbidden(GlareException):
    message = _("You are not authorized to complete this action.")


class PolicyException(Forbidden):
    message = _("Policy check for %(policy_name)s "
                "failed with user credentials.")


class NotFound(GlareException):
    message = _("An object with the specified identifier was not found.")


class TypeNotFound(NotFound):
    message = _("Glare type with name '%(name)s' was not found.")


class IncorrectArtifactType(GlareException):
    message = _("Artifact type is incorrect: %(explanation)s")


class ArtifactNotFound(NotFound):
    message = _("Artifact with type name '%(type_name)s' and id '%(id)s' was "
                "not found.")


class RequestTimeout(GlareException):
    message = _("The client did not produce a request within the time "
                "that the server was prepared to wait.")


class Conflict(GlareException):
    message = _("The request could not be completed due to a conflict "
                "with the current state of the resource.")


class Gone(GlareException):
    message = _("The requested resource is no longer available at the "
                "server and no forwarding address is known.")


class PreconditionFailed(GlareException):
    message = _("The precondition given in one or more of the request-header "
                "fields evaluated to false when it was tested on the server.")


class RequestEntityTooLarge(GlareException):
    message = _("The server is refusing to process a request because the "
                "request entity is larger than the server is willing or "
                "able to process.")


class RequestRangeNotSatisfiable(GlareException):
    message = _("The request included a Range request-header field, and none "
                "of the range-specifier values in this field overlap the "
                "current extent of the selected resource, and the request "
                "did not include an If-Range request-header field.")


class Locked(GlareException):
    message = _('The resource is locked.')


class FailedDependency(GlareException):
    message = _('The method could not be performed because the requested '
                'action depended on another action and that action failed.')


class UnsupportedMediaType(GlareException):
    message = _("Unsupported media type.")


class SIGHUPInterrupt(GlareException):
    message = _("System SIGHUP signal received.")


class WorkerCreationFailure(GlareException):
    message = _("Server worker creation failed: %(reason)s.")
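Note that other modules in this commit reference exception classes that do not appear in the file above: exception.InvalidVersion in glare/common/semver_db.py, and exception.Invalid, exception.LimitExceeded and exception.ImageSizeLimitExceeded in glare/common/utils.py. A minimal sketch of definitions that would satisfy those references follows; the base classes and message texts are assumptions, not part of the commit:

# Sketch only: names come from references elsewhere in this commit;
# the chosen base classes are assumptions.
class InvalidVersion(BadRequest):
    message = _("Version is invalid: %(reason)s")


class Invalid(BadRequest):
    message = _("Invalid input received.")


class LimitExceeded(RequestEntityTooLarge):
    message = _("The requested operation exceeded the allowed read limit.")


class ImageSizeLimitExceeded(RequestEntityTooLarge):
    message = _("The blob size exceeded the allowed limit.")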
124
glare/common/policy.py
Normal file
@ -0,0 +1,124 @@
# Copyright 2011-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Glare policy operations inspired by Nova implementation."""

from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy

from glare.common import exception

CONF = cfg.CONF
LOG = logging.getLogger(__name__)

_ENFORCER = None


artifact_policy_rules = [
    policy.RuleDefault('context_is_admin', 'role:admin'),
    policy.RuleDefault('admin_or_owner',
                       'is_admin:True or project_id:%(owner)s'),
    policy.RuleDefault("artifact:type_list", "",
                       "Policy to request list of artifact types"),
    policy.RuleDefault("artifact:type_get", "",
                       "Policy to request artifact type definition"),
    policy.RuleDefault("artifact:create", "", "Policy to create artifact."),
    policy.RuleDefault("artifact:update_public",
                       "'public':%(visibility)s and rule:context_is_admin "
                       "or not 'public':%(visibility)s",
                       "Policy to update public artifact"),
    policy.RuleDefault("artifact:update", "rule:admin_or_owner and "
                                          "rule:artifact:update_public",
                       "Policy to update artifact"),
    policy.RuleDefault("artifact:activate", "rule:admin_or_owner",
                       "Policy to activate artifact"),
    policy.RuleDefault("artifact:reactivate", "rule:context_is_admin",
                       "Policy to reactivate artifact"),
    policy.RuleDefault("artifact:deactivate", "rule:context_is_admin",
                       "Policy to deactivate artifact"),
    policy.RuleDefault("artifact:publish", "rule:context_is_admin",
                       "Policy to publish artifact"),
    policy.RuleDefault("artifact:get", "",
                       "Policy to get artifact definition"),
    policy.RuleDefault("artifact:list", "",
                       "Policy to list artifacts"),
    policy.RuleDefault("artifact:delete_public",
                       "'public':%(visibility)s and rule:context_is_admin "
                       "or not 'public':%(visibility)s",
                       "Policy to delete public artifacts"),
    policy.RuleDefault("artifact:delete_deactivated",
                       "'deactivated':%(status)s and rule:context_is_admin "
                       "or not 'deactivated':%(status)s",
                       "Policy to delete deactivated artifacts"),
    policy.RuleDefault("artifact:delete", "rule:admin_or_owner and "
                                          "rule:artifact:delete_public and "
                                          "rule:artifact:delete_deactivated",
                       "Policy to delete artifacts"),
    policy.RuleDefault("artifact:set_location", "rule:admin_or_owner",
                       "Policy to set custom location for artifact"),
    policy.RuleDefault("artifact:upload", "rule:admin_or_owner",
                       "Policy to upload blob for artifact"),
    policy.RuleDefault("artifact:download", "",
                       "Policy to download blob from artifact"),
]


def list_rules():
    return artifact_policy_rules


def _get_enforcer():
    """Init an Enforcer class."""
    global _ENFORCER
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer(CONF)
        _ENFORCER.register_defaults(list_rules())
    return _ENFORCER


def reset():
    global _ENFORCER
    if _ENFORCER:
        _ENFORCER.clear()
        _ENFORCER = None


def authorize(policy_name, target, context, do_raise=True):
    """Check that the user action can be executed according to policies.

    :param policy_name: name of the policy rule to enforce
    :param target: dict of object attributes the policy is checked against
        (e.g. the artifact's 'owner' and 'visibility')
    :param context: user request context
    :param do_raise: if True, raise PolicyException on failure instead of
        returning False
    :return: True if the check passed
    """
    creds = context.to_policy_values()
    result = _get_enforcer().authorize(
        policy_name, target, creds, do_raise=do_raise,
        exc=exception.PolicyException, policy_name=policy_name)
    LOG.debug("Policy %(policy)s check %(result)s for request %(request_id)s",
              {'policy': policy_name,
               'result': 'passed' if result else 'failed',
               'request_id': context.request_id})
    return result


def check_is_admin(context):
    """Whether or not roles contains 'admin' role according to policy setting.
    """
    return authorize('context_is_admin', {}, context, do_raise=False)
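A minimal sketch of how a caller is expected to use authorize(); the target values and the req variable are hypothetical:

from glare.common import policy

# Raises exception.PolicyException unless the request context owns the
# artifact or carries the admin role, per the 'admin_or_owner' and
# 'artifact:update_public' rules registered above.
policy.authorize('artifact:update',
                 {'owner': 'project-id-123', 'visibility': 'private'},
                 req.context)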
175
glare/common/semver_db.py
Normal file
@ -0,0 +1,175 @@
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator

import semantic_version
from sqlalchemy.orm.properties import CompositeProperty
from sqlalchemy import sql

from glare.common import exception
from glare.i18n import _

MAX_COMPONENT_LENGTH = pow(2, 16) - 1
MAX_NUMERIC_PRERELEASE_LENGTH = 6


class DBVersion(object):
    def __init__(self, components_long, prerelease, build):
        """
        Creates a DBVersion object out of 3 component fields. This initializer
        is supposed to be called from SQLAlchemy if 3 database columns are
        mapped to this composite field.

        :param components_long: a 64-bit long value, containing numeric
            components of the version
        :param prerelease: a prerelease label of the version, optionally
            preformatted with leading zeroes in numeric-only parts of the
            label
        :param build: a build label of the version
        """
        version_string = '%s.%s.%s' % _long_to_components(components_long)
        if prerelease:
            version_string += '-' + _strip_leading_zeroes_from_prerelease(
                prerelease)

        if build:
            version_string += '+' + build
        self.version = semantic_version.Version(version_string)

    def __repr__(self):
        return str(self.version)

    def __eq__(self, other):
        return (isinstance(other, DBVersion) and
                other.version == self.version)

    def __ne__(self, other):
        return (not isinstance(other, DBVersion)
                or self.version != other.version)

    def __composite_values__(self):
        long_version = _version_to_long(self.version)
        prerelease = _add_leading_zeroes_to_prerelease(self.version.prerelease)
        build = '.'.join(self.version.build) if self.version.build else None
        return long_version, prerelease, build


def parse(version_string):
    version = semantic_version.Version.coerce(version_string)
    return DBVersion(_version_to_long(version),
                     '.'.join(version.prerelease),
                     '.'.join(version.build))


def _check_limit(value):
    if value > MAX_COMPONENT_LENGTH:
        reason = _("Version component is too "
                   "large (%d max)") % MAX_COMPONENT_LENGTH
        raise exception.InvalidVersion(reason=reason)


def _version_to_long(version):
    """
    Converts the numeric part of the semver version into a 64-bit long value
    using the following logic:

    * major version is stored in the first 16 bits of the value
    * minor version is stored in the next 16 bits
    * patch version is stored in the following 16 bits
    * the next 2 bits store a flag: if the version has a pre-release label
      these bits are 00, otherwise they are 10 (so final releases sort after
      their pre-releases). The remaining flag values (01 and 11) are
      reserved for future usage.
    * the last 14 bits of the value are reserved for future usage

    The numeric components of the version are checked so their value does
    not exceed 16 bits.

    :param version: a semantic_version.Version object
    """
    _check_limit(version.major)
    _check_limit(version.minor)
    _check_limit(version.patch)
    major = version.major << 48
    minor = version.minor << 32
    patch = version.patch << 16
    flag = 0 if version.prerelease else 2
    flag <<= 14
    return major | minor | patch | flag


def _long_to_components(value):
    major = value >> 48
    minor = (value - (major << 48)) >> 32
    patch = (value - (major << 48) - (minor << 32)) >> 16
    return str(major), str(minor), str(patch)


def _add_leading_zeroes_to_prerelease(label_tuple):
    if label_tuple is None:
        return None
    res = []
    for component in label_tuple:
        if component.isdigit():
            if len(component) > MAX_NUMERIC_PRERELEASE_LENGTH:
                reason = _("Prerelease numeric component is too large "
                           "(%d characters "
                           "max)") % MAX_NUMERIC_PRERELEASE_LENGTH
                raise exception.InvalidVersion(reason=reason)
            res.append(component.rjust(MAX_NUMERIC_PRERELEASE_LENGTH, '0'))
        else:
            res.append(component)
    return '.'.join(res)


def _strip_leading_zeroes_from_prerelease(string_value):
    res = []
    for component in string_value.split('.'):
        if component.isdigit():
            val = component.lstrip('0')
            if len(val) == 0:  # Corner case: when the component is just '0'
                val = '0'      # it will be stripped completely, so restore it
            res.append(val)
        else:
            res.append(component)
    return '.'.join(res)


strict_op_map = {
    operator.ge: operator.gt,
    operator.le: operator.lt
}


class VersionComparator(CompositeProperty.Comparator):
    def _get_comparison(self, values, op):
        columns = self.__clause_element__().clauses
        if op in strict_op_map:
            stricter_op = strict_op_map[op]
        else:
            stricter_op = op

        return sql.or_(stricter_op(columns[0], values[0]),
                       sql.and_(columns[0] == values[0],
                                op(columns[1], values[1])))

    def __gt__(self, other):
        return self._get_comparison(other.__composite_values__(), operator.gt)

    def __ge__(self, other):
        return self._get_comparison(other.__composite_values__(), operator.ge)

    def __lt__(self, other):
        return self._get_comparison(other.__composite_values__(), operator.lt)

    def __le__(self, other):
        return self._get_comparison(other.__composite_values__(), operator.le)
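A worked example of the packing scheme in _version_to_long, computed directly from the layout described in its docstring:

import semantic_version

from glare.common import semver_db

v = semantic_version.Version('1.2.3')       # no pre-release label -> flag 10
# 1 << 48  |  2 << 32  |  3 << 16  |  2 << 14
assert semver_db._version_to_long(v) == 0x0001000200038000

rc = semantic_version.Version('1.2.3-rc1')  # pre-release label -> flag 00
assert semver_db._version_to_long(rc) == 0x0001000200030000
# The flag makes the pre-release pack to a smaller long, so it sorts
# before the final release, as semver ordering requires.
assert semver_db._version_to_long(rc) < semver_db._version_to_long(v)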
156
glare/common/store_api.py
Normal file
@ -0,0 +1,156 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib

from glance_store import backend
from glance_store import exceptions as store_exc
from oslo_config import cfg
from oslo_log import log as logging
# NOTE: six.moves.urllib keeps this module working on both Python 2 and 3.
from six.moves import urllib
import six.moves.urllib.parse as urlparse

from glare.common import exception
from glare.common import utils
from glare.i18n import _

CONF = cfg.CONF

LOG = logging.getLogger(__name__)

error_map = [{'catch': store_exc.NotFound,
              'raise': exception.NotFound},
             {'catch': store_exc.UnknownScheme,
              'raise': exception.BadRequest},
             {'catch': store_exc.BadStoreUri,
              'raise': exception.BadRequest},
             {'catch': store_exc.Duplicate,
              'raise': exception.Conflict},
             {'catch': store_exc.Conflict,
              'raise': exception.Conflict},
             {'catch': store_exc.StorageFull,
              'raise': exception.Forbidden},
             {'catch': store_exc.StorageWriteDenied,
              'raise': exception.Forbidden},
             {'catch': store_exc.Forbidden,
              'raise': exception.Forbidden},
             {'catch': store_exc.Invalid,
              'raise': exception.BadRequest},
             {'catch': store_exc.BadStoreConfiguration,
              'raise': exception.GlareException},
             {'catch': store_exc.RemoteServiceUnavailable,
              'raise': exception.BadRequest},
             {'catch': store_exc.HasSnapshot,
              'raise': exception.Conflict},
             {'catch': store_exc.InUseByStore,
              'raise': exception.Conflict},
             {'catch': store_exc.BackendException,
              'raise': exception.GlareException},
             {'catch': store_exc.GlanceStoreException,
              'raise': exception.GlareException}]


@utils.error_handler(error_map)
def save_blob_to_store(blob_id, blob, context, max_size,
                       store_type=None, verifier=None):
    """Save file to specified store type and return location info to the user.

    :param blob_id: id of the blob
    :param blob: blob file iterator
    :param context: user context
    :param max_size: maximum allowed blob size in bytes
    :param store_type: type of the store, None means save to default store
    :param verifier: signature verifier
    :return: tuple of values: (location_uri, size, checksum)
    """
    (location, size, checksum, metadata) = backend.add_to_backend(
        CONF, blob_id,
        utils.LimitingReader(utils.CooperativeReader(blob), max_size),
        0, store_type, context, verifier)
    return location, size, checksum


@utils.error_handler(error_map)
def load_from_store(uri, context):
    """Load file from store backend.

    :param uri: blob uri
    :param context: user context
    :return: file iterator
    """
    return backend.get_from_backend(uri=uri, context=context)[0]


@utils.error_handler(error_map)
def delete_blob(uri, context):
    """Delete blob from backend store.

    :param uri: blob uri
    :param context: user context
    """
    return backend.delete_from_backend(uri, context)


@utils.error_handler(error_map)
def get_blob_size(uri, context):
    return backend.get_size_from_backend(uri, context)


@utils.error_handler(error_map)
def get_location_info(url, context, max_size, calc_checksum=True):
    """Validate location and get information about external blob.

    :param url: blob url
    :param context: user context
    :param max_size: maximum allowed blob size in bytes
    :param calc_checksum: define if checksum must be calculated
    :return: blob size, checksum and content type
    """
    # validate uri
    scheme = urlparse.urlparse(url).scheme
    if scheme not in ('http', 'https'):
        msg = _("Location %s is invalid.") % url
        raise exception.BadRequest(message=msg)

    res = urllib.request.urlopen(url)
    http_message = res.info()
    content_type = (getattr(http_message, 'type', None) or
                    'application/octet-stream')

    # calculate blob checksum to ensure that location blob won't be changed
    # in future
    # TODO(kairat) need to support external location signatures
    checksum = None
    size = 0
    if calc_checksum:
        checksum = hashlib.md5()
        blob_data = load_from_store(url, context)
        for buf in blob_data:
            checksum.update(buf)
            size += len(buf)
            if size > max_size:
                msg = _("External blob size %(size)d exceeds maximum allowed "
                        "size %(max)d.") % {'size': size, 'max': max_size}
                raise exception.BadRequest(message=msg)
        checksum = checksum.hexdigest()
    else:
        # request blob size
        size = get_blob_size(url, context=context)
        if size < 0 or size > max_size:
            msg = _("Invalid blob size %d.") % size
            raise exception.BadRequest(message=msg)

    LOG.debug("Checksum %(checksum)s and size %(size)s calculated "
              "successfully for location %(location)s",
              {'checksum': str(checksum), 'size': str(size),
               'location': url})

    return size, checksum, content_type
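A minimal sketch of the upload path through this module; the blob id, byte iterator and context variable are hypothetical:

from glare.common import store_api

# Streams at most 10 MiB into the default glance_store backend; any
# glance_store exception surfaces as the mapped Glare exception from
# error_map above. 'context' is a glare request context (hypothetical).
location, size, checksum = store_api.save_blob_to_store(
    'blob-uuid-1234', iter([b'some bytes']), context,
    max_size=10 * 1024 ** 2)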
567
glare/common/utils.py
Normal file
@ -0,0 +1,567 @@
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2014 SoftLayer Technologies, Inc.
|
||||
# Copyright 2015 Mirantis, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
System-level utilities and helper functions.
|
||||
"""
|
||||
|
||||
import errno
|
||||
|
||||
try:
|
||||
from eventlet import sleep
|
||||
except ImportError:
|
||||
from time import sleep
|
||||
from eventlet.green import socket
|
||||
|
||||
import functools
|
||||
import os
|
||||
import re
|
||||
import uuid
|
||||
|
||||
from OpenSSL import crypto
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import encodeutils
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import timeutils
|
||||
import six
|
||||
from webob import exc
|
||||
|
||||
from glare.common import exception
|
||||
from glare.i18n import _, _LE, _LW
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
GLARE_TEST_SOCKET_FD_STR = 'GLARE_TEST_SOCKET_FD'
|
||||
|
||||
|
||||
def chunkreadable(iter, chunk_size=65536):
|
||||
"""
|
||||
Wrap a readable iterator with a reader yielding chunks of
|
||||
a preferred size, otherwise leave iterator unchanged.
|
||||
|
||||
:param iter: an iter which may also be readable
|
||||
:param chunk_size: maximum size of chunk
|
||||
"""
|
||||
return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter
|
||||
|
||||
|
||||
def chunkiter(fp, chunk_size=65536):
|
||||
"""
|
||||
Return an iterator to a file-like obj which yields fixed size chunks
|
||||
|
||||
:param fp: a file-like object
|
||||
:param chunk_size: maximum size of chunk
|
||||
"""
|
||||
while True:
|
||||
chunk = fp.read(chunk_size)
|
||||
if chunk:
|
||||
yield chunk
|
||||
else:
|
||||
break
|
||||
|
||||
|
||||
def cooperative_iter(iter):
|
||||
"""
|
||||
Return an iterator which schedules after each
|
||||
iteration. This can prevent eventlet thread starvation.
|
||||
|
||||
:param iter: an iterator to wrap
|
||||
"""
|
||||
try:
|
||||
for chunk in iter:
|
||||
sleep(0)
|
||||
yield chunk
|
||||
except Exception as err:
|
||||
with excutils.save_and_reraise_exception():
|
||||
msg = _LE("Error: cooperative_iter exception %s") % err
|
||||
LOG.error(msg)
|
||||
|
||||
|
||||
def cooperative_read(fd):
|
||||
"""
|
||||
Wrap a file descriptor's read with a partial function which schedules
|
||||
after each read. This can prevent eventlet thread starvation.
|
||||
|
||||
:param fd: a file descriptor to wrap
|
||||
"""
|
||||
def readfn(*args):
|
||||
result = fd.read(*args)
|
||||
sleep(0)
|
||||
return result
|
||||
return readfn
|
||||
|
||||
|
||||
MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
|
||||
|
||||
|
||||
class CooperativeReader(object):
|
||||
"""
|
||||
An eventlet thread friendly class for reading in image data.
|
||||
|
||||
When accessing data either through the iterator or the read method
|
||||
we perform a sleep to allow a co-operative yield. When there is more than
|
||||
one image being uploaded/downloaded this prevents eventlet thread
|
||||
starvation, ie allows all threads to be scheduled periodically rather than
|
||||
having the same thread be continuously active.
|
||||
"""
|
||||
def __init__(self, fd):
|
||||
"""
|
||||
:param fd: Underlying image file object
|
||||
"""
|
||||
self.fd = fd
|
||||
self.iterator = None
|
||||
# NOTE(markwash): if the underlying supports read(), overwrite the
|
||||
# default iterator-based implementation with cooperative_read which
|
||||
# is more straightforward
|
||||
if hasattr(fd, 'read'):
|
||||
self.read = cooperative_read(fd)
|
||||
else:
|
||||
self.iterator = None
|
||||
self.buffer = b''
|
||||
self.position = 0
|
||||
|
||||
def read(self, length=None):
|
||||
"""Return the requested amount of bytes, fetching the next chunk of
|
||||
the underlying iterator when needed.
|
||||
|
||||
This is replaced with cooperative_read in __init__ if the underlying
|
||||
fd already supports read().
|
||||
"""
|
||||
if length is None:
|
||||
if len(self.buffer) - self.position > 0:
|
||||
# if no length specified but some data exists in buffer,
|
||||
# return that data and clear the buffer
|
||||
result = self.buffer[self.position:]
|
||||
self.buffer = b''
|
||||
self.position = 0
|
||||
return str(result)
|
||||
else:
|
||||
# otherwise read the next chunk from the underlying iterator
|
||||
# and return it as a whole. Reset the buffer, as subsequent
|
||||
# calls may specify the length
|
||||
try:
|
||||
if self.iterator is None:
|
||||
self.iterator = self.__iter__()
|
||||
return next(self.iterator)
|
||||
except StopIteration:
|
||||
return ''
|
||||
finally:
|
||||
self.buffer = b''
|
||||
self.position = 0
|
||||
else:
|
||||
result = bytearray()
|
||||
while len(result) < length:
|
||||
if self.position < len(self.buffer):
|
||||
to_read = length - len(result)
|
||||
chunk = self.buffer[self.position:self.position + to_read]
|
||||
result.extend(chunk)
|
||||
|
||||
# This check is here to prevent potential OOM issues if
|
||||
# this code is called with unreasonably high values of read
|
||||
# size. Currently it is only called from the HTTP clients
|
||||
# of Glance backend stores, which use httplib for data
|
||||
# streaming, which has readsize hardcoded to 8K, so this
|
||||
# check should never fire. Regardless it still worths to
|
||||
# make the check, as the code may be reused somewhere else.
|
||||
if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
|
||||
raise exception.LimitExceeded()
|
||||
self.position += len(chunk)
|
||||
else:
|
||||
try:
|
||||
if self.iterator is None:
|
||||
self.iterator = self.__iter__()
|
||||
self.buffer = next(self.iterator)
|
||||
self.position = 0
|
||||
except StopIteration:
|
||||
self.buffer = b''
|
||||
self.position = 0
|
||||
return bytes(result)
|
||||
return bytes(result)
|
||||
|
||||
def __iter__(self):
|
||||
return cooperative_iter(self.fd.__iter__())
|
||||
|
||||
|
||||
class LimitingReader(object):
|
||||
"""
|
||||
Reader designed to fail when reading image data past the configured
|
||||
allowable amount.
|
||||
"""
|
||||
def __init__(self, data, limit):
|
||||
"""
|
||||
:param data: Underlying image data object
|
||||
:param limit: maximum number of bytes the reader should allow
|
||||
"""
|
||||
self.data = data
|
||||
self.limit = limit
|
||||
self.bytes_read = 0
|
||||
|
||||
def __iter__(self):
|
||||
for chunk in self.data:
|
||||
self.bytes_read += len(chunk)
|
||||
if self.bytes_read > self.limit:
|
||||
raise exception.ImageSizeLimitExceeded()
|
||||
else:
|
||||
yield chunk
|
||||
|
||||
def read(self, i):
|
||||
result = self.data.read(i)
|
||||
self.bytes_read += len(result)
|
||||
if self.bytes_read > self.limit:
|
||||
raise exception.ImageSizeLimitExceeded()
|
||||
return result
|
||||
|
||||
|
||||
def create_mashup_dict(image_meta):
|
||||
"""
|
||||
Returns a dictionary-like mashup of the image core properties
|
||||
and the image custom properties from given image metadata.
|
||||
|
||||
:param image_meta: metadata of image with core and custom properties
|
||||
"""
|
||||
|
||||
d = {}
|
||||
for key, value in six.iteritems(image_meta):
|
||||
if isinstance(value, dict):
|
||||
for subkey, subvalue in six.iteritems(
|
||||
create_mashup_dict(value)):
|
||||
if subkey not in image_meta:
|
||||
d[subkey] = subvalue
|
||||
else:
|
||||
d[key] = value
|
||||
|
||||
return d
|
||||
|
||||
|
||||
def safe_mkdirs(path):
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
|
||||
def mutating(func):
|
||||
"""Decorator to enforce read-only logic"""
|
||||
@functools.wraps(func)
|
||||
def wrapped(self, req, *args, **kwargs):
|
||||
if req.context.read_only:
|
||||
msg = "Read-only access"
|
||||
LOG.debug(msg)
|
||||
raise exc.HTTPForbidden(msg, request=req,
|
||||
content_type="text/plain")
|
||||
return func(self, req, *args, **kwargs)
|
||||
return wrapped
|
||||
|
||||
|
||||
def setup_remote_pydev_debug(host, port):
|
||||
error_msg = _LE('Error setting up the debug environment. Verify that the'
|
||||
' option pydev_worker_debug_host is pointing to a valid '
|
||||
'hostname or IP on which a pydev server is listening on'
|
||||
' the port indicated by pydev_worker_debug_port.')
|
||||
|
||||
try:
|
||||
try:
|
||||
from pydev import pydevd
|
||||
except ImportError:
|
||||
import pydevd
|
||||
|
||||
pydevd.settrace(host,
|
||||
port=port,
|
||||
stdoutToServer=True,
|
||||
stderrToServer=True)
|
||||
return True
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(error_msg)
|
||||
|
||||
|
||||
def validate_key_cert(key_file, cert_file):
|
||||
try:
|
||||
error_key_name = "private key"
|
||||
error_filename = key_file
|
||||
with open(key_file, 'r') as keyfile:
|
||||
key_str = keyfile.read()
|
||||
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)
|
||||
|
||||
error_key_name = "certificate"
|
||||
error_filename = cert_file
|
||||
with open(cert_file, 'r') as certfile:
|
||||
cert_str = certfile.read()
|
||||
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
|
||||
except IOError as ioe:
|
||||
raise RuntimeError(_("There is a problem with your %(error_key_name)s "
|
||||
"%(error_filename)s. Please verify it."
|
||||
" Error: %(ioe)s") %
|
||||
{'error_key_name': error_key_name,
|
||||
'error_filename': error_filename,
|
||||
'ioe': ioe})
|
||||
except crypto.Error as ce:
|
||||
raise RuntimeError(_("There is a problem with your %(error_key_name)s "
|
||||
"%(error_filename)s. Please verify it. OpenSSL"
|
||||
" error: %(ce)s") %
|
||||
{'error_key_name': error_key_name,
|
||||
'error_filename': error_filename,
|
||||
'ce': ce})
|
||||
|
||||
try:
|
||||
data = str(uuid.uuid4())
|
||||
# On Python 3, explicitly encode to UTF-8 to call crypto.sign() which
|
||||
# requires bytes. Otherwise, it raises a deprecation warning (and
|
||||
# will raise an error later).
|
||||
data = encodeutils.to_utf8(data)
|
||||
digest = CONF.digest_algorithm
|
||||
if digest == 'sha1':
|
||||
LOG.warn(
|
||||
_LW('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
|
||||
' state that the SHA-1 is not suitable for'
|
||||
' general-purpose digital signature applications (as'
|
||||
' specified in FIPS 186-3) that require 112 bits of'
|
||||
' security. The default value is sha1 in Kilo for a'
|
||||
' smooth upgrade process, and it will be updated'
|
||||
' with sha256 in next release(L).'))
|
||||
out = crypto.sign(key, data, digest)
|
||||
crypto.verify(cert, out, data, digest)
|
||||
except crypto.Error as ce:
|
||||
raise RuntimeError(_("There is a problem with your key pair. "
|
||||
"Please verify that cert %(cert_file)s and "
|
||||
"key %(key_file)s belong together. OpenSSL "
|
||||
"error %(ce)s") % {'cert_file': cert_file,
|
||||
'key_file': key_file,
|
||||
'ce': ce})
|
||||
|
||||
|
||||


def get_test_suite_socket():
    global GLARE_TEST_SOCKET_FD_STR
    if GLARE_TEST_SOCKET_FD_STR in os.environ:
        fd = int(os.environ[GLARE_TEST_SOCKET_FD_STR])
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        if six.PY2:
            sock = socket.SocketType(_sock=sock)
        sock.listen(CONF.backlog)
        del os.environ[GLARE_TEST_SOCKET_FD_STR]
        os.close(fd)
        return sock
    return None


try:
    REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
    # UCS-2 build case
    REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
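
# Illustrative note (not part of the original module): on a wide (UCS-4)
# Python build the first pattern matches any character outside the Basic
# Multilingual Plane, e.g. an emoji:
# >>> REGEX_4BYTE_UNICODE.findall(u'caf\xe9')
# []
# >>> REGEX_4BYTE_UNICODE.findall(u'ok \U0001F600')
# [u'\U0001f600']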


def no_4byte_params(f):
    """
    Check that no 4-byte unicode characters are present
    in dicts' keys/values or in string parameters.
    """
    def wrapper(*args, **kwargs):

        def _is_match(some_str):
            return (isinstance(some_str, six.text_type) and
                    REGEX_4BYTE_UNICODE.findall(some_str) != [])

        def _check_dict(data_dict):
            # a dict of dicts has to be checked recursively
            for key, value in six.iteritems(data_dict):
                if isinstance(value, dict):
                    _check_dict(value)
                else:
                    if _is_match(key):
                        msg = _("Property names can't contain 4 byte unicode.")
                        raise exception.Invalid(msg)
                    if _is_match(value):
                        msg = (_("%s can't contain 4 byte unicode characters.")
                               % key.title())
                        raise exception.Invalid(msg)

        for data_dict in [arg for arg in args if isinstance(arg, dict)]:
            _check_dict(data_dict)
        # now check args for str values
        for arg in args:
            if _is_match(arg):
                msg = _("Param values can't contain 4 byte unicode.")
                raise exception.Invalid(msg)
        # check kwargs as well, as params are passed as kwargs via
        # registry calls
        _check_dict(kwargs)
        return f(*args, **kwargs)
    return wrapper
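
# Illustrative usage sketch (not part of the original module); the
# function and its argument below are hypothetical:
#
#     @no_4byte_params
#     def save_properties(values):
#         ...
#
#     save_properties({u'name': u'ok'})          # passes through
#     save_properties({u'name': u'\U0001F600'})  # raises exception.Invalid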


def stash_conf_values():
    """
    Make a copy of some of the current global CONF's settings.
    Allows determining if any of these values have changed
    when the config is reloaded.
    """
    conf = {
        'bind_host': CONF.bind_host,
        'bind_port': CONF.bind_port,
        'tcp_keepidle': CONF.tcp_keepidle,
        'backlog': CONF.backlog,
        'key_file': CONF.key_file,
        'cert_file': CONF.cert_file
    }

    return conf


def split_filter_op(expression):
    """Split operator from threshold in an expression.
    Designed for use on a comparative-filtering query field.
    When no operator is found, default to an equality comparison.

    :param expression: the expression to parse

    :returns: a tuple (operator, threshold) parsed from expression
    """
    left, sep, right = expression.partition(':')
    if sep:
        # If the expression is a date of the format ISO 8601 like
        # CCYY-MM-DDThh:mm:ss+hh:mm and has no operator, it should
        # not be partitioned, and a default operator of eq should be
        # assumed.
        try:
            timeutils.parse_isotime(expression)
            op = 'eq'
            threshold = expression
        except ValueError:
            op = left
            threshold = right
    else:
        op = 'eq'  # default operator
        threshold = left

    # NOTE stevelle decoding escaped values may be needed later
    return op, threshold
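
# Illustrative examples (not part of the original module); ISO 8601
# timestamps keep their colons and default to an 'eq' comparison:
# >>> split_filter_op('gte:3')
# ('gte', '3')
# >>> split_filter_op('banana')
# ('eq', 'banana')
# >>> split_filter_op('2016-03-04T12:00:00')
# ('eq', '2016-03-04T12:00:00')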


def validate_quotes(value):
    """Validate filter values

    Validates opening/closing quotes in the expression.
    """
    open_quotes = True
    for i in range(len(value)):
        if value[i] == '"':
            if i and value[i - 1] == '\\':
                continue
            if open_quotes:
                if i and value[i - 1] != ',':
                    msg = _("Invalid filter value %s. There is no comma "
                            "before opening quotation mark.") % value
                    raise exception.InvalidParameterValue(message=msg)
            else:
                if i + 1 != len(value) and value[i + 1] != ",":
                    msg = _("Invalid filter value %s. There is no comma "
                            "after closing quotation mark.") % value
                    raise exception.InvalidParameterValue(message=msg)
            open_quotes = not open_quotes
    if not open_quotes:
        msg = _("Invalid filter value %s. The quote is not closed.") % value
        raise exception.InvalidParameterValue(message=msg)


def split_filter_value_for_quotes(value):
    """Split filter values

    Split values by commas and quotes for the 'in' operator, according
    to api-wg guidelines.
    """
    validate_quotes(value)
    tmp = re.compile(r'''
        "(                 # if found a double-quote
           [^\"\\]*        # take characters either non-quotes or backslashes
           (?:\\.          # take backslashes and character after it
            [^\"\\]*)*     # take characters either non-quotes or backslashes
         )                 # before double-quote
        ",?                # a double-quote with comma maybe
        | ([^,]+),?        # if not found double-quote take any non-comma
                           # characters with comma maybe
        | ,                # if we have only comma take empty string
        ''', re.VERBOSE)
    return [val[0] or val[1] for val in re.findall(tmp, value)]
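
# Illustrative examples (not part of the original module): values are
# split on commas, with double quotes protecting embedded commas;
# validate_quotes() rejects malformed quoting first:
# >>> split_filter_value_for_quotes('a,b')
# ['a', 'b']
# >>> split_filter_value_for_quotes('"x,y",z')
# ['x,y', 'z']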


def evaluate_filter_op(value, operator, threshold):
    """Evaluate a comparison operator.
    Designed for use on a comparative-filtering query field.

    :param value: evaluated against the operator, as left side of expression
    :param operator: any supported filter operation
    :param threshold: to compare value against, as right side of expression

    :raises: InvalidFilterOperatorValue if an unknown operator is provided

    :returns: boolean result of applied comparison
    """
    if operator == 'gt':
        return value > threshold
    elif operator == 'gte':
        return value >= threshold
    elif operator == 'lt':
        return value < threshold
    elif operator == 'lte':
        return value <= threshold
    elif operator == 'neq':
        return value != threshold
    elif operator == 'eq':
        return value == threshold

    msg = _("Unable to filter on an unknown operator.")
    raise exception.InvalidFilterOperatorValue(msg)
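
# Illustrative round trip (not part of the original module) combining
# split_filter_op() with evaluate_filter_op() on a query fragment such
# as 'size=gte:10':
# >>> op, threshold = split_filter_op('gte:10')
# >>> evaluate_filter_op(12, op, int(threshold))
# True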


class error_handler(object):
    def __init__(self, error_map, default_exception=None):
        self.error_map = error_map
        self.default_exception = default_exception

    def __call__(self, f):
        """Decorator that catches exceptions raised by the wrapped func
        or method.

        :param f: target func
        :param error_map: list of mappings between exceptions that can be
            raised in func and exceptions that must be raised instead.
            For example, if sqlalchemy NotFound might be raised and we need
            to re-raise it as a glare NotFound exception, then error_map
            must contain {"catch": SQLAlchemyNotFound,
                          "raise": exceptions.NotFound}
        :param default_exception: default exception that must be raised if
            an exception that cannot be found in the error map was raised
        :return: func
        """

        def new_function(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                for map_record in self.error_map:
                    if isinstance(e, map_record['catch']):
                        raise map_record['raise'](str(e))
                else:
                    if self.default_exception:
                        raise self.default_exception(str(e))
                    else:
                        raise
        return new_function
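
# Illustrative usage sketch (not part of the original module); the
# storage dict and lookup function below are hypothetical:
#
#     error_map = [{"catch": KeyError, "raise": exception.NotFound}]
#
#     @error_handler(error_map)
#     def get_item(storage, key):
#         return storage[key]
#
#     get_item({}, 'missing')  # KeyError is re-raised as exception.NotFound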
949
glare/common/wsgi.py
Normal file
@ -0,0 +1,949 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Utility methods for working with WSGI servers
"""
from __future__ import print_function

import errno
import functools
import os
import signal
import sys
import time

import eventlet
from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
import glance_store
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
from osprofiler import opts as profiler_opts
import routes
import routes.middleware
import six
import webob.dec
import webob.exc
from webob import multidict

from glare.common import config
from glare.common import exception as glare_exc
from glare.common import utils
from glare.i18n import _, _LE, _LI, _LW
from glare import i18n


bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_('Address to bind the server. Useful when '
                      'selecting a particular network interface.')),
    cfg.PortOpt('bind_port',
                help=_('The port on which the server will listen.')),
]

socket_opts = [
    cfg.IntOpt('backlog', default=4096,
               help=_('The backlog value that will be used when creating the '
                      'TCP listener socket.')),
    cfg.IntOpt('tcp_keepidle', default=600,
               help=_('The value for the socket option TCP_KEEPIDLE. This is '
                      'the time in seconds that the connection must be idle '
                      'before TCP starts sending keepalive probes.')),
    cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify '
                                 'connecting clients.')),
    cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API '
                                   'server securely.')),
    cfg.StrOpt('key_file', help=_('Private key file to use when starting API '
                                  'server securely.')),
]

eventlet_opts = [
    cfg.IntOpt('workers',
               help=_('The number of child process workers that will be '
                      'created to service requests. The default will be '
                      'equal to the number of CPUs available.')),
    cfg.IntOpt('max_header_line', default=16384,
               help=_('Maximum line size of message headers to be accepted. '
                      'max_header_line may need to be increased when using '
                      'large tokens (typically those generated by the '
                      'Keystone v3 API with big service catalogs).')),
    cfg.BoolOpt('http_keepalive', default=True,
                help=_('If False, server will return the header '
                       '"Connection: close". If True, server will return '
                       '"Connection: Keep-Alive" in its responses. In order '
                       'to close the client socket connection explicitly '
                       'after the response is sent and read successfully by '
                       'the client, you simply have to set this option to '
                       'False when you create a wsgi server.')),
    cfg.IntOpt('client_socket_timeout', default=900,
               help=_('Timeout for client connections\' socket operations. '
                      'If an incoming connection is idle for this number of '
                      'seconds it will be closed. A value of \'0\' means '
                      'wait forever.')),
]

wsgi_opts = [
    cfg.StrOpt('secure_proxy_ssl_header',
               deprecated_for_removal=True,
               deprecated_reason=_('Use the http_proxy_to_wsgi middleware '
                                   'instead.'),
               help=_('The HTTP header used to determine the scheme for the '
                      'original request, even if it was removed by an SSL '
                      'terminating proxy. Typical value is '
                      '"HTTP_X_FORWARDED_PROTO".')),
]


LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(bind_opts)
CONF.register_opts(socket_opts)
CONF.register_opts(eventlet_opts)
CONF.register_opts(wsgi_opts)
profiler_opts.set_defaults(CONF)

ASYNC_EVENTLET_THREAD_POOL_LIST = []


def get_num_workers():
    """Return the configured number of workers."""
    if CONF.workers is None:
        # None implies the number of CPUs
        return processutils.get_worker_count()
    return CONF.workers


def get_bind_addr(default_port=None):
    """Return the host and port to bind to."""
    return (CONF.bind_host, CONF.bind_port or default_port)


def ssl_wrap_socket(sock):
    """
    Wrap an existing socket in SSL

    :param sock: non-SSL socket to wrap

    :returns: An SSL wrapped socket
    """
    utils.validate_key_cert(CONF.key_file, CONF.cert_file)

    ssl_kwargs = {
        'server_side': True,
        'certfile': CONF.cert_file,
        'keyfile': CONF.key_file,
        'cert_reqs': ssl.CERT_NONE,
    }

    if CONF.ca_file:
        ssl_kwargs['ca_certs'] = CONF.ca_file
        ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

    return ssl.wrap_socket(sock, **ssl_kwargs)


def get_socket(default_port):
    """
    Bind socket to bind ip:port in conf

    note: Mostly comes from Swift with a few small changes...

    :param default_port: port to bind to if none is specified in conf

    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = get_bind_addr(default_port)

    # TODO(jaypipes): eventlet's greened socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    address_family = [
        addr[0] for addr in socket.getaddrinfo(bind_addr[0],
                                               bind_addr[1],
                                               socket.AF_UNSPEC,
                                               socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]

    use_ssl = CONF.key_file or CONF.cert_file
    if use_ssl and (not CONF.key_file or not CONF.cert_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    sock = utils.get_test_suite_socket()
    retry_until = time.time() + 30

    while not sock and time.time() < retry_until:
        try:
            sock = eventlet.listen(bind_addr,
                                   backlog=CONF.backlog,
                                   family=address_family)
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)
    if not sock:
        raise RuntimeError(_("Could not bind to %(host)s:%(port)s after"
                             " trying for 30 seconds") %
                           {'host': bind_addr[0],
                            'port': bind_addr[1]})

    return sock


def set_eventlet_hub():
    try:
        eventlet.hubs.use_hub('poll')
    except Exception:
        try:
            eventlet.hubs.use_hub('selects')
        except Exception:
            msg = _("Neither 'poll' nor 'selects' eventlet hubs are "
                    "available on this platform")
            raise glare_exc.WorkerCreationFailure(
                reason=msg)


def initialize_glance_store():
    """Initialize glance store."""
    glance_store.register_opts(CONF)
    glance_store.create_stores(CONF)
    glance_store.verify_default_store()


def get_asynchronous_eventlet_pool(size=1000):
    """Return eventlet pool to caller.

    Also store pools created in global list, to wait on
    it after getting signal for graceful shutdown.

    :param size: eventlet pool size
    :returns: eventlet pool
    """
    global ASYNC_EVENTLET_THREAD_POOL_LIST

    pool = eventlet.GreenPool(size=size)
    # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST
    ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool)

    return pool


class Server(object):
    """Server class to manage multiple WSGI sockets and applications.

    This class requires initialize_glance_store set to True if
    glance store needs to be initialized.
    """
    def __init__(self, threads=1000, initialize_glance_store=False):
        os.umask(0o27)  # ensure files are created with the correct privileges
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self.threads = threads
        self.children = set()
        self.stale_children = set()
        self.running = True
        self.initialize_glance_store = initialize_glance_store
        self.pgid = os.getpid()
        try:
            os.setpgid(self.pgid, self.pgid)
        except OSError:
            self.pgid = 0

    def hup(self, *args):
        """
        Reloads configuration files with zero downtime
        """
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        raise glare_exc.SIGHUPInterrupt

    def kill_children(self, *args):
        """Kills the entire process group."""
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.running = False
        os.killpg(self.pgid, signal.SIGTERM)

    def start(self, application, default_port):
        """
        Run a WSGI server with the given application.

        :param application: The application to be run in the WSGI server
        :param default_port: Port to bind to if none is specified in conf
        """
        self.application = application
        self.default_port = default_port
        self.configure()
        self.start_wsgi()

    def start_wsgi(self):
        workers = get_num_workers()
        if workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = self.create_pool()
            self.pool.spawn_n(self._single_run, self.application, self.sock)
            return
        else:
            LOG.info(_LI("Starting %d workers"), workers)
            signal.signal(signal.SIGTERM, self.kill_children)
            signal.signal(signal.SIGINT, self.kill_children)
            signal.signal(signal.SIGHUP, self.hup)
            while len(self.children) < workers:
                self.run_child()

    def create_pool(self):
        return get_asynchronous_eventlet_pool(size=self.threads)

    def _remove_children(self, pid):
        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s'), pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s'), pid)
        else:
            LOG.warn(_LW('Unrecognised child %s') % pid)

    def _verify_and_respawn_children(self, pid, status):
        if len(self.stale_children) == 0:
            LOG.debug('No stale children')
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            LOG.error(_LE('Not respawning child %d, cannot '
                          'recover from termination') % pid)
            if not self.children and not self.stale_children:
                LOG.info(
                    _LI('All workers have terminated. Exiting'))
                self.running = False
        else:
            if len(self.children) < get_num_workers():
                self.run_child()

    def wait_on_children(self):
        while self.running:
            try:
                pid, status = os.wait()
                if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                    self._remove_children(pid)
                    self._verify_and_respawn_children(pid, status)
            except OSError as err:
                if err.errno not in (errno.EINTR, errno.ECHILD):
                    raise
            except KeyboardInterrupt:
                LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
                break
            except glare_exc.SIGHUPInterrupt:
                self.reload()
                continue
        eventlet.greenio.shutdown_safe(self.sock)
        self.sock.close()
        LOG.debug('Exited')

    def configure(self, old_conf=None, has_changed=None):
        """
        Apply configuration settings

        :param old_conf: Cached old configuration settings (if any)
        :param has_changed: callable to determine if a parameter has changed
        """
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.client_socket_timeout = CONF.client_socket_timeout or None
        self.configure_socket(old_conf, has_changed)
        if self.initialize_glance_store:
            initialize_glance_store()

    def reload(self):
        """
        Reload and re-apply configuration settings

        Existing child processes are sent a SIGHUP signal
        and will exit after completing existing requests.
        New child processes, which will have the updated
        configuration, are spawned. This allows preventing
        interruption to the service.
        """
        def _has_changed(old, new, param):
            old = old.get(param)
            new = getattr(new, param)
            return (new != old)

        old_conf = utils.stash_conf_values()
        has_changed = functools.partial(_has_changed, old_conf, CONF)
        CONF.reload_config_files()
        os.killpg(self.pgid, signal.SIGHUP)
        self.stale_children = self.children
        self.children = set()

        # Ensure any logging config changes are picked up
        logging.setup(CONF, 'glare')
        config.set_config_defaults()

        self.configure(old_conf, has_changed)
        self.start_wsgi()

    def wait(self):
        """Wait until all servers have completed running."""
        try:
            if self.children:
                self.wait_on_children()
            else:
                self.pool.waitall()
        except KeyboardInterrupt:
            pass

    def run_child(self):
        def child_hup(*args):
            """Shuts down child processes, existing requests are handled."""
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            eventlet.wsgi.is_accepting = False
            self.sock.close()

        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGHUP, child_hup)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            # ignore the interrupt signal to avoid a race whereby
            # a child worker receives the signal before the parent
            # and is respawned unnecessarily as a result
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            # The child has no need to stash the unwrapped
            # socket, and the reference prevents a clean
            # exit on sighup
            self._sock = None
            self.run_server()
            LOG.info(_LI('Child %d exiting normally'), os.getpid())
            # self.pool.waitall() is now called in wsgi's server so
            # it's safe to exit here
            sys.exit(0)
        else:
            LOG.info(_LI('Started child %s'), pid)
            self.children.add(pid)

    def run_server(self):
        """Run a WSGI server."""
        if cfg.CONF.pydev_worker_debug_host:
            utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
                                           cfg.CONF.pydev_worker_debug_port)

        eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        self.pool = self.create_pool()
        try:
            eventlet.wsgi.server(self.sock,
                                 self.application,
                                 log=self._logger,
                                 custom_pool=self.pool,
                                 debug=False,
                                 keepalive=CONF.http_keepalive,
                                 socket_timeout=self.client_socket_timeout)
        except socket.error as err:
            if err.args[0] != errno.EINVAL:
                raise

        # waiting on async pools
        if ASYNC_EVENTLET_THREAD_POOL_LIST:
            for pool in ASYNC_EVENTLET_THREAD_POOL_LIST:
                pool.waitall()

    def _single_run(self, application, sock):
        """Start a WSGI server in a new green thread."""
        LOG.info(_LI("Starting single process server"))
        eventlet.wsgi.server(sock, application, custom_pool=self.pool,
                             log=self._logger,
                             debug=False,
                             keepalive=CONF.http_keepalive,
                             socket_timeout=self.client_socket_timeout)

    def configure_socket(self, old_conf=None, has_changed=None):
        """
        Ensure a socket exists and is appropriately configured.

        This function is called on start up, and can also be
        called in the event of a configuration reload.

        When called for the first time a new socket is created.
        If reloading and either bind_host or bind_port have been
        changed the existing socket must be closed and a new
        socket opened (laws of physics).

        In all other cases (bind_host/bind_port have not changed)
        the existing socket is reused.

        :param old_conf: Cached old configuration settings (if any)
        :param has_changed: callable to determine if a parameter has changed
        """
        # Do we need a fresh socket?
        new_sock = (old_conf is None or (
                    has_changed('bind_host') or
                    has_changed('bind_port')))
        # Will we be using https?
        use_ssl = not (not CONF.cert_file or not CONF.key_file)
        # Were we using https before?
        old_use_ssl = (old_conf is not None and not (
                       not old_conf.get('key_file') or
                       not old_conf.get('cert_file')))
        # Do we now need to perform an SSL wrap on the socket?
        wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
        # Do we now need to perform an SSL unwrap on the socket?
        unwrap_sock = use_ssl is False and old_use_ssl is True

        if new_sock:
            self._sock = None
            if old_conf is not None:
                self.sock.close()
            _sock = get_socket(self.default_port)
            _sock.setsockopt(socket.SOL_SOCKET,
                             socket.SO_REUSEADDR, 1)
            # sockets can hang around forever without keepalive
            _sock.setsockopt(socket.SOL_SOCKET,
                             socket.SO_KEEPALIVE, 1)
            self._sock = _sock

        if wrap_sock:
            self.sock = ssl_wrap_socket(self._sock)

        if unwrap_sock:
            self.sock = self._sock

        if new_sock and not use_ssl:
            self.sock = self._sock

        # Pick up newly deployed certs
        if old_conf is not None and use_ssl is True and old_use_ssl is True:
            if has_changed('cert_file') or has_changed('key_file'):
                utils.validate_key_cert(CONF.key_file, CONF.cert_file)
            if has_changed('cert_file'):
                self.sock.certfile = CONF.cert_file
            if has_changed('key_file'):
                self.sock.keyfile = CONF.key_file

        if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
            # This option isn't available in the OS X version of eventlet
            if hasattr(socket, 'TCP_KEEPIDLE'):
                self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                     CONF.tcp_keepidle)

        if old_conf is not None and has_changed('backlog'):
            self.sock.listen(CONF.backlog)


class Middleware(object):
    """
    Base WSGI middleware wrapper. These classes require an application to be
    initialized that will be called next. By default the middleware will
    simply call its wrapped app, or you can override __call__ to customize its
    behavior.
    """

    def __init__(self, application):
        self.application = application

    @classmethod
    def factory(cls, global_conf, **local_conf):
        def filter(app):
            return cls(app)
        return filter

    def process_request(self, req):
        """
        Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        response = self.process_request(req)
        if response:
            return response
        response = req.get_response(self.application)
        response.request = req
        try:
            return self.process_response(response)
        except webob.exc.HTTPException as e:
            return e
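
# Illustrative subclass sketch (not part of the original module): a
# middleware that stamps every response with a header. The class and
# header names are hypothetical; paste-deploy would wire it in through
# its factory() classmethod.
#
#     class VersionStamp(Middleware):
#         def process_response(self, response):
#             response.headers['X-Glare-Example'] = '1'
#             return response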


class Debug(Middleware):
    """
    Helper class that can be inserted into any WSGI application chain
    to get information about the request and response.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        print(("*" * 40) + " REQUEST ENVIRON")
        for key, value in req.environ.items():
            print(key, "=", value)
        print('')
        resp = req.get_response(self.application)

        print(("*" * 40) + " RESPONSE HEADERS")
        for (key, value) in six.iteritems(resp.headers):
            print(key, "=", value)
        print('')

        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """
        Iterator that prints the contents of a wrapper string iterator
        when iterated.
        """
        print(("*" * 40) + " BODY")
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()


class APIMapper(routes.Mapper):
    """
    Handle route matching when url is '' because routes.Mapper returns
    an error in this case.
    """

    def routematch(self, url=None, environ=None):
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)


class RejectMethodController(object):
    def reject(self, req, allowed_methods, *args, **kwargs):
        LOG.debug("The method %s is not allowed for this resource",
                  req.environ['REQUEST_METHOD'])
        raise webob.exc.HTTPMethodNotAllowed(
            headers=[('Allow', allowed_methods)])


class Router(object):
    """
    WSGI middleware that maps incoming requests to WSGI apps.
    """

    def __init__(self, mapper):
        """
        Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, who will route
        the request to the action method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, "/svrlist", controller=sc, action="list")

          # Actions are all implicitly defined
          mapper.resource("server", "servers", controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
        """
        mapper.redirect("", "/")
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @classmethod
    def factory(cls, global_conf, **local_conf):
        return cls(APIMapper())

    @webob.dec.wsgify
    def __call__(self, req):
        """
        Route the incoming request to a controller based on self.map.
        If no match, return either a 404 (Not Found) or 501 (Not Implemented).
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify
    def _dispatch(req):
        """
        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404,
        501, or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            implemented_http_methods = ['GET', 'HEAD', 'POST', 'PUT',
                                        'DELETE', 'PATCH']
            if req.environ['REQUEST_METHOD'] not in implemented_http_methods:
                return webob.exc.HTTPNotImplemented()
            else:
                return webob.exc.HTTPNotFound()
        app = match['controller']
        return app


class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    def __init__(self, environ, *args, **kwargs):
        if CONF.secure_proxy_ssl_header:
            scheme = environ.get(CONF.secure_proxy_ssl_header)
            if scheme:
                environ['wsgi.url_scheme'] = scheme
        super(Request, self).__init__(environ, *args, **kwargs)

    def best_match_content_type(self):
        """Determine the requested response content-type."""
        supported = ('application/json',)
        bm = self.accept.best_match(supported)
        return bm or 'application/json'

    def best_match_language(self):
        """Determines best available locale from the Accept-Language header.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not self.accept_language:
            return None
        langs = i18n.get_available_languages('glare')
        return self.accept_language.best_match(langs)

    def get_content_range(self):
        """Return the `Content-Range` header of a request."""
        range_str = self.headers.get('Content-Range')
        if range_str is not None:
            range_ = webob.byterange.ContentRange.parse(range_str)
            if range_ is None:
                msg = _('Malformed Content-Range header: %s') % range_str
                raise webob.exc.HTTPBadRequest(explanation=msg)
            return range_


class JSONRequestDeserializer(object):
    valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate',
                                         'gzip', 'identity'])

    def has_body(self, request):
        """
        Returns whether a Webob.Request object will possess an entity body.

        :param request: Webob.Request object
        """
        request_encoding = request.headers.get('transfer-encoding', '').lower()
        is_valid_encoding = request_encoding in self.valid_transfer_encoding
        if is_valid_encoding and request.is_body_readable:
            return True
        elif request.content_length is not None and request.content_length > 0:
            return True

        return False

    @staticmethod
    def _sanitizer(obj):
        """Sanitizer method that will be passed to jsonutils.loads."""
        return obj

    def from_json(self, datastring):
        try:
            jsondata = jsonutils.loads(datastring, object_hook=self._sanitizer)
            if not isinstance(jsondata, (dict, list)):
                msg = _('Unexpected body type. Expected list/dict.')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            return jsondata
        except ValueError:
            msg = _('Malformed JSON in request body.')
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def default(self, request):
        if self.has_body(request):
            return {'body': self.from_json(request.body)}
        else:
            return {}


class JSONResponseSerializer(object):

    def _sanitizer(self, obj):
        """Sanitizer method that will be passed to jsonutils.dumps."""
        if hasattr(obj, "to_dict"):
            return obj.to_dict()
        if isinstance(obj, multidict.MultiDict):
            return obj.mixed()
        return jsonutils.to_primitive(obj)

    def to_json(self, data):
        return jsonutils.dump_as_bytes(data, default=self._sanitizer)

    def default(self, response, result):
        response.content_type = 'application/json'
        body = self.to_json(result)
        body = encodeutils.to_utf8(body)
        response.body = body
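
# Illustrative round trip (not part of the original module):
# >>> serializer = JSONResponseSerializer()
# >>> resp = webob.Response()
# >>> serializer.default(resp, {'name': u'art1'})
# >>> resp.content_type
# 'application/json'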


def translate_exception(req, e):
    """Translates all translatable elements of the given exception."""

    # The RequestClass attribute in the webob.dec.wsgify decorator
    # does not guarantee that the request object will be a particular
    # type; this check is therefore necessary.
    if not hasattr(req, "best_match_language"):
        return e

    locale = req.best_match_language()

    if isinstance(e, webob.exc.HTTPError):
        e.explanation = i18n.translate(e.explanation, locale)
        e.detail = i18n.translate(e.detail, locale)
        if getattr(e, 'body_template', None):
            e.body_template = i18n.translate(e.body_template, locale)
    return e


class Resource(object):
    """
    WSGI app that handles (de)serialization and controller dispatch.

    Reads routing information supplied by RoutesMiddleware and calls
    the requested action method upon its deserializer, controller,
    and serializer. Those three objects may implement any of the basic
    controller action methods (create, update, show, index, delete)
    along with any that may be specified in the api router. A 'default'
    method may also be implemented to be used in place of any
    non-implemented actions. Deserializer methods must accept a request
    argument and return a dictionary. Controller methods must accept a
    request argument. Additionally, they must also accept keyword
    arguments that represent the keys returned by the Deserializer. They
    may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.
    """

    def __init__(self, controller, deserializer=None, serializer=None):
        """
        :param controller: object that implements methods created by
                           routes lib
        :param deserializer: object that supports webob request
                             deserialization through controller-like actions
        :param serializer: object that supports webob response serialization
                           through controller-like actions
        """
        self.controller = controller
        self.serializer = serializer or JSONResponseSerializer()
        self.deserializer = deserializer or JSONRequestDeserializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        body_reject = strutils.bool_from_string(
            action_args.pop('body_reject', None))

        try:
            if body_reject and self.deserializer.has_body(request):
                msg = _('A body is not expected with this request.')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            deserialized_request = self.dispatch(self.deserializer,
                                                 action, request)
            action_args.update(deserialized_request)
            action_result = self.dispatch(self.controller, action,
                                          request, **action_args)
        except webob.exc.WSGIHTTPException as e:
            exc_info = sys.exc_info()
            e = translate_exception(request, e)
            six.reraise(type(e), e, exc_info[2])
        except glare_exc.GlareException:
            raise
        except UnicodeDecodeError:
            msg = _("Error decoding your request. Either the URL or the "
                    "request body contained characters that could not be "
                    "decoded by Glare")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except Exception as e:
            LOG.exception(_LE("Caught error: %s"),
                          encodeutils.exception_to_unicode(e))
            response = webob.exc.HTTPInternalServerError()
            return response

        try:
            response = webob.Response(request=request)
            self.dispatch(self.serializer, action, response, action_result)
            # encode all headers in response to utf-8 to prevent unicode
            # errors
            for name, value in list(response.headers.items()):
                if six.PY2 and isinstance(value, six.text_type):
                    response.headers[name] = encodeutils.safe_encode(value)
            return response
        except webob.exc.WSGIHTTPException as e:
            return translate_exception(request, e)
        except webob.exc.HTTPException as e:
            return e
        except glare_exc.GlareException:
            raise
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result

    def dispatch(self, obj, action, *args, **kwargs):
        """Find action-specific method on self and call it."""
        try:
            method = getattr(obj, action)
        except AttributeError:
            method = getattr(obj, 'default')

        return method(*args, **kwargs)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args
71
glare/common/wsme_utils.py
Normal file
@ -0,0 +1,71 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime

from wsme import types as wsme_types

from oslo_utils import timeutils


class WSMEModelTransformer(object):

    def to_dict(self):
        # Return the wsme_attributes names:values as a dict
        my_dict = {}
        for attribute in self._wsme_attributes:
            value = getattr(self, attribute.name)
            if value is not wsme_types.Unset:
                my_dict.update({attribute.name: value})
        return my_dict

    @classmethod
    def to_wsme_model(model, db_entity, self_link=None, schema=None):
        # Return the wsme_attributes names:values as a dict
        names = []
        for attribute in model._wsme_attributes:
            names.append(attribute.name)

        values = {}
        for name in names:
            value = getattr(db_entity, name, None)
            if value is not None:
                if type(value) == datetime:
                    iso_datetime_value = timeutils.isotime(value)
                    values.update({name: iso_datetime_value})
                else:
                    values.update({name: value})

        if schema:
            values['schema'] = schema

        model_object = model(**values)

        # 'self' kwarg is used in wsme.types.Base.__init__(self, ..) and
        # conflicts during initialization. self_link is a proxy field to self.
        if self_link:
            model_object.self = self_link

        return model_object

    @classmethod
    def get_mandatory_attrs(cls):
        return [attr.name for attr in cls._wsme_attributes if attr.mandatory]


def _get_value(obj):
    if obj is not wsme_types.Unset:
        return obj
    else:
        return None
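
# Illustrative usage sketch (not part of the original module);
# ExampleModel and db_entity are hypothetical:
#
#     class ExampleModel(wsme_types.Base, WSMEModelTransformer):
#         name = wsme_types.text
#
#     model = ExampleModel.to_wsme_model(db_entity)
#     model.to_dict()  # -> {'name': <db_entity.name>}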
67
glare/context.py
Normal file
@ -0,0 +1,67 @@
# Copyright 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_context import context

from glare.common import policy


class RequestContext(context.RequestContext):
    """Stores information about the security context.

    Stores how the user accesses the system, as well as additional request
    information.
    """

    def __init__(self, owner_is_tenant=True, service_catalog=None,
                 policy_enforcer=None, **kwargs):
        super(RequestContext, self).__init__(**kwargs)
        self.owner_is_tenant = owner_is_tenant
        self.service_catalog = service_catalog
        self.policy_enforcer = policy_enforcer or policy._get_enforcer()
        if not self.is_admin:
            self.is_admin = self.policy_enforcer.check_is_admin(self)

    def to_dict(self):
        d = super(RequestContext, self).to_dict()
        d.update({
            'roles': self.roles,
            'service_catalog': self.service_catalog,
        })
        return d

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    @property
    def owner(self):
        """Return the owner to correlate with an image."""
        return self.tenant if self.owner_is_tenant else self.user

    @property
    def can_see_deleted(self):
        """Admins can see deleted by default"""
        return self.show_deleted or self.is_admin


def get_admin_context(show_deleted=False):
    """Create an administrator context."""
    return RequestContext(auth_token=None,
                          tenant=None,
                          is_admin=True,
                          show_deleted=show_deleted,
                          overwrite=False)
0
glare/db/__init__.py
Normal file
73
glare/db/api.py
Normal file
@ -0,0 +1,73 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common database interface for all objects"""


class BaseDBAPI(object):

    def __init__(self, cls):
        self.type = cls.get_type_name()
        self.cls = cls

    def create(self, context, values):
        """Create new artifact in db and return dict of values to the user

        :param context: user context
        :param values: dict of values that need to be saved to db
        :return: dict of created values
        """
        raise NotImplementedError()

    def update(self, context, artifact_id, values):
        """Update artifact values in database

        :param artifact_id: id of artifact that needs to be updated
        :param context: user context
        :param values: values that need to be updated
        :return: dict of updated artifact values
        """
        raise NotImplementedError()

    def get(self, context, artifact_id):
        """Return artifact values from database

        :param context: user context
        :param artifact_id: id of the artifact
        :return: dict of artifact values
        """
        raise NotImplementedError()

    def delete(self, context, artifact_id):
        """Delete artifact from db

        :param context: user context
        :param artifact_id: id of artifact that needs to be deleted
        :return: dict of deleted artifact values
        """
        raise NotImplementedError()

    def list(self, context, filters, marker, limit, sort):
        """List artifacts from db

        :param context: user request context
        :param filters: filter conditions from url
        :param marker: id of the first artifact where to start
                       artifact lookup
        :param limit: max number of items in list
        :param sort: sort conditions
        :return: list of artifacts. Each artifact is represented as dict of
                 values.
        """
        raise NotImplementedError()
75
glare/db/artifact_api.py
Normal file
@ -0,0 +1,75 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database API for all artifact types"""

from oslo_log import log as logging
import six

from glare.db import api as base_api
from glare.db.sqlalchemy import api
from glare import locking

LOG = logging.getLogger(__name__)


class ArtifactAPI(base_api.BaseDBAPI):

    def _serialize_values(self, values):
        new_values = {}
        if 'tags' in values:
            new_values['tags'] = values.pop('tags')
        for key, value in six.iteritems(values):
            if key in api.BASE_ARTIFACT_PROPERTIES:
                new_values[key] = value
            elif self.cls.is_blob(key) or self.cls.is_blob_dict(key):
                new_values.setdefault('blobs', {})[key] = value
            else:
                new_values.setdefault('properties', {})[key] = value
        return new_values

    def create(self, context, values):
        values = self._serialize_values(values)
        values['type_name'] = self.type
        session = api.get_session()
        return api.create(context, values, session)

    def update(self, context, artifact_id, values):
        session = api.get_session()
        return api.update(context, artifact_id,
                          self._serialize_values(values), session)

    def delete(self, context, artifact_id):
        session = api.get_session()
        return api.delete(context, artifact_id, session)

    def get(self, context, artifact_id):
        session = api.get_session()
        return api.get(context, artifact_id, session)

    def list(self, context, filters, marker, limit, sort):
        session = api.get_session()
        filters.append(('type_name', None, 'eq', None, self.type))
        return api.get_all(context=context, session=session, filters=filters,
                           marker=marker, limit=limit, sort=sort)


class ArtifactLockApi(locking.LockApiBase):
    def create_lock(self, context, lock_key):
        session = api.get_session()
        return api.create_lock(context, lock_key, session)

    def delete_lock(self, context, lock_id):
        session = api.get_session()
        api.delete_lock(context, lock_id, session)
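
# Illustrative sketch (not part of the original module) of how
# _serialize_values() partitions an update: base properties stay at the
# top level, blob fields go under 'blobs', everything else under
# 'properties'. The type and field names below are hypothetical.
#
#     ArtifactAPI(SomeType)._serialize_values(
#         {'visibility': 'public', 'image': blob, 'ram': 1024})
#     # -> {'visibility': 'public',
#     #     'blobs': {'image': blob},
#     #     'properties': {'ram': 1024}}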
176
glare/db/simple_api.py
Normal file
@ -0,0 +1,176 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Simple Database API for all artifact types"""

from oslo_log import log as logging
from oslo_utils import timeutils
import semantic_version

import glare.common.exception as glare_exc
from glare.common import utils
from glare.db import api
from glare.i18n import _
from glare import locking


LOG = logging.getLogger(__name__)

DATA = {
    'artifacts': {},
    'locks': {}
}

error_map = [{"catch": KeyError, "raise": glare_exc.NotFound}]


class SimpleAPI(api.BaseDBAPI):

    @utils.error_handler(error_map)
    def create(self, context, values):
        global DATA
        values['created_at'] = values['updated_at'] = timeutils.utcnow()
        artifact_id = values['id']
        if artifact_id in DATA['artifacts']:
            msg = _("Artifact with id '%s' already exists") % artifact_id
            raise glare_exc.BadRequest(msg)
        values['_type'] = self.type

        DATA['artifacts'][artifact_id] = values
        return values

    @utils.error_handler(error_map)
    def update(self, context, artifact_id, values):
        global DATA
        af = DATA['artifacts'][artifact_id]
        af.update(values)
        if 'status' in values and values['status'] == self.cls.STATUS.ACTIVE:
            af['activated_at'] = timeutils.utcnow()
        af['updated_at'] = timeutils.utcnow()
        DATA['artifacts'][artifact_id] = af
        return af

    @utils.error_handler(error_map)
    def delete(self, context, artifact_id):
        global DATA
        del DATA['artifacts'][artifact_id]

    @utils.error_handler(error_map)
    def get(self, context, artifact_id):
        global DATA
        return DATA['artifacts'][artifact_id]

    @utils.error_handler(error_map)
    def list(self, context, filters, marker, limit, sort):
        global DATA
        afs = list(DATA['artifacts'].values())
        filters.append(('_type', None, 'eq', None, self.type))

        for field_name, key_name, op, field_type, value in filters:
            if field_name == 'tags':
                values = utils.split_filter_value_for_quotes(value)
                for af in afs[:]:
                    if not set(values).issubset(af['tags']):
                        afs.remove(af)
            elif field_name == 'tags-any':
                values = utils.split_filter_value_for_quotes(value)
                for af in afs[:]:
                    for tag in values:
                        if tag in af['tags']:
                            break
                    else:
                        afs.remove(af)
            # filter by dict field
            elif key_name is not None:
                for af in afs[:]:
                    if key_name not in af[field_name]:
                        afs.remove(af)
                    elif op == 'in':
                        for val in value:
                            if af[field_name][key_name] == val:
                                break
                        else:
                            afs.remove(af)
                    elif not utils.evaluate_filter_op(
                            af[field_name][key_name], op, value):
                        afs.remove(af)
            # filter by common field
            else:
                for af in afs[:]:
                    if op == 'in':
                        for val in value:
                            if field_name == 'version':
                                val = semantic_version.Version.coerce(val)
                                af_version = semantic_version.Version.coerce(
                                    af[field_name])
                                if af_version == val:
                                    break
                            elif af[field_name] == val:
                                break
                        else:
                            afs.remove(af)
                    else:
                        if field_name == 'version':
                            af_version = semantic_version.Version.coerce(
                                af[field_name])
                            if not utils.evaluate_filter_op(
                                    af_version, op,
                                    semantic_version.Version.coerce(value)):
                                afs.remove(af)
                        else:
                            if not utils.evaluate_filter_op(
                                    af[field_name], op, value):
                                afs.remove(af)

        for key, dir, prop_type in sort:
            # sort by version
            if key == 'version':
                def version_cmp(af1, af2):
                    if af1['version'] is None and af2['version'] is None:
                        return 0
                    elif af1['version'] is None:
                        return -1
                    elif af2['version'] is None:
                        return 1
                    return semantic_version.compare(
                        af1['version'], af2['version'])
                afs.sort(cmp=version_cmp, reverse=dir == 'desc')
            else:
                reverse = dir == 'desc'
                afs.sort(key=lambda x: x[key] or '', reverse=reverse)

        return afs
class SimpleLockApi(locking.LockApiBase):
|
||||
def create_lock(self, context, lock_key):
|
||||
global DATA
|
||||
item_lock = DATA['locks'].get(lock_key)
|
||||
if item_lock:
|
||||
msg = _("Cannot lock an item with key %s. "
|
||||
"Lock already acquired by other request.") % lock_key
|
||||
raise glare_exc.Conflict(msg)
|
||||
# TODO(kairat) Log user data in the log so we can identify who
|
||||
# acquired the lock
|
||||
else:
|
||||
DATA['locks'][lock_key] = lock_key
|
||||
return lock_key
|
||||
|
||||
def delete_lock(self, context, lock_id):
|
||||
global DATA
|
||||
item_lock = DATA['locks'][lock_id]
|
||||
if item_lock:
|
||||
del DATA['locks'][lock_id]
|
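
For reference, the filters passed to list() above are five-element tuples of the form (field_name, key_name, op, field_type, value). A minimal illustrative sketch, assuming a hypothetical `api` instance of the artifact API class this hunk belongs to and a request context `ctx`:

# Illustrative only: hypothetical filters against the in-memory backend.
filters = [
    ('name', None, 'eq', 'string', 'my-image'),       # plain field match
    ('metadata', 'arch', 'eq', 'string', 'x86_64'),   # dict field by key
    ('tags-any', None, None, None, 'gpu,ssd'),        # match any listed tag
]
sort = [('version', 'desc', None)]
artifacts = api.list(ctx, filters, None, None, sort)  # marker=None, limit=None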
0
glare/db/sqlalchemy/__init__.py
Normal file
546
glare/db/sqlalchemy/api.py
Normal file
@ -0,0 +1,546 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator
import threading
import uuid

from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import session
from oslo_log import log as os_logging
from oslo_utils import timeutils
import osprofiler.sqlalchemy
import six
import sqlalchemy
from sqlalchemy import and_
from sqlalchemy import or_
import sqlalchemy.orm as orm
from sqlalchemy.orm import aliased
from sqlalchemy.orm import joinedload

from glare.common import exception
from glare.common import semver_db
from glare.common import utils
from glare.db.sqlalchemy import models
from glare.i18n import _, _LW

LOG = os_logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_group("profiler", "glare.common.wsgi")


BASE_ARTIFACT_PROPERTIES = ('id', 'visibility', 'created_at', 'updated_at',
                            'activated_at', 'owner', 'status', 'description',
                            'name', 'type_name', 'version')

DEFAULT_SORT_PARAMETERS = (('created_at', 'desc', None), ('id', 'asc', None))

_FACADE = None
_LOCK = threading.Lock()


def _retry_on_deadlock(exc):
    """Decorator to retry a DB API call if Deadlock was received."""
    if isinstance(exc, db_exception.DBDeadlock):
        LOG.warn(_LW("Deadlock detected. Retrying..."))
        return True
    return False


def _create_facade_lazily():
    global _LOCK, _FACADE
    if _FACADE is None:
        with _LOCK:
            if _FACADE is None:
                _FACADE = session.EngineFacade.from_config(CONF)

                if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy:
                    osprofiler.sqlalchemy.add_tracing(sqlalchemy,
                                                      _FACADE.get_engine(),
                                                      "db")
    return _FACADE


def get_engine():
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(autocommit=True, expire_on_commit=False):
    facade = _create_facade_lazily()
    return facade.get_session(autocommit=autocommit,
                              expire_on_commit=expire_on_commit)


def clear_db_env():
    """Unset global configuration variables for database."""
    global _FACADE
    _FACADE = None


def create(context, values, session):
    return _create_or_update(context, None, values, session)


def update(context, artifact_id, values, session):
    return _create_or_update(context, artifact_id, values, session)


def delete(context, artifact_id, session):
    artifact = _get(context, artifact_id, session)
    artifact.properties = []
    artifact.tags = []
    artifact.status = 'deleted'
    artifact.save(session=session)


def _drop_protected_attrs(model_class, values):
    """Remove protected attributes from the values dictionary, based on
    the model's __protected_attributes__ field.
    """
    for attr in model_class.__protected_attributes__:
        if attr in values:
            del values[attr]


def _create_or_update(context, artifact_id, values, session):
    with session.begin():
        _drop_protected_attrs(models.Artifact, values)
        if artifact_id is None:
            if 'type_name' not in values:
                msg = _('Type name must be set.')
                raise exception.BadRequest(msg)
            # create new artifact
            artifact = models.Artifact()
            if 'id' not in values:
                artifact.id = str(uuid.uuid4())
            else:
                artifact.id = values.pop('id')
            artifact.created_at = timeutils.utcnow()
        else:
            # update the existing artifact
            artifact = _get(context, artifact_id, session)

        if 'version' in values:
            values['version'] = semver_db.parse(values['version'])

        if 'tags' in values:
            tags = values.pop('tags')
            artifact.tags = _do_tags(artifact, tags)

        if 'properties' in values:
            properties = values.pop('properties', {})
            artifact.properties = _do_properties(artifact, properties)

        if 'blobs' in values:
            blobs = values.pop('blobs')
            artifact.blobs = _do_blobs(artifact, blobs)

        artifact.updated_at = timeutils.utcnow()
        if 'status' in values and values['status'] == 'active':
            artifact.activated_at = timeutils.utcnow()
        artifact.update(values)
        artifact.save(session=session)

    return artifact.to_dict()


def _get(context, artifact_id, session):
    try:
        query = _do_artifacts_query(context, session).filter_by(
            id=artifact_id)
        artifact = query.one()
    except orm.exc.NoResultFound:
        msg = _("Artifact with id=%s not found.") % artifact_id
        LOG.warn(msg)
        raise exception.ArtifactNotFound(msg)
    return artifact


def get(context, artifact_id, session):
    return _get(context, artifact_id, session).to_dict()


def get_all(context, session, filters=None, marker=None, limit=None,
            sort=None):
    """List all visible artifacts

    :param filters: list of (field_name, key_name, op, field_type, value)
        filter tuples
    :param marker: artifact id after which to start the page
    :param limit: maximum number of artifacts to return
    :param sort: list of (key, dir, type) tuples, where key is the attribute
        the results are sorted by, dir is the sort direction ('asc' or
        'desc'), and type is the attribute type ('bool', 'string', 'numeric'
        or 'int'), or None if the attribute is a base one
    """
    artifacts = _get_all(context, session, filters, marker, limit, sort)
    return [af.to_dict() for af in artifacts]
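
A hedged usage sketch of get_all(); the session comes from get_session() above and `context` is assumed to be a request context, with a hypothetical marker id:

# Illustrative only: fetch one page of active artifacts, sorted by name.
session = get_session()
page = get_all(context, session,
               filters=[('status', None, 'eq', None, 'active')],
               marker='11111111-2222-3333-4444-555555555555',  # hypothetical
               limit=20,
               sort=[('name', 'asc', None)])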

def _get_all(context, session, filters=None, marker=None, limit=None,
             sort=None):
    filters = filters or {}

    query = _do_artifacts_query(context, session)
    basic_conds, tag_conds, prop_conds = _do_query_filters(filters)

    if basic_conds:
        for basic_condition in basic_conds:
            query = query.filter(and_(*basic_condition))

    if tag_conds:
        for tag_condition in tag_conds:
            query = query.join(models.ArtifactTag, aliased=True).filter(
                and_(*tag_condition))

    if prop_conds:
        for prop_condition in prop_conds:
            query = query.join(models.ArtifactProperty, aliased=True).filter(
                and_(*prop_condition))

    marker_artifact = None
    if marker is not None:
        marker_artifact = get(context, marker, session)

    if sort is None:
        sort = DEFAULT_SORT_PARAMETERS
    else:
        for val in DEFAULT_SORT_PARAMETERS:
            if val not in sort:
                sort.append(val)

    query = _do_paginate_query(query=query, limit=limit,
                               marker=marker_artifact, sort=sort)

    return query.all()


def _do_paginate_query(query, marker=None, limit=None, sort=None):
    # Add sorting
    number_of_custom_props = 0
    for sort_key, sort_dir, sort_type in sort:
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[sort_dir]
        except KeyError:
            msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
            raise exception.BadRequest(msg)
        # Note(mfedosin): Workaround to deal with situation that sqlalchemy
        # cannot work with composite keys correctly
        if sort_key == 'version':
            query = (
                query.order_by(sort_dir_func(models.Artifact.version_prefix))
                     .order_by(sort_dir_func(models.Artifact.version_suffix))
                     .order_by(sort_dir_func(models.Artifact.version_meta)))
        elif sort_key in BASE_ARTIFACT_PROPERTIES:
            # sort by generic property
            query = query.order_by(sort_dir_func(getattr(models.Artifact,
                                                         sort_key)))
        else:
            # sort by custom property
            number_of_custom_props += 1
            if number_of_custom_props > 1:
                msg = _("For performance sake it's not allowed to sort by "
                        "more than one custom property with this db backend.")
                raise exception.BadRequest(msg)
            prop_table = aliased(models.ArtifactProperty)
            query = (
                query.join(prop_table).
                filter(prop_table.name == sort_key).
                order_by(sort_dir_func(getattr(prop_table,
                                               sort_type + '_value'))))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key, __, __ in sort:
            v = marker.get(sort_key, None)
            marker_values.append(v)

        # Build up an array of sort criteria
        criteria_list = []
        for i in range(len(sort)):
            crit_attrs = []
            for j in range(i):
                value = marker_values[j]
                if sort[j][0] in BASE_ARTIFACT_PROPERTIES:
                    if sort[j][0] == 'version':
                        value = semver_db.parse(value)
                    crit_attrs.append([getattr(models.Artifact, sort[j][0]) ==
                                       value])
                else:
                    conds = [models.ArtifactProperty.name == sort[j][0]]
                    conds.extend([getattr(models.ArtifactProperty,
                                          sort[j][2] + '_value') == value])
                    crit_attrs.append(conds)

            value = marker_values[i]
            sort_dir_func = operator.gt if sort[i][1] == 'asc' else operator.lt
            if sort[i][0] in BASE_ARTIFACT_PROPERTIES:
                if sort[i][0] == 'version':
                    value = semver_db.parse(value)
                crit_attrs.append([sort_dir_func(getattr(models.Artifact,
                                                         sort[i][0]), value)])
            else:
                query = query.join(models.ArtifactProperty, aliased=True)
                conds = [models.ArtifactProperty.name == sort[i][0]]
                conds.extend([sort_dir_func(getattr(models.ArtifactProperty,
                                                    sort[i][2] + '_value'),
                                            value)])
                crit_attrs.append(conds)

            criteria = [and_(*crit_attr) for crit_attr in crit_attrs]
            criteria_list.append(criteria)

        criteria_list = [and_(*cr) for cr in criteria_list]
        query = query.filter(or_(*criteria_list))

    if limit is not None:
        query = query.limit(limit)

    return query
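
The marker handling above implements keyset (seek-method) pagination: for each sort key it emits a predicate matching rows strictly past the marker on that key, given equality on all earlier keys. A sketch of the resulting filter for the default sort, with hypothetical marker values t0 and id0:

# Illustrative only: sort=[('created_at', 'desc', None), ('id', 'asc', None)]
# and a marker row with created_at=t0, id=id0 produce the filter:
#
#     (created_at < t0)
#     OR (created_at = t0 AND id > id0)
#
# i.e. criteria_list[i] is the conjunction "first i keys equal to the
# marker, key i strictly past it", and the branches are OR-ed together.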


def _do_artifacts_query(context, session):
    """Build the query to get all artifacts based on the context"""
    query = (
        session.query(models.Artifact).
        options(joinedload(models.Artifact.properties)).
        options(joinedload(models.Artifact.tags)).
        options(joinedload(models.Artifact.blobs)))

    # If admin, return everything.
    if context.is_admin:
        return query

    # If anonymous user, return only public artifacts.
    # However, if context.tenant has a value, return both
    # public and private artifacts of the owner.
    if context.tenant is not None:
        query = query.filter(
            or_(models.Artifact.owner == context.tenant,
                models.Artifact.visibility == 'public'))
    else:
        query = query.filter(
            models.Artifact.visibility == 'public')

    return query


op_mappings = {
    'eq': operator.eq,
    'gt': operator.gt,
    'gte': operator.ge,
    'lt': operator.lt,
    'lte': operator.le,
    'neq': operator.ne,
}


def _do_query_filters(filters):
    basic_conds = []
    tag_conds = []
    prop_conds = []
    for field_name, key_name, op, field_type, value in filters:
        if field_name == 'tags':
            tags = utils.split_filter_value_for_quotes(value)
            for tag in tags:
                tag_conds.append([models.ArtifactTag.value == tag])
        elif field_name == 'tags-any':
            tags = utils.split_filter_value_for_quotes(value)
            tag_conds.append([models.ArtifactTag.value.in_(tags)])
        elif field_name in BASE_ARTIFACT_PROPERTIES:
            if op != 'in':
                fn = op_mappings[op]
                if field_name == 'version':
                    value = semver_db.parse(value)
                basic_conds.append([fn(getattr(models.Artifact, field_name),
                                       value)])
            else:
                if field_name == 'version':
                    value = [semver_db.parse(val) for val in value]
                basic_conds.append(
                    [getattr(models.Artifact, field_name).in_(value)])
        else:
            conds = [models.ArtifactProperty.name == field_name]
            if key_name is not None:
                conds.extend([models.ArtifactProperty.key_name == key_name])
            if op != 'in':
                fn = op_mappings[op]
                conds.extend([fn(getattr(models.ArtifactProperty,
                                         field_type + '_value'), value)])
            else:
                conds.extend([getattr(models.ArtifactProperty,
                                      field_type + '_value').in_(value)])

            prop_conds.append(conds)

    return basic_conds, tag_conds, prop_conds
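
To make the three condition buckets concrete, a sketch of what a hypothetical filter list turns into (SQLAlchemy column expressions abbreviated):

# Illustrative only:
# input: [('name', None, 'eq', None, 'my-image'),
#         ('tags', None, None, None, 'gpu,ssd'),
#         ('arch', None, 'eq', 'string', 'x86_64')]
# output:
#   basic_conds -> [[Artifact.name == 'my-image']]
#   tag_conds   -> [[ArtifactTag.value == 'gpu'],
#                   [ArtifactTag.value == 'ssd']]
#   prop_conds  -> [[ArtifactProperty.name == 'arch',
#                    ArtifactProperty.string_value == 'x86_64']]
# Each inner list is AND-ed; every tag and property list gets its own
# aliased join in _get_all().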


def _do_tags(artifact, new_tags):
    tags_to_update = []
    # don't touch existing tags
    for tag in artifact.tags:
        if tag.value in new_tags:
            tags_to_update.append(tag)
            new_tags.remove(tag.value)
    # add new tags
    for tag in new_tags:
        db_tag = models.ArtifactTag()
        db_tag.value = tag
        tags_to_update.append(db_tag)
    return tags_to_update


def _get_prop_type(value):
    # bool is checked before int because bool is a subclass of int in
    # Python, so isinstance(True, int) is also True.
    if isinstance(value, bool):
        return 'bool_value'
    if isinstance(value, int):
        return 'int_value'
    if isinstance(value, six.string_types):
        return 'string_value'
    if isinstance(value, float):
        return 'numeric_value'


def _create_property(prop_name, prop_value, position=None, key_name=None):
    db_prop = models.ArtifactProperty()
    db_prop.name = prop_name
    setattr(db_prop, _get_prop_type(prop_value), prop_value)
    db_prop.position = position
    db_prop.key_name = key_name
    return db_prop


def _do_properties(artifact, new_properties):
    props_to_update = []
    # don't touch the existing properties
    for prop in artifact.properties:
        if prop.name not in new_properties:
            props_to_update.append(prop)

    for prop_name, prop_value in six.iteritems(new_properties):
        if prop_value is None:
            continue
        if isinstance(prop_value, list):
            for pos, list_prop in enumerate(prop_value):
                for prop in artifact.properties:
                    if prop.name == prop_name and pos == prop.position:
                        if getattr(prop, _get_prop_type(
                                list_prop)) != list_prop:
                            setattr(prop, _get_prop_type(list_prop),
                                    list_prop)
                        props_to_update.append(prop)
                        break
                else:
                    props_to_update.append(
                        _create_property(prop_name, list_prop, position=pos)
                    )
        elif isinstance(prop_value, dict):
            for dict_key, dict_val in six.iteritems(prop_value):
                for prop in artifact.properties:
                    if prop.name == prop_name and prop.key_name == dict_key:
                        if getattr(prop,
                                   _get_prop_type(dict_val)) != dict_val:
                            setattr(prop, _get_prop_type(dict_val), dict_val)
                        props_to_update.append(prop)
                        break
                else:
                    props_to_update.append(
                        _create_property(prop_name, dict_val,
                                         key_name=dict_key)
                    )
        elif prop_value is not None:
            for prop in artifact.properties:
                if prop.name == prop_name:
                    setattr(prop, _get_prop_type(prop_value), prop_value)
                    props_to_update.append(prop)
                    break
            else:
                props_to_update.append(_create_property(
                    prop_name, prop_value))

    return props_to_update


def _update_blob_values(blob, values):
    for elem in ('size', 'checksum', 'url', 'external', 'status',
                 'content_type'):
        setattr(blob, elem, values[elem])
    return blob


def _do_blobs(artifact, new_blobs):
    blobs_to_update = []
    # don't touch the existing blobs
    for blob in artifact.blobs:
        if blob.name not in new_blobs:
            blobs_to_update.append(blob)

    for blob_name, blob_value in six.iteritems(new_blobs):
        if blob_value is None:
            continue
        # a plain blob dict carries a string 'status'; otherwise the value
        # is a dict of blobs keyed by name. six.string_types also covers
        # unicode strings on Python 2, which a bare str check would miss.
        if isinstance(blob_value.get('status'), six.string_types):
            for blob in artifact.blobs:
                if blob.name == blob_name:
                    _update_blob_values(blob, blob_value)
                    blobs_to_update.append(blob)
                    break
            else:
                blob = models.ArtifactBlob()
                blob.name = blob_name
                _update_blob_values(blob, blob_value)
                blobs_to_update.append(blob)
        else:
            for dict_key, dict_val in six.iteritems(blob_value):
                for blob in artifact.blobs:
                    if blob.name == blob_name and blob.key_name == dict_key:
                        _update_blob_values(blob, dict_val)
                        blobs_to_update.append(blob)
                        break
                else:
                    blob = models.ArtifactBlob()
                    blob.name = blob_name
                    blob.key_name = dict_key
                    _update_blob_values(blob, dict_val)
                    blobs_to_update.append(blob)

    return blobs_to_update


def create_lock(context, lock_key, session):
    try:
        session.query(models.ArtifactLock).filter_by(id=lock_key).one()
    except orm.exc.NoResultFound:
        lock = models.ArtifactLock()
        lock.id = lock_key
        lock.save(session=session)
        return lock.id

    msg = _("Cannot lock an item with key %s. "
            "Lock already acquired by other request") % lock_key
    raise exception.Conflict(msg)


def delete_lock(context, lock_id, session):
    # Query.delete() does not raise NoResultFound; it returns the number
    # of deleted rows, so check the count to detect a missing lock.
    deleted = session.query(models.ArtifactLock).filter_by(
        id=lock_id).delete()
    if not deleted:
        msg = _("Cannot delete a lock with id %s.") % lock_id
        raise exception.NotFound(msg)
256
glare/db/sqlalchemy/models.py
Normal file
@ -0,0 +1,256 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.ext import declarative
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy.orm import backref
from sqlalchemy.orm import composite
from sqlalchemy.orm import relationship
from sqlalchemy import String
from sqlalchemy import Text

from glare.common import semver_db

BASE = declarative.declarative_base()


class ArtifactBase(models.ModelBase):
    """Base class for Artifact Models."""

    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    __table_initialized__ = False

    def save(self, session=None):
        from glare.db.sqlalchemy import api as db_api

        super(ArtifactBase, self).save(session or db_api.get_session())

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def to_dict(self):
        d = {}
        for c in self.__table__.columns:
            d[c.name] = self[c.name]
        return d


def _parse_property_value(prop):
    # return the value from whichever typed column is populated
    columns = [
        'int_value',
        'string_value',
        'bool_value',
        'numeric_value']

    for prop_type in columns:
        if getattr(prop, prop_type) is not None:
            return getattr(prop, prop_type)


def _parse_blob_value(blob):
    return {
        "id": blob.id,
        "url": blob.url,
        "status": blob.status,
        "external": blob.external,
        "checksum": blob.checksum,
        "size": blob.size,
        "content_type": blob.content_type
    }


class Artifact(BASE, ArtifactBase):
    __tablename__ = 'glare_artifacts'
    __table_args__ = (
        Index('ix_glare_artifact_name_and_version', 'name', 'version_prefix',
              'version_suffix'),
        Index('ix_glare_artifact_type', 'type_name'),
        Index('ix_glare_artifact_status', 'status'),
        Index('ix_glare_artifact_owner', 'owner'),
        Index('ix_glare_artifact_visibility', 'visibility'),
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'})
    __protected_attributes__ = set(["created_at", "updated_at"])

    id = Column(String(36), primary_key=True,
                default=lambda: str(uuid.uuid4()))
    name = Column(String(255), nullable=False)
    type_name = Column(String(255), nullable=False)
    version_prefix = Column(BigInteger().with_variant(Integer, "sqlite"),
                            nullable=False)
    version_suffix = Column(String(255))
    version_meta = Column(String(255))
    version = composite(semver_db.DBVersion, version_prefix,
                        version_suffix, version_meta,
                        comparator_factory=semver_db.VersionComparator)
    description = Column(Text())
    visibility = Column(String(32), nullable=False)
    status = Column(String(32), nullable=False)
    owner = Column(String(255))
    created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False)
    updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False, onupdate=lambda: timeutils.utcnow())
    activated_at = Column(DateTime)

    def to_dict(self):
        d = super(Artifact, self).to_dict()

        d.pop('version_prefix')
        d.pop('version_suffix')
        d.pop('version_meta')
        d['version'] = str(self.version)

        # parse tags
        tags = []
        for tag in self.tags:
            tags.append(tag.value)
        d['tags'] = tags

        # parse properties
        for prop in self.properties:
            prop_value = _parse_property_value(prop)

            if prop.position is not None:
                if prop.name not in d:
                    # create new list
                    d[prop.name] = []
                # insert value in position
                d[prop.name].insert(prop.position, prop_value)
            elif prop.key_name is not None:
                if prop.name not in d:
                    # create new dict
                    d[prop.name] = {}
                # insert value in the dict
                d[prop.name][prop.key_name] = prop_value
            else:
                # make scalar
                d[prop.name] = prop_value

        # parse blobs
        for blob in self.blobs:
            blob_value = _parse_blob_value(blob)
            if blob.key_name is not None:
                if blob.name not in d:
                    # create new dict
                    d[blob.name] = {}
                # insert value in the dict
                d[blob.name][blob.key_name] = blob_value
            else:
                # make scalar
                d[blob.name] = blob_value

        return d
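
A sketch of how to_dict() reassembles the typed property rows into the artifact dict, using hypothetical rows:

# Illustrative only:
# properties rows:
#   name='cpus',    int_value=4,        position=None, key_name=None
#   name='images',  string_value='a',   position=0,    key_name=None
#   name='images',  string_value='b',   position=1,    key_name=None
#   name='meta',    string_value='x86', position=None, key_name='arch'
# resulting dict fragment:
#   {'cpus': 4, 'images': ['a', 'b'], 'meta': {'arch': 'x86'}}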


class ArtifactTag(BASE, ArtifactBase):
    __tablename__ = 'glare_artifact_tags'
    __table_args__ = (Index('ix_glare_artifact_tags_artifact_id_tag_value',
                            'artifact_id', 'value'),
                      Index('ix_glare_artifact_tags_artifact_id',
                            'artifact_id'),
                      {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)

    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_id = Column(String(36), ForeignKey('glare_artifacts.id'),
                         nullable=False)
    artifact = relationship(Artifact,
                            backref=backref('tags',
                                            cascade="all, delete-orphan"))
    value = Column(String(255), nullable=False)


class ArtifactProperty(BASE, ArtifactBase):
    __tablename__ = 'glare_artifact_properties'
    __table_args__ = (
        Index('ix_glare_artifact_properties_artifact_id', 'artifact_id'),
        Index('ix_glare_artifact_properties_name', 'name'),
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)
    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_id = Column(String(36), ForeignKey('glare_artifacts.id'),
                         nullable=False)
    artifact = relationship(Artifact,
                            backref=backref('properties',
                                            cascade="all, delete-orphan"))
    name = Column(String(255), nullable=False)
    string_value = Column(String(20000))
    int_value = Column(Integer)
    numeric_value = Column(Numeric)
    bool_value = Column(Boolean)
    position = Column(Integer)
    key_name = Column(String(255))


class ArtifactBlob(BASE, ArtifactBase):
    __tablename__ = 'glare_artifact_blobs'
    __table_args__ = (
        Index('ix_glare_artifact_blobs_artifact_id', 'artifact_id'),
        Index('ix_glare_artifact_blobs_name', 'name'),
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)
    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_id = Column(String(36), ForeignKey('glare_artifacts.id'),
                         nullable=False)
    name = Column(String(255), nullable=False)
    size = Column(BigInteger().with_variant(Integer, "sqlite"))
    checksum = Column(String(32))
    external = Column(Boolean)
    url = Column(Text)
    status = Column(String(32), nullable=False)
    key_name = Column(String(255))
    content_type = Column(String(255))
    artifact = relationship(Artifact,
                            backref=backref('blobs',
                                            cascade="all, delete-orphan"))


class ArtifactLock(BASE, ArtifactBase):
    __tablename__ = 'glare_artifact_locks'
    __table_args__ = (
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)
    id = Column(String(255), primary_key=True, nullable=False)


def register_models(engine):
    """Create database tables for all models with the given engine."""
    models = (Artifact, ArtifactTag, ArtifactProperty, ArtifactBlob,
              ArtifactLock)
    for model in models:
        model.metadata.create_all(engine)


def unregister_models(engine):
    """Drop database tables for all models with the given engine."""
    models = (ArtifactLock, ArtifactBlob, ArtifactProperty, ArtifactTag,
              Artifact)
    for model in models:
        model.metadata.drop_all(engine)
326
glare/engine.py
Normal file
@ -0,0 +1,326 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import jsonpatch
from oslo_log import log as logging

from glare.common import exception
from glare.common import policy
from glare.db import artifact_api
from glare.i18n import _
from glare import locking
from glare.notification import Notifier
from glare.objects import registry as glare_registry

LOG = logging.getLogger(__name__)


class Engine(object):
    """Engine is responsible for executing the different helper operations
    involved in processing incoming requests from the Glare API. For Glance
    developers, it is similar to the Domain Model layers unified into a
    single layer.
    Engine receives incoming data and does the following:
    - checks basic policy permissions;
    - requests the artifact definition from the registry;
    - checks access permissions (ro, rw);
    - locks the artifact for update if needed;
    - passes data to the base artifact, which executes all business logic;
    - notifies other users about the finished operation.
    Engine must not contain any business logic or validation related to
    Artifacts, and must not know any internal details of Artifacts, because
    it only controls access to Artifacts in general.
    """

    registry = glare_registry.ArtifactRegistry
    registry.register_all_artifacts()
    lock_engine = locking.LockEngine(artifact_api.ArtifactLockApi())

    @classmethod
    def _get_schemas(cls, reg):
        # generate the schemas only once, then serve cached copies
        if not getattr(cls, 'schemas', None):
            schemas = {}
            for name, type_list in reg.obj_classes().items():
                type_name = type_list[0].get_type_name()
                schemas[type_name] = \
                    reg.get_artifact_type(type_name).gen_schemas()
            setattr(cls, 'schemas', schemas)
        return copy.deepcopy(cls.schemas)

    @classmethod
    def _get_artifact(cls, context, type_name, artifact_id,
                      read_only=False):
        """Return artifact for users

        Return artifact for reading/modification by users. Check
        access permissions and policies for the artifact.
        """

        def _check_read_write_access(ctx, af):
            """Check if the artifact can be modified by the user

            :param ctx: user context
            :param af: artifact definition
            :raise: Forbidden if access is not allowed
            """
            if not ctx.is_admin and ctx.tenant != af.owner or ctx.read_only:
                raise exception.Forbidden()

        def _check_read_only_access(ctx, af):
            """Check if the user has read-only access to the artifact

            :param ctx: user context
            :param af: artifact definition
            :raise: Forbidden if access is not allowed
            """
            private = af.visibility != 'public'
            if (private and
                    not ctx.is_admin and ctx.tenant != af.owner):
                # TODO(kairat): check artifact sharing here
                raise exception.Forbidden()

        artifact_type = Engine.registry.get_artifact_type(type_name)
        # only the artifact itself is exposed to users of this class
        artifact = artifact_type.get(context, artifact_id)
        if read_only:
            _check_read_only_access(context, artifact)
            LOG.debug("Artifact %s acquired for read-only access",
                      artifact_id)
        else:
            _check_read_write_access(context, artifact)
            LOG.debug("Artifact %s acquired for read-write access",
                      artifact_id)
        return artifact

    @classmethod
    def list_type_schemas(cls, context):
        policy.authorize("artifact:type_list", {}, context)
        return cls._get_schemas(cls.registry)

    @classmethod
    def show_type_schema(cls, context, type_name):
        policy.authorize("artifact:type_get", {}, context)
        schemas = cls._get_schemas(cls.registry)
        if type_name not in schemas:
            msg = _("Artifact type %s does not exist") % type_name
            raise exception.NotFound(message=msg)
        return schemas[type_name]

    @classmethod
    def create(cls, context, type_name, field_values):
        """Create new artifact in Glare"""
        action_name = "artifact:create"
        policy.authorize(action_name, field_values, context)
        artifact_type = cls.registry.get_artifact_type(type_name)
        # acquire version lock and execute artifact create
        af = artifact_type.create(context, field_values)
        # notify about new artifact
        Notifier.notify(context, action_name, af)
        # return artifact to the user
        return af.to_dict()

    @classmethod
    @lock_engine.locked(['type_name', 'artifact_id'])
    def update(cls, context, type_name, artifact_id, patch):
        """Update artifact with json patch.

        Apply the patch to the artifact and validate the artifact before
        updating it in the database. If the request changes visibility or
        a custom location, call the specific method for that.

        :param context: user context
        :param type_name: name of artifact type
        :param artifact_id: id of the artifact to be updated
        :param patch: json patch
        :return: updated artifact
        """

        def get_updates(af_dict, patch_with_upd):
            """Get updated values for artifact and json patch

            :param af_dict: current artifact definition as dict
            :param patch_with_upd: json-patch
            :return: dict of updated attributes and their values
            """

            class DictDiffer(object):
                """
                Calculate the difference between two dictionaries as:
                (1) items added
                (2) items removed
                (3) keys same in both but changed values
                (4) keys same in both and unchanged values
                """
                def __init__(self, current_dict, past_dict):
                    self.current_dict, self.past_dict = (current_dict,
                                                         past_dict)
                    self.current_keys, self.past_keys = [
                        set(d.keys()) for d in (current_dict, past_dict)
                    ]
                    self.intersect = self.current_keys.intersection(
                        self.past_keys)

                def added(self):
                    return self.current_keys - self.intersect

                def removed(self):
                    return self.past_keys - self.intersect

                def changed(self):
                    return set(o for o in self.intersect
                               if self.past_dict[o] != self.current_dict[o])

                def unchanged(self):
                    return set(o for o in self.intersect
                               if self.past_dict[o] == self.current_dict[o])

            try:
                af_dict_patched = patch_with_upd.apply(af_dict)
                diff = DictDiffer(af_dict_patched, af_dict)

                # we mustn't add or remove attributes from artifact
                if diff.added() or diff.removed():
                    msg = _(
                        "Forbidden to add or remove attributes from "
                        "artifact. Added attributes %(added)s. "
                        "Removed attributes %(removed)s") % {
                        'added': diff.added(), 'removed': diff.removed()
                    }
                    raise exception.BadRequest(message=msg)

                return {key: af_dict_patched[key] for key in diff.changed()}

            except (jsonpatch.JsonPatchException,
                    jsonpatch.JsonPointerException,
                    KeyError) as e:
                # str(e) rather than e.message: BaseException.message is
                # gone in Python 3
                raise exception.BadRequest(message=str(e))
            except TypeError as e:
                msg = _("Incorrect type of the element. Reason: %s") % str(e)
                raise exception.BadRequest(msg)

        artifact = cls._get_artifact(context, type_name, artifact_id)
        af_dict = artifact.to_dict()
        updates = get_updates(af_dict, patch)
        LOG.debug("Update diff successfully calculated for artifact %(af)s "
                  "%(diff)s", {'af': artifact_id, 'diff': updates})

        if not updates:
            return af_dict
        else:
            action = artifact.get_action_for_updates(context, artifact,
                                                     updates, cls.registry)
            action_name = "artifact:%s" % action.__name__
            policy.authorize(action_name, af_dict, context)
            modified_af = action(context, artifact, updates)
            Notifier.notify(context, action_name, modified_af)
            return modified_af.to_dict()
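
A sketch of how the local DictDiffer classifies keys, with hypothetical dict values (it is defined inside update(), so this illustrates behaviour rather than importable code):

# Illustrative only:
past = {'name': 'art', 'status': 'drafted', 'owner': 'tenant1'}
current = {'name': 'art', 'status': 'active', 'description': 'v1'}
# added()     -> {'description'}  (key only in current)
# removed()   -> {'owner'}        (key only in past)
# changed()   -> {'status'}       (key in both, value differs)
# unchanged() -> {'name'}         (key in both, value equal)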

    @classmethod
    def get(cls, context, type_name, artifact_id):
        """Return artifact representation from artifact repo."""
        policy.authorize("artifact:get", {}, context)
        af = cls._get_artifact(context, type_name, artifact_id,
                               read_only=True)
        return af.to_dict()

    @classmethod
    def list(cls, context, type_name, filters, marker=None, limit=None,
             sort=None):
        """Return list of artifacts requested by user

        :param filters: list of requested filters
        :return: list of artifacts
        """
        policy.authorize("artifact:list", {}, context)
        artifact_type = cls.registry.get_artifact_type(type_name)
        # return list to the user
        af_list = [af.to_dict()
                   for af in artifact_type.list(context, filters, marker,
                                                limit, sort)]
        return af_list

    @classmethod
    def delete(cls, context, type_name, artifact_id):
        """Delete artifact from glare"""
        af = cls._get_artifact(context, type_name, artifact_id)
        policy.authorize("artifact:delete", af.to_dict(), context)
        af.delete(context, af)
        Notifier.notify(context, "artifact.delete", af)

    @classmethod
    @lock_engine.locked(['type_name', 'artifact_id'])
    def add_blob_location(cls, context, type_name,
                          artifact_id, field_name, location):
        af = cls._get_artifact(context, type_name, artifact_id)
        action_name = 'artifact:set_location'
        policy.authorize(action_name, af.to_dict(), context)
        modified_af = af.add_blob_location(context, af, field_name, location)
        Notifier.notify(context, action_name, modified_af)
        return modified_af.to_dict()

    @classmethod
    @lock_engine.locked(['type_name', 'artifact_id'])
    def add_blob_dict_location(cls, context, type_name, artifact_id,
                               field_name, blob_key, location):
        af = cls._get_artifact(context, type_name, artifact_id)
        action_name = 'artifact:set_location'
        policy.authorize(action_name, af.to_dict(), context)
        modified_af = af.add_blob_dict_location(context, af, field_name,
                                                blob_key, location)
        Notifier.notify(context, action_name, modified_af)
        return modified_af.to_dict()

    @classmethod
    @lock_engine.locked(['type_name', 'artifact_id'])
    def upload_blob(cls, context, type_name, artifact_id, field_name, fd,
                    content_type):
        """Upload Artifact blob"""
        af = cls._get_artifact(context, type_name, artifact_id)
        action_name = "artifact:upload"
        policy.authorize(action_name, af.to_dict(), context)
        modified_af = af.upload_blob(context, af, field_name, fd,
                                     content_type)
        Notifier.notify(context, action_name, modified_af)
        return modified_af.to_dict()

    @classmethod
    @lock_engine.locked(['type_name', 'artifact_id'])
    def upload_blob_dict(cls, context, type_name, artifact_id, field_name,
                         blob_key, fd, content_type):
        """Upload Artifact blob to dict"""
        af = cls._get_artifact(context, type_name, artifact_id)
        action_name = "artifact:upload"
        policy.authorize(action_name, af.to_dict(), context)
        modified_af = af.upload_blob_dict(context, af, field_name, blob_key,
                                          fd, content_type)
        Notifier.notify(context, action_name, modified_af)
        return modified_af.to_dict()

    @classmethod
    def download_blob(cls, context, type_name, artifact_id, field_name):
        """Download blob from artifact"""
        af = cls._get_artifact(context, type_name, artifact_id,
                               read_only=True)
        policy.authorize("artifact:download", af.to_dict(), context)
        return af.download_blob(context, af, field_name)

    @classmethod
    def download_blob_dict(cls, context, type_name, artifact_id, field_name,
                           blob_key):
        """Download blob from artifact blob dict"""
        af = cls._get_artifact(context, type_name, artifact_id,
                               read_only=True)
        policy.authorize("artifact:download", af.to_dict(), context)
        return af.download_blob_dict(context, af, field_name, blob_key)
0
glare/hacking/__init__.py
Normal file
193
glare/hacking/checks.py
Normal file
@ -0,0 +1,193 @@
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import pep8

"""
Guidelines for writing new hacking checks

 - Use only for Glare-specific tests. OpenStack general tests
   should be submitted to the common 'hacking' module.
 - Pick numbers in the range G3xx. Find the current test with
   the highest allocated number and then pick the next value.
   If nova has an N3xx code for that test, use the same number.
 - Keep the test method code in the source file ordered based
   on the G3xx value.
 - List the new rule in the top level HACKING.rst file.
 - Add test cases for each new rule to glare/tests/test_hacking.py.
"""


asse_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    r"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
    r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
    r"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_end_with_none_re = re.compile(
    r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)")
asse_equal_start_with_none_re = re.compile(
    r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)")
unicode_func_re = re.compile(r"(\s|\W|^)unicode\(")
log_translation = re.compile(
    r"(.)*LOG\.(audit)\(\s*('|\")")
log_translation_info = re.compile(
    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
    r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_error = re.compile(
    r"(.)*LOG\.(error)\(\s*(_\(|'|\")")
log_translation_critical = re.compile(
    r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_warning = re.compile(
    r"(.)*LOG\.(warning)\(\s*(_\(|'|\")")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")


def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences

    G316
    """
    if asse_trueinst_re.match(logical_line):
        yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed")


def assert_equal_type(logical_line):
    """Check for assertEqual(type(A), B) sentences

    G317
    """
    if asse_equal_type_re.match(logical_line):
        yield (0, "G317: assertEqual(type(A), B) sentences not allowed")


def assert_equal_none(logical_line):
    """Check for assertEqual(A, None) or assertEqual(None, A) sentences

    G318
    """
    res = (asse_equal_start_with_none_re.match(logical_line) or
           asse_equal_end_with_none_re.match(logical_line))
    if res:
        yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) "
                  "sentences not allowed")


def no_translate_debug_logs(logical_line, filename):
    dirs = [
        "glare/api",
        "glare/cmd",
        "glare/common",
        "glare/db",
        "glare/tests",
    ]

    if any(name in filename for name in dirs):
        if logical_line.startswith("LOG.debug(_("):
            yield(0, "G319: Don't translate debug level logs")


def no_direct_use_of_unicode_function(logical_line):
    """Check for use of unicode() builtin

    G320
    """
    if unicode_func_re.match(logical_line):
        yield(0, "G320: Use six.text_type() instead of unicode()")


def validate_log_translations(logical_line, physical_line, filename):
    # Translations are not required in the test directory
    if pep8.noqa(physical_line):
        return
    msg = "G322: LOG.info messages require translations `_LI()`!"
    if log_translation_info.match(logical_line):
        yield (0, msg)
    msg = "G323: LOG.exception messages require translations `_LE()`!"
    if log_translation_exception.match(logical_line):
        yield (0, msg)
    msg = "G324: LOG.error messages require translations `_LE()`!"
    if log_translation_error.match(logical_line):
        yield (0, msg)
    msg = "G325: LOG.critical messages require translations `_LC()`!"
    if log_translation_critical.match(logical_line):
        yield (0, msg)
    msg = "G326: LOG.warning messages require translations `_LW()`!"
    if log_translation_warning.match(logical_line):
        yield (0, msg)
    msg = "G321: Log messages require translations!"
    if log_translation.match(logical_line):
        yield (0, msg)


def check_no_contextlib_nested(logical_line):
    msg = ("G327: contextlib.nested is deprecated since Python 2.7. See "
           "https://docs.python.org/2/library/contextlib.html#contextlib."
           "nested for more information.")
    if ("with contextlib.nested(" in logical_line or
            "with nested(" in logical_line):
        yield(0, msg)


def dict_constructor_with_list_copy(logical_line):
    msg = ("G328: Must use a dict comprehension instead of a dict "
           "constructor with a sequence of key-value pairs.")
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, msg)


def check_python3_xrange(logical_line):
    if re.search(r"\bxrange\s*\(", logical_line):
        yield(0, "G329: Do not use xrange. Use range, or six.moves.range "
                 "for large loops.")


def check_python3_no_iteritems(logical_line):
    msg = ("G330: Use six.iteritems() or dict.items() instead of "
           "dict.iteritems().")
    if re.search(r".*\.iteritems\(\)", logical_line):
        yield(0, msg)


def check_python3_no_iterkeys(logical_line):
    msg = ("G331: Use six.iterkeys() or dict.keys() instead of "
           "dict.iterkeys().")
    if re.search(r".*\.iterkeys\(\)", logical_line):
        yield(0, msg)


def check_python3_no_itervalues(logical_line):
    msg = ("G332: Use six.itervalues() or dict.values() instead of "
           "dict.itervalues().")
    if re.search(r".*\.itervalues\(\)", logical_line):
        yield(0, msg)


def factory(register):
    register(assert_true_instance)
    register(assert_equal_type)
    register(assert_equal_none)
    register(no_translate_debug_logs)
    register(no_direct_use_of_unicode_function)
    register(validate_log_translations)
    register(check_no_contextlib_nested)
    register(dict_constructor_with_list_copy)
    register(check_python3_xrange)
    register(check_python3_no_iteritems)
    register(check_python3_no_iterkeys)
    register(check_python3_no_itervalues)
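
For illustration, lines these checks would flag versus their preferred forms (the examples are hypothetical test code, not part of this module):

# Illustrative only:
self.assertTrue(isinstance(result, list))  # flagged by G316
self.assertIsInstance(result, list)        # preferred form
self.assertEqual(type(result), list)       # flagged by G317
self.assertEqual(None, result)             # flagged by G318
self.assertIsNone(result)                  # preferred form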
31
glare/i18n.py
Normal file
@ -0,0 +1,31 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_i18n import *  # noqa

_translators = TranslatorFactory(domain='glare')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
136
glare/locking.py
Normal file
@ -0,0 +1,136 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect
import six

from oslo_log import log as logging

from glare.i18n import _LI

LOG = logging.getLogger(__name__)


class LockApiBase(object):
    """Base lock API class, responsible for acquiring and releasing locks."""

    def create_lock(self, context, lock_key):
        """Acquire lock for the current user

        :param context: user context
        :param lock_key: unique lock identifier that defines lock scope
        :return: lock internal identifier
        """
        raise NotImplementedError()

    def delete_lock(self, context, lock_id):
        """Delete an acquired user lock

        :param context: user context
        :param lock_id: lock internal identifier
        """
        raise NotImplementedError()


class Lock(object):
    """Object that stores lock context for users. This class is internal
    and used only by the Lock Engine, so users shouldn't use it directly.
    """

    def __init__(self, context, lock_id, lock_key, release_method):
        """Initialize lock context"""
        self.context = context
        self.lock_id = lock_id
        self.lock_key = lock_key
        self.release = release_method

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # TODO(kairat) catch all exceptions here
        self.release(self)


class LockEngine(object):
    """Glare lock engine.

    Defines how artifact updates are synchronized with each other. While
    one user holds a lock on a piece of data, other users' requests for
    the same lock fail with a Conflict error.
    The engine also encapsulates the locking logic in one place, so tooz
    support can potentially be added to Glare later. Locks in Galera
    (especially with MySQL) are currently problematic, and ZooKeeper
    requires additional operational work, so the implementation needs to
    support production-ready DB locks.
    """

    MAX_LOCK_LENGTH = 255

    def __init__(self, lock_api):
        """Initialize the lock engine with some lock api

        :param lock_api: api that allows to create/delete locks. Currently
            this is the db_api, but it may be replaced with a DLM in the
            near future.
        """
        self.lock_api = lock_api

    def acquire(self, context, lock_key):
        """Acquire a lock to update the whole artifact

        If some other lock already exists for the same artifact, raise a
        Conflict error.

        :param context: user context
        :param lock_key: lock key
        :return: lock definition
        """
        if lock_key is not None and len(lock_key) < self.MAX_LOCK_LENGTH:
            lock_id = self.lock_api.create_lock(context, lock_key)
            LOG.info(_LI("Lock %(lock_id)s acquired for lock_key "
                         "%(lock_key)s"),
                     {'lock_id': lock_id, 'lock_key': lock_key})
        else:
            lock_id = None
            LOG.info(_LI("No lock for lock_key %s"), lock_key)

        return Lock(context, lock_id, lock_key, self.release)

    def release(self, lock):
        if lock.lock_id is not None:
            self.lock_api.delete_lock(lock.context, lock.lock_id)
            LOG.info(_LI("Lock %(lock_id)s released for lock_key %(key)s"),
                     {'lock_id': lock.lock_id, 'key': lock.lock_key})

    def locked(self, lock_name_parameters):
        """Synchronization decorator.

        :param list lock_name_parameters: List of parameters that will be
            used as part of the lock name
        :returns: function that locks artifact by specified parameters
        """
        def wrap(f):
            @six.wraps(f)
            def wrapped(*a, **k):
                call_args = inspect.getcallargs(f, *a, **k)
                lock_key = ''
                for param in lock_name_parameters:
                    lock_key += str(call_args[param]) + ':'
                context = call_args.get('context')
                with self.acquire(context, lock_key):
                    res = f(*a, **k)
                return res
            return wrapped
        return wrap
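
A usage sketch of the locked() decorator; the real callers are the Engine methods in glare/engine.py, while the class and helpers below are hypothetical:

# Illustrative only: the lock key becomes "<type_name>:<artifact_id>:".
class MyEngine(object):
    lock_engine = LockEngine(SomeLockApi())  # SomeLockApi is hypothetical

    @lock_engine.locked(['type_name', 'artifact_id'])
    def update(self, context, type_name, artifact_id, patch):
        # runs while the "<type_name>:<artifact_id>:" lock is held;
        # do_update is a hypothetical helper
        return do_update(context, patch)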
85
glare/notification.py
Normal file
@ -0,0 +1,85 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging import serializer

_ALIASES = {
    'glare.openstack.common.rpc.impl_kombu': 'rabbit',
    'glare.openstack.common.rpc.impl_qpid': 'qpid',
    'glare.openstack.common.rpc.impl_zmq': 'zmq',
}

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def get_transport():
    return oslo_messaging.get_notification_transport(CONF, aliases=_ALIASES)


class RequestSerializer(serializer.Serializer):

    def serialize_entity(self, context, entity):
        return entity.to_notification()

    def deserialize_entity(self, context, entity):
        return entity

    def serialize_context(self, context):
        return context.to_dict()

    def deserialize_context(self, context):
        return context.from_dict(context)
class Notifier(object):
|
||||
"""Simple interface to receive Glare notifier
|
||||
|
||||
"""
|
||||
|
||||
SERVICE_NAME = 'artifact'
|
||||
GLARE_NOTIFIER = None
|
||||
|
||||
@classmethod
|
||||
def _get_notifier(cls):
|
||||
if cls.GLARE_NOTIFIER is None:
|
||||
notifier_opts = [
|
||||
cfg.StrOpt('glare_publisher_id', default="artifact",
|
||||
help='Default publisher_id for outgoing '
|
||||
'Glare notifications.')]
|
||||
CONF.register_opts(notifier_opts)
|
||||
cls.GLARE_NOTIFIER = oslo_messaging.Notifier(
|
||||
get_transport(),
|
||||
publisher_id=CONF.glare_publisher_id,
|
||||
serializer=RequestSerializer())
|
||||
return cls.GLARE_NOTIFIER
|
||||
|
||||
@classmethod
|
||||
def notify(cls, context, event_type, body, level='INFO'):
|
||||
"""Notify Glare listeners with some useful info
|
||||
|
||||
:param context: User request context
|
||||
:param event_type: type of event
|
||||
:param body: notification payload
|
||||
:param level: notification level ("INFO", "WARN", "ERROR", etc)
|
||||
"""
|
||||
af_notifier = cls._get_notifier()
|
||||
method = getattr(af_notifier, level.lower())
|
||||
method(context, "%s.%s" % (cls.SERVICE_NAME, event_type), body)
|
||||
LOG.debug('Notification event %(event)s send successfully for '
|
||||
'request %(request)s', {'event': event_type,
|
||||
'request': context.request_id})
|
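For illustration, a sketch of how this notifier might be invoked from engine code. The event names and the af object are assumptions; af is expected to implement to_notification(), as required by RequestSerializer above:

    # emits event type "artifact.activate" at INFO level
    Notifier.notify(context, 'activate', af)
    # emits event type "artifact.delete" at WARN level
    Notifier.notify(context, 'delete', af, level='WARN')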
0
glare/objects/__init__.py
Normal file
189
glare/objects/attribute.py
Normal file
@ -0,0 +1,189 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six

from oslo_versionedobjects import fields

from glare.common import exception as exc
from glare.objects import fields as glare_fields
from glare.objects import validators as val_lib

FILTERS = (
    FILTER_EQ, FILTER_NEQ, FILTER_IN, FILTER_GT, FILTER_GTE, FILTER_LT,
    FILTER_LTE) = ('eq', 'neq', 'in', 'gt', 'gte', 'lt', 'lte')


class Attribute(object):
    def __init__(self, field_class, mutable=False, required_on_activate=True,
                 system=False, validators=None, nullable=True, default=None,
                 sortable=False, filter_ops=None):
        """Init and validate attribute"""
        if not issubclass(field_class, fields.AutoTypedField):
            raise exc.IncorrectArtifactType(
                "Field class %s must be a subclass of AutoTypedField." %
                field_class)

        self.validators = validators or []
        for v in self.validators:
            v.check_type_allowed(field_class)
            if isinstance(v, val_lib.MaxStrLen):
                if v.size > 255 and sortable:
                    raise exc.IncorrectArtifactType(
                        "It is forbidden to make attribute %(attr)s "
                        "sortable if its string length can exceed 255 "
                        "characters. Maximal allowed length now: %(max)d" %
                        {"attr": str(field_class), 'max': v.size})

        self.field_class = field_class
        self.nullable = nullable
        self.default = default
        self.vo_attrs = ['nullable', 'default']

        self.mutable = mutable
        self.required_on_activate = required_on_activate
        self.system = system
        self.sortable = sortable
        self.filter_ops = filter_ops or [FILTER_EQ, FILTER_NEQ, FILTER_IN]
        self.field_attrs = ['mutable', 'required_on_activate', 'system',
                            'sortable', 'filter_ops']

    def get_default_validators(self):
        default = []
        if issubclass(self.field_class, fields.StringField):
            # if the field is a string, limit its length to 255 by default
            if not any(isinstance(v, val_lib.MaxStrLen)
                       for v in self.validators):
                default.append(val_lib.MaxStrLen(255))
        return default

    def get_field(self):
        # init the field
        vo_attrs = {attr_name: getattr(self, attr_name)
                    for attr_name in self.vo_attrs}
        field = self.field_class(**vo_attrs)
        # set up custom field attrs
        field_attrs = {attr_name: getattr(self, attr_name)
                       for attr_name in self.field_attrs}
        for prop, value in six.iteritems(field_attrs):
            setattr(field, prop, value)

        # apply custom validators
        vals = self.validators + self.get_default_validators()

        def wrapper(coerce_func):
            def coerce_wrapper(obj, attr, value):
                try:
                    val = coerce_func(obj, attr, value)
                    if val is not None:
                        for check_func in vals:
                            check_func(val)
                    return val
                except (KeyError, ValueError) as e:
                    msg = "Type: %s. Field: %s. Exception: %s" % (
                        obj.get_type_name(), attr, str(e))
                    raise exc.BadRequest(message=msg)
            return coerce_wrapper

        field.coerce = wrapper(field.coerce)
        return field

    @classmethod
    def init(cls, *args, **kwargs):
        """Factory to build attributes"""
        return cls(*args, **kwargs).get_field()


class CompoundAttribute(Attribute):
    def __init__(self, field_class, element_type, element_validators=None,
                 max_size=255, **kwargs):
        super(CompoundAttribute, self).__init__(field_class, **kwargs)
        if self.sortable:
            raise exc.IncorrectArtifactType("'sortable' must be False for "
                                            "compound type.")

        if element_type is None:
            raise exc.IncorrectArtifactType("'element_type' must be set for "
                                            "compound type.")
        self.element_type = element_type
        self.vo_attrs.append('element_type')
        self.field_attrs.append('element_type')

        self.validators.append(val_lib.MaxSize(max_size))
        self.element_validators = element_validators or []

    def get_element_validators(self):
        default_vals = []
        if issubclass(self.element_type, fields.String):
            # if the element type is a string, limit its length to 255
            if not any(isinstance(v, val_lib.MaxStrLen)
                       for v in self.element_validators):
                default_vals.append(val_lib.MaxStrLen(255))
        vals = default_vals + self.element_validators
        for v in vals:
            v.check_type_allowed(self.element_type)
        return default_vals + self.element_validators


class ListAttribute(CompoundAttribute):
    def __init__(self, element_type, **kwargs):
        if 'default' not in kwargs:
            kwargs['default'] = []
        if element_type is glare_fields.BlobField:
            raise exc.IncorrectArtifactType("List of blobs is not allowed "
                                            "to be specified in artifact.")
        super(ListAttribute, self).__init__(glare_fields.List, element_type,
                                            **kwargs)

    def get_default_validators(self):
        default_vals = []
        elem_val = val_lib.ListElementValidator(
            super(ListAttribute, self).get_element_validators())
        default_vals.append(elem_val)
        return default_vals


class DictAttribute(CompoundAttribute):
    def __init__(self, element_type, **kwargs):
        if 'default' not in kwargs:
            kwargs['default'] = {}
        super(DictAttribute, self).__init__(glare_fields.Dict, element_type,
                                            **kwargs)

    def get_default_validators(self):
        default_vals = []
        elem_val = val_lib.DictElementValidator(
            super(DictAttribute, self).get_element_validators())
        default_vals.append(elem_val)
        default_vals.append(val_lib.MaxDictKeyLen(255))
        return default_vals


class BlobAttribute(Attribute):
    DEFAULT_MAX_BLOB_SIZE = 10485760

    def __init__(self, max_blob_size=DEFAULT_MAX_BLOB_SIZE, **kwargs):
        super(BlobAttribute, self).__init__(
            field_class=glare_fields.BlobField, **kwargs)
        self.max_blob_size = int(max_blob_size)
        self.field_attrs.append('max_blob_size')


class BlobDictAttribute(DictAttribute):
    def __init__(self, max_blob_size=BlobAttribute.DEFAULT_MAX_BLOB_SIZE,
                 **kwargs):
        super(BlobDictAttribute, self).__init__(
            element_type=glare_fields.BlobFieldType, **kwargs)
        self.max_blob_size = int(max_blob_size)
        self.field_attrs.append('max_blob_size')
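To make the attribute machinery above concrete, a hypothetical artifact type could declare its field mapping roughly as follows. The type and field names are illustrative assumptions; fields and val_lib refer to the imports at the top of this module:

    # hypothetical field mapping for an artifact type built on these helpers
    fields = {
        'description': Attribute.init(fields.StringField, mutable=True,
                                      required_on_activate=False,
                                      validators=[val_lib.MaxStrLen(4096)]),
        'tags': ListAttribute.init(fields.String, mutable=True,
                                   required_on_activate=False),
        'image': BlobAttribute.init(required_on_activate=True),
    }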
1138
glare/objects/base.py
Normal file
File diff suppressed because it is too large
167
glare/objects/fields.py
Normal file
@ -0,0 +1,167 @@
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import jsonschema
from jsonschema import exceptions as json_exceptions
from oslo_versionedobjects import fields
import semantic_version
import six
import six.moves.urllib.parse as urlparse

from glare.i18n import _


class ArtifactStatusField(fields.StateMachine):
    ARTIFACT_STATUS = (QUEUED, ACTIVE, DEACTIVATED, DELETED) = (
        'queued', 'active', 'deactivated', 'deleted')

    ALLOWED_TRANSITIONS = {
        QUEUED: {QUEUED, ACTIVE, DELETED},
        ACTIVE: {ACTIVE, DEACTIVATED, DELETED},
        DEACTIVATED: {DEACTIVATED, ACTIVE, DELETED},
        DELETED: {DELETED}
    }

    def __init__(self, **kwargs):
        super(ArtifactStatusField, self).__init__(self.ARTIFACT_STATUS,
                                                  **kwargs)


class Version(fields.FieldType):

    @staticmethod
    def coerce(obj, attr, value):
        return str(semantic_version.Version.coerce(str(value)))


class VersionField(fields.AutoTypedField):
    AUTO_TYPE = Version()


class BlobFieldType(fields.FieldType):
    """Blob field contains a reference to the blob location."""
    BLOB_STATUS = (SAVING, ACTIVE, PENDING_DELETE) = (
        'saving', 'active', 'pending_delete')

    BLOB_SCHEMA = {
        'type': 'object',
        'properties': {
            'url': {'type': ['string', 'null'], 'format': 'uri',
                    'max_length': 255},
            'size': {'type': ['number', 'null']},
            'checksum': {'type': ['string', 'null']},
            'external': {'type': 'boolean'},
            'id': {'type': 'string'},
            'status': {'type': 'string',
                       'enum': list(BLOB_STATUS)},
            'content_type': {'type': 'string'},
        },
        'required': ['url', 'size', 'checksum', 'external', 'status',
                     'id', 'content_type']
    }

    @staticmethod
    def coerce(obj, attr, value):
        """Validate and store blob info inside oslo.vo"""
        if not isinstance(value, dict):
            raise ValueError(_("Blob value must be dict. Got %s type instead")
                             % type(value))
        value.setdefault('id', str(uuid.uuid4()))
        try:
            jsonschema.validate(value, BlobFieldType.BLOB_SCHEMA)
        except json_exceptions.ValidationError as e:
            raise ValueError(e)

        return value

    @staticmethod
    def to_primitive(obj, attr, value):
        return {key: val for key, val in six.iteritems(value)
                if key not in ('url', 'id')}


class BlobField(fields.AutoTypedField):
    AUTO_TYPE = BlobFieldType()


class DependencyFieldType(fields.FieldType):
    """Dependency field specifies an Artifact dependency on another artifact
    or on some external resource. From the technical perspective it is just a
    soft link to a Glare Artifact or to an http(s) resource, so Artifact
    users can download the referenced file by that link.
    """

    @staticmethod
    def is_external(link):
        return link.startswith('http')

    @staticmethod
    def get_type_name(link):
        url = link.split('/')
        if len(url) == 4:
            return url[2]
        else:
            raise ValueError(_("It is not possible to "
                               "extract type_name from link %s") % link)

    @staticmethod
    def coerce(obj, attr, value):
        # to remove an existing dependency the user sets its value to None,
        # so we have to consider this case
        if value is None:
            return value
        # check that the value is a string
        if not isinstance(value, six.string_types):
            raise ValueError(_('A string is required in field %(attr)s, '
                               'not a %(type)s') %
                             {'attr': attr, 'type': type(value).__name__})
        # determine if the link is external or internal
        external = DependencyFieldType.is_external(value)
        # validate the link itself
        if external:
            link = urlparse.urlparse(value)
            if link.scheme not in ('http', 'https'):
                raise ValueError(_('Only http and https requests '
                                   'are allowed in url %s') % value)
        else:
            result = value.split('/')
            if len(result) != 4 or result[1] != 'artifacts':
                raise ValueError(
                    _('Dependency link %(link)s is not valid in field '
                      '%(attr)s. The link must be either a valid url or a '
                      'reference to an artifact. Example: '
                      '/artifacts/<artifact_type>/<artifact_id>'
                      ) % {'link': value, 'attr': attr})
        return value


class Dependency(fields.AutoTypedField):
    AUTO_TYPE = DependencyFieldType()


class List(fields.AutoTypedField):

    def __init__(self, element_type, **kwargs):
        self.AUTO_TYPE = fields.List(element_type())
        super(List, self).__init__(**kwargs)


class Dict(fields.AutoTypedField):

    def __init__(self, element_type, **kwargs):
        self.AUTO_TYPE = fields.Dict(element_type())
        super(Dict, self).__init__(**kwargs)
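For reference, the two link forms that DependencyFieldType.coerce accepts, plus the clearing case. The values below are illustrative; the first argument is unused by coerce, so None is passed:

    # internal soft link: /artifacts/<artifact_type>/<artifact_id>
    DependencyFieldType.coerce(
        None, 'dep', '/artifacts/images/aa43e5f8-95fd-4b31-9be4-1e0f7b413cb6')
    # external soft link: any http(s) URL
    DependencyFieldType.coerce(None, 'dep', 'https://example.com/file.tar.gz')
    # None removes an existing dependency and is returned as-is
    DependencyFieldType.coerce(None, 'dep', None)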
148
glare/objects/registry.py
Normal file
@ -0,0 +1,148 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import importlib
import pkgutil
import sys

from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from oslo_versionedobjects import base as vo_base
import six

from glare.common import exception
from glare.i18n import _, _LE
from glare.objects import base

CONF = cfg.CONF

LOG = logging.getLogger(__name__)

registry_options = [
    cfg.ListOpt('enabled_artifact_types',
                default=[],
                item_type=types.String(),
                help=_("List of enabled artifact types that will be "
                       "available to user")),
    cfg.ListOpt('custom_artifact_types_modules', default=[],
                item_type=types.String(),
                help=_("List of custom user modules with artifact types "
                       "that will be uploaded by Glare dynamically during "
                       "service startup."))
]
CONF.register_opts(registry_options, group='glare')


def import_submodules(module):
    """Import all submodules of a module

    :param module: Package name
    :returns: list of imported modules
    """
    package = sys.modules[module]
    return [
        importlib.import_module(module + '.' + name)
        for loader, name, is_pkg in pkgutil.walk_packages(package.__path__)]


def import_modules_list(modules):
    custom_module_list = []
    for module_name in modules:
        try:
            custom_module_list.append(importlib.import_module(module_name))
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("Cannot import custom artifact type from module "
                          "%(module_name)s. Error: %(error)s"),
                      {'module_name': module_name, 'error': str(e)})
    return custom_module_list


def get_subclasses(module, base_class):
    subclasses = []
    for name in dir(module):
        obj = getattr(module, name)
        try:
            if issubclass(obj, base_class) and obj != base_class:
                subclasses.append(obj)
        except TypeError:
            pass
    return subclasses


class ArtifactRegistry(vo_base.VersionedObjectRegistry):
    """Artifact Registry is responsible for registration of artifacts and
    returning appropriate artifact types based on artifact type name.
    """

    @classmethod
    def register_all_artifacts(cls):
        """Register all artifacts in glare"""
        # get all submodules in glare.objects
        # note that trusted modules are registered first and custom modules
        # are applied after that, so custom modules can specify custom
        # logic inside
        modules = (import_submodules('glare.objects') +
                   import_modules_list(
                       CONF.glare.custom_artifact_types_modules))
        # get all versioned object classes in module
        supported_types = []
        for module in modules:
            supported_types.extend(get_subclasses(module, base.BaseArtifact))
        for type_name in CONF.glare.enabled_artifact_types:
            for af_type in supported_types:
                if type_name == af_type.get_type_name():
                    cls._validate_artifact_type(af_type)
                    cls.register(af_type)
                    break
            else:
                raise exception.TypeNotFound(name=type_name)

    @classmethod
    def get_artifact_type(cls, type_name):
        """Return artifact type based on artifact type name

        :param type_name: name of artifact type
        :return: artifact class
        """
        for name, af_type in six.iteritems(cls.obj_classes()):
            if af_type[0].get_type_name() == type_name:
                return af_type[0]
        raise exception.TypeNotFound(name=type_name)

    @classmethod
    def _validate_artifact_type(cls, type_class):
        """Validate artifact type class

        Raises an exception if validation fails.
        :param type_class: artifact class
        """
        base_classes = [object, base.BaseArtifact, vo_base.VersionedObject]
        base_attributes = set()
        for b_class in base_classes:
            base_attributes.update(set(vars(b_class).keys()))
        class_attributes = set(vars(type_class).keys())
        common_attrs = class_attributes & base_attributes
        allowed_attributes = ('VERSION', 'fields', 'init_db_api',
                              'get_type_name', 'validate_activate',
                              'validate_publish', 'validate_upload',
                              '__doc__', '__module__')
        for attr in common_attrs:
            if attr not in allowed_attributes:
                raise exception.IncorrectArtifactType(
                    explanation=_("attribute %(attr)s is not allowed to be "
                                  "redefined in subclass %(class_name)s") % {
                        "attr": attr, "class_name": str(type_class)})
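A sketch of the resulting flow, assuming enabled_artifact_types = images is set in the [glare] config group. The type name 'images' is hypothetical and presumes a matching BaseArtifact subclass is importable:

    # at service startup
    ArtifactRegistry.register_all_artifacts()
    # later, resolve a type by name; unknown names raise TypeNotFound
    images_cls = ArtifactRegistry.get_artifact_type('images')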
224
glare/objects/validators.py
Normal file
@ -0,0 +1,224 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six
import uuid

from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_versionedobjects import fields

from glare.i18n import _
from glare.objects import fields as glare_fields

LOG = logging.getLogger(__name__)


class Validator(object):
    """Common interface for all validators"""

    def validate(self, value):
        raise NotImplementedError()

    def get_allowed_types(self):
        raise NotImplementedError()

    def check_type_allowed(self, field_type):
        if not issubclass(field_type, self.get_allowed_types()):
            # try to check if field_type is correct
            # in case element_type was passed
            allowed_field_types = tuple(type(field.AUTO_TYPE)
                                        for field in self.get_allowed_types()
                                        if hasattr(field, 'AUTO_TYPE'))
            if not issubclass(field_type, allowed_field_types):
                raise TypeError(
                    _("%(type)s is not allowed for validator "
                      "%(val)s. Allowed types are %(allowed)s.") % {
                        "type": str(field_type),
                        "val": str(self.__class__),
                        "allowed": str(self.get_allowed_types())})

    def __call__(self, value):
        try:
            self.validate(value)
        except ValueError:
            raise
        except TypeError as e:
            # re-raise all expected TypeErrors as ValueErrors
            LOG.exception(e)
            raise ValueError(encodeutils.exception_to_unicode(e))


class UUID(Validator):
    def get_allowed_types(self):
        return fields.StringField,

    def validate(self, value):
        uuid.UUID(value)


class SizeValidator(Validator):
    def __init__(self, size):
        self.size = size


class MaxStrLen(SizeValidator):
    def get_allowed_types(self):
        return fields.StringField,

    def validate(self, value):
        l = len(value)
        if l > self.size:
            raise ValueError(
                _("String length must be less than %(size)s. "
                  "Current size: %(cur)s") % {'size': self.size,
                                              'cur': l})


class MinStrLen(SizeValidator):
    def get_allowed_types(self):
        return fields.StringField,

    def validate(self, value):
        l = len(value)
        if l < self.size:
            raise ValueError(
                _("String length must be more than %(size)s. "
                  "Current size: %(cur)s") % {'size': self.size,
                                              'cur': l})


class ForbiddenChars(Validator):
    def __init__(self, forbidden_chars):
        self.forbidden_chars = forbidden_chars

    def get_allowed_types(self):
        return fields.StringField,

    def validate(self, value):
        for fc in self.forbidden_chars:
            if fc in value:
                raise ValueError(
                    _("Forbidden character %(char)s found in string "
                      "%(string)s")
                    % {"char": fc, "string": value})


class MaxSize(SizeValidator):
    def get_allowed_types(self):
        return glare_fields.Dict, glare_fields.List

    def validate(self, value):
        l = len(value)
        if l > self.size:
            raise ValueError(
                _("Number of items must be less than "
                  "%(size)s. Current size: %(cur)s") %
                {'size': self.size, 'cur': l})


class Unique(Validator):
    def get_allowed_types(self):
        return glare_fields.List,

    def validate(self, value):
        if len(value) != len(set(value)):
            raise ValueError(_("List items %s must be unique.") % value)


class AllowedListValues(Validator):
    def __init__(self, allowed_values):
        self.allowed_items = allowed_values

    def get_allowed_types(self):
        return glare_fields.List,

    def validate(self, value):
        for item in value:
            if item not in self.allowed_items:
                raise ValueError(
                    _("Value %(item)s is not allowed in list. "
                      "Allowed list values: %(allowed)s") %
                    {"item": item,
                     "allowed": self.allowed_items})


class AllowedDictKeys(Validator):
    def __init__(self, allowed_keys):
        self.allowed_items = allowed_keys

    def get_allowed_types(self):
        return glare_fields.Dict,

    def validate(self, value):
        for item in value:
            if item not in self.allowed_items:
                raise ValueError(_("Key %(item)s is not allowed in dict. "
                                   "Allowed key values: %(allowed)s") %
                                 {"item": item,
                                  "allowed": ', '.join(self.allowed_items)})


class RequiredDictKeys(Validator):
    def __init__(self, required_keys):
        self.required_items = required_keys

    def get_allowed_types(self):
        return glare_fields.Dict,

    def validate(self, value):
        for item in self.required_items:
            if item not in value:
                raise ValueError(_("Key %(item)s is required in dict. "
                                   "Required key values: %(required)s") %
                                 {"item": item,
                                  "required": ', '.join(
                                      self.required_items)})


class MaxDictKeyLen(SizeValidator):
    def get_allowed_types(self):
        return glare_fields.Dict,

    def validate(self, value):
        for key in value:
            if len(str(key)) > self.size:
                raise ValueError(_("Length of dict key %(key)s must be "
                                   "less than %(size)s.") %
                                 {'key': key, 'size': self.size})


class ElementValidator(Validator):
    def __init__(self, validators):
        self.validators = validators


class ListElementValidator(ElementValidator):
    def get_allowed_types(self):
        return glare_fields.List,

    def validate(self, value):
        for v in value:
            for validator in self.validators:
                validator(v)


class DictElementValidator(ElementValidator):
    def get_allowed_types(self):
        return glare_fields.Dict,

    def validate(self, value):
        for v in six.itervalues(value):
            for validator in self.validators:
                validator(v)
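A quick sketch of the validators in action; the values are illustrative:

    v = MaxStrLen(255)
    v.check_type_allowed(fields.StringField)  # accepted type, no error
    v('short string')                         # passes silently
    v('x' * 300)                              # raises ValueError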
43
glare/opts.py
Normal file
@ -0,0 +1,43 @@
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'list_artifacts_opts'
]

import copy
import itertools

from osprofiler import opts as profiler

import glare.api.middleware.glare_context
import glare.api.versions
import glare.common.config
import glare.common.wsgi

_artifacts_opts = [
    (None, list(itertools.chain(
        glare.api.middleware.glare_context.context_opts,
        glare.api.versions.versions_opts,
        glare.common.wsgi.bind_opts,
        glare.common.wsgi.eventlet_opts,
        glare.common.wsgi.socket_opts))),
    profiler.list_opts()[0],
    ('paste_deploy', glare.common.config.paste_deploy_opts)
]


def list_artifacts_opts():
    """Return a list of oslo_config options available in Glare artifacts"""
    return [(g, copy.deepcopy(o)) for g, o in _artifacts_opts]
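This module backs the oslo.config.opts entry point declared in setup.cfg below (glare.glare = glare.opts:list_artifacts_opts), which is what lets oslo-config-generator discover these options when building the sample configuration.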
0
glare/tests/__init__.py
Normal file
27
pylintrc
Normal file
@ -0,0 +1,27 @@
[Messages Control]
# W0511: TODOs in code comments are fine.
# W0142: *args and **kwargs are fine.
# W0622: Redefining id is fine.
disable-msg=W0511,W0142,W0622

[Basic]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=[a-z_][a-z0-9_]{2,50}$

# Module names matching nova-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[Design]
max-public-methods=100
min-public-methods=0
max-args=6
66
requirements.txt
Normal file
@ -0,0 +1,66 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

pbr>=1.6 # Apache-2.0

# < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983
SQLAlchemy<1.1.0,>=1.0.10 # MIT
eventlet!=0.18.3,>=0.18.2 # MIT
PasteDeploy>=1.5.0 # MIT
Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
WebOb>=1.2.3 # MIT
sqlalchemy-migrate>=0.9.6 # Apache-2.0
httplib2>=0.7.5 # MIT
pycrypto>=2.6 # Public Domain
oslo.config>=3.10.0 # Apache-2.0
oslo.concurrency>=3.8.0 # Apache-2.0
oslo.context>=2.4.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
oslo.utils>=3.14.0 # Apache-2.0
stevedore>=1.10.0 # Apache-2.0
futurist>=0.11.0 # Apache-2.0
taskflow>=1.26.0 # Apache-2.0
keystoneauth1>=2.7.0 # Apache-2.0
keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
WSME>=0.8 # MIT
PrettyTable<0.8,>=0.7 # BSD

# For paste.util.template used in keystone.common.template
Paste # MIT

jsonpatch>=1.1 # BSD
jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
python-keystoneclient!=1.8.0,!=2.1.0,>=1.7.0 # Apache-2.0
pyOpenSSL>=0.14 # Apache-2.0
# Required by openstack.common libraries
six>=1.9.0 # MIT

oslo.db>=4.1.0 # Apache-2.0
oslo.i18n>=2.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.messaging>=5.2.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0
oslo.policy>=1.9.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.versionedobjects>=1.13.0 # Apache-2.0

retrying!=1.3.0,>=1.2.3 # Apache-2.0
osprofiler>=1.3.0 # Apache-2.0

# Glance Store
glance-store>=0.13.0 # Apache-2.0


# Artifact repository
microversion-parse>=0.1.2 # Apache-2.0
semantic-version>=2.3.1 # BSD

castellan>=0.4.0 # Apache-2.0
cryptography!=1.3.0,>=1.0 # BSD/Apache-2.0
debtcollector>=1.2.0 # Apache-2.0

# timeutils
iso8601>=0.1.11 # MIT
monotonic>=0.6 # Apache-2.0
54
setup.cfg
Normal file
@ -0,0 +1,54 @@
[metadata]
name = glare
summary = OpenStack Artifact Service
description-file = README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://docs.openstack.org/developer/glare/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7

[files]
packages =
    glare

[entry_points]
console_scripts =
    glare-manage = glare.cmd.manage:main
    glare-glare = glare.cmd.glare:main
oslo.config.opts =
    glare.glare = glare.opts:list_artifacts_opts

[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

[compile_catalog]
directory = glare/locale
domain = glare

[update_catalog]
domain = glare
output_dir = glare/locale
input_file = glare/locale/glare.pot

[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = glare/locale/glare.pot

[pbr]
autodoc_tree_index_modules = True
29
setup.py
Normal file
@ -0,0 +1,29 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=1.8'],
    pbr=True)
38
test-requirements.txt
Normal file
@ -0,0 +1,38 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

# Hacking already pins down pep8, pyflakes and flake8
hacking<0.11,>=0.10.0

# For translations processing
Babel>=2.3.4 # BSD

# Needed for testing
bandit>=1.0.1 # Apache-2.0
coverage>=3.6 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
mox3>=0.7.0 # Apache-2.0
mock>=2.0 # BSD
sphinx!=1.3b1,<1.3,>=1.2.1 # BSD
requests>=2.10.0 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
psutil<2.0.0,>=1.1.1 # BSD
oslotest>=1.10.0 # Apache-2.0
os-testr>=0.7.0 # Apache-2.0

# Optional packages that should be installed when testing
PyMySQL>=0.6.2 # MIT License
psycopg2>=2.5 # LGPL/ZPL
pysendfile>=2.0.0 # MIT
qpid-python;python_version=='2.7' # Apache-2.0
xattr>=0.4 # MIT
python-swiftclient>=2.2.0 # Apache-2.0

# Documentation
os-api-ref>=0.1.0 # Apache-2.0
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
reno>=1.8.0 # Apache2
70
tox.ini
Normal file
@ -0,0 +1,70 @@
[tox]
minversion = 1.6
envlist = py34,py27,pep8
skipsdist = True

[testenv]
setenv = VIRTUAL_ENV={envdir}
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/test-requirements.txt
commands = ostestr --slowest {posargs}
whitelist_externals = bash
passenv = *_proxy *_PROXY

[testenv:debug]
commands = oslo_debug_helper {posargs}

[testenv:debug-py27]
basepython = python2.7
commands = oslo_debug_helper {posargs}

[testenv:debug-py34]
basepython = python3.4
commands = oslo_debug_helper {posargs}

[testenv:pep8]
commands =
  flake8 {posargs}
  # Run security linter
  bandit -c bandit.yaml -r glare -n5 -p gate
  # Check that .po and .pot files are valid:
  bash -c "find glare -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"

[testenv:cover]
# NOTE(jaegerandi): this target does not use constraints because
# upstream infra does not yet support it. Once that's fixed, we can
# drop the install_command.
install_command = pip install -U --force-reinstall {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
commands = python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$'

[testenv:venv]
# NOTE(jaegerandi): this target does not use constraints because
# upstream infra does not yet support it. Once that's fixed, we can
# drop the install_command.
install_command = pip install -U --force-reinstall {opts} {packages}
commands = {posargs}

[testenv:genconfig]
commands =
  oslo-config-generator --config-file etc/oslo-config-generator/glare.conf

[testenv:docs]
commands = python setup.py build_sphinx

[testenv:bandit]
commands = bandit -c bandit.yaml -r glare -n5 -p gate

[flake8]
# TODO(dmllr): Analyze or fix the warnings blacklisted below
# E711  comparison to None should be 'if cond is not None:'
# E712  comparison to True should be 'if cond is True:' or 'if cond:'
# H404  multi line docstring should start with a summary
# H405  multi line docstring summary not separated with an empty line
ignore = E711,E712,H404,H405
exclude = .venv,.git,.tox,dist,doc,etc,*glare/locale*,*lib/python*,*egg,build

[hacking]
local-check-factory = glare.hacking.checks.factory
import_exceptions = glare.i18n
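With tox installed, `tox -e pep8` runs the flake8, bandit and translation-file checks defined above, `tox -e py27` runs the unit tests through ostestr, and `tox -e genconfig` regenerates the sample configuration via oslo-config-generator.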