add initial source code
.coveragerc (Normal file, 7 lines)
@@ -0,0 +1,7 @@
[run]
branch = True
source = rack
omit = rack/tests/*,rack/openstack/*

[report]
ignore-errors = True
.gitignore (vendored, Normal file, 45 lines)
@@ -0,0 +1,45 @@
*.DS_Store
*.egg*
*.log
*.mo
*.pyc
*.swo
*.swp
*.sqlite
*~
.autogenerated
.coverage
.rack-venv
.project
.pydevproject
.ropeproject
.testrepository/
.settings
.tox
.idea
.venv
AUTHORS
Authors
build-stamp
build/*
bin/*
CA/
ChangeLog
coverage.xml
cover/*
covhtml
dist/*
doc/source/api/*
doc/build/*
etc/rack.conf
instances
keeper
keys
local_settings.py
MANIFEST
nosetests.xml
rack/tests/cover/*
rack/vcsversion.py
tools/conf/rack.conf*
tools/lintstack.head.py
tools/pylint_exceptions
.testr.conf (Normal file, 8 lines)
@@ -0,0 +1,8 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
             ${PYTHON:-python} -m subunit.run discover -t ./ ./rack/tests $LISTOPT $IDOPTION

test_id_option=--load-list $IDFILE
test_list_option=--list
README.rst (Normal file, 22 lines)
@@ -0,0 +1,22 @@
RACK (Real Application Centric Kernel) README
=============================================

RACK lets an application control OpenStack resources as if they were the program's own resources.
Through RACK, a VM instance looks to an application like a Linux process, so you can issue "Exec", "Fork", and "Kill" commands against these processes (which are actually VMs).
This enables you to implement large-scale distributed systems in a variety of programming languages on OpenStack.

You can use RACK in many cases.
The following are some examples.

* You can implement applications with a new architecture.
  For example, you can build an application that calculates the amount of computing resources (i.e. instances) needed for the data it has to process and launches additional instances dynamically.
  The data is then processed very quickly, since these instances work in parallel.
  This kind of application is well suited to processing large amounts of data.

* You can integrate existing systems, such as Hadoop-based batch systems and Web applications, with RACK.
  For example, RACK lets you deploy a Hadoop cluster easily and add an autoscaling function to your Web applications.

To learn about RACK in detail, read this page on the wiki:

https://wiki.openstack.org/wiki/RACK
etc/api-paste.ini (Normal file, 33 lines)
@@ -0,0 +1,33 @@
[composite:rackapi]
use = egg:Paste#urlmap
/ = rackversions
/v1 = rackapi_v1

[composite:rackapi_v1]
use = call:rack.api.auth:pipeline_factory
noauth = faultwrap noauth rackapp_v1
keystone = faultwrap authtoken keystonecontext rackapp_v1

[filter:faultwrap]
paste.filter_factory = rack.api:FaultWrapper.factory

[filter:noauth]
paste.filter_factory = rack.api.auth:NoAuthMiddleware.factory

[pipeline:rackversions]
pipeline = faultwrap rackversionapp

[app:rackversionapp]
paste.app_factory = rack.api.versions:Versions.factory

[app:rackapp_v1]
paste.app_factory = rack.api.v1:APIRouter.factory

[filter:keystonecontext]
paste.filter_factory = rack.api.auth:RackKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_port = 35357
auth_protocol = http
auth_version = v2.0
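Note: the two composite sections above wire the WSGI stack together. A minimal
loading sketch, assuming paste.deploy is installed, the rack packages are
importable, and '/tmp/api-paste.ini' is a hypothetical copy of this file:

    # loadapp() resolves [composite:rackapi]; requests to /v1 are then routed
    # through [composite:rackapi_v1], whose pipeline_factory (defined in
    # rack/api/auth.py below) picks the 'noauth' or 'keystone' middleware
    # chain based on the auth_strategy config option.
    from paste import deploy

    app = deploy.loadapp('config:/tmp/api-paste.ini', name='rackapi')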
etc/rack.conf.sample (Normal file, 23 lines)
@@ -0,0 +1,23 @@
[DEFAULT]
#debug = True
#verbose = True
#rabbit_password = guest
#rabbit_host = localhost
#rpc_backend = rack.openstack.common.rpc.impl_kombu
#lock_path = /var/lib/rack/lock
#state_path = /var/lib/rack
#sql_connection = mysql://root:password@127.0.0.1/rack?charset=utf8
#my_ip = 127.0.0.1
#api_paste_config = /etc/api-paste.ini
#auth_strategy = noauth
#os_username = admin
#os_password = password
#os_tenant_name = demo
#os_auth_url = http://localhost:5000/v2.0

[keystone_authtoken]
#signing_dir = /var/cache/rack
#admin_password = password
#admin_user = rack
#admin_tenant_name = services
#auth_host = 127.0.0.1
openstack-common.conf (Normal file, 47 lines)
@@ -0,0 +1,47 @@
[DEFAULT]

# The list of modules to copy from oslo-incubator.git
module=cliutils
module=config
module=context
module=db
module=db.sqlalchemy
module=eventlet_backdoor
module=excutils
module=fileutils
module=fixture
module=gettextutils
module=imageutils
module=importutils
module=install_venv_common
module=jsonutils
module=local
module=lockutils
module=log
module=loopingcall
module=memorycache
module=middleware/base
module=middleware/request_id
module=network_utils
module=periodic_task
module=policy
module=processutils
module=report
module=report.generators
module=report.models
module=report.views
module=report.views.xml
module=report.views.json
module=report.views.text
module=service
module=sslutils
module=strutils
module=threadgroup
module=timeutils
module=units
module=uuidutils
module=versionutils
module=xmlutils

# The base module to hold the copy of openstack.common
base=rack
rack/__init__.py (Normal file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
:mod:`rack` -- Cloud IaaS Platform
===================================

.. automodule:: rack
   :platform: Unix
   :synopsis: Infrastructure-as-a-Service Cloud platform.
"""
rack/api/__init__.py (Normal file, 82 lines)
@@ -0,0 +1,82 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg
import routes
import stevedore
import webob.dec
import webob.exc

from rack.api import wsgi
from rack import exception
from rack.openstack.common import gettextutils
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack import utils
from rack import wsgi as base_wsgi

LOG = logging.getLogger(__name__)


class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""

    _status_to_type = {}

    @staticmethod
    def status_to_type(status):
        if not FaultWrapper._status_to_type:
            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                FaultWrapper._status_to_type[clazz.code] = clazz
        return FaultWrapper._status_to_type.get(
            status, webob.exc.HTTPInternalServerError)()

    def _error(self, inner, req):
        LOG.exception(_("Caught error: %s"), unicode(inner))

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            if isinstance(inner.msg_fmt, gettextutils.Message):
                user_locale = req.best_match_language()
                inner_msg = gettextutils.translate(
                    inner.msg_fmt, user_locale)
            else:
                inner_msg = unicode(inner)
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            inner_msg)

        # notifications.send_api_fault(req.url, status, inner)
        return wsgi.Fault(outer)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
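Note: status_to_type() builds a {HTTP code: exception class} map by walking
webob's exception hierarchy. A self-contained sketch of the idea, with an
illustrative walk() helper standing in for rack's utils.walk_class_hierarchy
(an assumption; the real helper lives elsewhere in this tree):

    import webob.exc

    def walk(cls):
        # Recursively yield every subclass, like utils.walk_class_hierarchy.
        for sub in cls.__subclasses__():
            for sc in walk(sub):
                yield sc
            yield sub

    mapping = dict((c.code, c) for c in walk(webob.exc.HTTPError)
                   if getattr(c, 'code', None))
    print(mapping[404])  # <class 'webob.exc.HTTPNotFound'>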
rack/api/auth.py (Normal file, 188 lines)
@@ -0,0 +1,188 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Common Auth Middleware.

"""

from oslo.config import cfg
import webob.dec
import webob.exc

from rack import context
from rack.api import wsgi
from rack.openstack.common.gettextutils import _
from rack.openstack.common import jsonutils
from rack.openstack.common import log as logging
from rack import wsgi as base_wsgi


auth_opts = [
    cfg.BoolOpt('api_rate_limit',
                default=False,
                help=('Whether to use per-user rate limiting for the api. ')),
    cfg.StrOpt('auth_strategy',
               default='noauth',
               help='The strategy to use for auth: noauth or keystone.'),
    cfg.BoolOpt('use_forwarded_for',
                default=False,
                help='Treat X-Forwarded-For as the canonical remote address. '
                     'Only enable this if you have a sanitizing proxy.'),
]

CONF = cfg.CONF
CONF.register_opts(auth_opts)

LOG = logging.getLogger(__name__)


def _load_pipeline(loader, pipeline):
    filters = [loader.get_filter(n) for n in pipeline[:-1]]
    app = loader.get_app(pipeline[-1])
    filters.reverse()
    for filter in filters:
        app = filter(app)
    return app


def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    pipeline = local_conf[CONF.auth_strategy]
    if not CONF.api_rate_limit:
        limit_name = CONF.auth_strategy + '_nolimit'
        pipeline = local_conf.get(limit_name, pipeline)
    pipeline = pipeline.split()
    return _load_pipeline(loader, pipeline)


class InjectContext(base_wsgi.Middleware):
    """Add a 'rack.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        req.environ['rack.context'] = self.context
        return self.application


class RackKeystoneContext(base_wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()

        roles = self._get_roles(req)

        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = req.headers['X_TENANT']
        project_name = req.headers.get('X_TENANT_NAME')
        user_name = req.headers.get('X_USER_NAME')

        # Get the auth token
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))

        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        service_catalog = None
        if req.headers.get('X_SERVICE_CATALOG') is not None:
            try:
                catalog_header = req.headers.get('X_SERVICE_CATALOG')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     user_name=user_name,
                                     project_name=project_name,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address,
                                     service_catalog=service_catalog)

        req.environ['rack.context'] = ctx
        return self.application

    def _get_roles(self, req):
        """Get the list of roles."""

        if 'X_ROLES' in req.headers:
            roles = req.headers.get('X_ROLES', '')
        else:
            # Fallback to deprecated role header:
            roles = req.headers.get('X_ROLE', '')
            if roles:
                LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
                           "header"))
        return [r.strip() for r in roles.split(',')]


class NoAuthMiddlewareBase(base_wsgi.Middleware):
    """Return a fake token if one isn't specified."""

    def base_call(self, req, project_id_in_path):
        if 'X-Auth-Token' not in req.headers:
            user_id = req.headers.get('X-Auth-User', 'admin')
            project_id = req.headers.get('X-Auth-Project-Id', 'admin')
            if project_id_in_path:
                os_url = '/'.join([req.url.rstrip('/'), project_id])
            else:
                os_url = req.url.rstrip('/')
            res = webob.Response()
            # NOTE(vish): This is expecting and returning Auth(1.1), whereas
            #             keystone uses 2.0 auth. We should probably allow
            #             2.0 auth here as well.
            res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
            res.headers['X-Server-Management-Url'] = os_url
            res.content_type = 'text/plain'
            res.status = '204'
            return res

        token = req.headers['X-Auth-Token']
        user_id, _sep, project_id = token.partition(':')
        project_id = project_id or user_id
        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)

        req.environ['rack.context'] = ctx
        return self.application


class NoAuthMiddleware(NoAuthMiddlewareBase):
    """Return a fake token if one isn't specified."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        return self.base_call(req, True)
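Note: NoAuthMiddlewareBase implements a two-step handshake. A minimal sketch
using webob directly (assumptions: rack.api.auth and its dependencies are
importable; 'demo' and 'proj' are made-up credentials):

    import webob

    # Step 1: a request without X-Auth-Token gets a 204 response whose
    # X-Auth-Token header is the fake token '<user>:<project>'.
    req = webob.Request.blank('/v1')
    req.headers['X-Auth-User'] = 'demo'
    req.headers['X-Auth-Project-Id'] = 'proj'
    # res = req.get_response(NoAuthMiddleware(app))
    # res.status == '204 No Content'
    # res.headers['X-Auth-Token'] == 'demo:proj'

    # Step 2: subsequent requests send the token back; base_call() splits it
    # on ':' and stores an admin RequestContext in environ['rack.context'].
    req2 = webob.Request.blank('/v1')
    req2.headers['X-Auth-Token'] = 'demo:proj'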
rack/api/common.py (Normal file, 448 lines)
@@ -0,0 +1,448 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import itertools
import os
import re

from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc

from rack.api import wsgi
from rack.api import xmlutil
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

osapi_opts = [
    cfg.IntOpt('osapi_max_limit',
               default=1000,
               help='The maximum number of items returned in a single '
                    'response from a collection resource'),
    cfg.StrOpt('osapi_compute_link_prefix',
               help='Base URL that will be presented to users in links '
                    'to the OpenStack Compute API'),
    cfg.StrOpt('osapi_glance_link_prefix',
               help='Base URL that will be presented to users in links '
                    'to glance resources'),
]
CONF = cfg.CONF
CONF.register_opts(osapi_opts)

LOG = logging.getLogger(__name__)


VALID_NAME_REGEX = re.compile("^(?! )[\w. _-]+(?<! )$", re.UNICODE)

XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'


def get_pagination_params(request):
    """Return marker, limit tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either marker or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.

    """
    params = {}
    if 'limit' in request.GET:
        params['limit'] = _get_int_param(request, 'limit')
    if 'page_size' in request.GET:
        params['page_size'] = _get_int_param(request, 'page_size')
    if 'marker' in request.GET:
        params['marker'] = _get_marker_param(request)
    return params


def _get_int_param(request, param):
    """Extract integer param from request or fail."""
    try:
        int_param = int(request.GET[param])
    except ValueError:
        msg = _('%s param must be an integer') % param
        raise webob.exc.HTTPBadRequest(explanation=msg)
    if int_param < 0:
        msg = _('%s param must be positive') % param
        raise webob.exc.HTTPBadRequest(explanation=msg)
    return int_param


def _get_marker_param(request):
    """Extract marker id from request or fail."""
    return request.GET['marker']


def limited(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
                    GET variables. 'offset' is where to start in the list,
                    and 'limit' is the maximum number of items to return. If
                    'limit' is not specified, 0, or > max_limit, we default
                    to max_limit. Negative values for either offset or limit
                    will cause exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        msg = _('offset param must be an integer')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        msg = _('limit param must be an integer')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if limit < 0:
        msg = _('limit param must be positive')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if offset < 0:
        msg = _('offset param must be positive')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    limit = min(max_limit, limit or max_limit)
    range_end = offset + limit
    return items[offset:range_end]


def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
    """get limited parameter from request."""
    params = get_pagination_params(request)
    limit = params.get('limit', max_limit)
    limit = min(max_limit, limit)
    marker = params.get('marker')

    return limit, marker


def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to the requested marker and limit."""
    limit, marker = get_limit_and_marker(request, max_limit)

    limit = min(max_limit, limit)
    start_index = 0
    if marker:
        start_index = -1
        for i, item in enumerate(items):
            if 'flavorid' in item:
                if item['flavorid'] == marker:
                    start_index = i + 1
                    break
            elif item['id'] == marker or item.get('uuid') == marker:
                start_index = i + 1
                break
        if start_index < 0:
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)
    range_end = start_index + limit
    return items[start_index:range_end]


def get_id_from_href(href):
    """Return the id or uuid portion of a url.

    Given: 'http://www.foo.com/bar/123?q=4'
    Returns: '123'

    Given: 'http://www.foo.com/bar/abc123?q=4'
    Returns: 'abc123'

    """
    return urlparse.urlsplit("%s" % href).path.split('/')[-1]


def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.rack.com/v1.1/123'
    Returns: 'http://www.rack.com/123'

    Given: 'http://www.rack.com/v1.1'
    Returns: 'http://www.rack.com'

    """
    parsed_url = urlparse.urlsplit(href)
    url_parts = parsed_url.path.split('/', 2)

    # NOTE: this should match vX.X or vX
    expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    if expression.match(url_parts[1]):
        del url_parts[1]

    new_path = '/'.join(url_parts)

    if new_path == parsed_url.path:
        msg = _('href %s does not contain version') % href
        LOG.debug(msg)
        raise ValueError(msg)

    parsed_url = list(parsed_url)
    parsed_url[2] = new_path
    return urlparse.urlunsplit(parsed_url)


def dict_to_query_str(params):
    # TODO(throughnothing): we should just use urllib.urlencode instead of this
    # But currently we don't work with urlencoded url's
    param_str = ""
    for key, val in params.iteritems():
        param_str = param_str + '='.join([str(key), str(val)]) + '&'

    return param_str.rstrip('&')


def get_networks_for_instance_from_nw_info(nw_info):
    networks = {}
    for vif in nw_info:
        ips = vif.fixed_ips()
        floaters = vif.floating_ips()
        label = vif['network']['label']
        if label not in networks:
            networks[label] = {'ips': [], 'floating_ips': []}

        networks[label]['ips'].extend(ips)
        networks[label]['floating_ips'].extend(floaters)
        for ip in itertools.chain(networks[label]['ips'],
                                  networks[label]['floating_ips']):
            ip['mac_address'] = vif['address']
    return networks


def raise_http_conflict_for_instance_invalid_state(exc, action):
    """Raises a webob.exc.HTTPConflict instance containing a message
    appropriate to return via the API based on the original
    InstanceInvalidState exception.
    """
    attr = exc.kwargs.get('attr')
    state = exc.kwargs.get('state')
    not_launched = exc.kwargs.get('not_launched')
    if attr and state:
        msg = _("Cannot '%(action)s' while instance is in %(attr)s "
                "%(state)s") % {'action': action, 'attr': attr, 'state': state}
    elif not_launched:
        msg = _("Cannot '%s' an instance which has never been active") % action
    else:
        # At least give some meaningful message
        msg = _("Instance is in an invalid state for '%s'") % action
    raise webob.exc.HTTPConflict(explanation=msg)


class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
    def deserialize(self, text):
        dom = xmlutil.safe_minidom_parse_string(text)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}


class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
    def deserialize(self, text):
        dom = xmlutil.safe_minidom_parse_string(text)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}


class MetadataXMLDeserializer(wsgi.XMLDeserializer):

    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request."""
        if metadata_node is None:
            return {}
        metadata = {}
        for meta_node in self.find_children_named(metadata_node, "meta"):
            key = meta_node.getAttribute("key")
            metadata[key] = self.extract_text(meta_node)
        return metadata

    def _extract_metadata_container(self, datastring):
        dom = xmlutil.safe_minidom_parse_string(datastring)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}

    def create(self, datastring):
        return self._extract_metadata_container(datastring)

    def update_all(self, datastring):
        return self._extract_metadata_container(datastring)

    def update(self, datastring):
        dom = xmlutil.safe_minidom_parse_string(datastring)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}


metadata_nsmap = {None: xmlutil.XMLNS_V11}


class MetaItemTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
        root = xmlutil.TemplateElement('meta', selector=sel)
        root.set('key', 0)
        root.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)


class MetadataTemplateElement(xmlutil.TemplateElement):
    def will_render(self, datum):
        return True


class MetadataTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = MetadataTemplateElement('metadata', selector='metadata')
        elem = xmlutil.SubTemplateElement(root, 'meta',
                                          selector=xmlutil.get_items)
        elem.set('key', 0)
        elem.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)


def check_snapshots_enabled(f):
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if not CONF.allow_instance_snapshots:
            LOG.warn(_('Rejecting snapshot request, snapshots currently'
                       ' disabled'))
            msg = _("Instance snapshots are not permitted at this time.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return f(*args, **kwargs)
    return inner


class ViewBuilder(object):
    """Model API responses as dictionaries."""

    def _get_project_id(self, request):
        """Get project id from request url if present or empty string
        otherwise
        """
        project_id = request.environ["rack.context"].project_id
        if project_id in request.url:
            return project_id
        return ''

    def _get_links(self, request, identifier, collection_name):
        return [{
            "rel": "self",
            "href": self._get_href_link(request, identifier, collection_name),
        },
        {
            "rel": "bookmark",
            "href": self._get_bookmark_link(request,
                                            identifier,
                                            collection_name),
        }]

    def _get_next_link(self, request, identifier, collection_name):
        """Return href string with proper limit and marker params."""
        params = request.params.copy()
        params["marker"] = identifier
        prefix = self._update_compute_link_prefix(request.application_url)
        url = os.path.join(prefix,
                           self._get_project_id(request),
                           collection_name)
        return "%s?%s" % (url, dict_to_query_str(params))

    def _get_href_link(self, request, identifier, collection_name):
        """Return an href string pointing to this object."""
        prefix = self._update_compute_link_prefix(request.application_url)
        return os.path.join(prefix,
                            self._get_project_id(request),
                            collection_name,
                            str(identifier))

    def _get_bookmark_link(self, request, identifier, collection_name):
        """Create a URL that refers to a specific resource."""
        base_url = remove_version_from_href(request.application_url)
        base_url = self._update_compute_link_prefix(base_url)
        return os.path.join(base_url,
                            self._get_project_id(request),
                            collection_name,
                            str(identifier))

    def _get_collection_links(self,
                              request,
                              items,
                              collection_name,
                              id_key="uuid"):
        """Retrieve 'next' link, if applicable."""
        links = []
        limit = int(request.params.get("limit", 0))
        if limit and limit == len(items):
            last_item = items[-1]
            if id_key in last_item:
                last_item_id = last_item[id_key]
            elif 'id' in last_item:
                last_item_id = last_item["id"]
            else:
                last_item_id = last_item["flavorid"]
            links.append({
                "rel": "next",
                "href": self._get_next_link(request,
                                            last_item_id,
                                            collection_name),
            })
        return links

    def _update_link_prefix(self, orig_url, prefix):
        if not prefix:
            return orig_url
        url_parts = list(urlparse.urlsplit(orig_url))
        prefix_parts = list(urlparse.urlsplit(prefix))
        url_parts[0:2] = prefix_parts[0:2]
        return urlparse.urlunsplit(url_parts)

    def _update_glance_link_prefix(self, orig_url):
        return self._update_link_prefix(orig_url,
                                        CONF.osapi_glance_link_prefix)

    def _update_compute_link_prefix(self, orig_url):
        return self._update_link_prefix(orig_url,
                                        CONF.osapi_compute_link_prefix)


def get_instance(compute_api, context, instance_id, want_objects=False,
                 expected_attrs=None):
    """Fetch an instance from the compute API, handling error checking."""
    try:
        return compute_api.get(context, instance_id,
                               want_objects=want_objects,
                               expected_attrs=expected_attrs)
    except exception.InstanceNotFound as e:
        raise exc.HTTPNotFound(explanation=e.format_message())


def check_cells_enabled(function):
    @functools.wraps(function)
    def inner(*args, **kwargs):
        if not CONF.cells.enable:
            msg = _("Cells is not enabled.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        return function(*args, **kwargs)
    return inner
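Note: a usage sketch for the pagination helpers above (assumptions: this
module and webob are importable; the values are illustrative only):

    import webob

    # get_pagination_params() reads 'limit' and 'marker' from the query
    # string and validates that 'limit' is a non-negative integer.
    request = webob.Request.blank('/groups?limit=2&marker=abc')
    # get_pagination_params(request) -> {'limit': 2, 'marker': 'abc'}

    # limited() slices by offset/limit:
    # limited(['a', 'b', 'c', 'd'],
    #         webob.Request.blank('/?offset=1&limit=2')) -> ['b', 'c']

    # limited_by_marker() instead scans items for the marker id and returns
    # the next 'limit' items, raising HTTPBadRequest if the marker is absent.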
rack/api/v1/__init__.py (Normal file, 186 lines)
@@ -0,0 +1,186 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
WSGI middleware for RACK API controllers.
"""

from oslo.config import cfg
import routes
import stevedore
import webob.dec
import webob.exc

from rack.api import wsgi
from rack.api.v1 import groups
from rack.api.v1 import networks
from rack.api.v1 import keypairs
from rack.api.v1 import securitygroups
from rack.api.v1 import processes
from rack.api import versions
from rack import exception
from rack.openstack.common import gettextutils
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack import utils
from rack import wsgi as base_wsgi

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class APIMapper(routes.Mapper):
    def routematch(self, url=None, environ=None):
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)

    def connect(self, *args, **kargs):
        # NOTE(vish): Default the format part of a route to only accept json
        #             and xml so it doesn't eat all characters after a '.'
        #             in the url.
        kargs.setdefault('requirements', {})
        if not kargs['requirements'].get('format'):
            kargs['requirements']['format'] = 'json|xml'
        return routes.Mapper.connect(self, *args, **kargs)


class APIRouter(base_wsgi.Router):
    """Routes requests on the RACK API to the appropriate controller
    and method.
    """
    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`rack.wsgi.Router` doesn't have one."""
        return cls()

    def __init__(self):
        mapper = APIMapper()
        self._setup_routes(mapper)
        super(APIRouter, self).__init__(mapper)

    def _setup_routes(self, mapper):
        versions_resource = versions.create_resource()
        mapper.connect("/",
                       controller=versions_resource,
                       action="show",
                       conditions={'method': ['GET']})

        mapper.redirect("", "/")

        groups_resource = groups.create_resource()
        mapper.connect("/groups",
                       controller=groups_resource,
                       action="index",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}",
                       controller=groups_resource,
                       action="show",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups",
                       controller=groups_resource,
                       action="create",
                       conditions={"method": ["POST"]})
        mapper.connect("/groups/{gid}",
                       controller=groups_resource,
                       action="update",
                       conditions={"method": ["PUT"]})
        mapper.connect("/groups/{gid}",
                       controller=groups_resource,
                       action="delete",
                       conditions={"method": ["DELETE"]})

        networks_resource = networks.create_resource()
        mapper.connect("/groups/{gid}/networks",
                       controller=networks_resource,
                       action="index",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/networks/{network_id}",
                       controller=networks_resource,
                       action="show",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/networks",
                       controller=networks_resource,
                       action="create",
                       conditions={"method": ["POST"]})
        mapper.connect("/groups/{gid}/networks/{network_id}",
                       controller=networks_resource,
                       action="update",
                       conditions={"method": ["PUT"]})
        mapper.connect("/groups/{gid}/networks/{network_id}",
                       controller=networks_resource,
                       action="delete",
                       conditions={"method": ["DELETE"]})

        keypairs_resource = keypairs.create_resource()
        mapper.connect("/groups/{gid}/keypairs",
                       controller=keypairs_resource,
                       action="index",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
                       controller=keypairs_resource,
                       action="show",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/keypairs",
                       controller=keypairs_resource,
                       action="create",
                       conditions={"method": ["POST"]})
        mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
                       controller=keypairs_resource,
                       action="update",
                       conditions={"method": ["PUT"]})
        mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
                       controller=keypairs_resource,
                       action="delete",
                       conditions={"method": ["DELETE"]})

        securitygroups_resource = securitygroups.create_resource()
        mapper.connect("/groups/{gid}/securitygroups",
                       controller=securitygroups_resource,
                       action="index",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
                       controller=securitygroups_resource,
                       action="show",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/securitygroups",
                       controller=securitygroups_resource,
                       action="create",
                       conditions={"method": ["POST"]})
        mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
                       controller=securitygroups_resource,
                       action="update",
                       conditions={"method": ["PUT"]})
        mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
                       controller=securitygroups_resource,
                       action="delete",
                       conditions={"method": ["DELETE"]})

        processes_resource = processes.create_resource()
        mapper.connect("/groups/{gid}/processes",
                       controller=processes_resource,
                       action="index",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/processes/{pid}",
                       controller=processes_resource,
                       action="show",
                       conditions={"method": ["GET"]})
        mapper.connect("/groups/{gid}/processes",
                       controller=processes_resource,
                       action="create",
                       conditions={"method": ["POST"]})
        mapper.connect("/groups/{gid}/processes/{pid}",
                       controller=processes_resource,
                       action="delete",
                       conditions={"method": ["DELETE"]})
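Note: the routes above define a small REST surface per group. A sketch of
exercising it with python-requests (assumptions: the host and port are
hypothetical, the service binding is not part of this commit, and <gid> is a
placeholder for a real group uuid):

    import requests

    base = 'http://rack-host:8088/v1'  # hypothetical endpoint
    requests.get(base + '/groups')     # index
    requests.post(base + '/groups',    # create; body shape per groups.py
                  json={'group': {'name': 'g1', 'description': 'demo'}})
    requests.get(base + '/groups/<gid>/processes')  # per-group sub-resource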
rack/api/v1/groups.py (Normal file, 212 lines)
@@ -0,0 +1,212 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid

import six
import webob

from rack.api.v1.views import groups as views_groups
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import uuidutils
from rack import utils


LOG = logging.getLogger(__name__)


class Controller(wsgi.Controller):

    """Group controller for RACK API."""

    _view_builder_class = views_groups.ViewBuilder

    def __init__(self):
        super(Controller, self).__init__()

    @wsgi.response(200)
    def index(self, req):
        filters = {}
        project_id = req.params.get('project_id')
        name = req.params.get('name')
        status = req.params.get('status')

        if project_id:
            filters['project_id'] = project_id
        if name:
            filters['display_name'] = name
        if status:
            filters['status'] = status

        context = req.environ['rack.context']
        group_list = db.group_get_all(context, filters)

        return self._view_builder.index(group_list)

    @wsgi.response(200)
    def show(self, req, gid):

        def _validate(gid):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

        try:
            _validate(gid)
            context = req.environ['rack.context']
            group = db.group_get_by_gid(context, gid)
        except exception.NotFound:
            msg = _("Group could not be found")
            raise webob.exc.HTTPNotFound(explanation=msg)

        return self._view_builder.show(group)

    @wsgi.response(201)
    def create(self, req, body):

        def _validate(body):
            if not self.is_valid_body(body, 'group'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            values = body["group"]
            name = values.get("name")
            description = values.get("description")

            if not name:
                msg = _("Group name is required")
                raise exception.InvalidInput(reason=msg)

            if isinstance(name, six.string_types):
                name = name.strip()
            utils.check_string_length(name, 'name', min_length=1,
                                      max_length=255)

            if description:
                utils.check_string_length(description, 'description',
                                          min_length=0, max_length=255)

            valid_values = {}
            valid_values["display_name"] = name
            valid_values["display_description"] = description
            return valid_values

        try:
            values = _validate(body)
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())

        context = req.environ['rack.context']
        values["gid"] = unicode(uuid.uuid4())
        values["user_id"] = context.user_id
        values["project_id"] = context.project_id
        values["status"] = "ACTIVE"
        group = db.group_create(context, values)

        return self._view_builder.create(group)

    @wsgi.response(200)
    def update(self, req, body, gid):

        def _validate(body, gid):
            if not self.is_valid_body(body, 'group'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            values = body["group"]
            name = values.get("name")
            description = values.get("description")

            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if name is None and description is None:
                msg = _("Group name or description is required")
                raise exception.InvalidInput(reason=msg)

            if name is not None:
                if isinstance(name, six.string_types):
                    name = name.strip()
                utils.check_string_length(name, 'name', min_length=1,
                                          max_length=255)

            if description is not None:
                utils.check_string_length(description, 'description',
                                          min_length=0, max_length=255)

            valid_values = {}
            if name:
                valid_values["display_name"] = name
            # allow blank string to clear description
            if description is not None:
                valid_values["display_description"] = description
            valid_values["gid"] = gid
            return valid_values

        context = req.environ['rack.context']

        try:
            values = _validate(body, gid)
            group = db.group_update(context, values)
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        except exception.GroupNotFound:
            msg = _("Group could not be found")
            raise webob.exc.HTTPNotFound(explanation=msg)

        return self._view_builder.update(group)

    @wsgi.response(204)
    def delete(self, req, gid):

        def _validate(gid):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)
        try:
            _validate(gid)

            context = req.environ['rack.context']

            keypairs = db.keypair_get_all(context, gid)
            if keypairs:
                raise exception.GroupInUse(gid=gid)

            securitygroups = db.securitygroup_get_all(context, gid)
            if securitygroups:
                raise exception.GroupInUse(gid=gid)

            networks = db.network_get_all(context, gid)
            if networks:
                raise exception.GroupInUse(gid=gid)

            processes = db.process_get_all(context, gid)
            if processes:
                raise exception.GroupInUse(gid=gid)

            db.group_delete(context, gid)

        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        except exception.GroupInUse as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())

        except Exception as e:
            LOG.warn(e)
            raise exception.GroupDeleteFailed()


def create_resource():
    return wsgi.Resource(Controller())
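Note: request bodies accepted by the Controller above, as a sketch derived
from its _validate() helpers (the field values are illustrative): the payload
must be wrapped in a 'group' key; 'name' is required on create and is
stripped and length-checked (1-255 chars); on update, an empty description
clears the stored one.

    create_body = {'group': {'name': 'web-cluster', 'description': 'demo'}}
    update_body = {'group': {'description': ''}}  # clears the description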
rack/api/v1/keypairs.py (Normal file, 235 lines)
@@ -0,0 +1,235 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid

import six
import webob

from rack.api.v1.views import keypairs as views_keypairs
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import strutils
from rack.openstack.common import uuidutils
from rack.resourceoperator import rpcapi as operator_rpcapi
from rack.scheduler import rpcapi as scheduler_rpcapi
from rack import utils


LOG = logging.getLogger(__name__)


class Controller(wsgi.Controller):
    """Keypair controller for RACK API."""

    _view_builder_class = views_keypairs.ViewBuilder

    def __init__(self):
        super(Controller, self).__init__()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.operator_rpcapi = operator_rpcapi.ResourceOperatorAPI()

    def _uuid_check(self, gid=None, keypair_id=None):
        if gid:
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)
        if keypair_id:
            if not uuidutils.is_uuid_like(keypair_id):
                raise exception.KeypairNotFound(keypair_id=keypair_id)

    @wsgi.response(200)
    def index(self, req, gid):
        try:
            self._uuid_check(gid=gid)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        filters = {}
        keypair_id = req.params.get('keypair_id')
        nova_keypair_id = req.params.get('nova_keypair_id')
        name = req.params.get('name')
        status = req.params.get('status')
        is_default = req.params.get('is_default')

        if keypair_id:
            filters['keypair_id'] = keypair_id
        if nova_keypair_id:
            filters['nova_keypair_id'] = nova_keypair_id
        if name:
            filters['display_name'] = name
        if status:
            filters['status'] = status
        if is_default:
            filters['is_default'] = is_default

        context = req.environ['rack.context']
        keypair_list = db.keypair_get_all(context, gid, filters)
        return self._view_builder.index(keypair_list)

    @wsgi.response(200)
    def show(self, req, gid, keypair_id):
        try:
            self._uuid_check(gid=gid, keypair_id=keypair_id)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        context = req.environ['rack.context']
        try:
            keypair = db.keypair_get_by_keypair_id(context, gid, keypair_id)
        except exception.KeypairNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        return self._view_builder.show(keypair)

    @wsgi.response(202)
    def create(self, req, body, gid):

        def _validate(body, gid):
            if not self.is_valid_body(body, 'keypair'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            self._uuid_check(gid)
            values = body["keypair"]
            name = values.get("name")
            is_default = values.get("is_default")

            if name:
                if isinstance(name, six.string_types):
                    name = name.strip()
                utils.check_string_length(name, 'name', min_length=1,
                                          max_length=255)

            if is_default:
                try:
                    is_default = strutils.bool_from_string(is_default, strict=True)
                except ValueError:
                    msg = _("is_default must be a boolean")
                    raise exception.InvalidInput(reason=msg)
            else:
                is_default = False

            valid_values = {}
            valid_values["gid"] = gid
            valid_values["display_name"] = name
            valid_values["is_default"] = is_default
            return valid_values

        try:
            values = _validate(body, gid)
        except exception.InvalidInput as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        except exception.GroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        context = req.environ['rack.context']
        values["keypair_id"] = unicode(uuid.uuid4())
        if not values["display_name"]:
            values["display_name"] = "keypair-" + values["keypair_id"]
        values["user_id"] = context.user_id
        values["project_id"] = context.project_id
        values["status"] = "BUILDING"

        try:
            db.group_get_by_gid(context, gid)
            keypair = db.keypair_create(context, values)
            host = self.scheduler_rpcapi.select_destinations(
                context,
                request_spec={},
                filter_properties={})
            self.operator_rpcapi.keypair_create(
                context,
                host["host"],
                gid=gid,
                keypair_id=values["keypair_id"],
                name=values["display_name"])
        except exception.GroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except Exception:
            keypair_id = values["keypair_id"]
            db.keypair_update(context, gid, keypair_id, {"status": "ERROR"})
            raise exception.KeypairCreateFailed()

        return self._view_builder.create(keypair)

    @wsgi.response(200)
    def update(self, req, body, gid, keypair_id):

        def _validate(body, gid, keypair_id):
            if not self.is_valid_body(body, 'keypair'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            self._uuid_check(gid, keypair_id)
            values = body["keypair"]
            is_default = values.get("is_default")

            if is_default:
                try:
                    is_default = strutils.bool_from_string(is_default, strict=True)
|
||||||
|
except ValueError:
|
||||||
|
msg = _("is_default must be a boolean")
|
||||||
|
raise exception.InvalidInput(reason=msg)
|
||||||
|
else:
|
||||||
|
msg = _("is_default is required")
|
||||||
|
raise exception.InvalidInput(reason=msg)
|
||||||
|
|
||||||
|
valid_values = {"is_default": is_default}
|
||||||
|
return valid_values
|
||||||
|
|
||||||
|
context = req.environ['rack.context']
|
||||||
|
|
||||||
|
try:
|
||||||
|
values = _validate(body, gid, keypair_id)
|
||||||
|
keypair = db.keypair_update(context, gid, keypair_id, values)
|
||||||
|
except exception.InvalidInput as e:
|
||||||
|
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
|
||||||
|
except exception.NotFound as e:
|
||||||
|
raise webob.exc.HTTPNotFound(explanation=e.format_message())
|
||||||
|
|
||||||
|
return self._view_builder.update(keypair)
|
||||||
|
|
||||||
|
@wsgi.response(204)
|
||||||
|
def delete(self, req, gid, keypair_id):
|
||||||
|
context = req.environ['rack.context']
|
||||||
|
|
||||||
|
try:
|
||||||
|
self._uuid_check(gid=gid, keypair_id=keypair_id)
|
||||||
|
filters = {"keypair_id": keypair_id}
|
||||||
|
processes = db.process_get_all(context, gid, filters=filters)
|
||||||
|
if processes:
|
||||||
|
raise exception.keypairInUse(keypair_id=keypair_id)
|
||||||
|
keypair = db.keypair_delete(context, gid, keypair_id)
|
||||||
|
host = self.scheduler_rpcapi.select_destinations(
|
||||||
|
context,
|
||||||
|
request_spec={},
|
||||||
|
filter_properties={})
|
||||||
|
self.operator_rpcapi.keypair_delete(
|
||||||
|
context,
|
||||||
|
host["host"],
|
||||||
|
nova_keypair_id=keypair["nova_keypair_id"])
|
||||||
|
except exception.NotFound as e:
|
||||||
|
raise webob.exc.HTTPNotFound(explanation=e.format_message())
|
||||||
|
except exception.keypairInUse as e:
|
||||||
|
raise webob.exc.HTTPConflict(explanation=e.format_message())
|
||||||
|
except Exception as e:
|
||||||
|
LOG.warn(e)
|
||||||
|
raise exception.KeypairDeleteFailed()
|
||||||
|
|
||||||
|
def create_resource():
|
||||||
|
return wsgi.Resource(Controller())
|
||||||
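For illustration, a client could exercise the keypair controller above like this. This is a minimal sketch, not part of this commit: the host, port, URL path, and group id are assumptions made for the example, on the guess that the paste pipeline in etc/api-paste.ini routes POST /v1/groups/<gid>/keypairs to Controller.create.

# Minimal usage sketch (hypothetical endpoint and ids, Python 2 style
# to match the code above).
import json
import urllib2

gid = "00000000-0000-0000-0000-000000000000"  # hypothetical group id
payload = {"keypair": {"name": "my-keypair", "is_default": "true"}}
req = urllib2.Request(
    "http://localhost:8088/v1/groups/%s/keypairs" % gid,
    data=json.dumps(payload),
    headers={"Content-Type": "application/json"})
resp = urllib2.urlopen(req)    # expect 202 (see @wsgi.response(202))
print json.loads(resp.read())  # dict built by views_keypairs.ViewBuilder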
254
rack/api/v1/networks.py
Normal file
@@ -0,0 +1,254 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack import db, utils
from rack import exception
from rack.api import wsgi
from rack.api.v1.views import networks as views_networks
from rack.openstack.common import log as logging, uuidutils, strutils
from rack.openstack.common.gettextutils import _
from rack.resourceoperator import rpcapi as ro_rpcapi
from rack.scheduler import rpcapi as sch_rpcapi
import uuid

import webob


LOG = logging.getLogger(__name__)


class Controller(wsgi.Controller):
    """Network controller for RACK API."""

    _view_builder_class = views_networks.ViewBuilder

    def __init__(self):
        self.scheduler_rpcapi = sch_rpcapi.SchedulerAPI()
        self.resourceoperator_rpcapi = ro_rpcapi.ResourceOperatorAPI()
        super(Controller, self).__init__()

    @wsgi.response(202)
    def create(self, req, gid, body):

        def _validate(context, body, gid):
            # validation checks
            if not self.is_valid_body(body, "network"):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            values = body.get("network")

            # Required item
            subnet = values.get("cidr")
            if subnet is None:
                msg = _("Network cidr is required")
                raise exception.InvalidInput(reason=msg)
            if not utils.is_valid_cidr(subnet):
                msg = _("cidr must be a CIDR")
                raise exception.InvalidInput(reason=msg)

            # Non-essential items
            network_id = unicode(uuid.uuid4())
            name = values.get("name")
            if name is None or not name:
                name = "net-" + network_id
            else:
                name = name.strip()
                utils.check_string_length(name, 'name', min_length=1,
                                          max_length=255)

            is_admin = values.get("is_admin")
            if is_admin:
                try:
                    is_admin = strutils.bool_from_string(
                        is_admin, strict=True)
                except ValueError:
                    msg = _("is_admin must be a boolean")
                    raise exception.InvalidInput(reason=msg)
            else:
                is_admin = False

            gateway = values.get("gateway")
            if gateway is not None and not utils.is_valid_ip_address(gateway):
                msg = _("Invalid gateway")
                raise exception.InvalidInput(reason=msg)

            dns_nameservers = values.get("dns_nameservers")
            if dns_nameservers is not None:
                if isinstance(dns_nameservers, list):
                    for dns in dns_nameservers:
                        if dns == "" or not utils.is_valid_ip_address(dns):
                            msg = _("Invalid dns_nameservers")
                            raise exception.InvalidInput(reason=msg)
                else:
                    msg = _("dns_nameservers must be list format")
                    raise exception.InvalidInput(reason=msg)

            ext_router = values.get("ext_router_id")
            if ext_router is not None and not uuidutils.is_uuid_like(
                    ext_router):
                msg = _("ext_router must be a uuid")
                raise exception.InvalidInput(reason=msg)

            valid_values1 = {}
            valid_values1["network_id"] = network_id
            valid_values1["gid"] = gid
            valid_values1["neutron_network_id"] = None
            valid_values1["is_admin"] = is_admin
            valid_values1["subnet"] = subnet
            valid_values1["ext_router"] = ext_router
            valid_values1["user_id"] = context.user_id
            valid_values1["project_id"] = context.project_id
            valid_values1["display_name"] = name
            valid_values1["status"] = "BUILDING"
            valid_values1["deleted"] = 0

            valid_values2 = {}
            valid_values2["gateway"] = gateway
            valid_values2["dns_nameservers"] = dns_nameservers

            valid_values = {}
            valid_values["db"] = valid_values1
            valid_values["opst"] = valid_values2

            return valid_values

        try:
            context = req.environ['rack.context']
            values = _validate(context, body, gid)
        except exception.InvalidInput as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())

        try:
            # db access
            self._check_gid(gid, is_create=True, context=context)
            network = db.network_create(context, values["db"])
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except Exception as e:
            LOG.exception(e)
            raise exception.NetworkCreateFailed()

        try:
            # scheduler access
            resourceoperator = self._get_resourceoperator(context)
            # resource operator access
            for k, v in values["opst"].items():
                if v is not None:
                    network[k] = v
            self.resourceoperator_rpcapi.network_create(
                context, resourceoperator["host"], network)
        except Exception as e:
            LOG.exception(e)
            error_values = {"status": "ERROR"}
            db.network_update(context, network["network_id"], error_values)
            raise exception.NetworkCreateFailed()

        return self._view_builder.create(network)

    @wsgi.response(200)
    def index(self, req, gid):
        def _validate(gid):
            self._check_gid(gid)

        try:
            context = req.environ['rack.context']
            _validate(gid)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        filters = {}
        network_id = req.params.get('network_id')
        neutron_network_id = req.params.get('neutron_network_id')
        name = req.params.get('name')
        status = req.params.get('status')
        is_admin = req.params.get('is_admin')
        subnet = req.params.get('subnet')
        ext_router = req.params.get('ext_router')

        if network_id:
            filters['network_id'] = network_id
        if neutron_network_id:
            filters['neutron_network_id'] = neutron_network_id
        if name:
            filters['name'] = name
        if status:
            filters['status'] = status
        if is_admin:
            filters['is_admin'] = is_admin
        if subnet:
            filters['subnet'] = subnet
        if ext_router:
            filters['ext_router'] = ext_router

        network_list = db.network_get_all(context, gid, filters)

        return self._view_builder.index(network_list)

    @wsgi.response(200)
    def show(self, req, gid, network_id):
        def _validate(gid, network_id):
            self._check_gid(gid)
            if not uuidutils.is_uuid_like(network_id):
                raise exception.NetworkNotFound(network_id=network_id)

        try:
            context = req.environ['rack.context']
            _validate(gid, network_id)
            network = db.network_get_by_network_id(context, gid, network_id)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        return self._view_builder.show(network)

    @wsgi.response(204)
    def delete(self, req, gid, network_id):
        def _validate(gid, network_id):
            self._check_gid(gid)
            if not uuidutils.is_uuid_like(network_id):
                raise exception.NetworkNotFound(network_id=network_id)

        try:
            context = req.environ['rack.context']
            _validate(gid, network_id)
            network = db.network_get_by_network_id(context, gid, network_id)
            if network["processes"]:
                raise exception.NetworkInUse(network_id=network_id)
            network = db.network_delete(context, gid, network_id)
            resourceoperator = self._get_resourceoperator(context)
            self.resourceoperator_rpcapi.network_delete(
                context, resourceoperator["host"],
                neutron_network_id=network["neutron_network_id"],
                ext_router=network["ext_router"])
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.NetworkInUse as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())
        except Exception as e:
            LOG.exception(e)
            raise exception.NetworkDeleteFailed()

    def _check_gid(self, gid, is_create=False, context=None):
        if not uuidutils.is_uuid_like(gid):
            raise exception.GroupNotFound(gid=gid)
        if is_create:
            db.group_get_by_gid(context, gid)

    def _get_resourceoperator(self, context,
                              request_spec={}, filter_properties={}):
        resourceoperator = self.scheduler_rpcapi.select_destinations(
            context, request_spec, filter_properties)
        return resourceoperator


def create_resource():
    return wsgi.Resource(Controller())
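As a reading aid, here is the shape of request body that Controller.create above accepts. Every value is hypothetical; per _validate, only "cidr" is required and the rest are optional.

# Hypothetical request body for the network create API above.
body = {
    "network": {
        "cidr": "10.0.0.0/24",           # required; validated as a CIDR
        "name": "my-network",            # optional; defaults to "net-<uuid>"
        "is_admin": "true",              # optional; parsed as a boolean
        "gateway": "10.0.0.1",           # optional; must be an IP address
        "dns_nameservers": ["8.8.8.8"],  # optional; must be a list of IPs
        # optional; must be UUID-like (made-up value for illustration)
        "ext_router_id": "11111111-2222-3333-4444-555555555555",
    }
}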
312
rack/api/v1/processes.py
Normal file
@@ -0,0 +1,312 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import netaddr

import six
import webob

from rack.api.v1.views import processes as views_processes
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import uuidutils
from rack.openstack.common import strutils
from rack import utils
from rack.scheduler import rpcapi as scheduler_rpcapi
from rack.resourceoperator import rpcapi as operator_rpcapi


LOG = logging.getLogger(__name__)


class Controller(wsgi.Controller):
    """Process controller for RACK API."""

    _view_builder_class = views_processes.ViewBuilder

    def __init__(self):
        super(Controller, self).__init__()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.operator_rpcapi = operator_rpcapi.ResourceOperatorAPI()

    @wsgi.response(200)
    def index(self, req, gid):

        def _validate(gid):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

        try:
            _validate(gid)
        except exception.GroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        filters = {}
        pid = req.params.get('pid')
        ppid = req.params.get('ppid')
        name = req.params.get('name')
        status = req.params.get('status')
        glance_image_id = req.params.get('glance_image_id')
        nova_flavor_id = req.params.get('nova_flavor_id')
        securitygroup_id = req.params.get('securitygroup_id')
        network_id = req.params.get('network_id')
        keypair_id = req.params.get('keypair_id')

        if pid:
            filters['pid'] = pid
        if ppid:
            filters['ppid'] = ppid
        if name:
            filters['name'] = name
        if status:
            filters['status'] = status
        if glance_image_id:
            filters['glance_image_id'] = glance_image_id
        if nova_flavor_id:
            filters['nova_flavor_id'] = nova_flavor_id
        if securitygroup_id:
            filters['securitygroup_id'] = securitygroup_id
        if network_id:
            filters['network_id'] = network_id
        if keypair_id:
            filters['keypair_id'] = keypair_id

        context = req.environ['rack.context']
        process_list = db.process_get_all(context, gid, filters)

        return self._view_builder.index(process_list)

    @wsgi.response(200)
    def show(self, req, gid, pid):

        def _validate(gid, pid):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not uuidutils.is_uuid_like(pid):
                raise exception.ProcessNotFound(pid=pid)

        try:
            _validate(gid, pid)
            context = req.environ['rack.context']
            process = db.process_get_by_pid(context, gid, pid)
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        return self._view_builder.show(process)

    @wsgi.response(202)
    def create(self, req, body, gid):

        def _validate_process(context, gid, body):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not self.is_valid_body(body, 'process'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            values = body["process"]
            ppid = values.get("ppid")
            keypair_id = values.get("keypair_id")
            name = values.get("name")
            glance_image_id = values.get("glance_image_id")
            nova_flavor_id = values.get("nova_flavor_id")
            securitygroup_ids = values.get("securitygroup_ids")

            if ppid is not None:
                if not uuidutils.is_uuid_like(ppid):
                    raise exception.ProcessNotFound(pid=ppid)
                p_process = db.process_get_by_pid(context, gid, ppid)

            if keypair_id is not None:
                if not uuidutils.is_uuid_like(keypair_id):
                    raise exception.KeypairNotFound(keypair_id=keypair_id)
            elif ppid is not None:
                keypair_id = p_process.get("keypair_id")

            if isinstance(name, six.string_types):
                name = name.strip()
                utils.check_string_length(name, 'name', min_length=1,
                                          max_length=255)
            elif name is not None:
                msg = _("name must be a String")
                raise exception.InvalidInput(reason=msg)

            if glance_image_id is None:
                if ppid is not None:
                    glance_image_id = p_process.get("glance_image_id")
            elif not uuidutils.is_uuid_like(glance_image_id):
                msg = _("glance_image_id is invalid format")
                raise exception.InvalidInput(reason=msg)

            if nova_flavor_id is None and ppid is not None:
                nova_flavor_id = p_process.get("nova_flavor_id")
            utils.validate_integer(nova_flavor_id, 'nova_flavor_id')

            if not securitygroup_ids:
                if ppid is not None:
                    securitygroup_ids = [
                        securitygroup.get("securitygroup_id")
                        for securitygroup in p_process.get("securitygroups")]
                else:
                    msg = _("securitygroup_ids is required")
                    raise exception.InvalidInput(reason=msg)

            if isinstance(securitygroup_ids, list):
                for securitygroup_id in securitygroup_ids:
                    if (securitygroup_id is not None and
                            not uuidutils.is_uuid_like(securitygroup_id)):
                        raise exception.SecuritygroupNotFound(
                            securitygroup_id=securitygroup_id)
            else:
                msg = _("securitygroup_ids must be list")
                raise exception.InvalidInput(reason=msg)

            valid_values = {}
            valid_values_process = {}
            valid_values_process["gid"] = gid
            valid_values_process["keypair_id"] = keypair_id
            valid_values_process["ppid"] = ppid
            valid_values_process["display_name"] = name
            valid_values_process["glance_image_id"] = glance_image_id
            valid_values_process["nova_flavor_id"] = nova_flavor_id

            valid_values_securitygroup = {}
            valid_values_securitygroup["securitygroup_ids"] = \
                securitygroup_ids

            valid_values["process"] = valid_values_process
            valid_values["securitygroup"] = valid_values_securitygroup
            return valid_values

        def _validate_metadata(metadata):
            if metadata is None:
                return {}

            if not isinstance(metadata, dict):
                msg = _("metadata must be a dict")
                raise exception.InvalidInput(reason=msg)

            return metadata

        try:
            context = req.environ['rack.context']
            valid_values = _validate_process(context, gid, body)
            values = valid_values.get("process")
            securitygroup_ids = valid_values.get(
                "securitygroup").get("securitygroup_ids")
            metadata = _validate_metadata(
                body["process"].get("metadata"))

            values["deleted"] = 0
            values["status"] = "BUILDING"
            values["pid"] = unicode(uuid.uuid4())
            values["user_id"] = context.user_id
            values["project_id"] = context.project_id
            values["display_name"] = (values["display_name"] or
                                      "pro-" + values["pid"])

            if values["ppid"]:
                db.process_get_by_pid(context, gid, values["ppid"])
            if values["keypair_id"]:
                nova_keypair_id = db.keypair_get_by_keypair_id(
                    context, gid, values["keypair_id"]).get("nova_keypair_id")
            else:
                nova_keypair_id = None
            networks = db.network_get_all(context, gid, {"status": "ACTIVE"})
            if not networks:
                raise exception.NoNetworksFound(gid=values["gid"])
            network_ids = [network["network_id"] for network in networks]
            process = db.process_create(context, values,
                                        network_ids, securitygroup_ids)

        except exception.InvalidInput as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        try:
            host = self.scheduler_rpcapi.select_destinations(
                context,
                request_spec={},
                filter_properties={})
            self.operator_rpcapi.process_create(
                context,
                host["host"],
                pid=values["pid"],
                ppid=values["ppid"] or values["pid"],
                gid=gid,
                name=values["display_name"],
                glance_image_id=values["glance_image_id"],
                nova_flavor_id=values["nova_flavor_id"],
                nova_keypair_id=nova_keypair_id,
                neutron_securitygroup_ids=[
                    securitygroup["neutron_securitygroup_id"]
                    for securitygroup in process["securitygroups"]],
                neutron_network_ids=[
                    network["neutron_network_id"]
                    for network in process["networks"]],
                metadata=metadata)
        except Exception as e:
            LOG.exception(e)
            pid = values["pid"]
            db.process_update(context, gid, pid, {"status": "ERROR"})
            raise exception.ProcessCreateFailed()

        return self._view_builder.create(process)

    @wsgi.response(204)
    def delete(self, req, gid, pid):

        def _validate(gid, pid):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not uuidutils.is_uuid_like(pid):
                raise exception.ProcessNotFound(pid=pid)

        def _get_child_pid(context, gid, pid):
            processes = db.process_get_all(context, gid, {"ppid": pid})
            targets = []
            for process in processes:
                targets.append(process["pid"])
                targets.extend(_get_child_pid(context, gid, process["pid"]))
            return targets

        try:
            _validate(gid, pid)
            context = req.environ['rack.context']
            targets = _get_child_pid(context, gid, pid)
            targets.append(pid)

            for target in targets:
                process = db.process_delete(context, gid, target)
                host = self.scheduler_rpcapi.select_destinations(
                    context,
                    request_spec={},
                    filter_properties={})
                self.operator_rpcapi.process_delete(
                    context,
                    host["host"],
                    nova_instance_id=process["nova_instance_id"])

        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        except Exception as e:
            LOG.exception(e)
            raise exception.ProcessDeleteFailed()


def create_resource():
    return wsgi.Resource(Controller())
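One detail worth calling out in _validate_process above: when "ppid" is supplied, the keypair, image, flavor, and securitygroups default to the parent process's values, which is what makes the fork-style workflow possible. A hypothetical child-spawn body, with made-up ids:

# Hypothetical request body forking a child from an existing parent.
body = {
    "process": {
        "ppid": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",  # parent pid
        "name": "child-worker",
        "metadata": {"role": "worker"},  # must be a dict if present
        # keypair_id, glance_image_id, nova_flavor_id and
        # securitygroup_ids are inherited from the parent when omitted
    }
}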
321
rack/api/v1/securitygroups.py
Normal file
@@ -0,0 +1,321 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import netaddr

import six
import webob

from rack.api.v1.views import securitygroups as views_securitygroups
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import uuidutils
from rack.openstack.common import strutils
from rack import utils
from rack.scheduler import rpcapi as scheduler_rpcapi
from rack.resourceoperator import rpcapi as operator_rpcapi


LOG = logging.getLogger(__name__)


class Controller(wsgi.Controller):
    """Securitygroup controller for RACK API."""

    _view_builder_class = views_securitygroups.ViewBuilder

    def __init__(self):
        super(Controller, self).__init__()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.operator_rpcapi = operator_rpcapi.ResourceOperatorAPI()

    @wsgi.response(200)
    def index(self, req, gid):

        def _validate(gid):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

        try:
            _validate(gid)
        except exception.GroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        filters = {}
        securitygroup_id = req.params.get('securitygroup_id')
        name = req.params.get('name')
        status = req.params.get('status')
        is_default = req.params.get('is_default')

        if securitygroup_id:
            filters['securitygroup_id'] = securitygroup_id
        if name:
            filters['name'] = name
        if status:
            filters['status'] = status
        if is_default:
            filters['is_default'] = is_default

        context = req.environ['rack.context']
        securitygroup_list = db.securitygroup_get_all(context, gid, filters)

        return self._view_builder.index(securitygroup_list)

    @wsgi.response(200)
    def show(self, req, gid, securitygroup_id):

        def _validate(gid, securitygroup_id):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not uuidutils.is_uuid_like(securitygroup_id):
                raise exception.SecuritygroupNotFound(
                    securitygroup_id=securitygroup_id)

        try:
            _validate(gid, securitygroup_id)
            context = req.environ['rack.context']
            securitygroup = db.securitygroup_get_by_securitygroup_id(
                context, gid, securitygroup_id)
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        return self._view_builder.show(securitygroup)

    @wsgi.response(202)
    def create(self, req, body, gid):

        def _validate_securitygroup(gid, body):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not self.is_valid_body(body, 'securitygroup'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            values = body["securitygroup"]
            name = values.get("name")
            is_default = values.get("is_default")

            if isinstance(name, six.string_types):
                name = name.strip()
                utils.check_string_length(name, 'name', min_length=1,
                                          max_length=255)

            if is_default:
                try:
                    is_default = strutils.bool_from_string(is_default,
                                                           strict=True)
                except ValueError:
                    msg = _("is_default must be a boolean")
                    raise exception.InvalidInput(reason=msg)
            else:
                is_default = False

            valid_values = {}
            valid_values["gid"] = gid
            valid_values["display_name"] = name
            valid_values["is_default"] = is_default
            return valid_values

        def _validate_securitygrouprules(securitygrouprules):

            valid_securitygrouprules = []
            for securitygroup in securitygrouprules:
                protocol = securitygroup.get("protocol")
                port_range_max = securitygroup.get("port_range_max")
                port_range_min = securitygroup.get("port_range_min")
                remote_securitygroup_id = securitygroup.get(
                    "remote_securitygroup_id")
                remote_ip_prefix = securitygroup.get("remote_ip_prefix")

                if not protocol:
                    msg = _("SecurityGroupRule protocol is required")
                    raise exception.InvalidInput(reason=msg)
                elif not utils.is_valid_protocol(protocol):
                    msg = _("SecurityGroupRule protocol should be "
                            "tcp or udp or icmp")
                    raise exception.InvalidInput(reason=msg)

                if not remote_securitygroup_id and not remote_ip_prefix:
                    msg = _("SecurityGroupRule either "
                            "remote_securitygroup_id or remote_ip_prefix "
                            "is required")
                    raise exception.InvalidInput(reason=msg)
                elif remote_securitygroup_id and remote_ip_prefix:
                    msg = _("SecurityGroupRule remote_securitygroup_id and "
                            "remote_ip_prefix cannot both be specified")
                    raise exception.InvalidInput(reason=msg)
                elif remote_securitygroup_id is not None:
                    if not uuidutils.is_uuid_like(remote_securitygroup_id):
                        raise exception.SecuritygroupNotFound(
                            securitygroup_id=remote_securitygroup_id)
                elif remote_ip_prefix is not None:
                    if not utils.is_valid_cidr(remote_ip_prefix):
                        msg = _("SecurityGroupRule remote_ip_prefix should "
                                "be cidr format")
                        raise exception.InvalidInput(reason=msg)

                if protocol in ["tcp", "udp"]:
                    if port_range_max is None:
                        msg = _("SecurityGroupRule port_range_max is "
                                "required")
                        raise exception.InvalidInput(reason=msg)
                    utils.validate_integer(
                        port_range_max, 'port_range_max',
                        min_value=1, max_value=65535)
                    if port_range_min:
                        utils.validate_integer(
                            port_range_min, 'port_range_min',
                            min_value=1, max_value=65535)
                        if port_range_min > port_range_max:
                            msg = _("SecurityGroupRule port_range_min "
                                    "should be lower than port_range_max")
                            raise exception.InvalidInput(reason=msg)
                elif protocol == "icmp":
                    port_range_max = None
                    port_range_min = None

                valid_securitygrouprules.append({
                    "protocol": protocol,
                    "port_range_max": port_range_max,
                    "port_range_min": port_range_min,
                    "remote_securitygroup_id": remote_securitygroup_id,
                    "remote_ip_prefix": unicode(
                        netaddr.IPNetwork(remote_ip_prefix))
                    if remote_ip_prefix else remote_ip_prefix
                })
            return valid_securitygrouprules

        try:
            context = req.environ['rack.context']
            values = _validate_securitygroup(gid, body)
            if body["securitygroup"].get("securitygrouprules"):
                securitygrouprules = _validate_securitygrouprules(
                    body["securitygroup"].get("securitygrouprules"))
            else:
                securitygrouprules = []
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        values["deleted"] = 0
        values["status"] = "BUILDING"
        values["securitygroup_id"] = unicode(uuid.uuid4())
        values["user_id"] = context.user_id
        values["project_id"] = context.project_id
        values["display_name"] = (values["display_name"] or
                                  "sec-" + values["securitygroup_id"])

        try:
            for i in range(len(securitygrouprules)):
                if securitygrouprules[i]["remote_securitygroup_id"]:
                    securitygroup = db.securitygroup_get_by_securitygroup_id(
                        context, gid,
                        securitygrouprules[i]["remote_securitygroup_id"])
                    rule = securitygrouprules[i]
                    rule["remote_neutron_securitygroup_id"] = (
                        securitygroup.get("neutron_securitygroup_id"))
            db.group_get_by_gid(context, gid)
            securitygroup = db.securitygroup_create(context, values)
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        try:
            host = self.scheduler_rpcapi.select_destinations(
                context,
                request_spec={},
                filter_properties={})
            self.operator_rpcapi.securitygroup_create(
                context,
                host["host"],
                gid=gid,
                securitygroup_id=values["securitygroup_id"],
                name=values["display_name"],
                securitygrouprules=securitygrouprules)
        except Exception:
            securitygroup_id = values["securitygroup_id"]
            db.securitygroup_update(context, gid, securitygroup_id,
                                    {"status": "ERROR"})
            raise exception.SecuritygroupCreateFailed()

        return self._view_builder.create(securitygroup)

    @wsgi.response(200)
    def update(self, req, body, gid, securitygroup_id):

        def _validate(body, gid, securitygroup_id):
            if not self.is_valid_body(body, 'securitygroup'):
                msg = _("Invalid request body")
                raise exception.InvalidInput(reason=msg)

            values = body["securitygroup"]
            is_default = values.get("is_default")

            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not uuidutils.is_uuid_like(securitygroup_id):
                raise exception.SecuritygroupNotFound(
                    securitygroup_id=securitygroup_id)

            if is_default:
                try:
                    is_default = strutils.bool_from_string(is_default,
                                                           strict=True)
                except ValueError:
                    msg = _("is_default must be a boolean")
                    raise exception.InvalidInput(reason=msg)
            else:
                msg = _("SecurityGroup is_default is required")
                raise exception.InvalidInput(reason=msg)

            valid_values = {}
            valid_values["is_default"] = is_default
            return valid_values

        try:
            values = _validate(body, gid, securitygroup_id)
            context = req.environ['rack.context']
            securitygroup = db.securitygroup_update(
                context, gid, securitygroup_id, values)
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        return self._view_builder.update(securitygroup)

    @wsgi.response(204)
    def delete(self, req, gid, securitygroup_id):

        def _validate(gid, securitygroup_id):
            if not uuidutils.is_uuid_like(gid):
                raise exception.GroupNotFound(gid=gid)

            if not uuidutils.is_uuid_like(securitygroup_id):
                raise exception.SecuritygroupNotFound(
                    securitygroup_id=securitygroup_id)

        try:
            _validate(gid, securitygroup_id)
            context = req.environ['rack.context']
            securitygroup = db.securitygroup_get_by_securitygroup_id(
                context, gid, securitygroup_id)
            if securitygroup["processes"]:
                raise exception.SecuritygroupInUse(
                    securitygroup_id=securitygroup_id)
            securitygroup = db.securitygroup_delete(context, gid,
                                                    securitygroup_id)
        except exception.SecuritygroupInUse as exc:
            raise webob.exc.HTTPConflict(explanation=exc.format_message())
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())

        try:
            host = self.scheduler_rpcapi.select_destinations(
                context,
                request_spec={},
                filter_properties={})
            self.operator_rpcapi.securitygroup_delete(
                context,
                host["host"],
                neutron_securitygroup_id=securitygroup[
                    "neutron_securitygroup_id"])
        except Exception:
            raise exception.SecuritygroupDeleteFailed()


def create_resource():
    return wsgi.Resource(Controller())
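For reference, a body that passes _validate_securitygrouprules above: tcp/udp rules must carry port_range_max, icmp rules have their port ranges cleared, and each rule needs exactly one of remote_ip_prefix or remote_securitygroup_id. All values below are hypothetical.

# Hypothetical request body for the securitygroup create API above.
body = {
    "securitygroup": {
        "name": "web",
        "is_default": "false",
        "securitygrouprules": [
            {"protocol": "tcp",
             "port_range_min": 80,           # optional lower bound
             "port_range_max": 80,           # required for tcp/udp
             "remote_ip_prefix": "0.0.0.0/0"},
            {"protocol": "icmp",             # port ranges ignored for icmp
             "remote_ip_prefix": "10.0.0.0/24"},
        ],
    }
}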
0
rack/api/v1/views/__init__.py
Normal file
49
rack/api/v1/views/groups.py
Normal file
@@ -0,0 +1,49 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a group API response as a python dictionary."""

    def index(self, group_list):
        return dict(groups=[self._base_response(group)
                            for group in group_list])

    def show(self, group):
        base = self._base_response(group)
        return dict(group=base)

    def create(self, group):
        base = self._base_response(group)
        return dict(group=base)

    def update(self, group):
        base = self._base_response(group)
        return dict(group=base)

    def _base_response(self, group):
        return {
            "gid": group["gid"],
            "user_id": group["user_id"],
            "project_id": group["project_id"],
            "name": group["display_name"],
            "description": group["display_description"],
            "status": group["status"]
        }
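To make the mapping concrete, here is a hypothetical group record and the response ViewBuilder.show above would build from it; note that display_name and display_description are exposed as name and description.

# Hypothetical record, for illustration only.
group = {"gid": "g-1", "user_id": "u-1", "project_id": "p-1",
         "display_name": "grp", "display_description": "demo",
         "status": "ACTIVE"}
# ViewBuilder().show(group) returns:
# {"group": {"gid": "g-1", "user_id": "u-1", "project_id": "p-1",
#            "name": "grp", "description": "demo", "status": "ACTIVE"}}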
51
rack/api/v1/views/keypairs.py
Normal file
@@ -0,0 +1,51 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a keypair API response as a python dictionary."""

    def index(self, keypair_list):
        return dict(keypairs=[self._base_response(keypair)
                              for keypair in keypair_list])

    def show(self, keypair):
        base = self._base_response(keypair)
        return dict(keypair=base)

    def create(self, keypair):
        base = self._base_response(keypair)
        return dict(keypair=base)

    def update(self, keypair):
        base = self._base_response(keypair)
        return dict(keypair=base)

    def _base_response(self, keypair):
        return {
            "keypair_id": keypair.get("keypair_id", ""),
            "nova_keypair_id": keypair.get("nova_keypair_id", ""),
            "user_id": keypair.get("user_id", ""),
            "project_id": keypair.get("project_id", ""),
            "gid": keypair.get("gid", ""),
            "name": keypair.get("display_name", ""),
            "private_key": keypair.get("private_key", ""),
            "is_default": keypair.get("is_default", ""),
            "status": keypair.get("status", "")
        }
47
rack/api/v1/views/networks.py
Normal file
@@ -0,0 +1,47 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a networks API response as a python dictionary."""

    def index(self, network_list):
        return dict(networks=[
            self._base_response(network) for network in network_list])

    def show(self, network):
        base = self._base_response(network)
        return dict(network=base)

    def create(self, network):
        base = self._base_response(network)
        return dict(network=base)

    def _base_response(self, network):
        return {
            "network_id": network["network_id"],
            "neutron_network_id": network["neutron_network_id"],
            "gid": network["gid"],
            "user_id": network["user_id"],
            "project_id": network["project_id"],
            "name": network["display_name"],
            "is_admin": network["is_admin"],
            "cidr": network["subnet"],
            "ext_router_id": network["ext_router"],
            "status": network["status"]
        }
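Note the field renames in _base_response above: the DB column "subnet" is exposed as "cidr" and "ext_router" as "ext_router_id". For example, with a hypothetical record:

# Hypothetical record, for illustration only.
network = {"network_id": "n-1", "neutron_network_id": "nn-1",
           "gid": "g-1", "user_id": "u-1", "project_id": "p-1",
           "display_name": "net-1", "is_admin": False,
           "subnet": "10.0.0.0/24", "ext_router": "r-1",
           "status": "ACTIVE"}
# _base_response(network) yields "cidr": "10.0.0.0/24" and
# "ext_router_id": "r-1" alongside the other fields.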
56
rack/api/v1/views/processes.py
Normal file
@@ -0,0 +1,56 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a process API response as a python dictionary."""

    def index(self, process_list):
        return dict(processes=[self._base_response(process)
                               for process in process_list])

    def show(self, process):
        base = self._base_response(process)
        return dict(process=base)

    def create(self, process):
        base = self._base_response(process)
        return dict(process=base)

    def update(self, process):
        base = self._base_response(process)
        return dict(process=base)

    def _base_response(self, process):
        return {
            "gid": process.get("gid"),
            "pid": process.get("pid"),
            "ppid": process.get("ppid", ""),
            "user_id": process.get("user_id"),
            "project_id": process.get("project_id"),
            "name": process.get("display_name"),
            "glance_image_id": process.get("glance_image_id"),
            "nova_flavor_id": process.get("nova_flavor_id"),
            "status": process.get("status"),
            "keypair_id": process.get("keypair_id"),
            "network_ids": [network.get("network_id")
                            for network in process.get("networks")],
            "securitygroup_ids": [
                securitygroup.get("securitygroup_id")
                for securitygroup in process.get("securitygroups")],
        }
50
rack/api/v1/views/securitygroups.py
Normal file
@@ -0,0 +1,50 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model a securitygroup API response as a python dictionary."""

    def index(self, securitygroup_list):
        return dict(securitygroups=[
            self._base_response(securitygroup)
            for securitygroup in securitygroup_list])

    def show(self, securitygroup):
        base = self._base_response(securitygroup)
        return dict(securitygroup=base)

    def create(self, securitygroup):
        base = self._base_response(securitygroup)
        return dict(securitygroup=base)

    def update(self, securitygroup):
        base = self._base_response(securitygroup)
        return dict(securitygroup=base)

    def _base_response(self, securitygroup):
        return {
            "securitygroup_id": securitygroup.get("securitygroup_id"),
            "neutron_securitygroup_id": securitygroup.get(
                "neutron_securitygroup_id"),
            "user_id": securitygroup.get("user_id"),
            "project_id": securitygroup.get("project_id"),
            "gid": securitygroup.get("gid"),
            "name": securitygroup.get("display_name"),
            "is_default": securitygroup.get("is_default"),
            "status": securitygroup.get("status")
        }
243
rack/api/versions.py
Normal file
@@ -0,0 +1,243 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from lxml import etree
from oslo.config import cfg

from rack.api.views import versions as views_versions
from rack.api import wsgi
from rack.api import xmlutil
from rack.openstack.common import timeutils


CONF = cfg.CONF

LINKS = {
    'v2.0': {
        'pdf': 'http://docs.openstack.org/'
               'api/openstack-compute/2/os-compute-devguide-2.pdf',
        'wadl': 'http://docs.openstack.org/'
                'api/openstack-compute/2/wadl/os-compute-2.wadl'
    },
}


VERSIONS = {
    "v2.0": {
        "id": "v2.0",
        "status": "CURRENT",
        "updated": "2011-01-21T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "application/pdf",
                "href": LINKS['v2.0']['pdf'],
            },
            {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                "href": LINKS['v2.0']['wadl'],
            },
        ],
        "media-types": [
            {
                "base": "application/xml",
                "type": "application/vnd.openstack.compute+xml;version=2",
            },
            {
                "base": "application/json",
                "type": "application/vnd.openstack.compute+json;version=2",
            }
        ],
    },
}


class MediaTypesTemplateElement(xmlutil.TemplateElement):

    def will_render(self, datum):
        return 'media-types' in datum


def make_version(elem):
    elem.set('id')
    elem.set('status')
    elem.set('updated')

    mts = MediaTypesTemplateElement('media-types')
    elem.append(mts)

    mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types')
    mt.set('base')
    mt.set('type')

    xmlutil.make_links(elem, 'links')


version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}


class VersionTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('version', selector='version')
        make_version(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)


class VersionsTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('versions')
        elem = xmlutil.SubTemplateElement(root, 'version', selector='versions')
        make_version(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)


class ChoicesTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        root = xmlutil.TemplateElement('choices')
        elem = xmlutil.SubTemplateElement(root, 'version', selector='choices')
        make_version(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)


class AtomSerializer(wsgi.XMLDictSerializer):

    NSMAP = {None: xmlutil.XMLNS_ATOM}

    def __init__(self, metadata=None, xmlns=None):
        self.metadata = metadata or {}
        if not xmlns:
            self.xmlns = wsgi.XMLNS_ATOM
        else:
            self.xmlns = xmlns

    def _get_most_recent_update(self, versions):
        recent = None
        for version in versions:
            updated = timeutils.parse_strtime(version['updated'],
                                              '%Y-%m-%dT%H:%M:%SZ')
            if not recent:
                recent = updated
            elif updated > recent:
                recent = updated

        return recent.strftime('%Y-%m-%dT%H:%M:%SZ')

    def _get_base_url(self, link_href):
        # Make sure no trailing /
        link_href = link_href.rstrip('/')
        return link_href.rsplit('/', 1)[0] + '/'

    def _create_feed(self, versions, feed_title, feed_id):
        feed = etree.Element('feed', nsmap=self.NSMAP)
        title = etree.SubElement(feed, 'title')
        title.set('type', 'text')
        title.text = feed_title

        # Set this updated to the most recently updated version
        recent = self._get_most_recent_update(versions)
        etree.SubElement(feed, 'updated').text = recent

        etree.SubElement(feed, 'id').text = feed_id

        link = etree.SubElement(feed, 'link')
        link.set('rel', 'self')
        link.set('href', feed_id)

        author = etree.SubElement(feed, 'author')
        etree.SubElement(author, 'name').text = 'Rackspace'
        etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/'

        for version in versions:
            feed.append(self._create_version_entry(version))

        return feed

    def _create_version_entry(self, version):
        entry = etree.Element('entry')
        etree.SubElement(entry, 'id').text = version['links'][0]['href']
        title = etree.SubElement(entry, 'title')
        title.set('type', 'text')
        title.text = 'Version %s' % version['id']
        etree.SubElement(entry, 'updated').text = version['updated']

        for link in version['links']:
            link_elem = etree.SubElement(entry, 'link')
            link_elem.set('rel', link['rel'])
            link_elem.set('href', link['href'])
            if 'type' in link:
                link_elem.set('type', link['type'])

        content = etree.SubElement(entry, 'content')
        content.set('type', 'text')
        content.text = 'Version %s %s (%s)' % (version['id'],
                                               version['status'],
                                               version['updated'])
        return entry


class VersionsAtomSerializer(AtomSerializer):
    def default(self, data):
        versions = data['versions']
        feed_id = self._get_base_url(versions[0]['links'][0]['href'])
        feed = self._create_feed(versions, 'Available API Versions', feed_id)
        return self._to_xml(feed)


class VersionAtomSerializer(AtomSerializer):
    def default(self, data):
        version = data['version']
        feed_id = version['links'][0]['href']
        feed = self._create_feed([version], 'About This Version', feed_id)
        return self._to_xml(feed)


class Versions(wsgi.Resource):
    def __init__(self):
        super(Versions, self).__init__(None)

    @wsgi.serializers(xml=VersionsTemplate,
                      atom=VersionsAtomSerializer)
    def index(self, req):
        """Return all versions."""
        builder = views_versions.get_view_builder(req)
        return builder.build_versions(VERSIONS)

    @wsgi.serializers(xml=ChoicesTemplate)
    @wsgi.response(300)
    def multi(self, req):
        """Return multiple choices."""
        builder = views_versions.get_view_builder(req)
        return builder.build_choices(VERSIONS, req)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        args = {}
        if request_environment['PATH_INFO'] == '/':
            args['action'] = 'index'
        else:
            args['action'] = 'multi'

        return args


class VersionV2(object):
    @wsgi.serializers(xml=VersionTemplate,
                      atom=VersionAtomSerializer)
    def show(self, req):
        builder = views_versions.get_view_builder(req)
        return builder.build_version(VERSIONS['v2.0'])


def create_resource():
    return wsgi.Resource(VersionV2())
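
# A minimal sketch of the routing rule implemented by
# Versions.get_action_args above: the bare root path lists all versions,
# anything else falls through to the 300 multiple-choices response.
# (This assumes only that wsgi.Resource accepts None for its controller,
# as in __init__ above.)
if __name__ == '__main__':
    v = Versions()
    assert v.get_action_args({'PATH_INFO': '/'}) == {'action': 'index'}
    assert v.get_action_args({'PATH_INFO': '/v2.0'}) == {'action': 'multi'}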
0
rack/api/views/__init__.py
Normal file
96
rack/api/views/versions.py
Normal file
@@ -0,0 +1,96 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import os

from rack.api import common


def get_view_builder(req):
    base_url = req.application_url
    return ViewBuilder(base_url)


class ViewBuilder(common.ViewBuilder):

    def __init__(self, base_url):
        """:param base_url: url of the root wsgi application."""
        self.base_url = base_url

    def build_choices(self, VERSIONS, req):
        version_objs = []
        for version in VERSIONS:
            version = VERSIONS[version]
            version_objs.append({
                "id": version['id'],
                "status": version['status'],
                "links": [
                    {
                        "rel": "self",
                        "href": self.generate_href(version['id'], req.path),
                    },
                ],
                "media-types": version['media-types'],
            })

        return dict(choices=version_objs)

    def build_versions(self, versions):
        version_objs = []
        for version in sorted(versions.keys()):
            version = versions[version]
            version_objs.append({
                "id": version['id'],
                "status": version['status'],
                "updated": version['updated'],
                "links": self._build_links(version),
            })

        return dict(versions=version_objs)

    def build_version(self, version):
        reval = copy.deepcopy(version)
        reval['links'].insert(0, {
            "rel": "self",
            "href": self.base_url.rstrip('/') + '/',
        })
        return dict(version=reval)

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        href = self.generate_href(version_data['id'])

        links = [
            {
                "rel": "self",
                "href": href,
            },
        ]

        return links

    def generate_href(self, version, path=None):
        """Create a url that refers to a specific version_number."""
        prefix = self._update_compute_link_prefix(self.base_url)
        if version.find('v3.') == 0:
            version_number = 'v3'
        else:
            version_number = 'v2'

        if path:
            path = path.strip('/')
            return os.path.join(prefix, version_number, path)
        else:
            return os.path.join(prefix, version_number) + '/'
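
# A minimal sketch of generate_href above, assuming that
# _update_compute_link_prefix() (inherited from rack.api.common) returns
# the base URL unchanged when no link prefix is configured:
#
#     builder = ViewBuilder('http://localhost:8088/')
#     builder.generate_href('v2.0')              # 'http://localhost:8088/v2/'
#     builder.generate_href('v3.0', '/servers')  # 'http://localhost:8088/v3/servers'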
1302
rack/api/wsgi.py
Normal file
File diff suppressed because it is too large
993
rack/api/xmlutil.py
Normal file
@@ -0,0 +1,993 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path

from lxml import etree
import six
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader

from rack import exception
from rack.openstack.common.gettextutils import _
from rack import utils


XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'


def validate_schema(xml, schema_name, version='v1.1'):
    if isinstance(xml, str):
        xml = etree.fromstring(xml)
    base_path = 'rack/api/openstack/compute/schemas/'
    if schema_name not in ('atom', 'atom-link'):
        base_path += '%s/' % version
    schema_path = os.path.join(utils.rackdir(),
                               '%s%s.rng' % (base_path, schema_name))
    schema_doc = etree.parse(schema_path)
    relaxng = etree.RelaxNG(schema_doc)
    relaxng.assertValid(xml)
class Selector(object):
    """Selects datum to operate on from an object."""

    def __init__(self, *chain):
        """Initialize the selector.

        Each argument is a subsequent index into the object.
        """

        self.chain = chain

    def __repr__(self):
        """Return a representation of the selector."""

        return "Selector" + repr(self.chain)

    def __call__(self, obj, do_raise=False):
        """Select a datum to operate on.

        Selects the relevant datum within the object.

        :param obj: The object from which to select the datum.
        :param do_raise: If False (the default), return None if the
                         indexed datum does not exist.  Otherwise,
                         raise a KeyError.
        """

        # Walk the selector list
        for elem in self.chain:
            # If it's callable, call it
            if callable(elem):
                obj = elem(obj)
            else:
                if obj == '':
                    return ''
                # Use indexing
                try:
                    obj = obj[elem]
                except (KeyError, IndexError):
                    # No sense going any further
                    if do_raise:
                        # Convert to a KeyError, for consistency
                        raise KeyError(elem)
                    return None

        # Return the finally-selected object
        return obj


def get_items(obj):
    """Get items in obj."""

    return list(obj.items())


def get_items_without_dict(obj):
    """Get items in obj but omit any items containing a dict."""

    obj_list = list(obj.items())
    # Iterate over a copy so removals don't skip elements
    for item in obj_list[:]:
        if isinstance(list(item)[1], dict):
            obj_list.remove(item)
    return obj_list


class EmptyStringSelector(Selector):
    """Returns the empty string if Selector would return None."""
    def __call__(self, obj, do_raise=False):
        """Returns empty string if the selected value does not exist."""

        try:
            return super(EmptyStringSelector, self).__call__(obj, True)
        except KeyError:
            return ""


class ConstantSelector(object):
    """Returns a constant."""

    def __init__(self, value):
        """Initialize the selector.

        :param value: The value to return.
        """

        self.value = value

    def __repr__(self):
        """Return a representation of the selector."""

        return repr(self.value)

    def __call__(self, _obj, _do_raise=False):
        """Select a datum to operate on.

        Returns a constant value.  Compatible with
        Selector.__call__().
        """

        return self.value
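
# A minimal sketch of Selector in action, assuming the classes above:
# each chain element is a key/index, or a callable applied to the
# current object.
if __name__ == '__main__':
    sel = Selector('versions', 0, 'id')
    assert sel({'versions': [{'id': 'v2.0'}]}) == 'v2.0'
    # Missing data yields None unless do_raise is set
    assert sel({'versions': []}) is None
    assert ConstantSelector('fixed')({'anything': 'at all'}) == 'fixed'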
class TemplateElement(object):
    """Represent an element in the template."""

    def __init__(self, tag, attrib=None, selector=None, subselector=None,
                 colon_ns=False, **extra):
        """Initialize an element.

        Initializes an element in the template.  Keyword arguments
        specify attributes to be set on the element; values must be
        callables.  See TemplateElement.set() for more information.

        :param tag: The name of the tag to create.
        :param attrib: An optional dictionary of element attributes.
        :param selector: An optional callable taking an object and
                         optional boolean do_raise indicator and
                         returning the object bound to the element.
        :param subselector: An optional callable taking an object and
                            optional boolean do_raise indicator and
                            returning the object bound to the element.
                            This is used to further refine the datum
                            object returned by selector in the event
                            that it is a list of objects.
        :param colon_ns: An optional flag indicating whether to support
                         k:v type tagnames; if True, the k:v type
                         tagname will be supported by adding the k into
                         the namespace.
        """

        # Convert selector into a Selector
        if selector is None:
            selector = Selector()
        elif not callable(selector):
            selector = Selector(selector)

        # Convert subselector into a Selector
        if subselector is not None and not callable(subselector):
            subselector = Selector(subselector)

        self.tag = tag
        self.selector = selector
        self.subselector = subselector
        self.attrib = {}
        self._text = None
        self._children = []
        self._childmap = {}
        self.colon_ns = colon_ns

        # Run the incoming attributes through set() so that they
        # become selectorized
        if not attrib:
            attrib = {}
        attrib.update(extra)
        for k, v in attrib.items():
            self.set(k, v)

    def __repr__(self):
        """Return a representation of the template element."""

        return ('<%s.%s %r at %#x>' %
                (self.__class__.__module__, self.__class__.__name__,
                 self.tag, id(self)))

    def __len__(self):
        """Return the number of child elements."""

        return len(self._children)

    def __contains__(self, key):
        """Determine whether a child node named by key exists."""

        return key in self._childmap

    def __getitem__(self, idx):
        """Retrieve a child node by index or name."""

        if isinstance(idx, six.string_types):
            # Allow access by node name
            return self._childmap[idx]
        else:
            return self._children[idx]

    def append(self, elem):
        """Append a child to the element."""

        # Unwrap templates...
        elem = elem.unwrap()

        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)

        self._children.append(elem)
        self._childmap[elem.tag] = elem

    def extend(self, elems):
        """Append children to the element."""

        # Pre-evaluate the elements
        elemmap = {}
        elemlist = []
        for elem in elems:
            # Unwrap templates...
            elem = elem.unwrap()

            # Avoid duplications
            if elem.tag in self._childmap or elem.tag in elemmap:
                raise KeyError(elem.tag)

            elemmap[elem.tag] = elem
            elemlist.append(elem)

        # Update the children
        self._children.extend(elemlist)
        self._childmap.update(elemmap)

    def insert(self, idx, elem):
        """Insert a child element at the given index."""

        # Unwrap templates...
        elem = elem.unwrap()

        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)

        self._children.insert(idx, elem)
        self._childmap[elem.tag] = elem

    def remove(self, elem):
        """Remove a child element."""

        # Unwrap templates...
        elem = elem.unwrap()

        # Check if element exists
        if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
            raise ValueError(_('element is not a child'))

        self._children.remove(elem)
        del self._childmap[elem.tag]

    def get(self, key):
        """Get an attribute.

        Returns a callable which performs datum selection.

        :param key: The name of the attribute to get.
        """

        return self.attrib[key]

    def set(self, key, value=None):
        """Set an attribute.

        :param key: The name of the attribute to set.
        :param value: A callable taking an object and optional boolean
                      do_raise indicator and returning the datum bound
                      to the attribute.  If None, a Selector() will be
                      constructed from the key.  If a string, a
                      Selector() will be constructed from the string.
        """

        # Convert value to a selector
        if value is None:
            value = Selector(key)
        elif not callable(value):
            value = Selector(value)

        self.attrib[key] = value

    def keys(self):
        """Return the attribute names."""

        return self.attrib.keys()

    def items(self):
        """Return the attribute names and values."""

        return self.attrib.items()

    def unwrap(self):
        """Unwraps a template to return a template element."""

        # We are a template element
        return self

    def wrap(self):
        """Wraps a template element to return a template."""

        # Wrap in a basic Template
        return Template(self)

    def apply(self, elem, obj):
        """Apply text and attributes to an etree.Element.

        Applies the text and attribute instructions in the template
        element to an etree.Element instance.

        :param elem: An etree.Element instance.
        :param obj: The base object associated with this template
                    element.
        """

        # Start with the text...
        if self.text is not None:
            elem.text = unicode(self.text(obj))

        # Now set up all the attributes...
        for key, value in self.attrib.items():
            try:
                elem.set(key, unicode(value(obj, True)))
            except KeyError:
                # Attribute has no value, so don't include it
                pass

    def _render(self, parent, datum, patches, nsmap):
        """Internal rendering.

        Renders the template node into an etree.Element object.
        Returns the etree.Element object.

        :param parent: The parent etree.Element instance.
        :param datum: The datum associated with this template element.
        :param patches: A list of other template elements that must
                        also be applied.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance.
        """

        # Allocate a node
        if callable(self.tag):
            tagname = self.tag(datum)
        else:
            tagname = self.tag

        if self.colon_ns:
            if ':' in tagname:
                if nsmap is None:
                    nsmap = {}
                colon_key, colon_name = tagname.split(':')
                nsmap[colon_key] = colon_key
                tagname = '{%s}%s' % (colon_key, colon_name)

        elem = etree.Element(tagname, nsmap=nsmap)

        # If we have a parent, append the node to the parent
        if parent is not None:
            parent.append(elem)

        # If the datum is None, do nothing else
        if datum is None:
            return elem

        # Apply this template element to the element
        self.apply(elem, datum)

        # Additionally, apply the patches
        for patch in patches:
            patch.apply(elem, datum)

        # We have fully rendered the element; return it
        return elem

    def render(self, parent, obj, patches=[], nsmap=None):
        """Render an object.

        Renders an object against this template node.  Returns a list
        of two-item tuples, where the first item is an etree.Element
        instance and the second item is the datum associated with that
        instance.

        :param parent: The parent for the etree.Element instances.
        :param obj: The object to render this template element
                    against.
        :param patches: A list of other template elements to apply
                        when rendering this template element.
        :param nsmap: An optional namespace dictionary to attach to
                      the etree.Element instances.
        """

        # First, get the datum we're rendering
        data = None if obj is None else self.selector(obj)

        # Check if we should render at all
        if not self.will_render(data):
            return []
        elif data is None:
            return [(self._render(parent, None, patches, nsmap), None)]

        # Make the data into a list if it isn't already
        if not isinstance(data, list):
            data = [data]
        elif parent is None:
            raise ValueError(_('root element selecting a list'))

        # Render all the elements
        elems = []
        for datum in data:
            if self.subselector is not None:
                datum = self.subselector(datum)
            elems.append((self._render(parent, datum, patches, nsmap), datum))

        # Return all the elements rendered, as well as the
        # corresponding datum for the next step down the tree
        return elems

    def will_render(self, datum):
        """Hook method.

        An overridable hook method to determine whether this template
        element will be rendered at all.  By default, returns False
        (inhibiting rendering) if the datum is None.

        :param datum: The datum associated with this template element.
        """

        # Don't render if datum is None
        return datum is not None

    def _text_get(self):
        """Template element text.

        Either None or a callable taking an object and optional
        boolean do_raise indicator and returning the datum bound to
        the text of the template element.
        """

        return self._text

    def _text_set(self, value):
        # Convert value to a selector
        if value is not None and not callable(value):
            value = Selector(value)

        self._text = value

    def _text_del(self):
        self._text = None

    text = property(_text_get, _text_set, _text_del)

    def tree(self):
        """Return string representation of the template tree.

        Returns a representation of the template rooted at this
        element as a string, suitable for inclusion in debug logs.
        """

        # Build the inner contents of the tag...
        contents = [self.tag, '!selector=%r' % self.selector]

        # Add the text...
        if self.text is not None:
            contents.append('!text=%r' % self.text)

        # Add all the other attributes
        for key, value in self.attrib.items():
            contents.append('%s=%r' % (key, value))

        # If there are no children, return it as a closed tag
        if len(self) == 0:
            return '<%s/>' % ' '.join([str(i) for i in contents])

        # OK, recurse to our children
        children = [c.tree() for c in self]

        # Return the result
        return ('<%s>%s</%s>' %
                (' '.join(contents), ''.join(children), self.tag))
def SubTemplateElement(parent, tag, attrib=None, selector=None,
                       subselector=None, colon_ns=False, **extra):
    """Create a template element as a child of another.

    Corresponds to the etree.SubElement interface.  Parameters are as
    for TemplateElement, with the addition of the parent.
    """

    # Convert attributes
    attrib = attrib or {}
    attrib.update(extra)

    # Get a TemplateElement
    elem = TemplateElement(tag, attrib=attrib, selector=selector,
                           subselector=subselector, colon_ns=colon_ns)

    # Append the parent safely
    if parent is not None:
        parent.append(elem)

    return elem
class Template(object):
    """Represent a template."""

    def __init__(self, root, nsmap=None):
        """Initialize a template.

        :param root: The root element of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """

        self.root = root.unwrap() if root is not None else None
        self.nsmap = nsmap or {}
        self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)

    def _serialize(self, parent, obj, siblings, nsmap=None):
        """Internal serialization.

        Recursive routine to build a tree of etree.Element instances
        from an object based on the template.  Returns the first
        etree.Element instance rendered, or None.

        :param parent: The parent etree.Element instance.  Can be
                       None.
        :param obj: The object to render.
        :param siblings: The TemplateElement instances against which
                         to render the object.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance
                      rendered.
        """

        # First step, render the element
        elems = siblings[0].render(parent, obj, siblings[1:], nsmap)

        # Now, recurse to all child elements
        seen = set()
        for idx, sibling in enumerate(siblings):
            for child in sibling:
                # Have we handled this child already?
                if child.tag in seen:
                    continue
                seen.add(child.tag)

                # Determine the child's siblings
                nieces = [child]
                for sib in siblings[idx + 1:]:
                    if child.tag in sib:
                        nieces.append(sib[child.tag])

                # Now we recurse for every data element
                for elem, datum in elems:
                    self._serialize(elem, datum, nieces)

        # Return the first element; at the top level, this will be the
        # root element
        if elems:
            return elems[0][0]

    def serialize(self, obj, *args, **kwargs):
        """Serialize an object.

        Serializes an object against the template.  Returns a string
        with the serialized XML.  Positional and keyword arguments are
        passed to etree.tostring().

        :param obj: The object to serialize.
        """

        elem = self.make_tree(obj)
        if elem is None:
            return ''

        for k, v in self.serialize_options.items():
            kwargs.setdefault(k, v)

        # Serialize it into XML
        return etree.tostring(elem, *args, **kwargs)

    def make_tree(self, obj):
        """Create a tree.

        Serializes an object against the template.  Returns an Element
        node with appropriate children.

        :param obj: The object to serialize.
        """

        # If the template is empty, return None
        if self.root is None:
            return None

        # Get the siblings and nsmap of the root element
        siblings = self._siblings()
        nsmap = self._nsmap()

        # Form the element tree
        return self._serialize(None, obj, siblings, nsmap)

    def _siblings(self):
        """Hook method for computing root siblings.

        An overridable hook method to return the siblings of the root
        element.  By default, this is the root element itself.
        """

        return [self.root]

    def _nsmap(self):
        """Hook method for computing the namespace dictionary.

        An overridable hook method to return the namespace dictionary.
        """

        return self.nsmap.copy()

    def unwrap(self):
        """Unwraps a template to return a template element."""

        # Return the root element
        return self.root

    def wrap(self):
        """Wraps a template element to return a template."""

        # We are a template
        return self

    def apply(self, master):
        """Hook method for determining slave applicability.

        An overridable hook method used to determine if this template
        is applicable as a slave to a given master template.

        :param master: The master template to test.
        """

        return True

    def tree(self):
        """Return string representation of the template tree.

        Returns a representation of the template as a string, suitable
        for inclusion in debug logs.
        """

        return "%r: %s" % (self, self.root.tree())
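
# A minimal sketch of serializing a dict through the machinery above,
# assuming TemplateElement and Template as defined in this file; the
# 'version' selector picks the datum, and each set() attribute pulls the
# matching key from it:
#
#     root = TemplateElement('version', selector='version')
#     root.set('id')
#     root.set('status')
#     xml = root.wrap().serialize({'version': {'id': 'v2.0',
#                                              'status': 'CURRENT'}})
#     # roughly: <?xml version='1.0' encoding='UTF-8'?>
#     #          <version id="v2.0" status="CURRENT"/>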
class MasterTemplate(Template):
    """Represent a master template.

    Master templates are versioned derivatives of templates that
    additionally allow slave templates to be attached.  Slave
    templates allow modification of the serialized result without
    directly changing the master.
    """

    def __init__(self, root, version, nsmap=None):
        """Initialize a master template.

        :param root: The root element of the template.
        :param version: The version number of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """

        super(MasterTemplate, self).__init__(root, nsmap)
        self.version = version
        self.slaves = []

    def __repr__(self):
        """Return string representation of the template."""

        return ("<%s.%s object version %s at %#x>" %
                (self.__class__.__module__, self.__class__.__name__,
                 self.version, id(self)))

    def _siblings(self):
        """Hook method for computing root siblings.

        An overridable hook method to return the siblings of the root
        element.  This is the root element plus the root elements of
        all the slave templates.
        """

        return [self.root] + [slave.root for slave in self.slaves]

    def _nsmap(self):
        """Hook method for computing the namespace dictionary.

        An overridable hook method to return the namespace dictionary.
        The namespace dictionary is computed by taking the master
        template's namespace dictionary and updating it from all the
        slave templates.
        """

        nsmap = self.nsmap.copy()
        for slave in self.slaves:
            nsmap.update(slave._nsmap())
        return nsmap

    def attach(self, *slaves):
        """Attach one or more slave templates.

        Attaches one or more slave templates to the master template.
        Slave templates must have a root element with the same tag as
        the master template.  The slave template's apply() method will
        be called to determine if the slave should be applied to this
        master; if it returns False, that slave will be skipped.
        (This allows filtering of slaves based on the version of the
        master template.)
        """

        slave_list = []
        for slave in slaves:
            slave = slave.wrap()

            # Make sure we have a tree match
            if slave.root.tag != self.root.tag:
                msg = _("Template tree mismatch; adding slave %(slavetag)s to "
                        "master %(mastertag)s") % {'slavetag': slave.root.tag,
                                                   'mastertag': self.root.tag}
                raise ValueError(msg)

            # Make sure slave applies to this template
            if not slave.apply(self):
                continue

            slave_list.append(slave)

        # Add the slaves
        self.slaves.extend(slave_list)

    def copy(self):
        """Return a copy of this master template."""

        # Return a copy of the MasterTemplate
        tmp = self.__class__(self.root, self.version, self.nsmap)
        tmp.slaves = self.slaves[:]
        return tmp
class SlaveTemplate(Template):
    """Represent a slave template.

    Slave templates are versioned derivatives of templates.  Each
    slave has a minimum version and optional maximum version of the
    master template to which they can be attached.
    """

    def __init__(self, root, min_vers, max_vers=None, nsmap=None):
        """Initialize a slave template.

        :param root: The root element of the template.
        :param min_vers: The minimum permissible version of the master
                         template for this slave template to apply.
        :param max_vers: An optional upper bound for the master
                         template version.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """

        super(SlaveTemplate, self).__init__(root, nsmap)
        self.min_vers = min_vers
        self.max_vers = max_vers

    def __repr__(self):
        """Return string representation of the template."""

        return ("<%s.%s object versions %s-%s at %#x>" %
                (self.__class__.__module__, self.__class__.__name__,
                 self.min_vers, self.max_vers, id(self)))

    def apply(self, master):
        """Hook method for determining slave applicability.

        An overridable hook method used to determine if this template
        is applicable as a slave to a given master template.  This
        version requires the master template to have a version number
        between min_vers and max_vers.

        :param master: The master template to test.
        """

        # Does the master meet our minimum version requirement?
        if master.version < self.min_vers:
            return False

        # How about our maximum version requirement?
        if self.max_vers is not None and master.version > self.max_vers:
            return False

        return True
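
# A minimal sketch of the version gating above, assuming the classes in
# this file: attach() silently skips a slave whose [min_vers, max_vers]
# range does not cover the master's version.
if __name__ == '__main__':
    master = MasterTemplate(TemplateElement('server'), version=3)
    slave = SlaveTemplate(TemplateElement('server'), min_vers=1, max_vers=2)
    assert slave.apply(master) is False  # master is too new for this slave
    master.attach(slave)                 # skipped, not an error
    assert master.slaves == []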
class TemplateBuilder(object):
    """Template builder.

    This class exists to allow templates to be lazily built without
    having to build them each time they are needed.  It must be
    subclassed, and the subclass must implement the construct()
    method, which must return a Template (or subclass) instance.  The
    constructor will always return the template returned by
    construct(), or, if it has a copy() method, a copy of that
    template.
    """

    _tmpl = None

    def __new__(cls, copy=True):
        """Construct and return a template.

        :param copy: If True (the default), a copy of the template
                     will be constructed and returned, if possible.
        """

        # Do we need to construct the template?
        if cls._tmpl is None:
            tmp = super(TemplateBuilder, cls).__new__(cls)

            # Construct the template
            cls._tmpl = tmp.construct()

        # If the template has a copy attribute, return the result of
        # calling it
        if copy and hasattr(cls._tmpl, 'copy'):
            return cls._tmpl.copy()

        # Return the template
        return cls._tmpl

    def construct(self):
        """Construct a template.

        Called to construct a template instance, which it must return.
        Only called once.
        """

        raise NotImplementedError(_("subclasses must implement construct()!"))
def make_links(parent, selector=None):
    """Attach an Atom <links> element to the parent."""

    elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
                              selector=selector)
    elem.set('rel')
    elem.set('type')
    elem.set('href')

    # Just for completeness...
    return elem


def make_flat_dict(name, selector=None, subselector=None,
                   ns=None, colon_ns=False, root=None,
                   ignore_sub_dicts=False):
    """Utility for simple XML templates that traditionally used
    XMLDictSerializer with no metadata.  Returns a template element
    where the top-level element has the given tag name, and where
    sub-elements have tag names derived from the object's keys and
    text derived from the object's values.

    :param root: if None, this will create the root.
    :param ignore_sub_dicts: If True, ignores any dict objects inside the
                             object.  If False, causes an error if there is a
                             dict object present.
    """

    # Set up the names we need...
    if ns is None:
        elemname = name
        tagname = Selector(0)
    else:
        elemname = '{%s}%s' % (ns, name)
        tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])

    if selector is None:
        selector = name
    if not root:
        # Build the root element
        root = TemplateElement(elemname, selector=selector,
                               subselector=subselector, colon_ns=colon_ns)
    choice = get_items if ignore_sub_dicts is False else get_items_without_dict
    # Build an element to represent all the keys and values
    elem = SubTemplateElement(root, tagname, selector=choice,
                              colon_ns=colon_ns)
    elem.text = 1

    # Return the template
    return root


class ProtectedExpatParser(expatreader.ExpatParser):
    """An expat parser which disables DTD's and entities by default."""

    def __init__(self, forbid_dtd=True, forbid_entities=True,
                 *args, **kwargs):
        # Python 2.x old style class
        expatreader.ExpatParser.__init__(self, *args, **kwargs)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise ValueError("Inline DTD forbidden")

    def entity_decl(self, entityName, is_parameter_entity, value, base,
                    systemId, publicId, notationName):
        raise ValueError("<!ENTITY> entity declaration forbidden")

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise ValueError("<!ENTITY> unparsed entity forbidden")

    def external_entity_ref(self, context, base, systemId, publicId):
        raise ValueError("<!ENTITY> external entity forbidden")

    def notation_decl(self, name, base, sysid, pubid):
        raise ValueError("<!ENTITY> notation forbidden")

    def reset(self):
        expatreader.ExpatParser.reset(self)
        if self.forbid_dtd:
            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
            self._parser.EndDoctypeDeclHandler = None
        if self.forbid_entities:
            self._parser.EntityDeclHandler = self.entity_decl
            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
            self._parser.ExternalEntityRefHandler = self.external_entity_ref
            self._parser.NotationDeclHandler = self.notation_decl
            try:
                self._parser.SkippedEntityHandler = None
            except AttributeError:
                # some pyexpat versions do not support SkippedEntity
                pass


def safe_minidom_parse_string(xml_string):
    """Parse an XML string using minidom safely."""
    try:
        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
    except (sax.SAXParseException, ValueError,
            expat.ExpatError, LookupError) as e:
        # NOTE(Vijaya Erukala): XML input such as
        # <?xml version="1.0" encoding="TF-8"?>
        # raises LookupError: unknown encoding: TF-8
        raise exception.MalformedRequestBody(reason=str(e))
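
# A minimal sketch of the protection above: entity and DTD declarations
# are rejected outright (billion-laughs defense) instead of expanded.
if __name__ == '__main__':
    safe_minidom_parse_string('<a>ok</a>')  # plain XML parses normally
    try:
        safe_minidom_parse_string(
            '<!DOCTYPE a [<!ENTITY x "boom">]><a>&x;</a>')
    except exception.MalformedRequestBody:
        pass  # inline DTDs and entities are forbidden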
81
rack/baserpc.py
Normal file
@@ -0,0 +1,81 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Base RPC client and server common to all services.
"""

from oslo.config import cfg
from oslo import messaging

from rack.openstack.common import jsonutils
from rack import rpc


CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('baseapi',
                            help='Set a version cap for messages sent to the '
                                 'base api in any service')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')

_NAMESPACE = 'baseapi'


class BaseAPI(object):
    """Client side of the base rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Add get_backdoor_port
    """

    VERSION_ALIASES = {
        # baseapi was added in havana
    }

    def __init__(self, topic):
        super(BaseAPI, self).__init__()
        target = messaging.Target(topic=topic,
                                  namespace=_NAMESPACE,
                                  version='1.0')
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi,
                                               CONF.upgrade_levels.baseapi)
        self.client = rpc.get_client(target, version_cap=version_cap)

    def ping(self, context, arg, timeout=None):
        arg_p = jsonutils.to_primitive(arg)
        cctxt = self.client.prepare(timeout=timeout)
        return cctxt.call(context, 'ping', arg=arg_p)

    def get_backdoor_port(self, context, host):
        cctxt = self.client.prepare(server=host, version='1.1')
        return cctxt.call(context, 'get_backdoor_port')


class BaseRPCAPI(object):
    """Server side of the base RPC API."""

    target = messaging.Target(namespace=_NAMESPACE, version='1.1')

    def __init__(self, service_name, backdoor_port):
        self.service_name = service_name
        self.backdoor_port = backdoor_port

    def ping(self, context, arg):
        resp = {'service': self.service_name, 'arg': arg}
        return jsonutils.to_primitive(resp)

    def get_backdoor_port(self, context):
        return self.backdoor_port
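
# A minimal sketch of the server-side ping above: the reply simply
# echoes the argument back as a JSON-safe primitive, tagged with the
# service name.
if __name__ == '__main__':
    api = BaseRPCAPI('rack-scheduler', backdoor_port=None)
    print(api.ping(context=None, arg={'x': 1}))
    # {'service': 'rack-scheduler', 'arg': {'x': 1}}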
33
rack/cmd/__init__.py
Normal file
@@ -0,0 +1,33 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

if ('eventlet' in sys.modules and
        os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
    raise ImportError('eventlet imported before rack/cmd/__init__ '
                      '(env var set to %s)'
                      % os.environ.get('EVENTLET_NO_GREENDNS'))

os.environ['EVENTLET_NO_GREENDNS'] = 'yes'

import eventlet
from rack import debugger

if debugger.enabled():
    # turn off thread patching to enable the remote debugger
    eventlet.monkey_patch(os=False, thread=False)
else:
    eventlet.monkey_patch(os=False)
36
rack/cmd/api.py
Normal file
@@ -0,0 +1,36 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Starter script for RACK API."""

import sys

from oslo.config import cfg

from rack import config
from rack.openstack.common import log as logging
from rack import service
from rack import utils

CONF = cfg.CONF


def main():
    config.parse_args(sys.argv)
    logging.setup("rack")
    utils.monkey_patch()

    launcher = service.process_launcher()
    server = service.WSGIService('rackapi')
    launcher.launch_service(server, workers=server.workers or 1)
    launcher.wait()
42
rack/cmd/resourceoperator.py
Normal file
@@ -0,0 +1,42 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Starter script for RACK ResourceOperator."""

import sys

from oslo.config import cfg

from rack import config
from rack.openstack.common import log as logging
from rack import service
from rack import utils

CONF = cfg.CONF
CONF.import_opt('resourceoperator_topic', 'rack.resourceoperator.rpcapi')
CONF.import_opt('os_username', 'rack.resourceoperator.openstack')
CONF.import_opt('os_password', 'rack.resourceoperator.openstack')
CONF.import_opt('os_tenant_name', 'rack.resourceoperator.openstack')
CONF.import_opt('os_auth_url', 'rack.resourceoperator.openstack')


def main():
    config.parse_args(sys.argv)
    logging.setup("rack")
    utils.monkey_patch()

    server = service.Service.create(binary='rack-resourceoperator',
                                    topic=CONF.resourceoperator_topic)
    service.serve(server)
    service.wait()
37
rack/cmd/scheduler.py
Normal file
@@ -0,0 +1,37 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for RACK Scheduler."""

import sys

from oslo.config import cfg

from rack import config
from rack.openstack.common import log as logging
from rack import service
from rack import utils

CONF = cfg.CONF
CONF.import_opt('scheduler_topic', 'rack.scheduler.rpcapi')


def main():
    config.parse_args(sys.argv)
    logging.setup("rack")
    utils.monkey_patch()

    server = service.Service.create(binary='rack-scheduler',
                                    topic=CONF.scheduler_topic)
    service.serve(server)
    service.wait()
35
rack/config.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg

from rack import debugger
from rack.openstack.common.db import options
from rack import paths
from rack import rpc
from rack import version

_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('rack.sqlite')


def parse_args(argv, default_config_files=None):
    options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='rack.sqlite')
    rpc.set_defaults(control_exchange='rack')
    debugger.register_cli_opts()
    cfg.CONF(argv[1:],
             project='rack',
             version=version.version_string(),
             default_config_files=default_config_files)
    rpc.init(cfg.CONF)
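A sketch of the caller's side of parse_args (the config-file path is illustrative; with no overrides the database connection resolves to the sqlite default defined above):

import sys

from oslo.config import cfg

from rack import config

config.parse_args(sys.argv, default_config_files=['/etc/rack/rack.conf'])
print(cfg.CONF.database.connection)  # sqlite:///.../rack.sqlite unless overridden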
227
rack/context.py
Normal file
@@ -0,0 +1,227 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RequestContext: context for requests that persist through all of rack."""

import copy
import uuid

import six

from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import local
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils
from rack import policy


LOG = logging.getLogger(__name__)


def generate_request_id():
    return 'req-' + str(uuid.uuid4())


class RequestContext(object):
    """Security context and request information.

    Represents the user taking a given action within the system.

    """

    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False, **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
               'yes' indicates deleted records are visible,
               'only' indicates that *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(_('Arguments dropped when creating context: %s') %
                     str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in ('identity', 'image',
                                                         'network', 'compute')]
        else:
            # if the list is empty or None
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def update_store(self):
        local.store.context = self

    def to_dict(self):
        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class,
                'user_name': self.user_name,
                'service_catalog': self.service_catalog,
                'project_name': self.project_name,
                'instance_lock_checked': self.instance_lock_checked,
                'tenant': self.tenant,
                'user': self.user}

    @classmethod
    def from_dict(cls, values):
        values.pop('user', None)
        values.pop('tenant', None)
        return cls(**values)

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with the admin flag set."""
        context = copy.copy(self)
        context.is_admin = True

        if 'admin' not in context.roles:
            context.roles.append('admin')

        if read_deleted is not None:
            context.read_deleted = read_deleted

        return context

    # NOTE(sirp): the openstack/common version of RequestContext uses
    # tenant/user whereas the Rack version uses project_id/user_id. We need
    # this shim in order to use context-aware code from openstack/common, like
    # logging, until we make the switch to using openstack/common's version of
    # RequestContext.
    @property
    def tenant(self):
        return self.project_id

    @property
    def user(self):
        return self.user_id


def get_admin_context(read_deleted="no"):
    return RequestContext(user_id=None,
                          project_id=None,
                          is_admin=True,
                          read_deleted=read_deleted,
                          overwrite=False)


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def require_admin_context(ctxt):
    """Raise exception.AdminRequired() if context is not an admin context."""
    if not ctxt.is_admin:
        raise exception.AdminRequired()


def require_context(ctxt):
    """Raise exception.NotAuthorized() if context is not a user or an
    admin context.
    """
    if not ctxt.is_admin and not is_user_context(ctxt):
        raise exception.NotAuthorized()


def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if is_user_context(context):
        if not context.project_id:
            raise exception.NotAuthorized()
        elif context.project_id != project_id:
            raise exception.NotAuthorized()


def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if is_user_context(context):
        if not context.user_id:
            raise exception.NotAuthorized()
        elif context.user_id != user_id:
            raise exception.NotAuthorized()


def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if is_user_context(context):
        if not context.quota_class:
            raise exception.NotAuthorized()
        elif context.quota_class != class_name:
            raise exception.NotAuthorized()
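A short usage sketch for RequestContext (identifiers made up; is_admin is passed explicitly so the policy check is skipped): contexts round-trip through to_dict/from_dict, which is what lets them travel on RPC messages, and elevated() returns an admin copy. Note that elevated() uses copy.copy, so the roles list is shared with the original.

from rack import context

ctxt = context.RequestContext(user_id='demo-user', project_id='demo-project',
                              roles=['member'], is_admin=False,
                              overwrite=False)

payload = ctxt.to_dict()                              # primitive dict, safe for RPC
restored = context.RequestContext.from_dict(payload)  # drops the tenant/user shims

admin_ctxt = ctxt.elevated()  # shallow copy with is_admin=True and the 'admin' role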
18
rack/db/__init__.py
Normal file
@@ -0,0 +1,18 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DB abstraction for RACK
"""

from rack.db.api import *  # noqa
179
rack/db/api.py
Normal file
@@ -0,0 +1,179 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg

from rack.openstack.common.db import api as db_api


CONF = cfg.CONF
db_opts = [
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create')
]
CONF.register_opts(db_opts)
CONF.import_opt('backend', 'rack.openstack.common.db.options',
                group='database')

_BACKEND_MAPPING = {'sqlalchemy': 'rack.db.sqlalchemy.api'}


IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING)


def group_get_all(context, filters=None):
    return IMPL.group_get_all(context, filters)


def group_get_by_gid(context, gid):
    return IMPL.group_get_by_gid(context, gid)


def group_create(context, values):
    return IMPL.group_create(context, values)


def group_update(context, values):
    return IMPL.group_update(context, values)


def group_delete(context, gid):
    return IMPL.group_delete(context, gid)


def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, service_id)


def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)


def service_get_by_host_and_topic(context, host, topic):
    """Get a service by the host it runs on and the topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)


def service_get_all(context, disabled=None):
    """Get all services."""
    return IMPL.service_get_all(context, disabled)


def service_get_all_by_topic(context, topic):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic)


def service_get_all_by_host(context, host):
    """Get all services for a given host."""
    return IMPL.service_get_all_by_host(context, host)


def service_get_by_args(context, host, binary):
    """Get the state of a service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)


def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)


def service_update(context, service_id, values):
    """Set the given properties on a service and update it.

    Raises NotFound if the service does not exist.

    """
    return IMPL.service_update(context, service_id, values)


def network_create(context, values):
    return IMPL.network_create(context, values)


def network_update(context, network_id, values):
    IMPL.network_update(context, network_id, values)


def network_get_all(context, gid, filters={}):
    return IMPL.network_get_all(context, gid, filters)


def network_get_by_network_id(context, gid, network_id):
    return IMPL.network_get_by_network_id(context, gid, network_id)


def network_delete(context, gid, network_id):
    return IMPL.network_delete(context, gid, network_id)


def keypair_get_all(context, gid, filters={}):
    return IMPL.keypair_get_all(context, gid, filters)


def keypair_get_by_keypair_id(context, gid, keypair_id):
    return IMPL.keypair_get_by_keypair_id(context, gid, keypair_id)


def keypair_create(context, values):
    return IMPL.keypair_create(context, values)


def keypair_update(context, gid, keypair_id, values):
    return IMPL.keypair_update(context, gid, keypair_id, values)


def keypair_delete(context, gid, keypair_id):
    return IMPL.keypair_delete(context, gid, keypair_id)


def securitygroup_get_all(context, gid, filters={}):
    return IMPL.securitygroup_get_all(context, gid, filters)


def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id):
    return IMPL.securitygroup_get_by_securitygroup_id(context, gid,
                                                      securitygroup_id)


def securitygroup_create(context, values):
    return IMPL.securitygroup_create(context, values)


def securitygroup_update(context, gid, securitygroup_id, values):
    return IMPL.securitygroup_update(context, gid, securitygroup_id, values)


def securitygroup_delete(context, gid, securitygroup_id):
    return IMPL.securitygroup_delete(context, gid, securitygroup_id)


def process_get_all(context, gid, filters={}):
    return IMPL.process_get_all(context, gid, filters)


def process_get_by_pid(context, gid, pid):
    return IMPL.process_get_by_pid(context, gid, pid)


def process_create(context, values, network_ids, securitygroup_ids):
    return IMPL.process_create(context, values, network_ids,
                               securitygroup_ids)


def process_update(context, gid, pid, values):
    return IMPL.process_update(context, gid, pid, values)


def process_delete(context, gid, pid):
    return IMPL.process_delete(context, gid, pid)
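Callers are expected to go through this module rather than import a backend directly, so the backend stays swappable via CONF.database.backend. A usage sketch (the values dict is hypothetical but follows the groups schema added later in this commit; assumes a configured database):

import uuid

from rack import context
from rack import db

ctxt = context.get_admin_context()
group = db.group_create(ctxt, {'gid': str(uuid.uuid4()),
                               'display_name': 'example-group',
                               'status': 'ACTIVE'})
active = db.group_get_all(ctxt, filters={'status': 'ACTIVE'})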
36
rack/db/base.py
Normal file
@@ -0,0 +1,36 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base class for classes that need modular database access."""

from oslo.config import cfg

from rack.openstack.common import importutils

db_driver_opt = cfg.StrOpt('db_driver',
                           default='rack.db',
                           help='The driver to use for database access')

CONF = cfg.CONF
CONF.register_opt(db_driver_opt)


class Base(object):
    """DB driver is injected in the init method."""

    def __init__(self, db_driver=None):
        super(Base, self).__init__()
        if not db_driver:
            db_driver = CONF.db_driver
        self.db = importutils.import_module(db_driver)  # pylint: disable=C0103
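A sketch of the injection pattern: any class that inherits Base gets self.db bound to the module named by CONF.db_driver (default 'rack.db'). ExampleManager below is hypothetical, not part of this commit:

from rack.db import base


class ExampleManager(base.Base):
    """self.db is the rack.db module unless db_driver says otherwise."""

    def list_groups(self, context):
        return self.db.group_get_all(context)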
37
rack/db/migration.py
Normal file
@@ -0,0 +1,37 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Database setup and migration commands."""

from rack import utils


IMPL = utils.LazyPluggable('backend',
                           config_group='database',
                           sqlalchemy='rack.db.sqlalchemy.migration')


def db_sync(version=None):
    """Migrate the database to `version` or the most recent version."""
    return IMPL.db_sync(version=version)


def db_version():
    """Display the current database version."""
    return IMPL.db_version()


def db_initial_version():
    """The starting version for the database."""
    return IMPL.db_initial_version()
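Typical use from operator tooling, assuming the configuration has already been parsed so CONF.database points at a real database:

from rack.db import migration

migration.db_sync()             # migrate up to the newest version in migrate_repo
print(migration.db_version())   # version now recorded in the database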
21
rack/db/sqlalchemy/__init__.py
Normal file
@@ -0,0 +1,21 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles


@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
    return 'INTEGER'
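This @compiles hook makes BigInteger render as INTEGER on sqlite, presumably because sqlite only treats the exact type INTEGER as an autoincrementing rowid alias. A standalone check of the hook (needs only SQLAlchemy; importing the package registers it as a side effect):

import rack.db.sqlalchemy  # noqa: importing registers the @compiles hook

from sqlalchemy import BigInteger
from sqlalchemy.dialects import sqlite

print(BigInteger().compile(dialect=sqlite.dialect()))  # prints: INTEGER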
647
rack/db/sqlalchemy/api.py
Normal file
@@ -0,0 +1,647 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import sys
import uuid

from oslo.config import cfg

from rack import exception
import rack.context
from rack.db.sqlalchemy import models
from rack.openstack.common.db import exception as db_exc
from rack.openstack.common.db.sqlalchemy import session as db_session
from rack.openstack.common.gettextutils import _
from rack.openstack.common import jsonutils
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('connection',
                'rack.openstack.common.db.options',
                group='database')

_FACADE = None


def _create_facade_lazily():
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade(
            CONF.database.connection,
            **dict(CONF.database.iteritems()))
    return _FACADE


def get_engine():
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(**kwargs):
    facade = _create_facade_lazily()
    return facade.get_session(**kwargs)


def get_backend():
    return sys.modules[__name__]


def group_get_all(context, filters=None):
    session = get_session()
    filters = filters or {}
    query = session.query(models.Group).filter_by(user_id=context.user_id)\
        .filter_by(deleted=0)
    if 'project_id' in filters:
        query = query.filter_by(project_id=filters['project_id'])
    if 'name' in filters:
        query = query.filter_by(display_name=filters['name'])
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    response_groups = query.all()

    return [dict(group) for group in response_groups]


def group_get_by_gid(context, gid):
    session = get_session()
    group = session.query(models.Group)\
        .filter_by(user_id=context.user_id)\
        .filter_by(gid=gid)\
        .filter_by(deleted=0)\
        .first()

    if not group:
        raise exception.GroupNotFound(gid=gid)
    return dict(group)


def require_admin_context(f):
    """Decorator to require an admin request context.

    The first argument to the wrapped function must be the context.

    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        rack.context.require_admin_context(args[0])
        return f(*args, **kwargs)
    return wrapper


def group_create(context, values):
    session = get_session()
    group_ref = models.Group()
    group_ref.update(values)
    group_ref.save(session)

    return dict(group_ref)


def group_update(context, values):
    session = get_session()
    group_ref = session.query(models.Group). \
        filter(models.Group.gid == values["gid"]).first()
    if group_ref is None:
        raise exception.GroupNotFound(gid=values["gid"])

    group_ref.update(values)
    group_ref.save(session)

    return dict(group_ref)


def group_delete(context, gid):
    session = get_session()
    group_ref = session.query(models.Group)\
        .filter_by(deleted=0)\
        .filter_by(gid=gid)\
        .first()
    if group_ref is None:
        raise exception.GroupNotFound(gid=gid)

    values = {
        "status": "DELETING",
        "deleted": 1,
        "deleted_at": timeutils.utcnow()
    }
    group_ref.update(values)
    group_ref.save(session)

    return dict(group_ref)


def service_model_query(context, model, *args, **kwargs):
    session = kwargs.get('session') or get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    query = session.query(model, *args)

    default_deleted_value = model.__mapper__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(model.deleted != default_deleted_value)
    else:
        raise Exception(_("Unrecognized read_deleted value '%s'")
                        % read_deleted)

    return query


@require_admin_context
def service_destroy(context, service_id):
    session = get_session()
    with session.begin():
        count = service_model_query(context, models.Service,
                                    session=session).\
            filter_by(id=service_id).\
            soft_delete(synchronize_session=False)

        if count == 0:
            raise exception.ServiceNotFound(service_id=service_id)


@require_admin_context
def service_get(context, service_id):
    session = get_session()
    service_ref = service_model_query(context, models.Service,
                                      session=session).\
        filter_by(id=service_id).\
        first()

    if not service_ref:
        raise exception.ServiceNotFound(service_id=service_id)

    return jsonutils.to_primitive(service_ref)


@require_admin_context
def service_get_all(context, disabled=None):
    session = get_session()
    query = service_model_query(context, models.Service,
                                session=session)

    if disabled is not None:
        query = query.filter_by(disabled=disabled)

    service_refs = query.all()
    return jsonutils.to_primitive(service_refs)


@require_admin_context
def service_get_all_by_topic(context, topic):
    session = get_session()
    service_refs = service_model_query(context, models.Service,
                                       session=session,
                                       read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(topic=topic).\
        all()

    return jsonutils.to_primitive(service_refs)


@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
    session = get_session()
    service_ref = service_model_query(context, models.Service,
                                      session=session,
                                      read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(host=host).\
        filter_by(topic=topic).\
        first()

    return jsonutils.to_primitive(service_ref)


@require_admin_context
def service_get_all_by_host(context, host):
    session = get_session()
    service_refs = service_model_query(context, models.Service,
                                       session=session,
                                       read_deleted="no").\
        filter_by(host=host).\
        all()

    return jsonutils.to_primitive(service_refs)


@require_admin_context
def service_get_by_args(context, host, binary):
    session = get_session()
    service_ref = service_model_query(context, models.Service,
                                      session=session).\
        filter_by(host=host).\
        filter_by(binary=binary).\
        first()

    if not service_ref:
        raise exception.HostBinaryNotFound(host=host, binary=binary)

    return jsonutils.to_primitive(service_ref)


@require_admin_context
def service_create(context, values):
    session = get_session()
    service_ref = models.Service()
    service_ref.update(values)
    if not CONF.enable_new_services:
        service_ref.disabled = True
    try:
        service_ref.save(session)
    except db_exc.DBDuplicateEntry as e:
        if 'binary' in e.columns:
            raise exception.ServiceBinaryExists(host=values.get('host'),
                                                binary=values.get('binary'))
        raise exception.ServiceTopicExists(host=values.get('host'),
                                           topic=values.get('topic'))

    return jsonutils.to_primitive(service_ref)


@require_admin_context
def service_update(context, service_id, values):
    session = get_session()
    with session.begin():
        service_ref = service_model_query(context, models.Service,
                                          session=session).\
            filter_by(id=service_id).\
            first()

        if not service_ref:
            raise exception.ServiceNotFound(service_id=service_id)

        service_ref.update(values)

    return jsonutils.to_primitive(service_ref)


def network_create(context, values):
    session = get_session()
    network_ref = models.Network()
    network_ref.update(values)
    network_ref.save(session)

    return dict(network_ref)


def network_update(context, network_id, values):
    session = get_session()
    network_ref = session.query(models.Network)\
        .filter(models.Network.deleted == 0)\
        .filter(models.Network.network_id == network_id)\
        .first()
    if network_ref is None:
        raise exception.NetworkNotFound(network_id=network_id)

    network_ref.update(values)
    network_ref.save(session)


def network_get_all(context, gid, filters):
    session = get_session()
    query = session.query(models.Network)\
        .filter_by(deleted=0)\
        .filter_by(gid=gid)

    if 'network_id' in filters:
        query = query.filter_by(network_id=filters['network_id'])
    if 'neutron_network_id' in filters:
        query = query.filter_by(
            neutron_network_id=filters['neutron_network_id'])
    if 'display_name' in filters:
        query = query.filter_by(display_name=filters['display_name'])
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    if 'is_admin' in filters:
        query = query.filter_by(is_admin=filters['is_admin'])
    if 'subnet' in filters:
        query = query.filter_by(subnet=filters['subnet'])
    if 'ext_router' in filters:
        query = query.filter_by(ext_router=filters['ext_router'])

    networks = query.all()

    return [dict(network) for network in networks]


def network_get_by_network_id(context, gid, network_id):
    session = get_session()
    network = session.query(models.Network)\
        .filter_by(deleted=0)\
        .filter_by(gid=gid)\
        .filter_by(network_id=network_id)\
        .first()
    if not network:
        raise exception.NetworkNotFound(network_id=network_id)

    network_dict = dict(network)
    network_dict.update(dict(
        processes=[dict(process) for process in network.processes]))

    return network_dict


def network_delete(context, gid, network_id):
    session = get_session()
    network_ref = session.query(models.Network)\
        .filter(models.Network.deleted == 0)\
        .filter(models.Network.gid == gid)\
        .filter(models.Network.network_id == network_id)\
        .first()
    if network_ref is None:
        raise exception.NetworkNotFound(network_id=network_id)

    values = {}
    values["deleted"] = 1
    values["deleted_at"] = timeutils.utcnow()
    values["status"] = "DELETING"
    network_ref.update(values)
    network_ref.save(session)
    return dict(network_ref)


def keypair_get_all(context, gid, filters={}):
    session = get_session()
    query = session.query(models.Keypair)\
        .filter_by(gid=gid)\
        .filter_by(deleted=0)
    if 'keypair_id' in filters:
        query = query.filter_by(keypair_id=filters['keypair_id'])
    if 'nova_keypair_id' in filters:
        query = query.filter_by(nova_keypair_id=filters['nova_keypair_id'])
    if 'display_name' in filters:
        query = query.filter_by(display_name=filters['display_name'])
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    if 'is_default' in filters:
        query = query.filter_by(is_default=filters['is_default'])

    response_keypairs = query.all()

    return [dict(keypair) for keypair in response_keypairs]


def keypair_get_by_keypair_id(context, gid, keypair_id):
    session = get_session()
    keypair = session.query(models.Keypair)\
        .filter_by(gid=gid)\
        .filter_by(keypair_id=keypair_id)\
        .filter_by(deleted=0)\
        .first()

    if not keypair:
        raise exception.KeypairNotFound(keypair_id=keypair_id)

    return dict(keypair)


def keypair_create(context, values):
    session = get_session()
    keypair_ref = models.Keypair()
    keypair_ref.update(values)
    keypair_ref.save(session)
    return dict(keypair_ref)


def keypair_update(context, gid, keypair_id, values):
    session = get_session()
    keypair_ref = session.query(models.Keypair)\
        .filter_by(gid=gid)\
        .filter_by(keypair_id=keypair_id)\
        .filter_by(deleted=0)\
        .first()
    if keypair_ref is None:
        raise exception.KeypairNotFound(keypair_id=keypair_id)

    keypair_ref.update(values)
    keypair_ref.save(session)

    return dict(keypair_ref)


def keypair_delete(context, gid, keypair_id):
    session = get_session()
    keypair_ref = session.query(models.Keypair)\
        .filter_by(gid=gid)\
        .filter_by(keypair_id=keypair_id)\
        .filter_by(deleted=0)\
        .first()
    if keypair_ref is None:
        raise exception.KeypairNotFound(keypair_id=keypair_id)

    values = {
        "status": "DELETING",
        "deleted": 1,
        "deleted_at": timeutils.utcnow()
    }
    keypair_ref.update(values)
    keypair_ref.save(session)

    return dict(keypair_ref)


def securitygroup_get_all(context, gid, filters={}):
    session = get_session()
    query = session.query(models.Securitygroup).filter_by(gid=gid, deleted=0)

    if 'securitygroup_id' in filters:
        query = query.filter_by(securitygroup_id=filters['securitygroup_id'])
    if 'name' in filters:
        query = query.filter_by(display_name=filters['name'])
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    if 'is_default' in filters:
        query = query.filter_by(is_default=filters['is_default'])
    securitygroups = query.all()

    return [dict(securitygroup) for securitygroup in securitygroups]


def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id):
    session = get_session()
    securitygroup = session.query(models.Securitygroup)\
        .filter_by(deleted=0)\
        .filter_by(gid=gid)\
        .filter_by(securitygroup_id=securitygroup_id)\
        .first()

    if not securitygroup:
        raise exception.SecuritygroupNotFound(
            securitygroup_id=securitygroup_id)

    securitygroup_dict = dict(securitygroup)
    securitygroup_dict.update(dict(
        processes=[dict(process) for process in securitygroup.processes]))
    return securitygroup_dict


def securitygroup_create(context, values):
    session = get_session()
    securitygroup_ref = models.Securitygroup()
    securitygroup_ref.update(values)
    securitygroup_ref.save(session)

    return dict(securitygroup_ref)


def securitygroup_update(context, gid, securitygroup_id, values):
    session = get_session()
    securitygroup_ref = session.query(models.Securitygroup). \
        filter_by(deleted=0). \
        filter_by(gid=gid). \
        filter_by(securitygroup_id=securitygroup_id). \
        first()
    if securitygroup_ref is None:
        raise exception.SecuritygroupNotFound(
            securitygroup_id=securitygroup_id)

    securitygroup_ref.update(values)
    securitygroup_ref.save(session)

    return dict(securitygroup_ref)


def securitygroup_delete(context, gid, securitygroup_id):
    session = get_session()
    securitygroup_ref = session.query(models.Securitygroup). \
        filter_by(deleted=0). \
        filter_by(gid=gid). \
        filter_by(securitygroup_id=securitygroup_id). \
        first()
    if securitygroup_ref is None:
        raise exception.SecuritygroupNotFound(
            securitygroup_id=securitygroup_id)

    securitygroup_ref.update({"deleted": 1,
                              "deleted_at": timeutils.utcnow(),
                              "status": "DELETING"})
    securitygroup_ref.save(session)

    return dict(securitygroup_ref)


def process_get_all(context, gid, filters={}):
    session = get_session()
    query = session.query(models.Process).filter_by(gid=gid, deleted=0)

    if 'pid' in filters:
        query = query.filter_by(pid=filters['pid'])
    if 'ppid' in filters:
        query = query.filter_by(ppid=filters['ppid'])
    if 'name' in filters:
        query = query.filter_by(display_name=filters['name'])
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    if 'glance_image_id' in filters:
        query = query.filter_by(glance_image_id=filters['glance_image_id'])
    if 'nova_flavor_id' in filters:
        query = query.filter_by(nova_flavor_id=filters['nova_flavor_id'])
    if 'keypair_id' in filters:
        query = query.filter_by(keypair_id=filters['keypair_id'])
    if 'securitygroup_id' in filters:
        query = query.filter(
            models.Process.securitygroups.any(
                securitygroup_id=filters["securitygroup_id"]))
    if 'network_id' in filters:
        query = query.filter(
            models.Process.networks.any(
                network_id=filters["network_id"]))

    process_refs = query.all()
    return [_get_process_dict(process_ref) for process_ref in process_refs]


def process_get_by_pid(context, gid, pid):
    session = get_session()
    process_ref = session.query(models.Process)\
        .filter_by(deleted=0)\
        .filter_by(gid=gid)\
        .filter_by(pid=pid)\
        .first()

    if not process_ref:
        raise exception.ProcessNotFound(pid=pid)
    return _get_process_dict(process_ref)


def process_create(context, values, network_ids, securitygroup_ids):
    session = get_session()
    with session.begin():
        process_ref = models.Process(**values)
        session.add(process_ref)

        try:
            if network_ids:
                for network_id in network_ids:
                    network_ref = session.query(models.Network)\
                        .filter_by(deleted=0)\
                        .filter_by(gid=values["gid"])\
                        .filter_by(network_id=network_id)\
                        .first()
                    if network_ref is None:
                        raise exception.NetworkNotFound(
                            network_id=network_id)
                    session.add(models.ProcessNetwork(
                        pid=values["pid"],
                        network_id=network_ref.network_id))

            if securitygroup_ids:
                for securitygroup_id in securitygroup_ids:
                    securitygroup_ref = session.query(models.Securitygroup)\
                        .filter_by(deleted=0)\
                        .filter_by(gid=values["gid"])\
                        .filter_by(securitygroup_id=securitygroup_id)\
                        .first()
                    if securitygroup_ref is None:
                        raise exception.SecuritygroupNotFound(
                            securitygroup_id=securitygroup_id)
                    session.add(models.ProcessSecuritygroup(
                        pid=values["pid"],
                        securitygroup_id=securitygroup_ref.securitygroup_id))

            session.flush()
        except db_exc.DBDuplicateEntry:
            msg = _("securitygroup or network is duplicated")
            raise exception.InvalidInput(reason=msg)

    return _get_process_dict(process_ref)


def process_update(context, gid, pid, values):
    session = get_session()
    process_ref = session.query(models.Process). \
        filter_by(deleted=0). \
        filter_by(gid=gid). \
        filter_by(pid=pid). \
        first()
    if process_ref is None:
        raise exception.ProcessNotFound(pid=pid)

    process_ref.update(values)
    process_ref.save(session)

    return dict(process_ref)


def process_delete(context, gid, pid):
    session = get_session()
    process_ref = session.query(models.Process). \
        filter_by(deleted=0). \
        filter_by(gid=gid). \
        filter_by(pid=pid). \
        first()
    if process_ref is None:
        raise exception.ProcessNotFound(pid=pid)

    process_ref.update({"deleted": 1,
                        "deleted_at": timeutils.utcnow(),
                        "status": "DELETING"})
    process_ref.save(session)

    return _get_process_dict(process_ref)


def _get_process_dict(process_ref):
    process_dict = dict(process_ref)
    process_dict.update(dict(
        securitygroups=[dict(securitygroup)
                        for securitygroup in process_ref.securitygroups]))
    process_dict.update(dict(
        networks=[dict(network) for network in process_ref.networks]))
    return process_dict
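Throughout this module deletion is soft: the *_delete functions set deleted=1, stamp deleted_at, and move status to 'DELETING', while every reader filters on deleted=0. A sketch of the effect (gid hypothetical, assumes a configured database):

from rack import context
from rack.db.sqlalchemy import api

ctxt = context.get_admin_context()
api.group_delete(ctxt, 'example-gid')   # row is marked, not removed
groups = api.group_get_all(ctxt)        # the marked row no longer appears
assert all(g['deleted'] == 0 for g in groups)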
4
rack/db/sqlalchemy/migrate_repo/README
Normal file
@@ -0,0 +1,4 @@
This is a database migration repository.

More information at
http://code.google.com/p/sqlalchemy-migrate/
0
rack/db/sqlalchemy/migrate_repo/__init__.py
Normal file
19
rack/db/sqlalchemy/migrate_repo/manage.py
Normal file
@@ -0,0 +1,19 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from migrate.versioning.shell import main


if __name__ == '__main__':
    main(debug='False', repository='.')
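This delegates to the standard sqlalchemy-migrate shell, so the usual commands apply; a few typical invocations (run from the migrate_repo directory, database URL illustrative):

# python manage.py version                                -> newest script version in this repo
# python manage.py version_control sqlite:///rack.sqlite  -> put an existing DB under version control
# python manage.py db_version sqlite:///rack.sqlite       -> version currently stamped in the DB
# python manage.py upgrade sqlite:///rack.sqlite          -> apply pending migrations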
20
rack/db/sqlalchemy/migrate_repo/migrate.cfg
Normal file
@@ -0,0 +1,20 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=rack

# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version

# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
@@ -0,0 +1,56 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import MetaData, Table, Column, Integer, String, DateTime

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

meta = MetaData()

groups = Table('groups', meta,
               Column('created_at', DateTime),
               Column('updated_at', DateTime),
               Column('deleted_at', DateTime),
               Column('deleted', Integer),
               Column('gid', String(length=255),
                      primary_key=True, nullable=False),
               Column('user_id', String(length=255)),
               Column('project_id', String(length=255)),
               Column('display_name', String(length=255)),
               Column('display_description', String(length=255)),
               Column('status', String(length=255)),
               mysql_engine='InnoDB',
               mysql_charset='utf8'
               )


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        groups.create()
    except Exception:
        LOG.info(repr(groups))
        LOG.exception(_('Exception while creating groups table.'))
        raise


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        groups.drop()
    except Exception:
        LOG.info(repr(groups))
        LOG.exception(_('Exception while dropping groups table.'))
        raise
@@ -0,0 +1,67 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, String

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

meta = MetaData()

services = Table('services', meta,
                 Column('created_at', DateTime),
                 Column('updated_at', DateTime),
                 Column('deleted_at', DateTime),
                 Column('id', Integer, primary_key=True, nullable=False),
                 Column('host', String(length=255)),
                 Column('binary', String(length=255)),
                 Column('topic', String(length=255)),
                 Column('report_count', Integer, nullable=False),
                 Column('disabled', Boolean),
                 Column('deleted', Integer),
                 Column('disabled_reason', String(length=255)),
                 mysql_engine='InnoDB',
                 mysql_charset='utf8'
                 )


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        services.create()
    except Exception:
        LOG.info(repr(services))
        LOG.exception(_('Exception while creating services table.'))
        raise

    UniqueConstraint('host', 'topic', 'deleted',
                     table=services,
                     name='uniq_services0host0topic0deleted').create()
    UniqueConstraint('host', 'binary', 'deleted',
                     table=services,
                     name='uniq_services0host0binary0deleted').create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        services.drop()
    except Exception:
        LOG.info(repr(services))
        LOG.exception(_('Exception while dropping services table.'))
        raise
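A note on why 'deleted' participates in the unique keys above: rows are soft-deleted (the deleted column is set rather than the row removed), so (host, topic) or (host, binary) alone would collide when a service re-registers. Hypothetical rows:

# host    topic       deleted    (host, topic, deleted) key
# node1   scheduler   1          ('node1', 'scheduler', 1)  <- soft-deleted row stays
# node1   scheduler   0          ('node1', 'scheduler', 0)  <- new live row, no collision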
@@ -0,0 +1,66 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
from migrate import ForeignKeyConstraint
|
||||||
|
from migrate.changeset import UniqueConstraint
|
||||||
|
from sqlalchemy import Column, MetaData, Table
|
||||||
|
from sqlalchemy import Boolean, DateTime, Integer, String, Text
|
||||||
|
|
||||||
|
from rack.openstack.common.gettextutils import _
|
||||||
|
from rack.openstack.common import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
meta = MetaData()
|
||||||
|
|
||||||
|
keypairs = Table('keypairs', meta,
|
||||||
|
Column('created_at', DateTime),
|
||||||
|
Column('updated_at', DateTime),
|
||||||
|
Column('deleted_at', DateTime),
|
||||||
|
Column('deleted', Integer),
|
||||||
|
Column('keypair_id', String(length=36), primary_key=True, nullable=False),
|
||||||
|
Column('gid', String(length=36), nullable=False),
|
||||||
|
Column('nova_keypair_id', String(length=255)),
|
||||||
|
Column('private_key', Text),
|
||||||
|
Column('display_name', String(length=255)),
|
||||||
|
Column('is_default', Boolean),
|
||||||
|
Column('user_id', String(length=255)),
|
||||||
|
Column('project_id', String(length=255)),
|
||||||
|
Column('status', String(length=255)),
|
||||||
|
mysql_engine='InnoDB',
|
||||||
|
mysql_charset='utf8'
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
|
||||||
|
meta.bind = migrate_engine
|
||||||
|
|
||||||
|
try:
|
||||||
|
keypairs.create()
|
||||||
|
groups = Table("groups", meta, autoload=True)
|
||||||
|
ForeignKeyConstraint([keypairs.c.gid], [groups.c.gid]).create()
|
||||||
|
except Exception:
|
||||||
|
LOG.info(repr(keypairs))
|
||||||
|
LOG.exception(_('Exception while creating keypairs table.'))
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
|
||||||
|
meta.bind = migrate_engine
|
||||||
|
|
||||||
|
try:
|
||||||
|
keypairs.drop()
|
||||||
|
except Exception:
|
||||||
|
LOG.info(repr(keypairs))
|
||||||
|
LOG.exception(_('Exception while dropping keypairs table.'))
|
||||||
|
raise
|
||||||
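For context, a minimal sketch of how sqlalchemy-migrate applies numbered scripts like the one above. The engine URL and repository path are illustrative assumptions, not part of this commit; the `versioning_api` calls mirror the ones the repository's own migration module (shown further down) makes.

# Illustrative only: DSN and repository path are assumed.
from migrate.versioning import api as versioning_api
from sqlalchemy import create_engine

engine = create_engine('mysql://rack:password@localhost/rack')  # assumed DSN
repo = 'rack/db/sqlalchemy/migrate_repo'                        # assumed path
versioning_api.version_control(engine, repo, version=0)  # start tracking at 0
versioning_api.upgrade(engine, repo)  # runs each script's upgrade() in order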
@@ -0,0 +1,67 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, String

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

meta = MetaData()


securitygroups = Table('securitygroups', meta,
                       Column('created_at', DateTime),
                       Column('updated_at', DateTime),
                       Column('deleted_at', DateTime),
                       Column('deleted', Integer, nullable=False),
                       Column('securitygroup_id', String(length=36), primary_key=True, nullable=False),
                       Column('gid', String(length=36), nullable=False),
                       Column('neutron_securitygroup_id', String(length=36)),
                       Column('is_default', Boolean, nullable=False),
                       Column('user_id', String(length=255), nullable=False),
                       Column('project_id', String(length=255), nullable=False),
                       Column('display_name', String(length=255), nullable=False),
                       Column('status', String(length=255), nullable=False),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8'
                       )


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    groups = Table("groups", meta, autoload=True)

    try:
        securitygroups.create()
    except Exception:
        LOG.info(repr(securitygroups))
        LOG.exception(_('Exception while creating securitygroups table.'))
        raise

    ForeignKeyConstraint(columns=[securitygroups.c.gid],
                         refcolumns=[groups.c.gid]).create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        securitygroups.drop()
    except Exception:
        LOG.info(repr(securitygroups))
        LOG.exception(_('Exception while dropping securitygroups table.'))
        raise
@@ -0,0 +1,66 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import MetaData, Table, Column, Integer
from sqlalchemy import String, DateTime, Boolean

from rack.openstack.common.gettextutils import _  # needed by the _() calls below
from rack.openstack.common import log as logging


LOG = logging.getLogger(__name__)

meta = MetaData()

networks = Table('networks', meta,
                 Column('created_at', DateTime),
                 Column('updated_at', DateTime),
                 Column('deleted_at', DateTime),
                 Column('network_id', String(length=255),
                        primary_key=True, nullable=False),
                 Column('gid', String(length=255), nullable=False),
                 Column('neutron_network_id', String(length=255)),
                 Column('is_admin', Boolean),
                 Column('subnet', String(length=255)),
                 Column('ext_router', String(length=255)),
                 Column('user_id', String(length=255)),
                 Column('project_id', String(length=255)),
                 Column('display_name', String(length=255)),
                 Column('deleted', Integer),
                 Column('status', String(length=255)),
                 mysql_engine='InnoDB',
                 mysql_charset='utf8'
                 )


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        networks.create()
        groups = Table("groups", meta, autoload=True)
        ForeignKeyConstraint([networks.c.gid], [groups.c.gid]).create()
    except Exception:
        LOG.info(repr(networks))
        LOG.exception(_('Exception while creating networks table.'))
        raise


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        networks.drop()
    except Exception:
        LOG.info(repr(networks))
        LOG.exception(_('Exception while dropping networks table.'))
        raise
@@ -0,0 +1,76 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import DateTime, Integer, String

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

meta = MetaData()


processes = Table('processes', meta,
                  Column('created_at', DateTime),
                  Column('updated_at', DateTime),
                  Column('deleted_at', DateTime),
                  Column('deleted', Integer, nullable=False),
                  Column('gid', String(length=36), nullable=False),
                  Column('keypair_id', String(length=36)),
                  Column('pid', String(length=36), primary_key=True, nullable=False),
                  Column('ppid', String(length=36)),
                  Column('nova_instance_id', String(length=36)),
                  Column('glance_image_id', String(length=36), nullable=False),
                  Column('nova_flavor_id', Integer, nullable=False),
                  Column('user_id', String(length=255), nullable=False),
                  Column('project_id', String(length=255), nullable=False),
                  Column('display_name', String(length=255), nullable=False),
                  Column('status', String(length=255), nullable=False),
                  mysql_engine='InnoDB',
                  mysql_charset='utf8'
                  )


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    groups = Table("groups", meta, autoload=True)
    keypairs = Table("keypairs", meta, autoload=True)

    try:
        processes.create()
    except Exception:
        LOG.info(repr(processes))
        LOG.exception(_('Exception while creating processes table.'))
        raise

    ForeignKeyConstraint(columns=[processes.c.gid],
                         refcolumns=[groups.c.gid]).create()

    ForeignKeyConstraint(columns=[processes.c.keypair_id],
                         refcolumns=[keypairs.c.keypair_id]).create()

    ForeignKeyConstraint(columns=[processes.c.ppid],
                         refcolumns=[processes.c.pid]).create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        processes.drop()
    except Exception:
        LOG.info(repr(processes))
        LOG.exception(_('Exception while dropping processes table.'))
        raise
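The ppid -> pid constraint above is self-referential: each process row can point at a parent row in the same table, mirroring fork semantics. A self-contained sketch of the pattern (trimmed to two columns, scratch in-memory SQLite engine; the pids are illustrative):

from sqlalchemy import Column, ForeignKey, MetaData, String, Table
from sqlalchemy import create_engine

engine = create_engine('sqlite://')  # throwaway in-memory database
meta = MetaData()
procs = Table('procs', meta,
              Column('pid', String(36), primary_key=True),
              Column('ppid', String(36), ForeignKey('procs.pid')))
meta.create_all(engine)
engine.execute(procs.insert(), pid='p-1', ppid=None)   # root of the tree
engine.execute(procs.insert(), pid='p-2', ppid='p-1')  # "forked" child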
@@ -0,0 +1,60 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint, UniqueConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import String

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

meta = MetaData()


processes_securitygroups = Table('processes_securitygroups', meta,
                                 Column('pid', String(length=36), nullable=False, primary_key=True),
                                 Column('securitygroup_id', String(length=36), nullable=False, primary_key=True),
                                 mysql_engine='InnoDB',
                                 mysql_charset='utf8'
                                 )


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    processes = Table("processes", meta, autoload=True)
    securitygroups = Table("securitygroups", meta, autoload=True)

    try:
        processes_securitygroups.create()
    except Exception:
        LOG.info(repr(processes_securitygroups))
        LOG.exception(_('Exception while creating '
                        'processes_securitygroups table.'))
        raise

    ForeignKeyConstraint(columns=[processes_securitygroups.c.pid],
                         refcolumns=[processes.c.pid]).create()
    ForeignKeyConstraint(columns=[processes_securitygroups.c.securitygroup_id],
                         refcolumns=[securitygroups.c.securitygroup_id]).create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        processes_securitygroups.drop()
    except Exception:
        LOG.info(repr(processes_securitygroups))
        LOG.exception(_('Exception while dropping '
                        'processes_securitygroups table.'))
        raise
@@ -0,0 +1,60 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import String

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

meta = MetaData()


processes_networks = Table('processes_networks', meta,
                           Column('pid', String(length=36), nullable=False, primary_key=True),
                           Column('network_id', String(length=36), nullable=False, primary_key=True),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8'
                           )


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    processes = Table("processes", meta, autoload=True)
    networks = Table("networks", meta, autoload=True)

    try:
        processes_networks.create()
    except Exception:
        LOG.info(repr(processes_networks))
        LOG.exception(_('Exception while creating processes_networks '
                        'table.'))
        raise

    ForeignKeyConstraint(columns=[processes_networks.c.pid],
                         refcolumns=[processes.c.pid]).create()
    ForeignKeyConstraint(columns=[processes_networks.c.network_id],
                         refcolumns=[networks.c.network_id]).create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        processes_networks.drop()
    except Exception:
        LOG.info(repr(processes_networks))
        LOG.exception(_('Exception while dropping processes_networks '
                        'table.'))
        raise
85
rack/db/sqlalchemy/migration.py
Normal file
@@ -0,0 +1,85 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy

from rack.db.sqlalchemy import api as db_session
from rack import exception
from rack.openstack.common.gettextutils import _

INIT_VERSION = 0
_REPOSITORY = None

get_engine = db_session.get_engine


def db_sync(version=None):
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.RackException(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository,
                                        version)


def db_version():
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            # Some pre-Essex DB's may not be version controlled.
            # Require them to upgrade using Essex first.
            raise exception.RackException(
                _("Upgrade DB using Essex release first."))


def db_initial_version():
    return INIT_VERSION


def db_version_control(version=None):
    repository = _find_migrate_repo()
    versioning_api.version_control(get_engine(), repository, version)
    return version


def _find_migrate_repo():
    """Get the path for the migrate repository."""
    global _REPOSITORY
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
    assert os.path.exists(path)
    if _REPOSITORY is None:
        _REPOSITORY = Repository(path)
    return _REPOSITORY
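A hypothetical command-line wrapper (not part of this hunk) would drive the helpers above like so; the entry point name and argument handling are assumptions for illustration:

import sys

from rack.db.sqlalchemy import migration


def main():
    target = sys.argv[1] if len(sys.argv) > 1 else None
    print('schema version before sync: %s' % migration.db_version())
    migration.db_sync(target)  # upgrades, or downgrades if target is lower
    print('schema version after sync: %s' % migration.db_version())


if __name__ == '__main__':
    main()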
184
rack/db/sqlalchemy/models.py
Normal file
@@ -0,0 +1,184 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.openstack.common.db.sqlalchemy import models

from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, Text, schema
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()


class Group(models.SoftDeleteMixin,
            models.TimestampMixin,
            models.ModelBase,
            Base):

    __tablename__ = 'groups'
    securitygroups = relationship("Securitygroup")
    processes = relationship("Process")

    gid = Column(String(36), primary_key=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    display_name = Column(String(255))
    display_description = Column(String(255))
    status = Column(String(255))


class Service(models.SoftDeleteMixin,
              models.TimestampMixin,
              models.ModelBase,
              Base):
    """Represents a running service on a host."""

    __tablename__ = 'services'
    __table_args__ = (
        schema.UniqueConstraint("host", "topic", "deleted",
                                name="uniq_services0host0topic0deleted"),
        schema.UniqueConstraint("host", "binary", "deleted",
                                name="uniq_services0host0binary0deleted")
    )

    id = Column(Integer, primary_key=True)
    host = Column(String(255))
    binary = Column(String(255))
    topic = Column(String(255))
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    disabled_reason = Column(String(255))


class Network(models.SoftDeleteMixin,
              models.TimestampMixin,
              models.ModelBase,
              Base):

    __tablename__ = 'networks'

    network_id = Column(String(255), primary_key=True)
    gid = Column(String(255))
    neutron_network_id = Column(String(255))
    is_admin = Column(Boolean, default=False)
    subnet = Column(String(255))
    ext_router = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    display_name = Column(String(255))
    status = Column(String(255))


class Keypair(models.SoftDeleteMixin,
              models.TimestampMixin,
              models.ModelBase,
              Base):

    __tablename__ = 'keypairs'

    keypair_id = Column(String(36), primary_key=True)
    gid = Column(String(36), ForeignKey('groups.gid'), nullable=False)
    user_id = Column(String(255))
    project_id = Column(String(255))
    nova_keypair_id = Column(String(255))
    private_key = Column(Text)
    display_name = Column(String(255))
    is_default = Column(Boolean, default=False)
    status = Column(String(255))


class Securitygroup(models.SoftDeleteMixin,
                    models.TimestampMixin,
                    models.ModelBase,
                    Base):

    __tablename__ = 'securitygroups'

    deleted = Column(Integer, nullable=False, default=0)
    securitygroup_id = Column(String(36), primary_key=True)
    gid = Column(String(36), ForeignKey('groups.gid'))
    neutron_securitygroup_id = Column(String(36))
    is_default = Column(Boolean, default=False)
    user_id = Column(String(255))
    project_id = Column(String(255))
    display_name = Column(String(255))
    status = Column(String(255))

    group = relationship("Group",
                         foreign_keys=gid,
                         primaryjoin='and_('
                         'Securitygroup.gid == Group.gid,'
                         'Securitygroup.deleted == 0,'
                         'Group.deleted == 0)')


class Process(models.SoftDeleteMixin,
              models.TimestampMixin,
              models.ModelBase,
              Base):

    __tablename__ = 'processes'

    deleted = Column(Integer, nullable=False, default=0)
    gid = Column(String(36), ForeignKey('groups.gid'), nullable=False)
    keypair_id = Column(String(36), ForeignKey('keypairs.keypair_id'))
    pid = Column(String(36), primary_key=True)
    ppid = Column(String(36), ForeignKey('processes.pid'))
    nova_instance_id = Column(String(36))
    glance_image_id = Column(String(36), nullable=False)
    nova_flavor_id = Column(Integer, nullable=False)
    user_id = Column(String(255), nullable=False)
    project_id = Column(String(255), nullable=False)
    display_name = Column(String(255), nullable=False)
    status = Column(String(255), nullable=False)

    group = relationship("Group",
                         foreign_keys=gid,
                         primaryjoin='and_('
                         'Process.gid == Group.gid,'
                         'Process.deleted == 0,'
                         'Group.deleted == 0)')

    securitygroups = relationship("Securitygroup",
                                  secondary="processes_securitygroups",
                                  primaryjoin='and_('
                                  'Process.pid == ProcessSecuritygroup.pid,'
                                  'Process.deleted == 0)',
                                  secondaryjoin='and_('
                                  'Securitygroup.securitygroup_id == ProcessSecuritygroup.securitygroup_id,'
                                  'Securitygroup.deleted == 0)',
                                  backref="processes")

    networks = relationship("Network",
                            secondary="processes_networks",
                            primaryjoin='and_('
                            'Process.pid == ProcessNetwork.pid,'
                            'Process.deleted == 0)',
                            secondaryjoin='and_('
                            'Network.network_id == ProcessNetwork.network_id,'
                            'Network.deleted == 0)',
                            backref="processes")


class ProcessSecuritygroup(models.ModelBase, Base):

    __tablename__ = 'processes_securitygroups'

    pid = Column(String(36), ForeignKey('processes.pid'),
                 nullable=False, primary_key=True)
    securitygroup_id = Column(String(36),
                              ForeignKey('securitygroups.securitygroup_id'),
                              nullable=False, primary_key=True)


class ProcessNetwork(models.ModelBase, Base):

    __tablename__ = 'processes_networks'

    pid = Column(String(36), ForeignKey('processes.pid'),
                 nullable=False, primary_key=True)
    network_id = Column(String(36), ForeignKey('networks.network_id'),
                        nullable=False, primary_key=True)
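A sketch of how the soft-delete-aware relationships above behave. Note that `get_session` is an assumption here: this commit only shows `get_engine` being imported from rack.db.sqlalchemy.api, though OpenStack-derived projects typically expose a session factory alongside it.

from rack.db.sqlalchemy import api as db_api
from rack.db.sqlalchemy import models

session = db_api.get_session()  # assumed session factory
proc = session.query(models.Process).filter_by(deleted=0).first()
if proc is not None:
    # The primaryjoin on Process.group filters both sides on deleted == 0,
    # so a soft-deleted group resolves to None even though its row remains.
    print(proc.display_name, proc.group.display_name if proc.group else None)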
61
rack/db/sqlalchemy/types.py
Normal file
@@ -0,0 +1,61 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Custom SQLAlchemy types."""

from sqlalchemy.dialects import postgresql
from sqlalchemy import types

from rack import utils


class IPAddress(types.TypeDecorator):
    """An SQLAlchemy type representing an IP address."""

    impl = types.String

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        else:
            return dialect.type_descriptor(types.String(39))

    def process_bind_param(self, value, dialect):
        """Process/format the value before inserting it into the db."""
        if dialect.name == 'postgresql':
            return value
        # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
        # form, not validate it.
        elif utils.is_valid_ipv6(value):
            return utils.get_shortened_ipv6(value)
        return value


class CIDR(types.TypeDecorator):
    """An SQLAlchemy type representing a CIDR definition."""

    impl = types.String

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        else:
            return dialect.type_descriptor(types.String(43))

    def process_bind_param(self, value, dialect):
        """Process/format the value before inserting it into the db."""
        # NOTE(sdague): normalize all the inserts
        if utils.is_valid_ipv6_cidr(value):
            return utils.get_shortened_ipv6_cidr(value)
        return value
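These types substitute anywhere a String column would be used: native INET on postgresql, a plain string elsewhere. A sketch with a hypothetical table (not part of this commit):

from sqlalchemy import Column, MetaData, String, Table

from rack.db.sqlalchemy.types import CIDR, IPAddress

meta = MetaData()
example = Table('example_endpoints', meta,  # hypothetical table
                Column('id', String(36), primary_key=True),
                Column('address', IPAddress()),  # INET on postgresql
                Column('prefix', CIDR()))        # String(43) elsewhere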
606
rack/db/sqlalchemy/utils.py
Normal file
@@ -0,0 +1,606 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import schema
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType

from rack.db.sqlalchemy import api as db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils


LOG = logging.getLogger(__name__)


def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.
    """
    metadata = MetaData()
    metadata.bind = engine
    return Table(name, metadata, autoload=True)


class InsertFromSelect(UpdateBase):
    def __init__(self, table, select):
        self.table = table
        self.select = select


@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))


class DeleteFromSelect(UpdateBase):
    def __init__(self, table, select, column):
        self.table = table
        self.select = select
        self.column = column


@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
    return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.column),
        element.column.name,
        compiler.process(element.select))


def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except Exception:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "unsupported by sqlite.")
        raise exception.RackException(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an "
                "instance of sqlalchemy.Column.")
        raise exception.RackException(msg % column_name)
    return column


def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
    regexp = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"

    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)

    sql_data = migrate_engine.execute(
        """
        SELECT sql
        FROM
            sqlite_master
        WHERE
            type = 'table' AND
            name = :table_name;
        """,
        table_name=table_name
    ).fetchone()[0]

    uniques = set([
        schema.UniqueConstraint(
            *[getattr(table.c, c.strip(' "'))
              for c in cols.split(",")], name=name
        )
        for name, cols in re.findall(regexp, sql_data)
    ])

    return uniques


def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                      **col_name_col_instance):
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)

    table = Table(table_name, meta, autoload=True)
    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            new_column = _get_not_supported_column(col_name_col_instance,
                                                   column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())

    uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name)
    table.constraints.update(uniques)

    constraints = [constraint for constraint in table.constraints
                   if not constraint.name == uc_name and
                   not isinstance(constraint, schema.ForeignKeyConstraint)]

    new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"],
                             *column_names,
                             unique=index["unique"]))
    f_keys = []
    for fk in insp.get_foreign_keys(table_name):
        refcolumns = [fk['referred_table'] + '.' + col
                      for col in fk['referred_columns']]
        f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
                      refcolumns, table=new_table, name=fk['name']))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)
    table.drop()

    [index.create(migrate_engine) for index in indexes]
    for fkey in f_keys:
        fkey.create()
    new_table.rename(table_name)


def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """This method drops UC from table and works for mysql, postgresql and
    sqlite. In mysql and postgresql we are able to use "alter table"
    construction. In sqlite there is only one way to drop UC:
        1) Create new table with same columns, indexes and constraints
           (except the one that we want to drop).
        2) Copy data from old table to new.
        3) Drop old table.
        4) Rename new table to the name of old table.

    :param migrate_engine: sqlalchemy engine
    :param table_name: name of table that contains uniq constraint.
    :param uc_name: name of uniq constraint that will be dropped.
    :param columns: columns that are in uniq constraint.
    :param col_name_col_instance: contains pair column_name=column_instance.
        column_instance is an instance of Column. These params
        are required only for columns that have types
        unsupported by sqlite. For example BigInteger.
    """
    if migrate_engine.name == "sqlite":
        _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                          **col_name_col_instance)
    else:
        meta = MetaData()
        meta.bind = migrate_engine
        t = Table(table_name, meta, autoload=True)
        uc = UniqueConstraint(*columns, table=t, name=uc_name)
        uc.drop()


def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """This method is used to drop all old rows that have the same values for
    the columns in uc_column_names.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(list(columns_for_group_by))

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)


def check_shadow_table(migrate_engine, table_name):
    """This method checks that the table with ``table_name`` and its
    corresponding shadow table have the same columns.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
                         autoload=True)

    columns = dict([(c.name, c) for c in table.columns])
    shadow_columns = dict([(c.name, c) for c in shadow_table.columns])

    for name, column in columns.iteritems():
        if name not in shadow_columns:
            raise exception.RackException(
                _("Missing column %(table)s.%(column)s in shadow table")
                % {'column': name, 'table': shadow_table.name})
        shadow_column = shadow_columns[name]

        if not isinstance(shadow_column.type, type(column.type)):
            raise exception.RackException(
                _("Different types in %(table)s.%(column)s and shadow table: "
                  "%(c_type)s %(shadow_c_type)s")
                % {'column': name, 'table': table.name,
                   'c_type': column.type,
                   'shadow_c_type': shadow_column.type})

    for name, column in shadow_columns.iteritems():
        if name not in columns:
            raise exception.RackException(
                _("Extra column %(table)s.%(column)s in shadow table")
                % {'column': name, 'table': shadow_table.name})
    return True


def create_shadow_table(migrate_engine, table_name=None, table=None,
                        **col_name_col_instance):
    """This method creates a shadow table for the table with name
    ``table_name`` or the table instance ``table``.

    :param table_name: Autoload table with this name and create shadow table
    :param table: Autoloaded table, so just create corresponding shadow table.
    :param col_name_col_instance: contains pair column_name=column_instance.
        column_instance is an instance of Column. These params
        are required only for columns that have types
        unsupported by sqlite. For example BigInteger.

    :returns: The created shadow_table object.
    """
    meta = MetaData(bind=migrate_engine)

    if table_name is None and table is None:
        raise exception.RackException(_("Specify `table_name` or `table` "
                                        "param"))
    if not (table_name is None or table is None):
        raise exception.RackException(_("Specify only one param `table_name` "
                                        "`table`"))

    if table is None:
        table = Table(table_name, meta, autoload=True)

    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            new_column = _get_not_supported_column(col_name_col_instance,
                                                   column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())

    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
    shadow_table = Table(shadow_table_name, meta, *columns,
                         mysql_engine='InnoDB')
    try:
        shadow_table.create()
        return shadow_table
    except (OperationalError, ProgrammingError):
        LOG.info(repr(shadow_table))
        LOG.exception(_('Exception while creating table.'))
        raise exception.ShadowTableExists(name=shadow_table_name)
    except Exception:
        LOG.info(repr(shadow_table))
        LOG.exception(_('Exception while creating table.'))


def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise exception.RackException(_("Unsupported id column type"))


def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    table = get_table(migrate_engine, table_name)

    insp = reflection.Inspector.from_engine(migrate_engine)
    real_indexes = insp.get_indexes(table_name)
    existing_index_names = dict([(index['name'], index['column_names'])
                                 for index in real_indexes])

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in existing_index_names:
            column_names = [table.c[c] for c in existing_index_names[name]]
            old_index = Index(name, *column_names, unique=index["unique"])
            old_index.drop(migrate_engine)

        column_names = [table.c[c] for c in index['column_names']]
        new_index = Index(index["name"], *column_names,
                          unique=index["unique"])
        new_index.create(migrate_engine)


def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = MetaData(bind=migrate_engine)
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()


def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    table.update().\
        where(table.c.deleted == True).\
        values(new_deleted=table.c.id).\
        execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
    #                 constraints in a sqlite DB, and our `deleted` column
    #                 has 2 check constraints. So there is only one way to
    #                 remove these constraints:
    #                 1) Create new table with the same columns, constraints
    #                    and indexes (except the deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted
        #                 column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        # NOTE(I159): when the type of column `deleted` is changed from
        # boolean to int, the corresponding CHECK constraint is dropped too.
        # But starting from SQLAlchemy version 0.8.3, those CHECK constraints
        # aren't dropped anymore. So despite the fact that column deleted is
        # of type int now, we still restrict its values to be either 0 or 1.
        constraint_markers = (
            "deleted in (0, 1)",
            "deleted IN (:deleted_1, :deleted_2)",
            "deleted IN (:param_1, :param_2)"
        )
        return any(sqltext.endswith(marker) for marker in constraint_markers)

    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == True).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    new_table.update().\
        where(new_table.c.deleted == False).\
        values(deleted=default_deleted_value).\
        execute()


def _index_exists(migrate_engine, table_name, index_name):
    inspector = reflection.Inspector.from_engine(migrate_engine)
    indexes = inspector.get_indexes(table_name)
    index_names = [index['name'] for index in indexes]

    return index_name in index_names


def _add_index(migrate_engine, table, index_name, idx_columns):
    index = Index(
        index_name, *[getattr(table.c, col) for col in idx_columns]
    )
    index.create()


def _drop_index(migrate_engine, table, index_name, idx_columns):
    if _index_exists(migrate_engine, table.name, index_name):
        index = Index(
            index_name, *[getattr(table.c, col) for col in idx_columns]
        )
        index.drop()


def _change_index_columns(migrate_engine, table, index_name,
                          new_columns, old_columns):
    _drop_index(migrate_engine, table, index_name, old_columns)
    _add_index(migrate_engine, table, index_name, new_columns)


def modify_indexes(migrate_engine, data, upgrade=True):
    if migrate_engine.name == 'sqlite':
        return

    meta = MetaData()
    meta.bind = migrate_engine

    for table_name, indexes in data.iteritems():
        table = Table(table_name, meta, autoload=True)

        for index_name, old_columns, new_columns in indexes:
            if not upgrade:
                new_columns, old_columns = old_columns, new_columns

            if migrate_engine.name == 'postgresql':
                if upgrade:
                    _add_index(migrate_engine, table, index_name,
                               new_columns)
                else:
                    _drop_index(migrate_engine, table, index_name,
                                old_columns)
            elif migrate_engine.name == 'mysql':
                _change_index_columns(migrate_engine, table, index_name,
                                      new_columns, old_columns)
            else:
                raise ValueError('Unsupported DB %s' % migrate_engine.name)
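For example, a later migration script might call the helper above like this. The script itself is hypothetical; the constraint and column names are borrowed from the services table earlier in this commit:

from rack.db.sqlalchemy import utils


def upgrade(migrate_engine):
    # ALTER TABLE on mysql/postgresql; copy-and-rename dance on sqlite,
    # exactly as described in drop_unique_constraint's docstring.
    utils.drop_unique_constraint(migrate_engine, 'services',
                                 'uniq_services0host0topic0deleted',
                                 'host', 'topic', 'deleted')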
75
rack/debugger.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def enabled():
|
||||||
|
return ('--remote_debug-host' in sys.argv and
|
||||||
|
'--remote_debug-port' in sys.argv)
|
||||||
|
|
||||||
|
|
||||||
|
def register_cli_opts():
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
cli_opts = [
|
||||||
|
cfg.StrOpt('host',
|
||||||
|
help='Debug host (IP or name) to connect. Note '
|
||||||
|
'that using the remote debug option changes how '
|
||||||
|
'Rack uses the eventlet library to support async IO. '
|
||||||
|
'This could result in failures that do not occur '
|
||||||
|
'under normal operation. Use at your own risk.'),
|
||||||
|
|
||||||
|
cfg.IntOpt('port',
|
||||||
|
help='Debug port to connect. Note '
|
||||||
|
'that using the remote debug option changes how '
|
||||||
|
'Rack uses the eventlet library to support async IO. '
|
||||||
|
'This could result in failures that do not occur '
|
||||||
|
'under normal operation. Use at your own risk.')
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
cfg.CONF.register_cli_opts(cli_opts, 'remote_debug')
|
||||||
|
|
||||||
|
|
||||||
|
def init():
|
||||||
|
from oslo.config import cfg
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
# NOTE(markmc): gracefully handle the CLI options not being registered
|
||||||
|
if 'remote_debug' not in CONF:
|
||||||
|
return
|
||||||
|
|
||||||
|
if not (CONF.remote_debug.host and CONF.remote_debug.port):
|
||||||
|
return
|
||||||
|
|
||||||
|
from rack.openstack.common.gettextutils import _
|
||||||
|
from rack.openstack.common import log as logging
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
LOG.debug(_('Listening on %(host)s:%(port)s for debug connection'),
|
||||||
|
{'host': CONF.remote_debug.host,
|
||||||
|
'port': CONF.remote_debug.port})
|
||||||
|
|
||||||
|
from pydev import pydevd
|
||||||
|
pydevd.settrace(host=CONF.remote_debug.host,
|
||||||
|
port=CONF.remote_debug.port,
|
||||||
|
stdoutToServer=False,
|
||||||
|
stderrToServer=False)
|
||||||
|
|
||||||
|
LOG.warn(_('WARNING: Using the remote debug option changes how '
|
||||||
|
'Rack uses the eventlet library to support async IO. This '
|
||||||
|
'could result in failures that do not occur under normal '
|
||||||
|
'operation. Use at your own risk.'))
|
||||||
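
The two options above are registered under the 'remote_debug' group, so on
the command line they surface as --remote_debug-host and --remote_debug-port,
which is exactly what enabled() scans sys.argv for. A minimal sketch of how a
service entry point might wire this in; the main() wrapper and call order
here are illustrative, not part of the commit:

    import sys

    from rack import debugger

    def main():
        if debugger.enabled():
            # Must run before oslo.config parses sys.argv, otherwise the
            # unregistered --remote_debug-* options would be rejected.
            debugger.register_cli_opts()
        # ... parse configuration here ...
        debugger.init()  # no-op unless both host and port were supplied
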
1598
rack/exception.py
Normal file
File diff suppressed because it is too large
114
rack/manager.py
Normal file
@@ -0,0 +1,114 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base Manager class.

Managers are responsible for a certain aspect of the system. It is a logical
grouping of code relating to a portion of the system. In general other
components should be using the manager to make changes to the components that
it is responsible for.

For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database. This allows us to keep all of the code
relating to volumes in the same place.

We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.

Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager.

Managers should be responsible for most of the db access, and
non-implementation specific data. Anything implementation specific that can't
be generalized should be done by the Driver.

In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers. You
can think of it this way: Abstract different overall strategies at the manager
level (FlatNetwork vs VlanNetwork), and different implementations at the
driver level (LinuxNetDriver vs CiscoNetDriver).

Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.

This module provides Manager, a base class for managers.

"""

from oslo.config import cfg

from rack.db import base
from rack.openstack.common import log as logging
from rack.openstack.common import periodic_task
from rack import rpc


CONF = cfg.CONF
CONF.import_opt('host', 'rack.netconf')
LOG = logging.getLogger(__name__)


class Manager(base.Base, periodic_task.PeriodicTasks):

    def __init__(self, host=None, db_driver=None, service_name='undefined'):
        if not host:
            host = CONF.host
        self.host = host
        self.backdoor_port = None
        self.service_name = service_name
        self.notifier = rpc.get_notifier(self.service_name, self.host)
        self.additional_endpoints = []
        super(Manager, self).__init__(db_driver)

    def periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)

    def init_host(self):
        """Hook to do additional manager initialization when one requests
        the service be started. This is called before any service record
        is created.

        Child classes should override this method.
        """
        pass

    def cleanup_host(self):
        """Hook to do cleanup work when the service shuts down.

        Child classes should override this method.
        """
        pass

    def pre_start_hook(self):
        """Hook to provide the manager the ability to do additional
        start-up work before any RPC queues/consumers are created. This is
        called after other initialization has succeeded and a service
        record is created.

        Child classes should override this method.
        """
        pass

    def post_start_hook(self):
        """Hook to provide the manager the ability to do additional
        start-up work immediately after a service creates RPC consumers
        and starts 'running'.

        Child classes should override this method.
        """
        pass
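
Because Manager mixes in periodic_task.PeriodicTasks, a concrete manager can
both override the lifecycle hooks above and declare tasks for
periodic_tasks() to dispatch. A minimal sketch, assuming the incubator's
usual @periodic_task.periodic_task decorator; ProcessManager and its body are
illustrative only:

    from rack import manager
    from rack.openstack.common import periodic_task

    class ProcessManager(manager.Manager):
        """Illustrative subclass; not part of this commit."""

        def init_host(self):
            # Called by the wrapping service before any service record
            # is created.
            self._known = {}

        @periodic_task.periodic_task
        def _sync(self, context):
            # Invoked each time the wrapping service calls
            # periodic_tasks().
            pass
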
58
rack/netconf.py
Normal file
@@ -0,0 +1,58 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import socket

from oslo.config import cfg

from rack import utils

CONF = cfg.CONF


def _get_my_ip():
    """Returns the actual ip of the local machine.

    This code figures out what source address would be used if some traffic
    were to be sent out to some well known address on the Internet. In this
    case, a Google DNS server is used, but the specific address does not
    matter much. No traffic is actually sent.
    """
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return utils.get_my_ipv4_address()


netconf_opts = [
    cfg.StrOpt('my_ip',
               default=_get_my_ip(),
               help='IP address of this host'),
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help='Name of this node. This can be an opaque identifier. '
                    'It is not necessarily a hostname, FQDN, or IP address. '
                    'However, the node name must be valid within '
                    'an AMQP key, and if using ZeroMQ, a valid '
                    'hostname, FQDN, or IP address'),
    cfg.BoolOpt('use_ipv6',
                default=False,
                help='Use IPv6'),
]

CONF.register_opts(netconf_opts)
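
Both defaults above are computed at import time: my_ip via the connected-UDP
trick in _get_my_ip() (connect() on a datagram socket only selects a route
and source address, nothing is transmitted), and host from the machine's
hostname. A quick, illustrative way to inspect the resolved values, assuming
no configuration files are needed:

    from oslo.config import cfg

    import rack.netconf  # noqa: registers my_ip, host and use_ipv6

    CONF = cfg.CONF
    CONF([])  # parse an empty argv so option access is valid
    print CONF.my_ip, CONF.host, CONF.use_ipv6
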
569
rack/object.py
Normal file
@@ -0,0 +1,569 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import copy
import functools

import netaddr
from oslo import messaging
import six

from rack import context
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import versionutils


LOG = logging.getLogger('object')


class NotSpecifiedSentinel:
    pass


def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_%s' % name


def make_class_properties(cls):
    # NOTE(danms/comstud): Inherit fields from super classes.
    # mro() returns the current class first and returns 'object' last, so
    # those can be skipped. Also be careful to not overwrite any fields
    # that already exist. And make sure each cls has its own copy of
    # fields and that it is not sharing the dict with a super class.
    cls.fields = dict(cls.fields)
    for supercls in cls.mro()[1:-1]:
        if not hasattr(supercls, 'fields'):
            continue
        for name, field in supercls.fields.items():
            if name not in cls.fields:
                cls.fields[name] = field
    for name, field in cls.fields.iteritems():

        def getter(self, name=name):
            attrname = get_attrname(name)
            if not hasattr(self, attrname):
                self.obj_load_attr(name)
            return getattr(self, attrname)

        def setter(self, value, name=name, field=field):
            self._changed_fields.add(name)
            try:
                return setattr(self, get_attrname(name),
                               field.coerce(self, name, value))
            except Exception:
                attr = "%s.%s" % (self.obj_name(), name)
                LOG.exception(_('Error setting %(attr)s') %
                              {'attr': attr})
                raise

        setattr(cls, name, property(getter, setter))


class RackObjectMetaclass(type):
    """Metaclass that allows tracking of object classes."""

    # NOTE(danms): This is what controls whether object operations are
    # remoted. If this is not None, use it to remote things over RPC.
    indirection_api = None

    def __init__(cls, names, bases, dict_):
        if not hasattr(cls, '_obj_classes'):
            # This will be set in the 'RackObject' class.
            cls._obj_classes = collections.defaultdict(list)
        else:
            # Add the subclass to RackObject._obj_classes
            make_class_properties(cls)
            cls._obj_classes[cls.obj_name()].append(cls)


def remotable_classmethod(fn):
    """Decorator for remotable classmethods."""
    @functools.wraps(fn)
    def wrapper(cls, context, *args, **kwargs):
        if RackObject.indirection_api:
            result = RackObject.indirection_api.object_class_action(
                context, cls.obj_name(), fn.__name__, cls.VERSION,
                args, kwargs)
        else:
            result = fn(cls, context, *args, **kwargs)
        if isinstance(result, RackObject):
            result._context = context
        return result
    return classmethod(wrapper)


def remotable(fn):
    """Decorator for remotable object methods."""
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        ctxt = self._context
        try:
            if isinstance(args[0], (context.RequestContext)):
                ctxt = args[0]
                args = args[1:]
        except IndexError:
            pass
        if ctxt is None:
            raise exception.OrphanedObjectError(method=fn.__name__,
                                                objtype=self.obj_name())
        # Force this to be set if it wasn't before.
        self._context = ctxt
        if RackObject.indirection_api:
            updates, result = RackObject.indirection_api.object_action(
                ctxt, self, fn.__name__, args, kwargs)
            for key, value in updates.iteritems():
                if key in self.fields:
                    field = self.fields[key]
                    self[key] = field.from_primitive(self, key, value)
            self.obj_reset_changes()
            self._changed_fields = set(updates.get('obj_what_changed', []))
            return result
        else:
            return fn(self, ctxt, *args, **kwargs)
    return wrapper


@six.add_metaclass(RackObjectMetaclass)
class RackObject(object):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """

    # Object versioning rules
    #
    # Each service has its set of objects, each with a version attached. When
    # a client attempts to call an object method, the server checks to see if
    # the version of that object matches (in a compatible way) its object
    # implementation. If so, cool, and if not, fail.
    VERSION = '1.0'

    # The fields present in this object as key:field pairs. For example:
    #
    # fields = { 'foo': fields.IntegerField(),
    #            'bar': fields.StringField(),
    #          }
    fields = {}
    obj_extra_fields = []

    def __init__(self, context=None, **kwargs):
        self._changed_fields = set()
        self._context = context
        for key in kwargs.keys():
            self[key] = kwargs[key]

    @classmethod
    def obj_name(cls):
        """Return a canonical name for this object which will be used over
        the wire for remote hydration.
        """
        return cls.__name__

    @classmethod
    def obj_class_from_name(cls, objname, objver):
        """Returns a class from the registry based on a name and version."""
        if objname not in cls._obj_classes:
            LOG.error(_('Unable to instantiate unregistered object type '
                        '%(objtype)s') % dict(objtype=objname))
            raise exception.UnsupportedObjectError(objtype=objname)

        latest = None
        compatible_match = None
        for objclass in cls._obj_classes[objname]:
            if objclass.VERSION == objver:
                return objclass

            version_bits = tuple([int(x) for x in objclass.VERSION.split(".")])
            if latest is None:
                latest = version_bits
            elif latest < version_bits:
                latest = version_bits

            if versionutils.is_compatible(objver, objclass.VERSION):
                compatible_match = objclass

        if compatible_match:
            return compatible_match

        latest_ver = '%i.%i' % latest
        raise exception.IncompatibleObjectVersion(objname=objname,
                                                  objver=objver,
                                                  supported=latest_ver)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        self = cls()
        self._context = context
        self.VERSION = objver
        objdata = primitive['rack_object.data']
        changes = primitive.get('rack_object.changes', [])
        for name, field in self.fields.items():
            if name in objdata:
                setattr(self, name, field.from_primitive(self, name,
                                                         objdata[name]))
        self._changed_fields = set([x for x in changes if x in self.fields])
        return self

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Object field-by-field hydration."""
        if primitive['rack_object.namespace'] != 'rack':
            # NOTE(danms): We don't do anything with this now, but it's
            # there for "the future"
            raise exception.UnsupportedObjectError(
                objtype='%s.%s' % (primitive['rack_object.namespace'],
                                   primitive['rack_object.name']))
        objname = primitive['rack_object.name']
        objver = primitive['rack_object.version']
        objclass = cls.obj_class_from_name(objname, objver)
        return objclass._obj_from_primitive(context, objver, primitive)

    def __deepcopy__(self, memo):
        """Efficiently make a deep copy of this object."""

        # NOTE(danms): A naive deepcopy would copy more than we need,
        # and since we have knowledge of the volatile bits of the
        # object, we can be smarter here. Also, nested entities within
        # some objects may be uncopyable, so we can avoid those sorts
        # of issues by copying only our field data.

        nobj = self.__class__()
        nobj._context = self._context
        for name in self.fields:
            if self.obj_attr_is_set(name):
                nval = copy.deepcopy(getattr(self, name), memo)
                setattr(nobj, name, nval)
        nobj._changed_fields = set(self._changed_fields)
        return nobj

    def obj_clone(self):
        """Create a copy."""
        return copy.deepcopy(self)

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version.

        This is responsible for taking the primitive representation of
        an object and making it suitable for the given target_version.
        This may mean converting the format of object attributes, removing
        attributes that have been added since the target version, etc.

        :param:primitive: The result of self.obj_to_primitive()
        :param:target_version: The version string requested by the recipient
                               of the object.
        :param:raises: rack.exception.UnsupportedObjectError if conversion
                       is not possible for some reason.
        """
        pass

    def obj_to_primitive(self, target_version=None):
        """Simple base-case dehydration.

        This calls to_primitive() for each item in fields.
        """
        primitive = dict()
        for name, field in self.fields.items():
            if self.obj_attr_is_set(name):
                primitive[name] = field.to_primitive(self, name,
                                                     getattr(self, name))
        if target_version:
            self.obj_make_compatible(primitive, target_version)
        obj = {'rack_object.name': self.obj_name(),
               'rack_object.namespace': 'rack',
               'rack_object.version': target_version or self.VERSION,
               'rack_object.data': primitive}
        if self.obj_what_changed():
            obj['rack_object.changes'] = list(self.obj_what_changed())
        return obj

    def obj_load_attr(self, attrname):
        """Load an additional attribute from the real object.

        This should use self._conductor, and cache any data that might
        be useful for future load operations.
        """
        raise NotImplementedError(
            _("Cannot load '%s' in the base class") % attrname)

    def save(self, context):
        """Save the changed fields back to the store.

        This is optional for subclasses, but is presented here in the base
        class for consistency among those that do.
        """
        raise NotImplementedError('Cannot save anything in the base class')

    def obj_what_changed(self):
        """Returns a set of fields that have been modified."""
        changes = set(self._changed_fields)
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    isinstance(self[field], RackObject) and
                    self[field].obj_what_changed()):
                changes.add(field)
        return changes

    def obj_get_changes(self):
        """Returns a dict of changed fields and their new values."""
        changes = {}
        for key in self.obj_what_changed():
            changes[key] = self[key]
        return changes

    def obj_reset_changes(self, fields=None):
        """Reset the list of fields that have been changed.

        Note that this is NOT "revert to previous values"
        """
        if fields:
            self._changed_fields -= set(fields)
        else:
            self._changed_fields.clear()

    def obj_attr_is_set(self, attrname):
        """Test object to see if attrname is present.

        Returns True if the named attribute has a value set, or
        False if not. Raises AttributeError if attrname is not
        a valid attribute for this object.
        """
        if attrname not in self.obj_fields:
            raise AttributeError(
                _("%(objname)s object has no attribute '%(attrname)s'") %
                {'objname': self.obj_name(), 'attrname': attrname})
        return hasattr(self, get_attrname(attrname))

    @property
    def obj_fields(self):
        return self.fields.keys() + self.obj_extra_fields

    # dictish syntactic sugar
    def iteritems(self):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        for name in self.obj_fields:
            if (self.obj_attr_is_set(name) or
                    name in self.obj_extra_fields):
                yield name, getattr(self, name)

    items = lambda self: list(self.iteritems())

    def __getitem__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        return getattr(self, name)

    def __setitem__(self, name, value):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        setattr(self, name, value)

    def __contains__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        try:
            return self.obj_attr_is_set(name)
        except AttributeError:
            return False

    def get(self, key, value=NotSpecifiedSentinel):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        if key not in self.obj_fields:
            raise AttributeError("'%s' object has no attribute '%s'" % (
                self.__class__, key))
        if value != NotSpecifiedSentinel and not self.obj_attr_is_set(key):
            return value
        else:
            return self[key]

    def update(self, updates):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        for key, value in updates.items():
            self[key] = value


class ObjectListBase(object):
    """Mixin class for lists of objects.

    This mixin class can be added as a base class for an object that
    is implementing a list of objects. It adds a single field of 'objects',
    which is the list store, and acts like a list itself.
    """

    # This is a dictionary of my_version:child_version mappings so that
    # we can support backleveling our contents based on the version
    # requested of the list object.
    child_versions = {}

    def __iter__(self):
        """List iterator interface."""
        return iter(self.objects)

    def __len__(self):
        """List length."""
        return len(self.objects)

    def __getitem__(self, index):
        """List index access."""
        if isinstance(index, slice):
            new_obj = self.__class__()
            new_obj.objects = self.objects[index]
            # NOTE(danms): We must be mixed in with a RackObject!
            new_obj.obj_reset_changes()
            new_obj._context = self._context
            return new_obj
        return self.objects[index]

    def __contains__(self, value):
        """List membership test."""
        return value in self.objects

    def count(self, value):
        """List count of value occurrences."""
        return self.objects.count(value)

    def index(self, value):
        """List index of value."""
        return self.objects.index(value)

    def sort(self, cmp=None, key=None, reverse=False):
        self.objects.sort(cmp=cmp, key=key, reverse=reverse)

    def _attr_objects_to_primitive(self):
        """Serialization of object list."""
        return [x.obj_to_primitive() for x in self.objects]

    def _attr_objects_from_primitive(self, value):
        """Deserialization of object list."""
        objects = []
        for entity in value:
            obj = RackObject.obj_from_primitive(entity, context=self._context)
            objects.append(obj)
        return objects

    def obj_make_compatible(self, primitive, target_version):
        primitives = primitive['objects']
        child_target_version = self.child_versions.get(target_version, '1.0')
        for index, item in enumerate(self.objects):
            self.objects[index].obj_make_compatible(
                primitives[index]['rack_object.data'],
                child_target_version)
            primitives[index]['rack_object.version'] = child_target_version

    def obj_what_changed(self):
        changes = set(self._changed_fields)
        for child in self.objects:
            if child.obj_what_changed():
                changes.add('objects')
        return changes


class RackObjectSerializer(messaging.NoOpSerializer):

    def _process_object(self, context, objprim):
        try:
            objinst = RackObject.obj_from_primitive(objprim, context=context)
        except exception.IncompatibleObjectVersion as e:
            objinst = self.conductor.object_backport(context, objprim,
                                                     e.kwargs['supported'])
        return objinst

    def _process_iterable(self, context, action_fn, values):
        """Process an iterable, taking an action on each value.

        :param:context: Request context
        :param:action_fn: Action to take on each item in values
        :param:values: Iterable container of things to take action on
        :returns: A new container of the same type (except set) with
                  items from values having had action applied.
        """
        iterable = values.__class__
        if iterable == set:
            # NOTE(danms): A set can't have an unhashable value inside,
            # such as a dict. Convert sets to tuples, which is fine, since
            # we can't send them over RPC anyway.
            iterable = tuple
        return iterable([action_fn(context, value) for value in values])

    def serialize_entity(self, context, entity):
        if isinstance(entity, (tuple, list, set)):
            entity = self._process_iterable(context, self.serialize_entity,
                                            entity)
        elif (hasattr(entity, 'obj_to_primitive') and
              callable(entity.obj_to_primitive)):
            entity = entity.obj_to_primitive()
        return entity

    def deserialize_entity(self, context, entity):
        if isinstance(entity, dict) and 'rack_object.name' in entity:
            entity = self._process_object(context, entity)
        elif isinstance(entity, (tuple, list, set)):
            entity = self._process_iterable(context, self.deserialize_entity,
                                            entity)
        return entity


def obj_to_primitive(obj):
    """Recursively turn an object into a python primitive.

    A RackObject becomes a dict, and anything that implements ObjectListBase
    becomes a list.
    """
    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(x) for x in obj]
    elif isinstance(obj, RackObject):
        result = {}
        for key, value in obj.iteritems():
            result[key] = obj_to_primitive(value)
        return result
    elif isinstance(obj, netaddr.IPAddress):
        return str(obj)
    elif isinstance(obj, netaddr.IPNetwork):
        return str(obj)
    else:
        return obj


def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
    """Construct an object list from a list of primitives.

    This calls item_cls._from_db_object() on each item of db_list, and
    adds the resulting object to list_obj.

    :param:context: Request context
    :param:list_obj: An ObjectListBase object
    :param:item_cls: The RackObject class of the objects within the list
    :param:db_list: The list of primitives to convert to objects
    :param:extra_args: Extra arguments to pass to _from_db_object()
    :returns: list_obj
    """
    list_obj.objects = []
    for db_item in db_list:
        item = item_cls._from_db_object(context, item_cls(), db_item,
                                        **extra_args)
        list_obj.objects.append(item)
    list_obj._context = context
    list_obj.obj_reset_changes()
    return list_obj
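
Nothing in this module pins down what a field object is beyond the
coerce()/to_primitive()/from_primitive() triad used above; the real field
classes live elsewhere in the tree. A self-contained sketch with a
deliberately simple stand-in field type (StringField and Process are
hypothetical, written only to show the registration and round-trip
machinery):

    from rack import object as rack_object

    class StringField(object):
        """Toy field; the project's real field classes live elsewhere."""
        def coerce(self, obj, name, value):
            return str(value)

        def to_primitive(self, obj, name, value):
            return value

        def from_primitive(self, obj, name, value):
            return value

    class Process(rack_object.RackObject):
        VERSION = '1.0'
        fields = {'pid': StringField()}

    proc = Process(pid='pid-0001')
    assert proc.obj_what_changed() == set(['pid'])
    primitive = proc.obj_to_primitive()
    # {'rack_object.name': 'Process', 'rack_object.namespace': 'rack',
    #  'rack_object.version': '1.0', 'rack_object.data': {'pid': 'pid-0001'},
    #  'rack_object.changes': ['pid']}
    clone = rack_object.RackObject.obj_from_primitive(primitive)
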
0
rack/openstack/__init__.py
Normal file
13
rack/openstack/common/README
Normal file
@@ -0,0 +1,13 @@
openstack-common
----------------

A number of modules from openstack-common are imported into this project.

These modules are "incubating" in openstack-common and are kept in sync
with the help of openstack-common's update.py script. See:

  https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator

The copy of the code should never be directly modified here. Please
always update openstack-common first and then run the script to copy
the changes across.
2
rack/openstack/common/__init__.py
Normal file
@@ -0,0 +1,2 @@
import six
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
63
rack/openstack/common/cliutils.py
Normal file
@@ -0,0 +1,63 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect


class MissingArgs(Exception):

    def __init__(self, missing):
        self.missing = missing

    def __str__(self):
        if len(self.missing) == 1:
            return "An argument is missing"
        else:
            return ("%(num)d arguments are missing" %
                    dict(num=len(self.missing)))


def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: An argument is missing
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: 2 arguments are missing

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    """
    argspec = inspect.getargspec(fn)

    num_defaults = len(argspec.defaults or [])
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        return getattr(method, 'im_self', None) is not None

    if isbound(fn):
        required_args.pop(0)

    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
0
rack/openstack/common/config/__init__.py
Normal file
302
rack/openstack/common/config/generator.py
Normal file
@@ -0,0 +1,302 @@
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Extracts OpenStack config option info from module(s)."""

from __future__ import print_function

import argparse
import imp
import os
import re
import socket
import sys
import textwrap

from oslo.config import cfg
import six
import stevedore.named

from rack.openstack.common import gettextutils
from rack.openstack.common import importutils

gettextutils.install('rack')

STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"

OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    DICTOPT: 'dict value',
    MULTISTROPT: 'multi valued',
}

OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT, DICTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
WORDWRAP_WIDTH = 60


def generate(argv):
    parser = argparse.ArgumentParser(
        description='generate sample configuration file',
    )
    parser.add_argument('-m', dest='modules', action='append')
    parser.add_argument('-l', dest='libraries', action='append')
    parser.add_argument('srcfiles', nargs='*')
    parsed_args = parser.parse_args(argv)

    mods_by_pkg = dict()
    for filepath in parsed_args.srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    if parsed_args.modules:
        for module_name in parsed_args.modules:
            module = _import_module(module_name)
            if module:
                for group, opts in _list_opts(module):
                    opts_by_group.setdefault(group, []).append((module_name,
                                                                opts))

    # Look for entry points defined in libraries (or applications) for
    # option discovery, and include their return values in the output.
    #
    # Each entry point should be a function returning an iterable
    # of pairs with the group name (or None for the default group)
    # and the list of Opt instances for that group.
    if parsed_args.libraries:
        loader = stevedore.named.NamedExtensionManager(
            'oslo.config.opts',
            names=list(set(parsed_args.libraries)),
            invoke_on_load=False,
        )
        for ext in loader:
            for group, opts in ext.plugin():
                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
                opt_list.append((ext.name, opts))

    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]

            mod_obj = _import_module(mod_str)
            if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)

            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group in sorted(opts_by_group.keys()):
        print_group_opts(group, opts_by_group[group])


def _import_module(mod_str):
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except Exception as e:
        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
        return None


def _is_in_group(opt, group):
    "Check if opt is in group."
    for value in group._opts.values():
        # NOTE(llu): Temporary workaround for bug #1262148, wait until
        # newly released oslo.config support '==' operator.
        if not(value['opt'] != opt):
            return True
    return False


def _guess_groups(opt, mod_obj):
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for value in cfg.CONF.values():
        if isinstance(value, cfg.CONF.GroupAttr):
            if _is_in_group(opt, value._group):
                return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )


def _list_opts(obj):
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)

    ret = {}
    for opt in opts:
        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()


def print_group_opts(group, opts_by_module):
    print("[%s]" % group)
    print('')
    for mod, opts in opts_by_module:
        print('#')
        print('# Options defined in %s' % mod)
        print('#')
        print('')
        for opt in opts:
            _print_opt(opt)
        print('')


def _get_my_ip():
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return None


def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
    elif value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in value:
        return value.replace(BASEDIR, '')
    elif value == _get_my_ip():
        return '10.0.0.1'
    elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
        return 'rack'
    elif value.strip() != value:
        return '"%s"' % value
    return value


def _print_opt(opt):
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help = u'%s (%s)' % (opt_help,
                             OPT_TYPES[opt_type])
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    if opt.deprecated_opts:
        for deprecated_opt in opt.deprecated_opts:
            if deprecated_opt.name:
                deprecated_group = (deprecated_opt.group if
                                    deprecated_opt.group else "DEFAULT")
                print('# Deprecated group/name - [%s]/%s' %
                      (deprecated_group,
                       deprecated_opt.name))
    try:
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, six.string_types))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                          opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == DICTOPT:
            assert(isinstance(opt_default, dict))
            opt_default_strlist = [str(key) + ':' + str(value)
                                   for (key, value) in opt_default.items()]
            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)


def main():
    generate(sys.argv[1:])

if __name__ == '__main__':
    main()
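
The generator is normally driven from a wrapper script that lists every
module of the project, but generate() accepts the same arguments
programmatically. An illustrative invocation (the module name is just an
example):

    from rack.openstack.common.config import generator

    # Equivalent to:
    #   python -m rack.openstack.common.config.generator -m rack.netconf
    generator.generate(['-m', 'rack.netconf'])
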
83
rack/openstack/common/context.py
Normal file
@@ -0,0 +1,83 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools

from rack.openstack.common import uuidutils


def generate_request_id():
    return 'req-%s' % uuidutils.generate_uuid()


class RequestContext(object):

    """Helper class to represent useful information about a request context.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        return {'user': self.user,
                'tenant': self.tenant,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id}


def get_admin_context(show_deleted=False):
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None
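
to_dict() and the keyword-only constructor are symmetric, which makes it
cheap to ship a context across a queue and rebuild it on the other side. A
small illustrative round trip:

    from rack.openstack.common import context

    ctxt = context.get_admin_context()
    payload = ctxt.to_dict()   # includes a generated 'req-...' request_id
    restored = context.RequestContext(**payload)
    assert restored.request_id == ctxt.request_id
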
0
rack/openstack/common/db/__init__.py
Normal file
162
rack/openstack/common/db/api.py
Normal file
@@ -0,0 +1,162 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multiple DB API backend support.

A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""

import functools
import logging
import threading
import time

from rack.openstack.common.db import exception
from rack.openstack.common.gettextutils import _LE
from rack.openstack.common import importutils


LOG = logging.getLogger(__name__)


def safe_for_db_retry(f):
    """Enable db-retry for decorated function, if config option enabled."""
    f.__dict__['enable_retry'] = True
    return f


class wrap_db_retry(object):
    """Retry db.api methods if DBConnectionError() is raised.

    Retry decorated db.api methods. If `use_db_reconnect` is enabled
    in config, this decorator will be applied to all db.api functions
    marked with the @safe_for_db_retry decorator.
    The decorator catches DBConnectionError() and retries the function
    in a loop until it succeeds, or until the maximum retry count is
    reached.
    """

    def __init__(self, retry_interval, max_retries, inc_retry_interval,
                 max_retry_interval):
        super(wrap_db_retry, self).__init__()

        self.retry_interval = retry_interval
        self.max_retries = max_retries
        self.inc_retry_interval = inc_retry_interval
        self.max_retry_interval = max_retry_interval

    def __call__(self, f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            next_interval = self.retry_interval
            remaining = self.max_retries

            while True:
                try:
                    return f(*args, **kwargs)
                except exception.DBConnectionError as e:
                    if remaining == 0:
                        LOG.exception(_LE('DB exceeded retry limit.'))
                        raise exception.DBError(e)
                    if remaining != -1:
                        remaining -= 1
                        LOG.exception(_LE('DB connection error.'))
                    # NOTE(vsergeyev): We are using patched time module, so
                    #                  this effectively yields the execution
                    #                  context to another green thread.
                    time.sleep(next_interval)
                    if self.inc_retry_interval:
                        next_interval = min(
                            next_interval * 2,
                            self.max_retry_interval
                        )
        return wrapper


class DBAPI(object):
    def __init__(self, backend_name, backend_mapping=None, lazy=False,
                 **kwargs):
        """Initialize the chosen DB API backend.

        :param backend_name: name of the backend to load
        :type backend_name: str

        :param backend_mapping: backend name -> module/class to load mapping
        :type backend_mapping: dict

        :param lazy: load the DB backend lazily on the first DB API method call
        :type lazy: bool

        Keyword arguments:

        :keyword use_db_reconnect: retry DB transactions on disconnect or not
        :type use_db_reconnect: bool

        :keyword retry_interval: seconds between transaction retries
        :type retry_interval: int

        :keyword inc_retry_interval: increase retry interval or not
        :type inc_retry_interval: bool

        :keyword max_retry_interval: max interval value between retries
        :type max_retry_interval: int

        :keyword max_retries: max number of retries before an error is raised
        :type max_retries: int

        """

        self._backend = None
        self._backend_name = backend_name
        self._backend_mapping = backend_mapping or {}
        self._lock = threading.Lock()

        if not lazy:
            self._load_backend()

        self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
        self.retry_interval = kwargs.get('retry_interval', 1)
        self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
        self.max_retry_interval = kwargs.get('max_retry_interval', 10)
        self.max_retries = kwargs.get('max_retries', 20)

    def _load_backend(self):
        with self._lock:
            if not self._backend:
                # Import the untranslated name if we don't have a mapping
                backend_path = self._backend_mapping.get(self._backend_name,
                                                         self._backend_name)
                backend_mod = importutils.import_module(backend_path)
                self._backend = backend_mod.get_backend()

    def __getattr__(self, key):
        if not self._backend:
            self._load_backend()

        attr = getattr(self._backend, key)
        if not hasattr(attr, '__call__'):
            return attr
        # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
        #                  DB API methods, decorated with @safe_for_db_retry
        #                  on disconnect.
        if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
            attr = wrap_db_retry(
                retry_interval=self.retry_interval,
                max_retries=self.max_retries,
                inc_retry_interval=self.inc_retry_interval,
                max_retry_interval=self.max_retry_interval)(attr)

        return attr
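
A project's own db/api.py typically instantiates this class once at import
time and proxies all calls through it. A minimal sketch; the backend path
and option values below are illustrative:

    from rack.openstack.common.db import api as db_api

    _BACKEND_MAPPING = {'sqlalchemy': 'rack.db.sqlalchemy.api'}

    IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING,
                        lazy=True, use_db_reconnect=True, max_retries=5)

    # Attribute access proxies to the backend module; with use_db_reconnect
    # enabled, any backend function marked @safe_for_db_retry is retried on
    # DBConnectionError, e.g.:
    #   IMPL.some_backend_function(values)
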
56
rack/openstack/common/db/exception.py
Normal file
@@ -0,0 +1,56 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

import six

from rack.openstack.common.gettextutils import _


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(six.text_type(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific exception."""
    def __init__(self, columns=[], inner_exception=None):
        self.columns = columns
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


class DbMigrationError(DBError):
    """Wraps migration specific exception."""
    def __init__(self, message=None):
        super(DbMigrationError, self).__init__(message)


class DBConnectionError(DBError):
    """Wraps connection specific exception."""
    pass
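A hedged sketch of how callers branch on these exceptions without knowing which backend raised them; the function and model names are made up for illustration:

.. code:: python

    from rack.openstack.common.db import exception as db_exc

    def register_user(session, user):
        try:
            user.save(session)
        except db_exc.DBDuplicateEntry as e:
            # e.columns lists the offending columns when the backend's
            # error message can be parsed, e.g. ['name'].
            raise ValueError('user already exists: %s' % e.columns)
        except db_exc.DBConnectionError:
            # Connection-level failures; the caller may retry.
            raise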
168
rack/openstack/common/db/options.py
Normal file
@@ -0,0 +1,168 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo.config import cfg


database_opts = [
    cfg.StrOpt('sqlite_db',
               deprecated_group='DEFAULT',
               default='rack.sqlite',
               help='The file name to use with SQLite'),
    cfg.BoolOpt('sqlite_synchronous',
                deprecated_group='DEFAULT',
                default=True,
                help='If True, SQLite uses synchronous mode'),
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
    cfg.StrOpt('connection',
               help='The SQLAlchemy connection string used to connect to the '
                    'database',
               secret=True,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_connection',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('connection',
                                                  group='sql'), ]),
    cfg.StrOpt('mysql_sql_mode',
               help='The SQL mode to be used for MySQL sessions '
                    '(default is empty, meaning do not override '
                    'any server-side SQL mode setting)'),
    cfg.IntOpt('idle_timeout',
               default=3600,
               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('idle_timeout',
                                                  group='sql')],
               help='Timeout before idle sql connections are reaped'),
    cfg.IntOpt('min_pool_size',
               default=1,
               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DATABASE')],
               help='Minimum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_pool_size',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DATABASE')],
               help='Maximum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_retries',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DATABASE')],
               help='Maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('retry_interval',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('reconnect_interval',
                                                  group='DATABASE')],
               help='Interval between retries of opening a sql connection'),
    cfg.IntOpt('max_overflow',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
                                                  group='DATABASE')],
               help='If set, use this value for max_overflow with sqlalchemy'),
    cfg.IntOpt('connection_debug',
               default=0,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
                                                  group='DEFAULT')],
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('connection_trace',
                default=False,
                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
                                                   group='DEFAULT')],
                help='Add python stack traces to SQL as comment strings'),
    cfg.IntOpt('pool_timeout',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
                                                  group='DATABASE')],
               help='If set, use this value for pool_timeout with sqlalchemy'),
    cfg.BoolOpt('use_db_reconnect',
                default=False,
                help='Enable the experimental use of database reconnect '
                     'on connection lost'),
    cfg.IntOpt('db_retry_interval',
               default=1,
               help='seconds between db connection retries'),
    cfg.BoolOpt('db_inc_retry_interval',
                default=True,
                help='Whether to increase interval between db connection '
                     'retries, up to db_max_retry_interval'),
    cfg.IntOpt('db_max_retry_interval',
               default=10,
               help='max seconds between db connection retries, if '
                    'db_inc_retry_interval is enabled'),
    cfg.IntOpt('db_max_retries',
               default=20,
               help='maximum db connection retries before error is raised. '
                    '(setting -1 implies an infinite retry count)'),
]

CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')


def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
                 max_overflow=None, pool_timeout=None):
    """Set defaults for configuration variables."""
    cfg.set_defaults(database_opts,
                     connection=sql_connection,
                     sqlite_db=sqlite_db)
    # Update the QueuePool defaults
    if max_pool_size is not None:
        cfg.set_defaults(database_opts,
                         max_pool_size=max_pool_size)
    if max_overflow is not None:
        cfg.set_defaults(database_opts,
                         max_overflow=max_overflow)
    if pool_timeout is not None:
        cfg.set_defaults(database_opts,
                         pool_timeout=pool_timeout)


def list_opts():
    """Returns a list of oslo.config options available in the library.

    The returned list includes all oslo.config options which may be registered
    at runtime by the library.

    Each element of the list is a tuple. The first element is the name of the
    group under which the list of elements in the second element will be
    registered. A group name of None corresponds to the [DEFAULT] group in
    config files.

    The purpose of this is to allow tools like the Oslo sample config file
    generator to discover the options exposed to users by this library.

    :returns: a list of (group_name, opts) tuples
    """
    return [('database', copy.deepcopy(database_opts))]
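As a sketch of how a service consumes these options; the connection URI and pool size below are example values, not defaults shipped in this commit:

.. code:: python

    from oslo.config import cfg

    from rack.openstack.common.db import options

    # Give the service its own defaults before config files are parsed.
    options.set_defaults(sql_connection='mysql://rack:secret@localhost/rack',
                         sqlite_db='rack.sqlite',
                         max_pool_size=10)

    conf = cfg.CONF
    # After conf(...) parses the config files, the effective values live
    # in the [database] group registered above, e.g.:
    # engine_uri = conf.database.connection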
0
rack/openstack/common/db/sqlalchemy/__init__.py
Normal file
268
rack/openstack/common/db/sqlalchemy/migration.py
Normal file
@@ -0,0 +1,268 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Based on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import re

from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint

from rack.openstack.common.db import exception
from rack.openstack.common.gettextutils import _


def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    """

    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]


def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    """

    table_name = self.preparer.format_table(table)

    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()

    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])

    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()

    insertion_string = self._modify_table(table, column, delta)

    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()


def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error

    """

    self.recreate_table(p[0].table, omit_uniques=[p[0].name])


def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

    - information about unique constraints of a table
      is not retrieved. So if you have a table with one
      unique constraint and a migration adding another one
      you will end up with a table that has only the
      latter unique constraint, and the former will be lost

    - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.

    """

    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints

    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)


def db_sync(engine, abs_path, version=None, init_version=0):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    _db_schema_sanity_check(engine)
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository,
                                        version)


def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine:  SQLAlchemy engine instance for a given database

    """

    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
                                                        engine.url.database)]
        if len(table_names) > 0:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))


def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine:  SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))


def db_version_control(engine, abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param engine:  SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    versioning_api.version_control(engine, repository, version)
    return version


def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    """
    if not os.path.exists(abs_path):
        raise exception.DbMigrationError("Path %s not found" % abs_path)
    return Repository(abs_path)
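A minimal sketch of driving these helpers, assuming a hypothetical sqlalchemy-migrate repository at `migrate_repo` next to the caller; the path and connection URI are assumptions for illustration:

.. code:: python

    import os

    import sqlalchemy

    from rack.openstack.common.db.sqlalchemy import migration

    # Hypothetical repository location; any sqlalchemy-migrate repo works.
    REPO = os.path.join(os.path.dirname(__file__), 'migrate_repo')

    engine = sqlalchemy.create_engine('sqlite:///rack.sqlite')
    migration.patch_migrate()        # apply the SQLite monkey-patches first
    print(migration.db_version(engine, REPO, init_version=0))
    migration.db_sync(engine, REPO)  # upgrade to the latest version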
115
rack/openstack/common/db/sqlalchemy/models.py
Normal file
@@ -0,0 +1,115 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""

import six

from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper

from rack.openstack.common import timeutils


class ModelBase(object):
    """Base class for models."""
    __table_initialized__ = False

    def save(self, session):
        """Save this object."""

        # NOTE(boris-42): This part of the code should look like:
        #                       session.add(self)
        #                       session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create the
        #                 transaction explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    @property
    def _extra_keys(self):
        """Specifies custom fields

        Subclasses can override this property to return a list
        of custom fields that should be included in their dict
        representation.

        For reference check tests/db/sqlalchemy/test_models.py
        """
        return []

    def __iter__(self):
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be
        # looked up, beyond the actual db columns. An example would be the
        # 'name' property for an Instance.
        columns.extend(self._extra_keys)
        self._i = iter(columns)
        return self

    def next(self):
        n = six.advance_iterator(self._i)
        return n, getattr(self, n)

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in six.iteritems(values):
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                      if not k[0] == '_'])
        local.update(joined)
        return six.iteritems(local)


class TimestampMixin(object):
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())


class SoftDeleteMixin(object):
    deleted_at = Column(DateTime)
    deleted = Column(Integer, default=0)

    def soft_delete(self, session):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
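For example, a concrete model combining these mixins might look like the following sketch; the `Service` model and `services` table are illustrative only and not part of this commit:

.. code:: python

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    from rack.openstack.common.db.sqlalchemy import models

    BASE = declarative_base()


    class Service(BASE, models.SoftDeleteMixin, models.TimestampMixin,
                  models.ModelBase):
        """Illustrative model; not shipped in this commit."""
        __tablename__ = 'services'

        id = Column(Integer, primary_key=True)
        host = Column(String(255))

    # ModelBase makes instances behave like dicts:
    #   svc = Service()
    #   svc['host'] = 'node-1'            # __setitem__
    #   svc.update({'host': 'node-2'})    # dict-style bulk update
    #   svc.soft_delete(session)          # sets deleted=id and deleted_at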
187
rack/openstack/common/db/sqlalchemy/provision.py
Normal file
@@ -0,0 +1,187 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provision test environment for specific DB backends"""

import argparse
import os
import random
import string

from six import moves
import sqlalchemy

from rack.openstack.common.db import exception as exc


SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')


def _gen_credentials(*names):
    """Generate credentials."""
    auth_dict = {}
    for name in names:
        val = ''.join(random.choice(string.ascii_lowercase)
                      for i in moves.range(10))
        auth_dict[name] = val
    return auth_dict


def _get_engine(uri=SQL_CONNECTION):
    """Engine creation

    By default the uri is SQL_CONNECTION, which carries the admin
    credentials. Call the function without arguments to get an admin
    connection. An admin connection is required to create a temporary user
    and database for each particular test. Otherwise pass an existing
    connection URI to recreate a connection to the temporary database.
    """
    return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)


def _execute_sql(engine, sql, driver):
    """Initialize connection, execute sql query and close it."""
    try:
        with engine.connect() as conn:
            if driver == 'postgresql':
                conn.connection.set_isolation_level(0)
            for s in sql:
                conn.execute(s)
    except sqlalchemy.exc.OperationalError:
        msg = ('%s does not match database admin '
               'credentials or database does not exist.')
        raise exc.DBConnectionError(msg % SQL_CONNECTION)


def create_database(engine):
    """Provide temporary user and database for each particular test."""
    driver = engine.name

    auth = _gen_credentials('database', 'user', 'passwd')

    sqls = {
        'mysql': [
            "drop database if exists %(database)s;",
            "grant all on %(database)s.* to '%(user)s'@'localhost'"
            " identified by '%(passwd)s';",
            "create database %(database)s;",
        ],
        'postgresql': [
            "drop database if exists %(database)s;",
            "drop user if exists %(user)s;",
            "create user %(user)s with password '%(passwd)s';",
            "create database %(database)s owner %(user)s;",
        ]
    }

    if driver == 'sqlite':
        return 'sqlite:////tmp/%s' % auth['database']

    try:
        sql_rows = sqls[driver]
    except KeyError:
        raise ValueError('Unsupported RDBMS %s' % driver)
    sql_query = map(lambda x: x % auth, sql_rows)

    _execute_sql(engine, sql_query, driver)

    params = auth.copy()
    params['backend'] = driver
    return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params


def drop_database(engine, current_uri):
    """Drop temporary database and user after each particular test."""
    engine = _get_engine(current_uri)
    admin_engine = _get_engine()
    driver = engine.name
    auth = {'database': engine.url.database, 'user': engine.url.username}

    if driver == 'sqlite':
        try:
            os.remove(auth['database'])
        except OSError:
            pass
        return

    sqls = {
        'mysql': [
            "drop database if exists %(database)s;",
            "drop user '%(user)s'@'localhost';",
        ],
        'postgresql': [
            "drop database if exists %(database)s;",
            "drop user if exists %(user)s;",
        ]
    }

    try:
        sql_rows = sqls[driver]
    except KeyError:
        raise ValueError('Unsupported RDBMS %s' % driver)
    sql_query = map(lambda x: x % auth, sql_rows)

    _execute_sql(admin_engine, sql_query, driver)


def main():
    """Controller to handle commands

    ::create: Create test user and database with random names.
    ::drop: Drop user and database created by previous command.
    """
    parser = argparse.ArgumentParser(
        description='Controller to handle database creation and dropping'
                    ' commands.',
        epilog='Under normal circumstances is not used directly.'
               ' Used in .testr.conf to automate test database creation'
               ' and dropping processes.')
    subparsers = parser.add_subparsers(
        help='Subcommands to manipulate temporary test databases.')

    create = subparsers.add_parser(
        'create',
        help='Create temporary test '
             'databases and users.')
    create.set_defaults(which='create')
    create.add_argument(
        'instances_count',
        type=int,
        help='Number of databases to create.')

    drop = subparsers.add_parser(
        'drop',
        help='Drop temporary test databases and users.')
    drop.set_defaults(which='drop')
    drop.add_argument(
        'instances',
        nargs='+',
        help='List of databases uri to be dropped.')

    args = parser.parse_args()

    engine = _get_engine()
    which = args.which

    if which == "create":
        for i in range(int(args.instances_count)):
            print(create_database(engine))
    elif which == "drop":
        for db in args.instances:
            drop_database(engine, db)


if __name__ == "__main__":
    main()
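A sketch of the intended flow in a test fixture: create a throwaway database per test and drop it afterwards. This reaches into the module's private `_get_engine` helper purely for illustration; the admin URI is taken from OS_TEST_DBAPI_ADMIN_CONNECTION, as above:

.. code:: python

    from rack.openstack.common.db.sqlalchemy import provision

    admin_engine = provision._get_engine()   # admin credentials
    test_uri = provision.create_database(admin_engine)
    try:
        # run the test suite against test_uri ...
        pass
    finally:
        provision.drop_database(admin_engine, test_uri)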
860
rack/openstack/common/db/sqlalchemy/session.py
Normal file
860
rack/openstack/common/db/sqlalchemy/session.py
Normal file
@@ -0,0 +1,860 @@
|
|||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Session Handling for SQLAlchemy backend.
|
||||||
|
|
||||||
|
Recommended ways to use sessions within this framework:
|
||||||
|
|
||||||
|
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
|
||||||
|
`model_query()` will implicitly use a session when called without one
|
||||||
|
supplied. This is the ideal situation because it will allow queries
|
||||||
|
to be automatically retried if the database connection is interrupted.
|
||||||
|
|
||||||
|
.. note:: Automatic retry will be enabled in a future patch.
|
||||||
|
|
||||||
|
It is generally fine to issue several queries in a row like this. Even though
|
||||||
|
they may be run in separate transactions and/or separate sessions, each one
|
||||||
|
will see the data from the prior calls. If needed, undo- or rollback-like
|
||||||
|
functionality should be handled at a logical level. For an example, look at
|
||||||
|
the code around quotas and `reservation_rollback()`.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def get_foo(context, foo):
|
||||||
|
return (model_query(context, models.Foo).
|
||||||
|
filter_by(foo=foo).
|
||||||
|
first())
|
||||||
|
|
||||||
|
def update_foo(context, id, newfoo):
|
||||||
|
(model_query(context, models.Foo).
|
||||||
|
filter_by(id=id).
|
||||||
|
update({'foo': newfoo}))
|
||||||
|
|
||||||
|
def create_foo(context, values):
|
||||||
|
foo_ref = models.Foo()
|
||||||
|
foo_ref.update(values)
|
||||||
|
foo_ref.save()
|
||||||
|
return foo_ref
|
||||||
|
|
||||||
|
|
||||||
|
* Within the scope of a single method, keep all the reads and writes within
|
||||||
|
the context managed by a single session. In this way, the session's
|
||||||
|
`__exit__` handler will take care of calling `flush()` and `commit()` for
|
||||||
|
you. If using this approach, you should not explicitly call `flush()` or
|
||||||
|
`commit()`. Any error within the context of the session will cause the
|
||||||
|
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
|
||||||
|
raised in `session`'s `__exit__` handler, and any try/except within the
|
||||||
|
context managed by `session` will not be triggered. And catching other
|
||||||
|
non-database errors in the session will not trigger the ROLLBACK, so
|
||||||
|
exception handlers should always be outside the session, unless the
|
||||||
|
developer wants to do a partial commit on purpose. If the connection is
|
||||||
|
dropped before this is possible, the database will implicitly roll back the
|
||||||
|
transaction.
|
||||||
|
|
||||||
|
.. note:: Statements in the session scope will not be automatically retried.
|
||||||
|
|
||||||
|
If you create models within the session, they need to be added, but you
|
||||||
|
do not need to call `model.save()`:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def create_many_foo(context, foos):
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin():
|
||||||
|
for foo in foos:
|
||||||
|
foo_ref = models.Foo()
|
||||||
|
foo_ref.update(foo)
|
||||||
|
session.add(foo_ref)
|
||||||
|
|
||||||
|
def update_bar(context, foo_id, newbar):
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin():
|
||||||
|
foo_ref = (model_query(context, models.Foo, session).
|
||||||
|
filter_by(id=foo_id).
|
||||||
|
first())
|
||||||
|
(model_query(context, models.Bar, session).
|
||||||
|
filter_by(id=foo_ref['bar_id']).
|
||||||
|
update({'bar': newbar}))
|
||||||
|
|
||||||
|
.. note:: `update_bar` is a trivially simple example of using
|
||||||
|
``with session.begin``. Whereas `create_many_foo` is a good example of
|
||||||
|
when a transaction is needed, it is always best to use as few queries as
|
||||||
|
possible.
|
||||||
|
|
||||||
|
The two queries in `update_bar` can be better expressed using a single query
|
||||||
|
which avoids the need for an explicit transaction. It can be expressed like
|
||||||
|
so:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def update_bar(context, foo_id, newbar):
|
||||||
|
subq = (model_query(context, models.Foo.id).
|
||||||
|
filter_by(id=foo_id).
|
||||||
|
limit(1).
|
||||||
|
subquery())
|
||||||
|
(model_query(context, models.Bar).
|
||||||
|
filter_by(id=subq.as_scalar()).
|
||||||
|
update({'bar': newbar}))
|
||||||
|
|
||||||
|
For reference, this emits approximately the following SQL statement:
|
||||||
|
|
||||||
|
.. code:: sql
|
||||||
|
|
||||||
|
UPDATE bar SET bar = ${newbar}
|
||||||
|
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||||
|
|
||||||
|
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
|
||||||
|
exception while using ``with session.begin``. Here create two duplicate
|
||||||
|
instances with same primary key, must catch the exception out of context
|
||||||
|
managed by a single session:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def create_duplicate_foo(context):
|
||||||
|
foo1 = models.Foo()
|
||||||
|
foo2 = models.Foo()
|
||||||
|
foo1.id = foo2.id = 1
|
||||||
|
session = sessionmaker()
|
||||||
|
try:
|
||||||
|
with session.begin():
|
||||||
|
session.add(foo1)
|
||||||
|
session.add(foo2)
|
||||||
|
except exception.DBDuplicateEntry as e:
|
||||||
|
handle_error(e)
|
||||||
|
|
||||||
|
* Passing an active session between methods. Sessions should only be passed
|
||||||
|
to private methods. The private method must use a subtransaction; otherwise
|
||||||
|
SQLAlchemy will throw an error when you call `session.begin()` on an existing
|
||||||
|
transaction. Public methods should not accept a session parameter and should
|
||||||
|
not be involved in sessions within the caller's scope.
|
||||||
|
|
||||||
|
Note that this incurs more overhead in SQLAlchemy than the above means
|
||||||
|
due to nesting transactions, and it is not possible to implicitly retry
|
||||||
|
failed database operations when using this approach.
|
||||||
|
|
||||||
|
This also makes code somewhat more difficult to read and debug, because a
|
||||||
|
single database transaction spans more than one method. Error handling
|
||||||
|
becomes less clear in this situation. When this is needed for code clarity,
|
||||||
|
it should be clearly documented.
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def myfunc(foo):
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin():
|
||||||
|
# do some database things
|
||||||
|
bar = _private_func(foo, session)
|
||||||
|
return bar
|
||||||
|
|
||||||
|
def _private_func(foo, session=None):
|
||||||
|
if not session:
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin(subtransaction=True):
|
||||||
|
# do some other database things
|
||||||
|
return bar
|
||||||
|
|
||||||
|
|
||||||
|
There are some things which it is best to avoid:
|
||||||
|
|
||||||
|
* Don't keep a transaction open any longer than necessary.
|
||||||
|
|
||||||
|
This means that your ``with session.begin()`` block should be as short
|
||||||
|
as possible, while still containing all the related calls for that
|
||||||
|
transaction.
|
||||||
|
|
||||||
|
* Avoid ``with_lockmode('UPDATE')`` when possible.
|
||||||
|
|
||||||
|
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
|
||||||
|
any rows, it will take a gap-lock. This is a form of write-lock on the
|
||||||
|
"gap" where no rows exist, and prevents any other writes to that space.
|
||||||
|
This can effectively prevent any INSERT into a table by locking the gap
|
||||||
|
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
||||||
|
has an overly broad WHERE clause, or doesn't properly use an index.
|
||||||
|
|
||||||
|
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
||||||
|
number of rows matching a query, and if only one row is returned,
|
||||||
|
then issue the SELECT FOR UPDATE.
|
||||||
|
|
||||||
|
The better long-term solution is to use
|
||||||
|
``INSERT .. ON DUPLICATE KEY UPDATE``.
|
||||||
|
However, this can not be done until the "deleted" columns are removed and
|
||||||
|
proper UNIQUE constraints are added to the tables.
|
||||||
|
|
||||||
|
|
||||||
|
Enabling soft deletes:
|
||||||
|
|
||||||
|
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
|
||||||
|
to your model class. For example:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
Efficient use of soft deletes:
|
||||||
|
|
||||||
|
* There are two possible ways to mark a record as deleted:
|
||||||
|
`model.soft_delete()` and `query.soft_delete()`.
|
||||||
|
|
||||||
|
The `model.soft_delete()` method works with a single already-fetched entry.
|
||||||
|
`query.soft_delete()` makes only one db request for all entries that
|
||||||
|
correspond to the query.
|
||||||
|
|
||||||
|
* In almost all cases you should use `query.soft_delete()`. Some examples:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def soft_delete_bar():
|
||||||
|
count = model_query(BarModel).find(some_condition).soft_delete()
|
||||||
|
if count == 0:
|
||||||
|
raise Exception("0 entries were soft deleted")
|
||||||
|
|
||||||
|
def complex_soft_delete_with_synchronization_bar(session=None):
|
||||||
|
if session is None:
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin(subtransactions=True):
|
||||||
|
count = (model_query(BarModel).
|
||||||
|
find(some_condition).
|
||||||
|
soft_delete(synchronize_session=True))
|
||||||
|
# Here synchronize_session is required, because we
|
||||||
|
# don't know what is going on in outer session.
|
||||||
|
if count == 0:
|
||||||
|
raise Exception("0 entries were soft deleted")
|
||||||
|
|
||||||
|
* There is only one situation where `model.soft_delete()` is appropriate: when
|
||||||
|
you fetch a single record, work with it, and mark it as deleted in the same
|
||||||
|
transaction.
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def soft_delete_bar_model():
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin():
|
||||||
|
bar_ref = model_query(BarModel).find(some_condition).first()
|
||||||
|
# Work with bar_ref
|
||||||
|
bar_ref.soft_delete(session=session)
|
||||||
|
|
||||||
|
However, if you need to work with all entries that correspond to query and
|
||||||
|
then soft delete them you should use the `query.soft_delete()` method:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def soft_delete_multi_models():
|
||||||
|
session = sessionmaker()
|
||||||
|
with session.begin():
|
||||||
|
query = (model_query(BarModel, session=session).
|
||||||
|
find(some_condition))
|
||||||
|
model_refs = query.all()
|
||||||
|
# Work with model_refs
|
||||||
|
query.soft_delete(synchronize_session=False)
|
||||||
|
# synchronize_session=False should be set if there is no outer
|
||||||
|
# session and these entries are not used after this.
|
||||||
|
|
||||||
|
When working with many rows, it is very important to use query.soft_delete,
|
||||||
|
which issues a single query. Using `model.soft_delete()`, as in the following
|
||||||
|
example, is very inefficient.
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
for bar_ref in bar_refs:
|
||||||
|
bar_ref.soft_delete(session=session)
|
||||||
|
# This will produce count(bar_refs) db requests.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
|
||||||
|
import six
|
||||||
|
from sqlalchemy import exc as sqla_exc
|
||||||
|
from sqlalchemy.interfaces import PoolListener
|
||||||
|
import sqlalchemy.orm
|
||||||
|
from sqlalchemy.pool import NullPool, StaticPool
|
||||||
|
from sqlalchemy.sql.expression import literal_column
|
||||||
|
|
||||||
|
from rack.openstack.common.db import exception
|
||||||
|
from rack.openstack.common.gettextutils import _LE, _LW, _LI
|
||||||
|
from rack.openstack.common import timeutils
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class SqliteForeignKeysListener(PoolListener):
|
||||||
|
"""Ensures that the foreign key constraints are enforced in SQLite.
|
||||||
|
|
||||||
|
The foreign key constraints are disabled by default in SQLite,
|
||||||
|
so the foreign key constraints will be enabled here for every
|
||||||
|
database connection
|
||||||
|
"""
|
||||||
|
def connect(self, dbapi_con, con_record):
|
||||||
|
dbapi_con.execute('pragma foreign_keys=ON')
|
||||||
|
|
||||||
|
|
||||||
|
# note(boris-42): In current versions of DB backends unique constraint
|
||||||
|
# violation messages follow the structure:
|
||||||
|
#
|
||||||
|
# sqlite:
|
||||||
|
# 1 column - (IntegrityError) column c1 is not unique
|
||||||
|
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
||||||
|
#
|
||||||
|
# sqlite since 3.7.16:
|
||||||
|
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
|
||||||
|
#
|
||||||
|
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
|
||||||
|
#
|
||||||
|
# postgres:
|
||||||
|
# 1 column - (IntegrityError) duplicate key value violates unique
|
||||||
|
# constraint "users_c1_key"
|
||||||
|
# N columns - (IntegrityError) duplicate key value violates unique
|
||||||
|
# constraint "name_of_our_constraint"
|
||||||
|
#
|
||||||
|
# mysql:
|
||||||
|
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
||||||
|
# 'c1'")
|
||||||
|
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
||||||
|
# with -' for key 'name_of_our_constraint'")
|
||||||
|
#
|
||||||
|
# ibm_db_sa:
|
||||||
|
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
|
||||||
|
# statement, UPDATE statement, or foreign key update caused by a
|
||||||
|
# DELETE statement are not valid because the primary key, unique
|
||||||
|
# constraint or unique index identified by "2" constrains table
|
||||||
|
# "NOVA.KEY_PAIRS" from having duplicate values for the index
|
||||||
|
# key.
|
||||||
|
_DUP_KEY_RE_DB = {
|
||||||
|
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
||||||
|
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
|
||||||
|
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
|
||||||
|
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
|
||||||
|
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
|
||||||
|
"""Raise exception if two entries are duplicated.
|
||||||
|
|
||||||
|
In this function will be raised DBDuplicateEntry exception if integrity
|
||||||
|
error wrap unique constraint violation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def get_columns_from_uniq_cons_or_name(columns):
|
||||||
|
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
|
||||||
|
# where `t` it is table name and columns `c1`, `c2`
|
||||||
|
# are in UniqueConstraint.
|
||||||
|
uniqbase = "uniq_"
|
||||||
|
if not columns.startswith(uniqbase):
|
||||||
|
if engine_name == "postgresql":
|
||||||
|
return [columns[columns.index("_") + 1:columns.rindex("_")]]
|
||||||
|
return [columns]
|
||||||
|
return columns[len(uniqbase):].split("0")[1:]
|
||||||
|
|
||||||
|
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
|
||||||
|
return
|
||||||
|
|
||||||
|
# FIXME(johannes): The usage of the .message attribute has been
|
||||||
|
# deprecated since Python 2.6. However, the exceptions raised by
|
||||||
|
# SQLAlchemy can differ when using unicode() and accessing .message.
|
||||||
|
# An audit across all three supported engines will be necessary to
|
||||||
|
# ensure there are no regressions.
|
||||||
|
for pattern in _DUP_KEY_RE_DB[engine_name]:
|
||||||
|
match = pattern.match(integrity_error.message)
|
||||||
|
if match:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
return
|
||||||
|
|
||||||
|
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
|
||||||
|
# columns so we have to omit that from the DBDuplicateEntry error.
|
||||||
|
columns = ''
|
||||||
|
|
||||||
|
if engine_name != 'ibm_db_sa':
|
||||||
|
columns = match.group(1)
|
||||||
|
|
||||||
|
if engine_name == "sqlite":
|
||||||
|
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
|
||||||
|
else:
|
||||||
|
columns = get_columns_from_uniq_cons_or_name(columns)
|
||||||
|
raise exception.DBDuplicateEntry(columns, integrity_error)
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE(comstud): In current versions of DB backends, Deadlock violation
|
||||||
|
# messages follow the structure:
|
||||||
|
#
|
||||||
|
# mysql:
|
||||||
|
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
|
||||||
|
# 'restarting transaction') <query_str> <query_args>
|
||||||
|
_DEADLOCK_RE_DB = {
|
||||||
|
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _raise_if_deadlock_error(operational_error, engine_name):
|
||||||
|
"""Raise exception on deadlock condition.
|
||||||
|
|
||||||
|
Raise DBDeadlock exception if OperationalError contains a Deadlock
|
||||||
|
condition.
|
||||||
|
"""
|
||||||
|
re = _DEADLOCK_RE_DB.get(engine_name)
|
||||||
|
if re is None:
|
||||||
|
return
|
||||||
|
# FIXME(johannes): The usage of the .message attribute has been
|
||||||
|
# deprecated since Python 2.6. However, the exceptions raised by
|
||||||
|
# SQLAlchemy can differ when using unicode() and accessing .message.
|
||||||
|
# An audit across all three supported engines will be necessary to
|
||||||
|
# ensure there are no regressions.
|
||||||
|
m = re.match(operational_error.message)
|
||||||
|
if not m:
|
||||||
|
return
|
||||||
|
raise exception.DBDeadlock(operational_error)
|
||||||
|
|
||||||
|
|
||||||
|
def _wrap_db_error(f):
|
||||||
|
#TODO(rpodolyaka): in a subsequent commit make this a class decorator to
|
||||||
|
# ensure it can only applied to Session subclasses instances (as we use
|
||||||
|
# Session instance bind attribute below)
|
||||||
|
|
||||||
|
@functools.wraps(f)
|
||||||
|
def _wrap(self, *args, **kwargs):
|
||||||
|
try:
|
||||||
|
return f(self, *args, **kwargs)
|
||||||
|
except UnicodeEncodeError:
|
||||||
|
raise exception.DBInvalidUnicodeParameter()
|
||||||
|
except sqla_exc.OperationalError as e:
|
||||||
|
_raise_if_db_connection_lost(e, self.bind)
|
||||||
|
_raise_if_deadlock_error(e, self.bind.dialect.name)
|
||||||
|
# NOTE(comstud): A lot of code is checking for OperationalError
|
||||||
|
# so let's not wrap it for now.
|
||||||
|
raise
|
||||||
|
# note(boris-42): We should catch unique constraint violation and
|
||||||
|
# wrap it by our own DBDuplicateEntry exception. Unique constraint
|
||||||
|
# violation is wrapped by IntegrityError.
|
||||||
|
except sqla_exc.IntegrityError as e:
|
||||||
|
# note(boris-42): SqlAlchemy doesn't unify errors from different
|
||||||
|
# DBs so we must do this. Also in some tables (for example
|
||||||
|
# instance_types) there are more than one unique constraint. This
|
||||||
|
# means we should get names of columns, which values violate
|
||||||
|
# unique constraint, from error message.
|
||||||
|
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
|
||||||
|
raise exception.DBError(e)
|
||||||
|
except Exception as e:
|
||||||
|
LOG.exception(_LE('DB exception wrapped.'))
|
||||||
|
raise exception.DBError(e)
|
||||||
|
return _wrap
|
||||||
|
|
||||||
|
|
||||||
|
def _synchronous_switch_listener(dbapi_conn, connection_rec):
|
||||||
|
"""Switch sqlite connections to non-synchronous mode."""
|
||||||
|
dbapi_conn.execute("PRAGMA synchronous = OFF")
|
||||||
|
|
||||||
|
|
||||||
|
def _add_regexp_listener(dbapi_con, con_record):
|
||||||
|
"""Add REGEXP function to sqlite connections."""
|
||||||
|
|
||||||
|
def regexp(expr, item):
|
||||||
|
reg = re.compile(expr)
|
||||||
|
return reg.search(six.text_type(item)) is not None
|
||||||
|
dbapi_con.create_function('regexp', 2, regexp)
|
||||||
|
|
||||||
|
|
||||||
|
def _thread_yield(dbapi_con, con_record):
|
||||||
|
"""Ensure other greenthreads get a chance to be executed.
|
||||||
|
|
||||||
|
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
|
||||||
|
execute instead of time.sleep(0).
|
||||||
|
Force a context switch. With common database backends (eg MySQLdb and
|
||||||
|
sqlite), there is no implicit yield caused by network I/O since they are
|
||||||
|
implemented by C libraries that eventlet cannot monkey patch.
|
||||||
|
"""
|
||||||
|
time.sleep(0)
|
||||||
|
|
||||||
|
|
||||||
|
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
|
||||||
|
"""Ensures that MySQL and DB2 connections are alive.
|
||||||
|
|
||||||
|
Borrowed from:
|
||||||
|
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
|
||||||
|
"""
|
||||||
|
cursor = dbapi_conn.cursor()
|
||||||
|
try:
|
||||||
|
ping_sql = 'select 1'
|
||||||
|
if engine.name == 'ibm_db_sa':
|
||||||
|
# DB2 requires a table expression
|
||||||
|
ping_sql = 'select 1 from (values (1)) AS t1'
|
||||||
|
cursor.execute(ping_sql)
|
||||||
|
except Exception as ex:
|
||||||
|
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
|
||||||
|
msg = _LW('Database server has gone away: %s') % ex
|
||||||
|
LOG.warning(msg)
|
||||||
|
raise sqla_exc.DisconnectionError(msg)
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
    """Set engine mode to 'traditional'.

    Required to prevent silent truncates at insert or update operations
    under MySQL. By default MySQL truncates an inserted string that is
    longer than its declared field with just a warning. That is fraught
    with data corruption.
    """
    _set_session_sql_mode(dbapi_con, connection_rec,
                          connection_proxy, 'TRADITIONAL')


def _set_session_sql_mode(dbapi_con, connection_rec,
                          connection_proxy, sql_mode=None):
    """Set the sql_mode session variable.

    MySQL supports several server modes. The default is None, but sessions
    may choose to enable server modes like TRADITIONAL, ANSI,
    several STRICT_* modes and others.

    Note: passing in '' (empty string) for sql_mode clears
    the SQL mode for the session, overriding a potentially set
    server default. Passing in None (the default) makes this
    a no-op, meaning if a server-side SQL mode is set, it still applies.
    """
    cursor = dbapi_con.cursor()
    if sql_mode is not None:
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])

    # Check against the real effective SQL mode. Even when unset by
    # our own config, the server may still be operating in a specific
    # SQL mode as set by the server configuration.
    cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
    row = cursor.fetchone()
    if row is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return
    realmode = row[1]
    LOG.info(_LI('MySQL server mode set to %s') % realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ('TRADITIONAL' in realmode.upper() or
            'STRICT_ALL_TABLES' in realmode.upper()):
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES")
                    % realmode)


def _is_db_connection_error(args):
    """Return True if error in connecting to db."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    #               to support Postgres and others.
    # For db2, the error code is -30081 since db2 is still not ready.
    conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
    for err_code in conn_err_codes:
        if args.find(err_code) != -1:
            return True
    return False


def _raise_if_db_connection_lost(error, engine):
    # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
    #                  requires connection and cursor in incoming parameters,
    #                  but we have no possibility to create a connection if
    #                  the DB is not available, so in such a case reconnect
    #                  fails. But is_disconnect() ignores these parameters,
    #                  so it makes sense to pass None to the function as a
    #                  placeholder instead of connection and cursor.
    if engine.dialect.is_disconnect(error, None, None):
        raise exception.DBConnectionError(error)


def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  mysql_traditional_mode=False, idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine."""

    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    logger = logging.getLogger('sqlalchemy.engine')

    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            if mysql_traditional_mode:
                mysql_sql_mode = 'TRADITIONAL'
            if mysql_sql_mode:
                mode_callback = functools.partial(_set_session_sql_mode,
                                                  sql_mode=mysql_sql_mode)
                sqlalchemy.event.listen(engine, 'checkout', mode_callback)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine


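# NOTE: an illustrative usage sketch, not wired into this module. The
# in-memory sqlite URL keeps it self-contained; any SQLAlchemy URL works.
def _example_create_engine_usage():
    """Sketch only; exercises the retry-aware engine factory above."""
    engine = create_engine('sqlite://',
                           sqlite_synchronous=False,
                           max_retries=3,
                           retry_interval=1)
    # At this point the engine has connected once, and the checkin/checkout
    # listeners (_thread_yield, plus _ping_listener on MySQL/DB2) are set.
    return engine.execute('SELECT 1').scalar()  # 1

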
class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)


class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)


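# NOTE: an illustrative usage sketch, not wired into this module. The model
# argument is hypothetical - any mapped class with `deleted`, `updated_at`
# and `deleted_at` columns would do.
def _example_session_usage(some_model):
    """Sketch only; combines create_engine(), get_maker() and soft_delete()."""
    engine = create_engine('sqlite://')
    session = get_maker(engine)()
    # Queries issued through this Session go through _wrap_db_error, so
    # backend-specific failures surface as the DB* exceptions above.
    query = session.query(some_model).filter_by(deleted=0)
    # soft_delete() marks the matching rows instead of removing them.
    return query.soft_delete()

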
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for filename, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if filename.endswith('session.py') and method == '_do_query':
                continue
            if filename.endswith('api.py') and method == 'wrapper':
                continue
            if filename.endswith('utils.py') and method == '_inner':
                continue
            if filename.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if filename.endswith('db/api.py'):
                continue
            # only trace inside rack
            index = filename.rfind('rack')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (filename[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)


class EngineFacade(object):
    """A helper class for removing global engine instances from rack.db.

    As a library, rack.db can't decide where to store/when to create engine
    and sessionmaker instances, so this must be left for a target application.

    On the other hand, in order to simplify the adoption of rack.db changes,
    we'll provide a helper class, which creates engine and sessionmaker
    on its instantiation and provides get_engine()/get_session() methods
    that are compatible with corresponding utility functions that currently
    exist in target projects, e.g. in Nova.

    engine/sessionmaker instances will still be global (and they are meant to
    be global), but they will be stored in the app context, rather than in the
    rack.db context.

    Note: using this helper is completely optional and you are encouraged to
    integrate engine/sessionmaker instances into your apps any way you like
    (e.g. one might want to bind a session to a request context). Two important
    things to remember:
    1. An Engine instance is effectively a pool of DB connections, so it's
       meant to be shared (and it's thread-safe).
    2. A Session instance is not meant to be shared and represents a DB
       transactional context (i.e. it's not thread-safe). sessionmaker is
       a factory of sessions.

    """

    def __init__(self, sql_connection,
                 sqlite_fk=False, mysql_sql_mode=None,
                 autocommit=True, expire_on_commit=False, **kwargs):
        """Initialize engine and sessionmaker instances.

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param mysql_sql_mode: set SQL mode in MySQL
        :type mysql_sql_mode: string

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        Keyword arguments:

        :keyword idle_timeout: timeout before idle sql connections are reaped
                               (defaults to 3600)
        :keyword connection_debug: verbosity of SQL debugging information.
                                   0=None, 100=Everything (defaults to 0)
        :keyword max_pool_size: maximum number of SQL connections to keep open
                                in a pool (defaults to SQLAlchemy settings)
        :keyword max_overflow: if set, use this value for max_overflow with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword pool_timeout: if set, use this value for pool_timeout with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
                                     (defaults to True)
        :keyword connection_trace: add python stack traces to SQL as comment
                                   strings (defaults to False)
        :keyword max_retries: maximum db connection retries during startup.
                              (setting -1 implies an infinite retry count)
                              (defaults to 10)
        :keyword retry_interval: interval between retries of opening a sql
                                 connection (defaults to 10)

        """

        super(EngineFacade, self).__init__()

        self._engine = create_engine(
            sql_connection=sql_connection,
            sqlite_fk=sqlite_fk,
            mysql_sql_mode=mysql_sql_mode,
            idle_timeout=kwargs.get('idle_timeout', 3600),
            connection_debug=kwargs.get('connection_debug', 0),
            max_pool_size=kwargs.get('max_pool_size'),
            max_overflow=kwargs.get('max_overflow'),
            pool_timeout=kwargs.get('pool_timeout'),
            sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
            connection_trace=kwargs.get('connection_trace', False),
            max_retries=kwargs.get('max_retries', 10),
            retry_interval=kwargs.get('retry_interval', 10))
        self._session_maker = get_maker(
            engine=self._engine,
            autocommit=autocommit,
            expire_on_commit=expire_on_commit)

    def get_engine(self):
        """Get the engine instance (note, that it's shared)."""

        return self._engine

    def get_session(self, **kwargs):
        """Get a Session instance.

        If passed, keyword arguments values override the ones used when the
        sessionmaker instance was created.

        :keyword autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :keyword expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        """

        # Iterate over a copy of the keys: deleting from a dict while
        # iterating over it directly raises RuntimeError.
        for arg in list(kwargs):
            if arg not in ('autocommit', 'expire_on_commit'):
                del kwargs[arg]

        return self._session_maker(**kwargs)
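

# NOTE: an illustrative usage sketch, not wired into this module; the sqlite
# URL stands in for whatever connection string the application configures.
def _example_engine_facade_usage():
    """Sketch only; an application would create one facade at startup."""
    facade = EngineFacade('sqlite://', autocommit=True, max_retries=3)
    engine = facade.get_engine()    # shared, thread-safe connection pool
    session = facade.get_session()  # fresh Session; do not share it
    session.execute('SELECT 1')
    return engine
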
149
rack/openstack/common/db/sqlalchemy/test_base.py
Normal file
@@ -0,0 +1,149 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import functools
import os

import fixtures
import six

from rack.openstack.common.db.sqlalchemy import session
from rack.openstack.common.db.sqlalchemy import utils
from rack.openstack.common import test


class DbFixture(fixtures.Fixture):
    """Basic database fixture.

    Allows running tests on various db backends, such as SQLite, MySQL and
    PostgreSQL. The sqlite backend is used by default. To override the
    default backend uri, set the env variable OS_TEST_DBAPI_CONNECTION
    with database admin credentials for the specific backend.
    """

    def _get_uri(self):
        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')

    def __init__(self, test):
        super(DbFixture, self).__init__()

        self.test = test

    def setUp(self):
        super(DbFixture, self).setUp()

        self.test.engine = session.create_engine(self._get_uri())
        self.test.sessionmaker = session.get_maker(self.test.engine)
        self.addCleanup(self.test.engine.dispose)


class DbTestCase(test.BaseTestCase):
    """Base class for testing of DB code.

    Uses `DbFixture`. Intended to be the main database test case to run all
    the tests on a given backend with a user-defined uri. Backend-specific
    tests should be decorated with the `backend_specific` decorator.
    """

    FIXTURE = DbFixture

    def setUp(self):
        super(DbTestCase, self).setUp()
        self.useFixture(self.FIXTURE(self))


ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']


def backend_specific(*dialects):
    """Decorator to skip backend specific tests on inappropriate engines.

    :param dialects: list of dialect names under which the test will be
                     launched.
    """
    def wrap(f):
        @functools.wraps(f)
        def ins_wrap(self):
            if not set(dialects).issubset(ALLOWED_DIALECTS):
                raise ValueError(
                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
            if self.engine.name not in dialects:
                msg = ('The test "%s" can be run '
                       'only on %s. Current engine is %s.')
                args = (f.__name__, ' '.join(dialects), self.engine.name)
                self.skip(msg % args)
            else:
                return f(self)
        return ins_wrap
    return wrap


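# NOTE: an illustrative sketch, not part of any real test suite; it shows how
# the decorator above is meant to be applied inside a DbTestCase subclass.
class _ExampleBackendSpecificTestCase(DbTestCase):
    """Sketch only; the decorated test is skipped on non-matching engines."""

    @backend_specific('mysql', 'postgresql')
    def test_server_side_feature(self):
        # Placeholder body; real tests would exercise backend-only SQL here.
        self.engine.execute('SELECT 1')

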
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
    """Base fixture to use default CI databases.

    The databases exist in the OpenStack CI infrastructure. For correct
    functioning in a local environment the databases must be created
    manually.
    """

    DRIVER = abc.abstractproperty(lambda: None)
    DBNAME = PASSWORD = USERNAME = 'openstack_citest'

    def _get_uri(self):
        return utils.get_connect_string(backend=self.DRIVER,
                                        user=self.USERNAME,
                                        passwd=self.PASSWORD,
                                        database=self.DBNAME)


@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
    """Base test case to use default CI databases.

    The subclasses of this test case are run only when the openstack_citest
    database is available; otherwise the tests are skipped.
    """

    FIXTURE = abc.abstractproperty(lambda: None)

    def setUp(self):
        credentials = {
            'backend': self.FIXTURE.DRIVER,
            'user': self.FIXTURE.USERNAME,
            'passwd': self.FIXTURE.PASSWORD,
            'database': self.FIXTURE.DBNAME}

        if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
            msg = '%s backend is not available.' % self.FIXTURE.DRIVER
            return self.skip(msg)

        super(OpportunisticTestCase, self).setUp()


class MySQLOpportunisticFixture(OpportunisticFixture):
    DRIVER = 'mysql'


class PostgreSQLOpportunisticFixture(OpportunisticFixture):
    DRIVER = 'postgresql'


class MySQLOpportunisticTestCase(OpportunisticTestCase):
    FIXTURE = MySQLOpportunisticFixture


class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
    FIXTURE = PostgreSQLOpportunisticFixture
269
rack/openstack/common/db/sqlalchemy/test_migrations.py
Normal file
@@ -0,0 +1,269 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import logging
import os
import subprocess

import lockfile
from six import moves
from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc

from rack.openstack.common.db.sqlalchemy import utils
from rack.openstack.common.gettextutils import _LE
from rack.openstack.common import test

LOG = logging.getLogger(__name__)


def _have_mysql(user, passwd, database):
    present = os.environ.get('TEST_MYSQL_PRESENT')
    if present is None:
        return utils.is_backend_avail(backend='mysql',
                                      user=user,
                                      passwd=passwd,
                                      database=database)
    return present.lower() in ('', 'true')


def _have_postgresql(user, passwd, database):
    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
    if present is None:
        return utils.is_backend_avail(backend='postgres',
                                      user=user,
                                      passwd=passwd,
                                      database=database)
    return present.lower() in ('', 'true')


def _set_db_lock(lock_path=None, lock_prefix=None):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                path = lock_path or os.environ.get("NOVA_LOCK_PATH")
                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
                with lock:
                    LOG.debug('Got lock "%s"' % f.__name__)
                    return f(*args, **kwargs)
            finally:
                LOG.debug('Lock released "%s"' % f.__name__)
        return wrapper
    return decorator


class BaseMigrationTestCase(test.BaseTestCase):
    """Base class for testing of migration utils."""

    def __init__(self, *args, **kwargs):
        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        self.test_databases = {}
        self.migration_api = None

    def setUp(self):
        super(BaseMigrationTestCase, self).setUp()

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = moves.configparser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
            except moves.configparser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(BaseMigrationTestCase, self).tearDown()

    def execute_cmd(self, cmd=None):
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output = process.communicate()[0]
        LOG.debug(output)
        self.assertEqual(0, process.returncode,
                         "Failed to run: %s\n%s" % (cmd, output))

    def _reset_pg(self, conn_pieces):
        (user,
         password,
         database,
         host) = utils.get_db_connection_info(conn_pieces)
        os.environ['PGPASSWORD'] = password
        os.environ['PGUSER'] = user
        # note(boris-42): We must create and drop the database; we can't
        #                 drop the database we are connected to, so for such
        #                 operations there is a special database, template1.
        sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                  " '%(sql)s' -d template1")

        sql = ("drop database if exists %s;") % database
        droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(droptable)

        sql = ("create database %s;") % database
        createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(createtable)

        os.unsetenv('PGPASSWORD')
        os.unsetenv('PGUSER')

    @_set_db_lock(lock_prefix='migration_tests-')
    def _reset_databases(self):
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = parse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                (user, password, database, host) = \
                    utils.get_db_connection_info(conn_pieces)
                sql = ("drop database if exists %(db)s; "
                       "create database %(db)s;") % {'db': database}
                cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
                       "-e \"%(sql)s\"") % {'user': user, 'password': password,
                                            'host': host, 'sql': sql}
                self.execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                self._reset_pg(conn_pieces)


class WalkVersionsMixin(object):
    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        self.migration_api.version_control(engine, self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))

        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        for version in versions:
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                downgraded = self._migrate_down(
                    engine, version - 1, with_data=True)
                if downgraded:
                    self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self._migrate_down(engine, version - 1)

                if snake_walk and downgraded:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

    def _migrate_down(self, engine, version, with_data=False):
        try:
            self.migration_api.downgrade(engine, self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False

        self.assertEqual(
            version, self.migration_api.db_version(engine, self.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(engine)

        return True

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)

            self.migration_api.upgrade(engine, self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                      (version, engine))
            raise
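

# NOTE: an illustrative sketch; `migration_api` and `REPOSITORY` are
# placeholders for the project's own sqlalchemy-migrate objects and are not
# defined in this module:
#
#     class TestRackMigrations(BaseMigrationTestCase, WalkVersionsMixin):
#
#         def setUp(self):
#             super(TestRackMigrations, self).setUp()
#             self.migration_api = migration_api  # migrate.versioning.api
#             self.REPOSITORY = REPOSITORY        # the migrate_repo object
#             self.INIT_VERSION = 0
#
#         def _pre_upgrade_002(self, engine):
#             return {'seeded': True}  # data handed to _check_002()
#
#         def _check_002(self, engine, data):
#             self.assertTrue(data['seeded'])
#
#         def test_walk_versions(self):
#             for engine in self.engines.values():
#                 self._walk_versions(engine, snake_walk=True)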
638
rack/openstack/common/db/sqlalchemy/utils.py
Normal file
@@ -0,0 +1,638 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import re

from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType

from rack.openstack.common import context as request_context
from rack.openstack.common.db.sqlalchemy import models
from rack.openstack.common.gettextutils import _, _LI, _LW
from rack.openstack.common import timeutils


LOG = logging.getLogger(__name__)

_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")


def sanitize_db_url(url):
    match = _DBURL_REGEX.match(url)
    if match:
        return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
    return url


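# An illustrative call (the credentials are placeholders):
#
#     sanitize_db_url('mysql://user:secret@localhost/rack')
#     # -> 'mysql://****:****@localhost/rack'

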
class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")


# copied from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is `id`
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query


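# NOTE: an illustrative usage sketch; `models.Instance` stands in for any ORM
# model, and `last_seen` is the marker row fetched from the previous page:
#
#     query = session.query(models.Instance)
#     page = paginate_query(query, models.Instance, limit=20,
#                           sort_keys=['created_at', 'id'],
#                           marker=last_seen,
#                           sort_dirs=['desc', 'desc']).all()

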
def _read_deleted_filter(query, db_model, read_deleted):
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use the soft-delete feature.")
                         % db_model.__name__)

    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(db_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(db_model.deleted != default_deleted_value)
    else:
        raise ValueError(_("Unrecognized read_deleted value '%s'")
                         % read_deleted)
    return query


def _project_filter(query, db_model, context, project_only):
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            is_none = None
            query = query.filter(or_(db_model.project_id == context.project_id,
                                     db_model.project_id == is_none))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query


def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under

    :param model: Model to query. Must be a subclass of ModelBase.
    :type model: models.ModelBase

    :param session: The session to use.
    :type session: sqlalchemy.orm.session.Session

    :param args: Arguments to query. If None - model is used.
    :type args: tuple

    :param project_only: If present and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    :type project_only: bool

    :param read_deleted: If present, overrides context's read_deleted field
                         ('no', 'yes' or 'only').
    :type read_deleted: string

    Usage:
        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
                    context, Node,
                    session=session,
                    args=(func.count(Node.id), func.sum(Node.ram))
                    ).filter_by(project_id=project_id)
    """

    if not read_deleted:
        if hasattr(context, 'read_deleted'):
            # NOTE(viktors): some projects use `read_deleted` attribute in
            # their contexts instead of `show_deleted`.
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    query = session.query(model) if not args else session.query(*args)
    query = _read_deleted_filter(query, model, read_deleted)
    query = _project_filter(query, model, context, project_only)

    return query


def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.
    """
    metadata = MetaData()
    metadata.bind = engine
    return Table(name, metadata, autoload=True)


class InsertFromSelect(UpdateBase):
    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
    def __init__(self, table, select):
        self.table = table
        self.select = select


@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Form the `INSERT INTO table (SELECT ... )` statement."""
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))


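# NOTE: an illustrative usage sketch; `new_table` and `old_table` are assumed
# to be two already-reflected Table objects with compatible columns:
#
#     ins = InsertFromSelect(new_table, old_table.select())
#     migrate_engine.execute(ins)

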
class ColumnError(Exception):
    """Error raised when no column or an invalid column is found."""


def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "unsupported by sqlite.")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column


def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    This method drops a UC from a table and works for mysql, postgresql and
    sqlite. In mysql and postgresql we are able to use the "alter table"
    construct. Sqlalchemy doesn't support some sqlite column types and
    replaces their type with NullType in metadata. We process these columns
    and replace NullType with the correct column type.

    :param migrate_engine: sqlalchemy engine
    :param table_name: name of table that contains uniq constraint.
    :param uc_name: name of uniq constraint that will be dropped.
    :param columns: columns that are in uniq constraint.
    :param col_name_col_instance: contains pair column_name=column_instance.
                                  column_instance is an instance of Column.
                                  These params are required only for columns
                                  that have types unsupported by sqlite. For
                                  example BigInteger.
    """

    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()


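# NOTE: an illustrative call; the table, constraint and column names below
# are placeholders, not objects defined by this project:
#
#     drop_unique_constraint(migrate_engine, 'instance_types',
#                            'uniq_name_x_deleted', 'name', 'deleted')

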
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove the row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)


def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise ColumnError(_("Unsupported id column type"))


def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    table = get_table(migrate_engine, table_name)

    insp = reflection.Inspector.from_engine(migrate_engine)
    real_indexes = insp.get_indexes(table_name)
    existing_index_names = dict(
        [(index['name'], index['column_names']) for index in real_indexes])

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in existing_index_names:
            column_names = [table.c[c] for c in existing_index_names[name]]
            old_index = Index(name, *column_names, unique=index["unique"])
            old_index.drop(migrate_engine)

        column_names = [table.c[c] for c in index['column_names']]
        new_index = Index(index["name"], *column_names, unique=index["unique"])
        new_index.create(migrate_engine)


def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = table.metadata
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()


def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    deleted = True  # workaround for pyflakes
    table.update().\
        where(table.c.deleted == deleted).\
        values(new_deleted=table.c.id).\
        execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
    #                 constraints in a sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                    and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted
        #                 column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        return (sqltext.endswith("deleted in (0, 1)") or
                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))

    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    deleted = True  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()


def get_connect_string(backend, database, user=None, passwd=None):
    """Build a database connection string.

    Try to get a connection with a very specific set of values; if we get
    one, we'll run the tests, otherwise they are skipped.
    """
    args = {'backend': backend,
            'user': user,
            'passwd': passwd,
            'database': database}
    if backend == 'sqlite':
        template = '%(backend)s:///%(database)s'
    else:
        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
    return template % args


def is_backend_avail(backend, database, user=None, passwd=None):
|
||||||
|
try:
|
||||||
|
connect_uri = get_connect_string(backend=backend,
|
||||||
|
database=database,
|
||||||
|
user=user,
|
||||||
|
passwd=passwd)
|
||||||
|
engine = sqlalchemy.create_engine(connect_uri)
|
||||||
|
connection = engine.connect()
|
||||||
|
except Exception:
|
||||||
|
# intentionally catch all to handle exceptions even if we don't
|
||||||
|
# have any backend code loaded.
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
connection.close()
|
||||||
|
engine.dispose()
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def get_db_connection_info(conn_pieces):
|
||||||
|
database = conn_pieces.path.strip('/')
|
||||||
|
loc_pieces = conn_pieces.netloc.split('@')
|
||||||
|
host = loc_pieces[1]
|
||||||
|
|
||||||
|
auth_pieces = loc_pieces[0].split(':')
|
||||||
|
user = auth_pieces[0]
|
||||||
|
password = ""
|
||||||
|
if len(auth_pieces) > 1:
|
||||||
|
password = auth_pieces[1].strip()
|
||||||
|
|
||||||
|
return (user, password, database, host)
|
||||||
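Usage sketch (illustrative, not part of the commit; the test database name and credentials are assumptions mirroring the common OpenStack CI convention):

    from six.moves.urllib import parse

    # Skip DB-backed migration tests when no MySQL test database is reachable.
    if is_backend_avail('mysql', 'openstack_citest',
                        user='openstack_citest', passwd='openstack_citest'):
        uri = get_connect_string('mysql', 'openstack_citest',
                                 user='openstack_citest',
                                 passwd='openstack_citest')
        user, password, database, host = get_db_connection_info(
            parse.urlparse(uri))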
146
rack/openstack/common/eventlet_backdoor.py
Normal file
@@ -0,0 +1,146 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import print_function

import errno
import gc
import os
import pprint
import socket
import sys
import traceback

import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging

help_for_backdoor_port = 'Acceptable ' + \
    'values are 0, <port> and <start>:<end>, where 0 results in ' + \
    'listening on a random tcp port number, <port> results in ' + \
    'listening on the specified port number and not enabling backdoor ' + \
    'if it is in use and <start>:<end> results in listening on the ' + \
    'smallest unused port number within the specified range of port ' + \
    'numbers. The chosen port is displayed in the service\'s log file.'
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               default=None,
               help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)


class EventletBackdoorConfigValueError(Exception):
    def __init__(self, port_range, help_msg, ex):
        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
               '%(help)s' %
               {'range': port_range, 'ex': ex, 'help': help_msg})
        super(EventletBackdoorConfigValueError, self).__init__(msg)
        self.port_range = port_range


def _dont_use_this():
    print("Don't use this, just disconnect instead")


def _find_objects(t):
    return filter(lambda o: isinstance(o, t), gc.get_objects())


def _print_greenthreads():
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
        print(i, gt)
        traceback.print_stack(gt.gr_frame)
        print()


def _print_nativethreads():
    for threadId, stack in sys._current_frames().items():
        print(threadId)
        traceback.print_stack(stack)
        print()


def _parse_port_range(port_range):
    if ':' not in port_range:
        start, end = port_range, port_range
    else:
        start, end = port_range.split(':', 1)
    try:
        start, end = int(start), int(end)
        if end < start:
            raise ValueError
        return start, end
    except ValueError as ex:
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)


def _listen(host, start_port, end_port, listen_func):
    try_port = start_port
    while True:
        try:
            return listen_func((host, try_port))
        except socket.error as exc:
            if (exc.errno != errno.EADDRINUSE or
                    try_port >= end_port):
                raise
            try_port += 1


def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
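A hedged sketch of how a service would wire this in (the CONF invocation below is illustrative; real services pass their own argv):

    from oslo.config import cfg
    from rack.openstack.common import eventlet_backdoor

    cfg.CONF(['--backdoor_port', '0'])  # 0 = bind any free port
    port = eventlet_backdoor.initialize_if_enabled()
    print('backdoor available on port', port)  # then: telnet localhost <port>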
99
rack/openstack/common/excutils.py
Normal file
@@ -0,0 +1,99 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exception related utilities.
"""

import logging
import sys
import time
import traceback

import six

from rack.openstack.common.gettextutils import _  # noqa


class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.

    In some cases the caller may not want to re-raise the exception, and
    for those circumstances this context provides a reraise flag that
    can be used to suppress the exception. For example::

        except Exception:
            with save_and_reraise_exception() as ctxt:
                decide_if_need_reraise()
                if not should_be_reraised:
                    ctxt.reraise = False
    """
    def __init__(self):
        self.reraise = True

    def __enter__(self):
        self.type_, self.value, self.tb = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            logging.error(_('Original exception being dropped: %s'),
                          traceback.format_exception(self.type_,
                                                     self.value,
                                                     self.tb))
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)


def forever_retry_uncaught_exceptions(infunc):
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = six.u(str(exc))
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
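The canonical calling pattern for save_and_reraise_exception; `db` and `volume` are hypothetical stand-ins:

    from rack.openstack.common import excutils

    def create_volume(db, volume):
        db.insert(volume)
        try:
            db.attach(volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Cleanup runs here, then the original error is re-raised.
                db.delete(volume)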
137
rack/openstack/common/fileutils.py
Normal file
@@ -0,0 +1,137 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import contextlib
import errno
import os
import tempfile

from rack.openstack.common import excutils
from rack.openstack.common.gettextutils import _  # noqa
from rack.openstack.common import log as logging

LOG = logging.getLogger(__name__)

_FILE_CACHE = {}


def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise


def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])


def delete_if_exists(path, remove=os.unlink):
    """Delete a file, but ignore file not found error.

    :param path: File to delete
    :param remove: Optional function to remove passed path
    """

    try:
        remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.

    :param path: File to work with
    :param remove: Optional function to remove passed path
    """

    try:
        yield
    except Exception:
        with excutils.save_and_reraise_exception():
            remove(path)


def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    return file(*args, **kwargs)


def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
    """Create temporary file or use existing file.

    This util is needed for creating temporary file with
    specified content, suffix and prefix. If path is not None,
    it will be used for writing content. If the path doesn't
    exist it'll be created.

    :param content: content for temporary file.
    :param path: same as parameter 'dir' for mkstemp
    :param suffix: same as parameter 'suffix' for mkstemp
    :param prefix: same as parameter 'prefix' for mkstemp

    For example: it can be used in database tests for creating
    configuration files.
    """
    if path:
        ensure_tree(path)

    (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
    try:
        os.write(fd, content)
    finally:
        os.close(fd)
    return path
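An illustrative round trip through these helpers (the file contents are arbitrary):

    from rack.openstack.common import fileutils

    path = fileutils.write_to_tempfile(b'[DEFAULT]\nverbose = True\n',
                                       suffix='.conf')
    with fileutils.remove_path_on_error(path):
        reloaded, data = fileutils.read_cached_file(path)
    fileutils.delete_if_exists(path)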
0
rack/openstack/common/fixture/__init__.py
Normal file
85
rack/openstack/common/fixture/config.py
Normal file
@@ -0,0 +1,85 @@
#
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
from oslo.config import cfg
import six


class Config(fixtures.Fixture):
    """Allows overriding configuration settings for the test.

    `conf` will be reset on cleanup.

    """

    def __init__(self, conf=cfg.CONF):
        self.conf = conf

    def setUp(self):
        super(Config, self).setUp()
        # NOTE(morganfainberg): unregister must be added to cleanup before
        # reset is because cleanup works in reverse order of registered items,
        # and a reset must occur before unregistering options can occur.
        self.addCleanup(self._unregister_config_opts)
        self.addCleanup(self.conf.reset)
        self._registered_config_opts = {}

    def config(self, **kw):
        """Override configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a `group` argument is supplied, the overrides are applied to
        the specified configuration option group, otherwise the overrides
        are applied to the ``default`` group.

        """

        group = kw.pop('group', None)
        for k, v in six.iteritems(kw):
            self.conf.set_override(k, v, group)

    def _unregister_config_opts(self):
        for group in self._registered_config_opts:
            self.conf.unregister_opts(self._registered_config_opts[group],
                                      group=group)

    def register_opt(self, opt, group=None):
        """Register a single option for the test run.

        Options registered in this manner will automatically be unregistered
        during cleanup.

        If a `group` argument is supplied, it will register the new option
        to that group, otherwise the option is registered to the ``default``
        group.
        """
        self.conf.register_opt(opt, group=group)
        self._registered_config_opts.setdefault(group, set()).add(opt)

    def register_opts(self, opts, group=None):
        """Register multiple options for the test run.

        This works in the same manner as register_opt() but takes a list of
        options as the first argument. All arguments will be registered to the
        same group if the ``group`` argument is supplied, otherwise all options
        will be registered to the ``default`` group.
        """
        for opt in opts:
            self.register_opt(opt, group=group)
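A sketch of the fixture in a testtools test (the option name is made up):

    import testtools
    from oslo.config import cfg
    from rack.openstack.common.fixture import config

    class ConfigOverrideTest(testtools.TestCase):
        def test_override(self):
            fix = self.useFixture(config.Config())
            fix.register_opt(cfg.BoolOpt('enabled', default=False))
            fix.config(enabled=True)
            self.assertTrue(cfg.CONF.enabled)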
51
rack/openstack/common/fixture/lockutils.py
Normal file
@@ -0,0 +1,51 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures

from rack.openstack.common import lockutils


class LockFixture(fixtures.Fixture):
    """External locking fixture.

    This fixture is basically an alternative to the synchronized decorator with
    the external flag so that tearDowns and addCleanups will be included in
    the lock context for locking between tests. The fixture is recommended to
    be the first line in a test method, like so::

        def test_method(self):
            self.useFixture(LockFixture('lock_name'))
            ...

    or the first line in setUp if all the test methods in the class are
    required to be serialized. Something like::

        class TestCase(testtools.testcase):
            def setUp(self):
                self.useFixture(LockFixture('lock_name'))
                super(TestCase, self).setUp()
                ...

    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits. (either by completing or raising an exception)
    """
    def __init__(self, name, lock_file_prefix=None):
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def setUp(self):
        super(LockFixture, self).setUp()
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.mgr.__enter__()
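Usage as the docstring above recommends; the lock name 'db' is illustrative:

    import testtools
    from rack.openstack.common.fixture import lockutils as lock_fixture

    class SerializedTests(testtools.TestCase):
        def setUp(self):
            # Acquire the external 'db' lock for the whole test, including
            # cleanups queued after this line.
            self.useFixture(lock_fixture.LockFixture('db'))
            super(SerializedTests, self).setUp()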
34
rack/openstack/common/fixture/logging.py
Normal file
@@ -0,0 +1,34 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures


def get_logging_handle_error_fixture():
    """Returns a fixture to make logging raise formatting exceptions.

    Usage:
    self.useFixture(logging.get_logging_handle_error_fixture())
    """
    return fixtures.MonkeyPatch('logging.Handler.handleError',
                                _handleError)


def _handleError(self, record):
    """Monkey patch for logging.Handler.handleError.

    The default handleError just logs the error to stderr but we want
    the option of actually raising an exception.
    """
    raise
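An assumed opt-in from a test's setUp:

    import testtools
    from rack.openstack.common.fixture import logging as log_fixture

    class StrictLoggingTest(testtools.TestCase):
        def setUp(self):
            super(StrictLoggingTest, self).setUp()
            # Any bad log format string hit by the code under test
            # now raises instead of being swallowed by handleError.
            self.useFixture(log_fixture.get_logging_handle_error_fixture())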
51
rack/openstack/common/fixture/mockpatch.py
Normal file
@@ -0,0 +1,51 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock


class PatchObject(fixtures.Fixture):
    """Deal with code around mock."""

    def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
        self.obj = obj
        self.attr = attr
        self.kwargs = kwargs
        self.new = new

    def setUp(self):
        super(PatchObject, self).setUp()
        _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
        self.mock = _p.start()
        self.addCleanup(_p.stop)


class Patch(fixtures.Fixture):

    """Deal with code around mock.patch."""

    def __init__(self, obj, new=mock.DEFAULT, **kwargs):
        self.obj = obj
        self.kwargs = kwargs
        self.new = new

    def setUp(self):
        super(Patch, self).setUp()
        _p = mock.patch(self.obj, self.new, **self.kwargs)
        self.mock = _p.start()
        self.addCleanup(_p.stop)
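A sketch; os.path.exists is just a convenient patch target:

    import os.path
    import testtools
    from rack.openstack.common.fixture import mockpatch

    class PatchTest(testtools.TestCase):
        def test_patch(self):
            fake = self.useFixture(mockpatch.Patch('os.path.exists')).mock
            fake.return_value = True
            self.assertTrue(os.path.exists('/definitely/not/there'))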
32
rack/openstack/common/fixture/moxstubout.py
Normal file
@@ -0,0 +1,32 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
from six.moves import mox  # noqa


class MoxStubout(fixtures.Fixture):
    """Deal with code around mox and stubout as a fixture."""

    def setUp(self):
        super(MoxStubout, self).setUp()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = self.mox.stubs
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.mox.VerifyAll)
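An equivalent sketch with the mox fixture (same illustrative target):

    import os.path
    import testtools
    from rack.openstack.common.fixture import moxstubout

    class StubTest(testtools.TestCase):
        def test_stub(self):
            fix = self.useFixture(moxstubout.MoxStubout())
            fix.stubs.Set(os.path, 'exists', lambda p: True)
            self.assertTrue(os.path.exists('/definitely/not/there'))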
474
rack/openstack/common/gettextutils.py
Normal file
@@ -0,0 +1,474 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
gettext for openstack-common modules.

Usual usage in an openstack.common module:

    from rack.openstack.common.gettextutils import _
"""

import copy
import functools
import gettext
import locale
from logging import handlers
import os
import re

from babel import localedata
import six

_localedir = os.environ.get('rack'.upper() + '_LOCALEDIR')
_t = gettext.translation('rack', localedir=_localedir, fallback=True)

# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
    (level, gettext.translation('rack' + '-log-' + level,
                                localedir=_localedir,
                                fallback=True))
    for level in ['info', 'warning', 'error', 'critical']
)

_AVAILABLE_LANGUAGES = {}
USE_LAZY = False


def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True


def _(msg):
    if USE_LAZY:
        return Message(msg, domain='rack')
    else:
        if six.PY3:
            return _t.gettext(msg)
        return _t.ugettext(msg)


def _log_translation(msg, level):
    """Build a single translation of a log message."""
    if USE_LAZY:
        return Message(msg, domain='rack' + '-log-' + level)
    else:
        translator = _t_log_levels[level]
        if six.PY3:
            return translator.gettext(msg)
        return translator.ugettext(msg)

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')


def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): Lazy gettext functionality.
        #
        # The following introduces a deferred way to do translations on
        # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
        # later be translated when we have more information.
        def _lazy_gettext(msg):
            """Create and return a Message object.

            Lazy gettext function for a given domain; it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. rack, glance, cinder, etc.)

            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain=domain)

        from six import moves
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)


class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='rack', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID; this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """

        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            params = self._trim_dictionary_parameters(other)
        else:
            params = self._copy_param(other)
        return params

    def _trim_dictionary_parameters(self, dict_param):
        """Return a dict that only has matching entries in the msgid."""
        # NOTE(luisg): Here we trim down the dictionary passed as parameters
        # to avoid carrying a lot of unnecessary weight around in the message
        # object, for example if someone passes in Message() % locals() but
        # only some params are used, and additionally we prevent errors for
        # non-deepcopyable objects by unicoding() them.

        # Look for %(param) keys in msgid;
        # Skip %% and deal with the case where % is first character on the line
        keys = re.findall(r'(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)

        # If we don't find any %(param) keys but have a %s
        if not keys and re.findall(r'(?:[^%]|^)%[a-z]', self.msgid):
            # Apparently the full dictionary is the parameter
            params = self._copy_param(dict_param)
        else:
            params = {}
            # Save our existing parameters as defaults to protect
            # ourselves from losing values if we are called through an
            # (erroneous) chain that builds a valid Message with
            # arguments, and then does something like "msg % kwds"
            # where kwds is an empty dictionary.
            src = {}
            if isinstance(self.params, dict):
                src.update(self.params)
            src.update(dict_param)
            for key in keys:
                params[key] = self._copy_param(src[key])

        return params

    def _copy_param(self, param):
        try:
            return copy.deepcopy(param)
        except TypeError:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    def __str__(self):
        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
        # and it expects specifically a UnicodeError in order to proceed.
        msg = _('Message objects do not support str() because they may '
                'contain non-ascii characters. '
                'Please use unicode() or translate() instead.')
        raise UnicodeError(msg)


def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale, alias) in six.iteritems(aliases):
        if locale in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)


def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    message = obj
    if not isinstance(message, Message):
        # If the object to translate is not already translatable,
        # let's first get its unicode representation
        message = six.text_type(obj)
    if isinstance(message, Message):
        # Even after unicoding() we still need to check if we are
        # running with translatable unicode before translating
        return message.translate(desired_locale)
    return obj


def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        translated_dict = {}
        for (k, v) in six.iteritems(args):
            translated_v = translate(v, desired_locale)
            translated_dict[k] = translated_v
        return translated_dict
    return translate(args, desired_locale)


class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        self.target.setFormatter(fmt)

    def emit(self, record):
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args

        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = original_msg
            record.args = original_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)

        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one)
        record.args = _translate_args(record.args, self.locale)

        self.target.emit(record)
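How lazy Messages defer translation ('en_US' is used here since no catalogs ship with this commit):

    from rack.openstack.common import gettextutils

    gettextutils.enable_lazy()
    msg = gettextutils._('Process %(pid)s not found') % {'pid': 42}
    # msg is a Message; it renders per-locale only when asked to.
    print(gettextutils.translate(msg, desired_locale='en_US'))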
144
rack/openstack/common/imageutils.py
Normal file
@@ -0,0 +1,144 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Helper methods to deal with images.
"""

import re

from rack.openstack.common.gettextutils import _  # noqa
from rack.openstack.common import strutils


class QemuImgInfo(object):
    BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
                                  r"\s+(.*?)\)\s*$"), re.I)
    TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
    SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)

    def __init__(self, cmd_output=None):
        details = self._parse(cmd_output or '')
        self.image = details.get('image')
        self.backing_file = details.get('backing_file')
        self.file_format = details.get('file_format')
        self.virtual_size = details.get('virtual_size')
        self.cluster_size = details.get('cluster_size')
        self.disk_size = details.get('disk_size')
        self.snapshots = details.get('snapshot_list', [])
        self.encryption = details.get('encryption')

    def __str__(self):
        lines = [
            'image: %s' % self.image,
            'file_format: %s' % self.file_format,
            'virtual_size: %s' % self.virtual_size,
            'disk_size: %s' % self.disk_size,
            'cluster_size: %s' % self.cluster_size,
            'backing_file: %s' % self.backing_file,
        ]
        if self.snapshots:
            lines.append("snapshots: %s" % self.snapshots)
        return "\n".join(lines)

    def _canonicalize(self, field):
        # Standardize on underscores/lc/no dash and no spaces
        # since qemu seems to have mixed outputs here... and
        # this format allows for better integration with python
        # - ie for usage in kwargs and such...
        field = field.lower().strip()
        for c in (" ", "-"):
            field = field.replace(c, '_')
        return field

    def _extract_bytes(self, details):
        # Replace it with the byte amount
        real_size = self.SIZE_RE.search(details)
        if real_size:
            details = real_size.group(1)
        try:
            details = strutils.to_bytes(details)
        except TypeError:
            pass
        return details

    def _extract_details(self, root_cmd, root_details, lines_after):
        real_details = root_details
        if root_cmd == 'backing_file':
            # Replace it with the real backing file
            backing_match = self.BACKING_FILE_RE.match(root_details)
            if backing_match:
                real_details = backing_match.group(2).strip()
        elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
            # Replace it with the byte amount (if we can convert it)
            real_details = self._extract_bytes(root_details)
        elif root_cmd == 'file_format':
            real_details = real_details.strip().lower()
        elif root_cmd == 'snapshot_list':
            # Next line should be a header, starting with 'ID'
            if not lines_after or not lines_after[0].startswith("ID"):
                msg = _("Snapshot list encountered but no header found!")
                raise ValueError(msg)
            del lines_after[0]
            real_details = []
            # This is the sprintf pattern we will try to match
            # "%-10s%-20s%7s%20s%15s"
            # ID TAG VM SIZE DATE VM CLOCK (current header)
            while lines_after:
                line = lines_after[0]
                line_pieces = line.split()
                if len(line_pieces) != 6:
                    break
                # Check against this pattern in the final position
                # "%02d:%02d:%02d.%03d"
                date_pieces = line_pieces[5].split(":")
                if len(date_pieces) != 3:
                    break
                real_details.append({
                    'id': line_pieces[0],
                    'tag': line_pieces[1],
                    'vm_size': line_pieces[2],
                    'date': line_pieces[3],
                    'vm_clock': line_pieces[4] + " " + line_pieces[5],
                })
                del lines_after[0]
        return real_details

    def _parse(self, cmd_output):
        # Analysis done of qemu-img.c to figure out what is going on here
        # Find all points start with some chars and then a ':' then a newline
        # and then handle the results of those 'top level' items in a separate
        # function.
        #
        # TODO(harlowja): newer versions might have a json output format
        #                 we should switch to that whenever possible.
        #                 see: http://bit.ly/XLJXDX
        contents = {}
        lines = [x for x in cmd_output.splitlines() if x.strip()]
        while lines:
            line = lines.pop(0)
            top_level = self.TOP_LEVEL_RE.match(line)
            if top_level:
                root = self._canonicalize(top_level.group(1))
                if not root:
                    continue
                root_details = top_level.group(2).strip()
                details = self._extract_details(root, root_details, lines)
                contents[root] = details
        return contents
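Parsing a fabricated qemu-img info output:

    from rack.openstack.common import imageutils

    sample = ('image: disk.qcow2\n'
              'file format: qcow2\n'
              'virtual size: 1.0G (1073741824 bytes)\n'
              'disk size: 196K\n')
    info = imageutils.QemuImgInfo(sample)
    print(info.file_format, info.virtual_size)  # qcow2 1073741824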
66
rack/openstack/common/importutils.py
Normal file
@@ -0,0 +1,66 @@
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Import related utilities and helper functions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
|
||||||
|
def import_class(import_str):
|
||||||
|
"""Returns a class from a string including module and class."""
|
||||||
|
mod_str, _sep, class_str = import_str.rpartition('.')
|
||||||
|
try:
|
||||||
|
__import__(mod_str)
|
||||||
|
return getattr(sys.modules[mod_str], class_str)
|
||||||
|
except (ValueError, AttributeError):
|
||||||
|
raise ImportError('Class %s cannot be found (%s)' %
|
||||||
|
(class_str,
|
||||||
|
traceback.format_exception(*sys.exc_info())))
|
||||||
|
|
||||||
|
|
||||||
|
def import_object(import_str, *args, **kwargs):
|
||||||
|
"""Import a class and return an instance of it."""
|
||||||
|
return import_class(import_str)(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def import_object_ns(name_space, import_str, *args, **kwargs):
|
||||||
|
"""Tries to import object from default namespace.
|
||||||
|
|
||||||
|
Imports a class and return an instance of it, first by trying
|
||||||
|
to find the class in a default namespace, then failing back to
|
||||||
|
a full path if not found in the default namespace.
|
||||||
|
"""
|
||||||
|
import_value = "%s.%s" % (name_space, import_str)
|
||||||
|
try:
|
||||||
|
return import_class(import_value)(*args, **kwargs)
|
||||||
|
except ImportError:
|
||||||
|
return import_class(import_str)(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def import_module(import_str):
|
||||||
|
"""Import a module."""
|
||||||
|
__import__(import_str)
|
||||||
|
return sys.modules[import_str]
|
||||||
|
|
||||||
|
|
||||||
|
def try_import(import_str, default=None):
|
||||||
|
"""Try to import a module and if it fails return default."""
|
||||||
|
try:
|
||||||
|
return import_module(import_str)
|
||||||
|
except ImportError:
|
||||||
|
return default
|
||||||
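These helpers are the standard way the vendored code loads drivers and plugins from dotted-path strings. A minimal sketch of how they compose; the dotted path is illustrative only, not a real RACK class:

from rack.openstack.common import importutils

# Hypothetical plugin path, for illustration.
Driver = importutils.import_class('rack.scheduler.filters.CoreFilter')
driver = importutils.import_object('rack.scheduler.filters.CoreFilter')
netaddr = importutils.try_import('netaddr')  # module, or None if missing
if netaddr is None:
    print('netaddr not available; feature disabled')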
178
rack/openstack/common/jsonutils.py
Normal file
@@ -0,0 +1,178 @@
#    Copyright 2010 United States Government as represented by the
#    Administrator of the National Aeronautics and Space Administration.
#    Copyright 2011 Justin Santa Barbara
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

'''
JSON related utilities.

This module provides a few things:

    1) A handy function for getting an object down to something that can be
    JSON serialized.  See to_primitive().

    2) Wrappers around loads() and dumps().  The dumps() wrapper will
    automatically use to_primitive() for you if needed.

    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
    is available.
'''


import datetime
import functools
import inspect
import itertools
import json
try:
    import xmlrpclib
except ImportError:
    # NOTE(jd): xmlrpclib is not shipped with Python 3
    xmlrpclib = None

import six

from rack.openstack.common import gettextutils
from rack.openstack.common import importutils
from rack.openstack.common import timeutils

netaddr = importutils.try_import("netaddr")

_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # handle obvious types first - order of basic types determined by running
    # full tests on rack project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ but it isn't callable as list().
        return six.text_type(value)


def dumps(value, default=to_primitive, **kwargs):
    return json.dumps(value, default=default, **kwargs)


def loads(s):
    return json.loads(s)


def load(s):
    return json.load(s)


try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
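The practical payoff of to_primitive() is that dumps() can serialize objects plain json.dumps() rejects. A short sketch, using only what the module as written provides:

import datetime

from rack.openstack.common import jsonutils

payload = {'when': datetime.datetime(2014, 1, 1, 12, 0, 0),
           'ids': iter([1, 2, 3])}  # iterators are handled via __iter__
print(jsonutils.dumps(payload))
# json.dumps(payload) would raise TypeError; the default=to_primitive hook
# converts the datetime to a string and drains the iterator into a list.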
45
rack/openstack/common/local.py
Normal file
@@ -0,0 +1,45 @@
#    Copyright 2011 OpenStack Foundation.
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    def __getattribute__(self, attr):
        rval = super(WeakLocal, self).__getattribute__(attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        value = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of
# scope when it falls out of scope in the code that uses the thread local
# storage. A "strong" store will hold a reference to the object so that it
# never falls out of scope.
weak_store = WeakLocal()
strong_store = threading.local()
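The difference between the two stores is easiest to see with a throwaway object. A minimal sketch, not part of the module (the immediate collection of the weak referent assumes CPython's reference counting):

from rack.openstack.common import local


class Context(object):
    pass

local.strong_store.ctx = Context()  # plain thread-local: keeps object alive
local.weak_store.ctx = Context()    # weak ref: nothing else holds the object
print(local.strong_store.ctx)       # <Context object ...>
print(local.weak_store.ctx)         # None - referent was garbage collected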
303
rack/openstack/common/lockutils.py
Normal file
@@ -0,0 +1,303 @@
#    Copyright 2011 OpenStack Foundation.
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref

from oslo.config import cfg

from rack.openstack.common import fileutils
from rack.openstack.common.gettextutils import _  # noqa
from rack.openstack.common import local
from rack.openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("NOVA_LOCK_PATH"),
               help=('Directory to use for lock files.'))
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    cfg.set_defaults(util_opts, lock_path=lock_path)


class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()


class _WindowsLock(_InterProcessLock):
    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


class _PosixLock(_InterProcessLock):
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()


@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)


def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
        return inner
    return wrap


def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in rack/utils.py)
        from rack.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('rack-')


        (in rack/foo.py)
        from rack import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)


def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
        python -m openstack.common.lockutils python setup.py testr <etc>

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.
    """

    lock_dir = tempfile.mkdtemp()
    os.environ["NOVA_LOCK_PATH"] = lock_dir
    try:
        ret_val = subprocess.call(argv[1:])
    finally:
        shutil.rmtree(lock_dir, ignore_errors=True)
    return ret_val


if __name__ == '__main__':
    sys.exit(main(sys.argv))
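Putting the pieces together, a minimal sketch of an external (cross-process) lock as the decorator is meant to be used; the lock name and path are illustrative, not RACK defaults:

from oslo.config import cfg

from rack.openstack.common import lockutils

cfg.CONF.set_override('lock_path', '/tmp/rack-locks')  # illustrative path


@lockutils.synchronized('state-file', lock_file_prefix='rack-', external=True)
def update_state():
    # Only one process on this host runs this at a time; the file
    # /tmp/rack-locks/rack-state-file backs the fcntl/msvcrt lock.
    pass

update_state()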
655
rack/openstack/common/log.py
Normal file
@@ -0,0 +1,655 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""OpenStack logging handler.

This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.

It also allows setting of formatting information through conf.

"""

import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback

from oslo.config import cfg
import six
from six import moves

from rack.openstack.common.gettextutils import _
from rack.openstack.common import importutils
from rack.openstack.common import jsonutils
from rack.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']

for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)


common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and then will be changed in J to honor RFC5424'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Use syslog rfc5424 format for logging. '
                     'If enabled, will add APP-NAME (RFC5424) before the '
                     'MSG part of the syslog message. The old format '
                     'without APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'oslo.messaging=INFO',
                    'iso8601=WARN',
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')


try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None


def _dictify_context(context):
    if context is None:
        return None
    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
        context = context.to_dict()
    return context


def _get_binary_name():
    return os.path.basename(inspect.stack()[-1][1])


def _get_log_file_path(binary=None):
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile and not logdir:
        return logfile

    if logfile and logdir:
        return os.path.join(logdir, logfile)

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)

    return None


def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # NOTE(ldbragst): Check to see if anything in message contains any key
    # specified in _SANITIZE_KEYS, if not then just return the message since
    # we don't have to mask any passwords.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message

    secret = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS:
        message = re.sub(pattern, secret, message)
    return message


class BaseLoggerAdapter(logging.LoggerAdapter):

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)


class LazyAdapter(BaseLoggerAdapter):
    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger


class ContextAdapter(BaseLoggerAdapter):
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid', None) or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra.update({'instance': instance_extra})

        extra.update({"project": self.project})
        extra.update({"version": self.version})
        extra['extra'] = extra.copy()
        return msg, kwargs


class JSONFormatter(logging.Formatter):
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)


def _create_logging_excepthook(product_name):
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        if CONF.verbose or CONF.debug:
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(
            "".join(traceback.format_exception_only(exc_type, value)),
            **extra)
    return logging_excepthook


class LogConfigError(Exception):

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)


def _load_log_config(log_config_append):
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))


def setup(product_name):
    """Setup logging."""
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)


def set_defaults(logging_context_format_string):
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)


def _find_facility_from_conf():
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility


class RFCSysLogHandler(logging.handlers.SysLogHandler):
    def __init__(self, *args, **kwargs):
        self.binary_name = _get_binary_name()
        super(RFCSysLogHandler, self).__init__(*args, **kwargs)

    def format(self, record):
        msg = super(RFCSysLogHandler, self).format(record)
        msg = self.binary_name + ' ' + msg
        return msg


def _setup_logging_from_conf():
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        # after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "rack.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)

_loggers = {}


def getLogger(name='unknown', version='unknown'):
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]


def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        self.logger.log(self.level, msg.rstrip())


class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)


class ColorHandler(logging.StreamHandler):
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)


class DeprecatedConfig(Exception):
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))
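Typical consumer-side usage is small. A sketch assuming a program name of 'rack' and default configuration:

from rack.openstack.common import log as logging

logging.setup('rack')                    # reads CONF (debug/verbose, log_file, ...)
LOG = logging.getLogger(__name__)

LOG.audit('instance %s started', 'abc')  # the synthesized AUDIT level
LOG.info(logging.mask_password("body: 'password' : 'secret123'"))
# -> body: 'password' : '***'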
147
rack/openstack/common/loopingcall.py
Normal file
@@ -0,0 +1,147 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import sys

from eventlet import event
from eventlet import greenthread

from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils

LOG = logging.getLogger(__name__)


class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCall.

    The poll-function passed to LoopingCall can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCall.wait()

    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCall.wait() should return."""
        self.retvalue = retvalue


class LoopingCallBase(object):
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
        self.done = None

    def stop(self):
        self._running = False

    def wait(self):
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done


# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
                                'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
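A minimal sketch of driving a periodic task with FixedIntervalLoopingCall; a service would normally call wait() from a green thread rather than the main one:

from rack.openstack.common import loopingcall


def report_state():
    print('heartbeat')
    # raise loopingcall.LoopingCallDone(retvalue) to stop from inside

timer = loopingcall.FixedIntervalLoopingCall(report_state)
timer.start(interval=10, initial_delay=5)  # returns the underlying Event
timer.wait()                               # blocks (greenthread) until stopped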
97
rack/openstack/common/memorycache.py
Normal file
@@ -0,0 +1,97 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Super simple fake memcache client."""

from oslo.config import cfg

from rack.openstack.common import timeutils

memcache_opts = [
    cfg.ListOpt('memcached_servers',
                default=None,
                help='Memcached servers or None for in process cache.'),
]

CONF = cfg.CONF
CONF.register_opts(memcache_opts)


def get_client(memcached_servers=None):
    client_cls = Client

    if not memcached_servers:
        memcached_servers = CONF.memcached_servers
    if memcached_servers:
        try:
            import memcache
            client_cls = memcache.Client
        except ImportError:
            pass

    return client_cls(memcached_servers, debug=0)


class Client(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, *args, **kwargs):
        """Ignores the passed in args."""
        self.cache = {}

    def get(self, key):
        """Retrieves the value for a key or None.

        This expunges expired keys during each get.
        """

        now = timeutils.utcnow_ts()
        for k in self.cache.keys():
            (timeout, _value) = self.cache[k]
            if timeout and now >= timeout:
                del self.cache[k]

        return self.cache.get(key, (0, None))[1]

    def set(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key."""
        timeout = 0
        if time != 0:
            timeout = timeutils.utcnow_ts() + time
        self.cache[key] = (timeout, value)
        return True

    def add(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key if it doesn't exist."""
        if self.get(key) is not None:
            return False
        return self.set(key, value, time, min_compress_len)

    def incr(self, key, delta=1):
        """Increments the value for a key."""
        value = self.get(key)
        if value is None:
            return None
        new_value = int(value) + delta
        self.cache[key] = (self.cache[key][0], str(new_value))
        return new_value

    def delete(self, key, time=0):
        """Deletes the value associated with a key."""
        if key in self.cache:
            del self.cache[key]
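The fake client mirrors just enough of the python-memcached interface to be swappable for the real one. A short sketch:

from rack.openstack.common import memorycache

mc = memorycache.get_client()       # in-process dict unless memcached_servers is set
mc.set('token', 'abc123', time=60)  # expires 60 seconds from now
print(mc.get('token'))              # 'abc123' (or None once expired)
print(mc.add('token', 'other'))     # False - key already present
print(mc.incr('counter'))           # None - incr on a missing key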
Some files were not shown because too many files have changed in this diff