retire project

Change-Id: I0e38bad2a50836d18a7ca08e701f0a5dc9f4dbae
takehirokaneko 2018-01-23 13:47:32 +09:00
parent 2b459e0c87
commit e6ce00b6f7
395 changed files with 8 additions and 52460 deletions

.coveragerc
@@ -1,7 +0,0 @@
[run]
branch = True
source = rack
omit = rack/tests/*,rack/openstack/*
[report]
ignore-errors = True

.gitignore
@@ -1,46 +0,0 @@
*.DS_Store
*.egg*
*.log
*.mo
*.pyc
*.swo
*.swp
*.sqlite
*~
.autogenerated
.coverage
.rack-venv
.project
.pydevproject
.ropeproject
.testrepository/
.settings
.tox
.idea
.venv
AUTHORS
Authors
build-stamp
build/*
bin/*
CA/
ChangeLog
coverage.xml
cover/*
covhtml
dist/*
doc/source/api/*
doc/build/*
etc/rack.conf
instances
keeper
keys
local_settings.py
MANIFEST
nosetests.xml
rack/tests/cover/*
rack/vcsversion.py
tools/conf/rack.conf*
tools/lintstack.head.py
tools/pylint_exceptions
/bin

.gitreview
@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/rack.git

.testr.conf
@@ -1,8 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
${PYTHON:-python} -m subunit.run discover -t ./ ./rack/tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

LICENSE
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md
@@ -1,14 +1,10 @@
-# The concept of RACK: "OpenStack Native Application"
-
-OpenStack Native Application is the software, which uses OpenStack resource (eg. VM or VNET) directly from application. Recent popular applications are designed before the cloud computing age, so affinity with cloud is not considered. In order to make those applications work on OpenStack, tools such as Chef, Puppet and other tools are required, and it makes systems very complex in design.
-
-RACK provides the mechanism to create “After the Cloud” applications. Programmer can write codes that are scalable and migratable on OpenStack platform without cooperating with the external systems.
-
-Concepts of RACK are as follows:
-
-1. RACK handles VM with "functions" as a single execution binary file. “Functions” here means OS, middleware and programs that are necessary for application to function. The programs here are made in such a way as to call and operate RACK API.
-2. When this execution binary is deployed onto OpenStack, the VM will behave like a Linux process and then finish its own task.
-3. This process is based on the descriptions in the program. It does things such as forking and generating a child process, communicating between processes.
-
-Please take a look at our Wiki page to understand RACK more!
-**https://wiki.openstack.org/wiki/RACK**
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For any further questions, please email
+openstack-dev@lists.openstack.org or join #openstack-dev on
+Freenode.

etc/api-paste.ini
@@ -1,33 +0,0 @@
[composite:rackapi]
use = egg:Paste#urlmap
/ = rackversions
/v1 = rackapi_v1
[composite:rackapi_v1]
use = call:rack.api.auth:pipeline_factory
noauth = faultwrap noauth rackapp_v1
keystone = faultwrap authtoken keystonecontext rackapp_v1
[filter:faultwrap]
paste.filter_factory = rack.api:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = rack.api.auth:NoAuthMiddleware.factory
[pipeline:rackversions]
pipeline = faultwrap rackversionapp
[app:rackversionapp]
paste.app_factory = rack.api.versions:Versions.factory
[app:rackapp_v1]
paste.app_factory = rack.api.v1:APIRouter.factory
[filter:keystonecontext]
paste.filter_factory = rack.api.auth:RackKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_port = 35357
auth_protocol = http
auth_version = v2.0
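
Note: the composite above dispatches "/" to the version pipeline and "/v1" to the API pipeline, whose filter chain is chosen at load time by rack.api.auth:pipeline_factory based on auth_strategy. A minimal sketch of loading it with PasteDeploy (config path illustrative):

    from paste.deploy import loadapp

    # 'rackapi' names the [composite:rackapi] section; urlmap then routes
    # requests to the rackversions pipeline or the rackapi_v1 composite.
    app = loadapp('config:/etc/rack/api-paste.ini', name='rackapi')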

etc/rack.conf.sample
@@ -1,15 +0,0 @@
[DEFAULT]
#debug = True
#verbose = True
#lock_path = /var/lib/rack/lock
#state_path = /var/lib/rack
#sql_connection = mysql://root:password@127.0.0.1/rack?charset=utf8
#api_paste_config = /etc/rack/api-paste.ini
#auth_strategy = noauth
#os_username = admin
#os_password = password
#os_tenant_name = demo
#os_auth_url = http://localhost:5000/v2.0
#os_region_name = RegionOne
#ipc_port = 8888
#shm_port = 6379

openstack-common.conf
@@ -1,28 +0,0 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator.git
module=config
module=context
module=db
module=db.sqlalchemy
module=eventlet_backdoor
module=excutils
module=fileutils
module=fixture
module=gettextutils
module=importutils
module=jsonutils
module=local
module=lockutils
module=log
module=loopingcall
module=policy
module=processutils
module=service
module=strutils
module=threadgroup
module=timeutils
module=uuidutils
# The base module to hold the copy of openstack.common
base=rack

rack/__init__.py
@@ -1,22 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`rack` -- Cloud IaaS Platform
===================================
.. automodule:: rack
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
"""

rack/api/__init__.py
@@ -1,69 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import wsgi
from rack.openstack.common import gettextutils
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack import utils
from rack import wsgi as base_wsgi
import webob.dec
import webob.exc
LOG = logging.getLogger(__name__)
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
_status_to_type = {}
@staticmethod
def status_to_type(status):
if not FaultWrapper._status_to_type:
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
LOG.exception(_("Caught error: %s"), unicode(inner))
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
if status is None:
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
if isinstance(inner.msg_fmt, gettextutils.Message):
user_locale = req.best_match_language()
inner_msg = gettextutils.translate(
inner.msg_fmt, user_locale)
else:
inner_msg = unicode(inner)
outer.explanation = '%s: %s' % (inner.__class__.__name__,
inner_msg)
return wsgi.Fault(outer)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
return self._error(ex, req)
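
Note: FaultWrapper builds its status-to-exception map lazily by walking the webob.exc.HTTPError class hierarchy. A standalone sketch of that mapping, with utils.walk_class_hierarchy replaced by a hypothetical recursive helper:

    import webob.exc

    def walk_subclasses(cls):
        # Yield every direct and indirect subclass of cls.
        for sub in cls.__subclasses__():
            yield sub
            for deeper in walk_subclasses(sub):
                yield deeper

    status_to_type = {}
    for clazz in walk_subclasses(webob.exc.HTTPError):
        if getattr(clazz, 'code', None):
            status_to_type[clazz.code] = clazz

    print(status_to_type[404])  # <class 'webob.exc.HTTPNotFound'>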

rack/api/auth.py
@@ -1,166 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common Auth Middleware.
"""
from oslo.config import cfg
from rack.api import wsgi
from rack import context
from rack.openstack.common.gettextutils import _
from rack.openstack.common import jsonutils
from rack.openstack.common import log as logging
from rack import wsgi as base_wsgi
import webob.dec
import webob.exc
auth_opts = [
cfg.BoolOpt('api_rate_limit',
default=False,
help=('Whether to use per-user rate limiting for the api. ')),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth: noauth or keystone.'),
cfg.BoolOpt('use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
LOG = logging.getLogger(__name__)
def _load_pipeline(loader, pipeline):
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
def pipeline_factory(loader, global_conf, **local_conf):
"""A paste pipeline replica that keys off of auth_strategy."""
pipeline = local_conf[CONF.auth_strategy]
if not CONF.api_rate_limit:
limit_name = CONF.auth_strategy + '_nolimit'
pipeline = local_conf.get(limit_name, pipeline)
pipeline = pipeline.split()
return _load_pipeline(loader, pipeline)
class InjectContext(base_wsgi.Middleware):
"""Add a 'rack.context' to WSGI environ."""
def __init__(self, context, *args, **kwargs):
self.context = context
super(InjectContext, self).__init__(*args, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
req.environ['rack.context'] = self.context
return self.application
class RackKeystoneContext(base_wsgi.Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
user_id = req.headers.get('X_USER')
user_id = req.headers.get('X_USER_ID', user_id)
if user_id is None:
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
roles = self._get_roles(req)
if 'X_TENANT_ID' in req.headers:
# This is the new header since Keystone went to ID/Name
project_id = req.headers['X_TENANT_ID']
else:
# This is for legacy compatibility
project_id = req.headers['X_TENANT']
project_name = req.headers.get('X_TENANT_NAME')
user_name = req.headers.get('X_USER_NAME')
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN',
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
service_catalog = None
if req.headers.get('X_SERVICE_CATALOG') is not None:
try:
catalog_header = req.headers.get('X_SERVICE_CATALOG')
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
ctx = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=auth_token,
remote_address=remote_address,
service_catalog=service_catalog)
req.environ['rack.context'] = ctx
return self.application
def _get_roles(self, req):
"""Get the list of roles."""
if 'X_ROLES' in req.headers:
roles = req.headers.get('X_ROLES', '')
else:
# Fallback to deprecated role header:
roles = req.headers.get('X_ROLE', '')
if roles:
LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
"header"))
return [r.strip() for r in roles.split(',')]
class NoAuthMiddlewareBase(base_wsgi.Middleware):
def base_call(self, req, project_id_in_path):
remote_address = getattr(req, 'remote_address', '127.0.0.1')
ctx = context.RequestContext(user_id='noauth',
project_id='noauth',
is_admin=True,
remote_address=remote_address)
req.environ['rack.context'] = ctx
return self.application
class NoAuthMiddleware(NoAuthMiddlewareBase):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.base_call(req, True)
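
Note: _load_pipeline wraps the app so that the first name in the pipeline becomes the outermost middleware. A toy illustration of that ordering with hypothetical string-tagging filters:

    def tag_filter(tag):
        def factory(app):
            def middleware(request):
                return '%s(%s)' % (tag, app(request))
            return middleware
        return factory

    app = lambda request: 'app'
    filters = [tag_filter('authtoken'), tag_filter('keystonecontext')]
    filters.reverse()
    for f in filters:
        app = f(app)

    print(app('req'))  # authtoken(keystonecontext(app))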

rack/api/common.py
@@ -1,455 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import os
import re
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
from rack.api import wsgi
from rack.api import xmlutil
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
osapi_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='The maximum number of items returned in a single '
'response from a collection resource'),
cfg.StrOpt('osapi_compute_link_prefix',
help='Base URL that will be presented to users in links '
'to the OpenStack Compute API'),
cfg.StrOpt('osapi_glance_link_prefix',
help='Base URL that will be presented to users in links '
'to glance resources'),
]
CONF = cfg.CONF
CONF.register_opts(osapi_opts)
LOG = logging.getLogger(__name__)
VALID_NAME_REGEX = re.compile("^(?! )[\w. _-]+(?<! )$", re.UNICODE)
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_int_param(request, 'limit')
if 'page_size' in request.GET:
params['page_size'] = _get_int_param(request, 'page_size')
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
return params
def _get_int_param(request, param):
"""Extract integer param from request or fail."""
try:
int_param = int(request.GET[param])
except ValueError:
msg = _('%s param must be an integer') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
if int_param < 0:
msg = _('%s param must be positive') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
return int_param
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
"""get limited parameter from request."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
marker = params.get('marker')
return limit, marker
def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
limit, marker = get_limit_and_marker(request, max_limit)
limit = min(max_limit, limit)
start_index = 0
if marker:
start_index = -1
for i, item in enumerate(items):
if 'flavorid' in item:
if item['flavorid'] == marker:
start_index = i + 1
break
elif item['id'] == marker or item.get('uuid') == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
range_end = start_index + limit
return items[start_index:range_end]
def get_id_from_href(href):
"""Return the id or uuid portion of a url.
Given: 'http://www.foo.com/bar/123?q=4'
Returns: '123'
Given: 'http://www.foo.com/bar/abc123?q=4'
Returns: 'abc123'
"""
return urlparse.urlsplit("%s" % href).path.split('/')[-1]
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://www.rack.com/v1.1/123'
Returns: 'http://www.rack.com/123'
Given: 'http://www.rack.com/v1.1'
Returns: 'http://www.rack.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if expression.match(url_parts[1]):
del url_parts[1]
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = _('href %s does not contain version') % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
def dict_to_query_str(params):
# TODO(throughnothing): we should just use urllib.urlencode instead of this
# But currently we don't work with urlencoded url's
param_str = ""
for key, val in params.iteritems():
param_str = param_str + '='.join([str(key), str(val)]) + '&'
return param_str.rstrip('&')
def get_networks_for_instance_from_nw_info(nw_info):
networks = {}
for vif in nw_info:
ips = vif.fixed_ips()
floaters = vif.floating_ips()
label = vif['network']['label']
if label not in networks:
networks[label] = {'ips': [], 'floating_ips': []}
networks[label]['ips'].extend(ips)
networks[label]['floating_ips'].extend(floaters)
for ip in itertools.chain(networks[label]['ips'],
networks[label]['floating_ips']):
ip['mac_address'] = vif['address']
return networks
def raise_http_conflict_for_instance_invalid_state(exc, action):
"""Raises a webob.exc.HTTPConflict instance containing a message
appropriate to return via the API based on the original
InstanceInvalidState exception.
"""
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
not_launched = exc.kwargs.get('not_launched')
if attr and state:
msg = _("Cannot '%(action)s' while instance is in %(attr)s "
"%(state)s") % {'action': action, 'attr': attr, 'state': state}
elif not_launched:
msg = _("Cannot '%s' an instance which has never been active") % action
else:
# At least give some meaningful message
msg = _("Instance is in an invalid state for '%s'") % action
raise webob.exc.HTTPConflict(explanation=msg)
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = xmlutil.safe_minidom_parse_string(text)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = xmlutil.safe_minidom_parse_string(text)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request."""
if metadata_node is None:
return {}
metadata = {}
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
def _extract_metadata_container(self, datastring):
dom = xmlutil.safe_minidom_parse_string(datastring)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
def create(self, datastring):
return self._extract_metadata_container(datastring)
def update_all(self, datastring):
return self._extract_metadata_container(datastring)
def update(self, datastring):
dom = xmlutil.safe_minidom_parse_string(datastring)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
metadata_nsmap = {None: xmlutil.XMLNS_V11}
class MetaItemTemplate(xmlutil.TemplateBuilder):
def construct(self):
sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
root = xmlutil.TemplateElement('meta', selector=sel)
root.set('key', 0)
root.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
class MetadataTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return True
class MetadataTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = MetadataTemplateElement('metadata', selector='metadata')
elem = xmlutil.SubTemplateElement(root, 'meta',
selector=xmlutil.get_items)
elem.set('key', 0)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.allow_instance_snapshots:
LOG.warn(_('Rejecting snapshot request, snapshots currently'
' disabled'))
msg = _("Instance snapshots are not permitted at this time.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return f(*args, **kwargs)
return inner
class ViewBuilder(object):
"""Model API responses as dictionaries."""
def _get_project_id(self, request):
"""Get project id from request url if present or empty string
otherwise
"""
project_id = request.environ["rack.context"].project_id
if project_id in request.url:
return project_id
return ''
def _get_links(self, request, identifier, collection_name):
return [
{
"rel": "self",
"href": self._get_href_link(request, identifier,
collection_name),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
identifier,
collection_name),
}]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_compute_link_prefix(request.application_url)
url = os.path.join(prefix,
self._get_project_id(request),
collection_name)
return "%s?%s" % (url, dict_to_query_str(params))
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
prefix = self._update_compute_link_prefix(request.application_url)
return os.path.join(prefix,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier, collection_name):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_compute_link_prefix(base_url)
return os.path.join(base_url,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_collection_links(self,
request,
items,
collection_name,
id_key="uuid"):
"""Retrieve 'next' link, if applicable."""
links = []
limit = int(request.params.get("limit", 0))
if limit and limit == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
elif 'id' in last_item:
last_item_id = last_item["id"]
else:
last_item_id = last_item["flavorid"]
links.append({
"rel": "next",
"href": self._get_next_link(request,
last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
return urlparse.urlunsplit(url_parts)
def _update_glance_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_glance_link_prefix)
def _update_compute_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_compute_link_prefix)
def get_instance(compute_api, context, instance_id, want_objects=False,
expected_attrs=None):
"""Fetch an instance from the compute API, handling error checking."""
try:
return compute_api.get(context, instance_id,
want_objects=want_objects,
expected_attrs=expected_attrs)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
def check_cells_enabled(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if not CONF.cells.enable:
msg = _("Cells is not enabled.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return function(*args, **kwargs)
return inner
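
Note: the pagination helpers reduce to plain slicing: limited() returns items[offset:offset + limit] with limit clamped to max_limit (1000 by default). A quick check with a fake request object (names hypothetical):

    class FakeRequest(object):
        def __init__(self, **params):
            self.GET = params  # limited() only reads request.GET

    items = list(range(10))
    print(limited(items, FakeRequest(offset='2', limit='3')))  # [2, 3, 4]
    print(limited(items, FakeRequest()))  # the whole list, capped at max_limit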

rack/api/v1/__init__.py
@@ -1,206 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI middleware for RACK API controllers.
"""
from oslo.config import cfg
import routes
from rack.api.v1 import groups
from rack.api.v1 import keypairs
from rack.api.v1 import networks
from rack.api.v1 import processes
from rack.api.v1 import securitygroups
from rack.api import versions
from rack.openstack.common import log as logging
from rack import wsgi as base_wsgi
openstack_client_opts = [
cfg.StrOpt('sql_connection',
help='Valid sql_connection for Rack'),
]
CONF = cfg.CONF
CONF.register_opts(openstack_client_opts)
LOG = logging.getLogger(__name__)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kargs):
# NOTE(vish): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kargs.setdefault('requirements', {})
if not kargs['requirements'].get('format'):
kargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kargs)
class APIRouter(base_wsgi.Router):
"""Routes requests on the RACK API to the appropriate controller
and method.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Simple paste factory, :class:`rack.wsgi.Router` doesn't have one."""
return cls()
def __init__(self):
mapper = APIMapper()
self._setup_routes(mapper)
super(APIRouter, self).__init__(mapper)
def _setup_routes(self, mapper):
versions_resource = versions.create_resource()
mapper.connect("/",
controller=versions_resource,
action="show",
conditions={'method': ['GET']})
mapper.redirect("", "/")
groups_resource = groups.create_resource()
mapper.connect("/groups",
controller=groups_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}",
controller=groups_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups",
controller=groups_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}",
controller=groups_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}",
controller=groups_resource,
action="delete",
conditions={"method": ["DELETE"]})
networks_resource = networks.create_resource()
mapper.connect("/groups/{gid}/networks",
controller=networks_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/networks/{network_id}",
controller=networks_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/networks",
controller=networks_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/networks/{network_id}",
controller=networks_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/networks/{network_id}",
controller=networks_resource,
action="delete",
conditions={"method": ["DELETE"]})
keypairs_resource = keypairs.create_resource()
mapper.connect("/groups/{gid}/keypairs",
controller=keypairs_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
controller=keypairs_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/keypairs",
controller=keypairs_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
controller=keypairs_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/keypairs/{keypair_id}",
controller=keypairs_resource,
action="delete",
conditions={"method": ["DELETE"]})
securitygroups_resource = securitygroups.create_resource()
mapper.connect("/groups/{gid}/securitygroups",
controller=securitygroups_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
controller=securitygroups_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/securitygroups",
controller=securitygroups_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
controller=securitygroups_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}",
controller=securitygroups_resource,
action="delete",
conditions={"method": ["DELETE"]})
processes_resource = processes.create_resource()
mapper.connect("/groups/{gid}/processes",
controller=processes_resource,
action="index",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/processes/{pid}",
controller=processes_resource,
action="show",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/processes",
controller=processes_resource,
action="create",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/processes/{pid}",
controller=processes_resource,
action="update",
conditions={"method": ["PUT"]})
mapper.connect("/groups/{gid}/processes/{pid}",
controller=processes_resource,
action="delete",
conditions={"method": ["DELETE"]})
# RACK proxy resources
mapper.connect("/groups/{gid}/proxy",
controller=processes_resource,
action="show_proxy",
conditions={"method": ["GET"]})
mapper.connect("/groups/{gid}/proxy",
controller=processes_resource,
action="create_proxy",
conditions={"method": ["POST"]})
mapper.connect("/groups/{gid}/proxy",
controller=processes_resource,
action="update_proxy",
conditions={"method": ["PUT"]})

rack/api/v1/groups.py
@@ -1,213 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import six
import webob
from rack.api.v1.views import groups as views_groups
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import uuidutils
from rack import utils
LOG = logging.getLogger(__name__)
class Controller(wsgi.Controller):
"""Group controller for RACK API."""
_view_builder_class = views_groups.ViewBuilder
def __init__(self):
super(Controller, self).__init__()
@wsgi.response(200)
def index(self, req):
filters = {}
project_id = req.params.get('project_id')
name = req.params.get('name')
status = req.params.get('status')
if project_id:
filters['project_id'] = project_id
if name:
filters['display_name'] = name
if status:
filters['status'] = status
context = req.environ['rack.context']
group_list = db.group_get_all(context, filters)
return self._view_builder.index(group_list)
@wsgi.response(200)
def show(self, req, gid):
def _validate(gid):
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
try:
_validate(gid)
context = req.environ['rack.context']
group = db.group_get_by_gid(context, gid)
except exception.NotFound:
msg = _("Group could not be found")
raise webob.exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(group)
@wsgi.response(201)
def create(self, req, body):
def _validate(body):
if not self.is_valid_body(body, 'group'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body["group"]
name = values.get("name")
description = values.get("description")
if not name:
msg = _("Group name is required")
raise exception.InvalidInput(reason=msg)
if isinstance(name, six.string_types):
name = name.strip()
utils.check_string_length(name, 'name', min_length=1,
max_length=255)
if description:
utils.check_string_length(description, 'description',
min_length=0, max_length=255)
valid_values = {}
valid_values["display_name"] = name
valid_values["display_description"] = description
return valid_values
try:
values = _validate(body)
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
context = req.environ['rack.context']
values["gid"] = unicode(uuid.uuid4())
values["user_id"] = context.user_id
values["project_id"] = context.project_id
values["status"] = "ACTIVE"
group = db.group_create(context, values)
return self._view_builder.create(group)
@wsgi.response(200)
def update(self, req, body, gid):
def _validate(body, gid):
if not self.is_valid_body(body, 'group'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body["group"]
name = values.get("name")
description = values.get("description")
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
if name is None and description is None:
msg = _("Group name or description is required")
raise exception.InvalidInput(reason=msg)
if name is not None:
if isinstance(name, six.string_types):
name = name.strip()
utils.check_string_length(name, 'name', min_length=1,
max_length=255)
if description is not None:
utils.check_string_length(description, 'description',
min_length=0, max_length=255)
valid_values = {}
if name:
valid_values["display_name"] = name
# allow blank string to clear description
if description is not None:
valid_values["display_description"] = description
valid_values["gid"] = gid
return valid_values
context = req.environ['rack.context']
try:
values = _validate(body, gid)
group = db.group_update(context, values)
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.GroupNotFound:
msg = _("Group could not be found")
raise webob.exc.HTTPNotFound(explanation=msg)
return self._view_builder.update(group)
@wsgi.response(204)
def delete(self, req, gid):
def _validate(gid):
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
try:
_validate(gid)
context = req.environ['rack.context']
keypairs = db.keypair_get_all(context, gid)
if keypairs:
raise exception.GroupInUse(gid=gid)
securitygroups = db.securitygroup_get_all(context, gid)
if securitygroups:
raise exception.GroupInUse(gid=gid)
networks = db.network_get_all(context, gid)
if networks:
raise exception.GroupInUse(gid=gid)
processes = db.process_get_all(context, gid)
if processes:
raise exception.GroupInUse(gid=gid)
db.group_delete(context, gid)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.GroupInUse as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except Exception as e:
LOG.warn(e)
raise exception.GroupDeleteFailed()
def create_resource():
return wsgi.Resource(Controller())
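
Note: per the create() validation above, a request needs a top-level "group" object whose name is a non-empty string of at most 255 characters. A hypothetical exchange (response shape illustrative):

    POST /v1/groups
    {"group": {"name": "web-cluster", "description": "demo group"}}

    201 Created
    {"group": {"gid": "<generated uuid>", "name": "web-cluster",
               "description": "demo group", "status": "ACTIVE"}}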

rack/api/v1/keypairs.py
@@ -1,191 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import webob
from rack.api.v1.views import keypairs as views_keypairs
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import strutils
from rack.openstack.common import uuidutils
from rack.resourceoperator import manager
LOG = logging.getLogger(__name__)
class Controller(wsgi.Controller):
"""Keypair controller for RACK API."""
_view_builder_class = views_keypairs.ViewBuilder
def __init__(self):
super(Controller, self).__init__()
self.manager = manager.ResourceOperator()
def _uuid_check(self, gid=None, keypair_id=None):
if gid:
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
if keypair_id:
if not uuidutils.is_uuid_like(keypair_id):
raise exception.KeypairNotFound(keypair_id=keypair_id)
@wsgi.response(200)
def index(self, req, gid):
try:
self._uuid_check(gid=gid)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
context = req.environ['rack.context']
keypair_list = db.keypair_get_all(context, gid)
keypair_list = self.manager.keypair_list(context, keypair_list)
return self._view_builder.index(keypair_list)
@wsgi.response(200)
def show(self, req, gid, keypair_id):
try:
self._uuid_check(gid=gid, keypair_id=keypair_id)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
context = req.environ['rack.context']
try:
keypair = db.keypair_get_by_keypair_id(context, gid, keypair_id)
self.manager.keypair_show(context, keypair)
except exception.KeypairNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.show(keypair)
@wsgi.response(201)
def create(self, req, body, gid):
def _validate(context, body, gid):
if not self.is_valid_body(body, 'keypair'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
self._uuid_check(gid)
values = body["keypair"]
name = values.get("name")
is_default = values.get("is_default")
if is_default:
try:
is_default = strutils.bool_from_string(
is_default, strict=True)
keypairs = db.keypair_get_all(context, gid,
filters={"is_default": True})
if keypairs:
msg = _("Default keypair already exists in the "
"group %s" % gid)
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _("is_default must be a boolean")
raise exception.InvalidInput(reason=msg)
else:
is_default = False
valid_values = {}
valid_values["gid"] = gid
valid_values["display_name"] = name
valid_values["is_default"] = is_default
return valid_values
try:
context = req.environ['rack.context']
values = _validate(context, body, gid)
db.group_get_by_gid(context, gid)
values["keypair_id"] = unicode(uuid.uuid4())
if not values["display_name"]:
values["display_name"] = "keypair-" + values["keypair_id"]
result_value = self.manager.keypair_create(
context, values["display_name"])
values.update(result_value)
values["user_id"] = context.user_id
values["project_id"] = context.project_id
keypair = db.keypair_create(context, values)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.GroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.create(keypair)
@wsgi.response(200)
def update(self, req, body, gid, keypair_id):
def _validate(body, gid, keypair_id):
if not self.is_valid_body(body, 'keypair'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
self._uuid_check(gid, keypair_id)
values = body["keypair"]
is_default = values.get("is_default")
if is_default is not None:
try:
is_default = strutils.bool_from_string(
is_default, strict=True)
except ValueError:
msg = _("is_default must be a boolean")
raise exception.InvalidInput(reason=msg)
else:
msg = _("is_default is required")
raise exception.InvalidInput(reason=msg)
valid_values = {"is_default": is_default}
return valid_values
context = req.environ['rack.context']
try:
values = _validate(body, gid, keypair_id)
keypair = db.keypair_update(context, gid, keypair_id, values)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.update(keypair)
@wsgi.response(204)
def delete(self, req, gid, keypair_id):
context = req.environ['rack.context']
try:
self._uuid_check(gid=gid, keypair_id=keypair_id)
filters = {"keypair_id": keypair_id}
processes = db.process_get_all(context, gid, filters=filters)
if processes:
raise exception.keypairInUse(keypair_id=keypair_id)
keypair = db.keypair_get_by_keypair_id(context, gid, keypair_id)
self.manager.keypair_delete(context, keypair["nova_keypair_id"])
db.keypair_delete(context, gid, keypair_id)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.keypairInUse as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
def create_resource():
return wsgi.Resource(Controller())

rack/api/v1/networks.py
@@ -1,168 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack import db
from rack import exception
from rack.api.v1.views import networks as views_networks
from rack.api import wsgi
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import strutils
from rack.openstack.common import uuidutils
from rack.resourceoperator import manager
import uuid
import webob
LOG = logging.getLogger(__name__)
class Controller(wsgi.Controller):
"""Network controller for RACK API."""
_view_builder_class = views_networks.ViewBuilder
def __init__(self):
super(Controller, self).__init__()
self.manager = manager.ResourceOperator()
def _uuid_check(self, gid=None, network_id=None):
if gid:
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
if network_id:
if not uuidutils.is_uuid_like(network_id):
raise exception.NetworkNotFound(network_id=network_id)
@wsgi.response(200)
def index(self, req, gid):
try:
self._uuid_check(gid)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
context = req.environ['rack.context']
network_list = db.network_get_all(context, gid)
network_list = self.manager.network_list(context, network_list)
return self._view_builder.index(network_list)
@wsgi.response(200)
def show(self, req, gid, network_id):
context = req.environ['rack.context']
try:
self._uuid_check(gid, network_id)
network = db.network_get_by_network_id(context, gid, network_id)
self.manager.network_show(context, network)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.show(network)
@wsgi.response(201)
def create(self, req, gid, body):
def _validate(context, body, gid):
self._uuid_check(gid)
if not self.is_valid_body(body, "network"):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body.get("network")
cidr = values.get("cidr")
name = values.get("name")
is_admin = values.get("is_admin")
if is_admin:
try:
is_admin = strutils.bool_from_string(
is_admin, strict=True)
except ValueError:
msg = _("is_admin must be a boolean")
raise exception.InvalidInput(reason=msg)
else:
is_admin = False
gateway = values.get("gateway")
ext_router = values.get("ext_router_id")
dns_nameservers = values.get("dns_nameservers")
if dns_nameservers is not None and not isinstance(
dns_nameservers, list):
msg = _("dns_nameservers must be a list")
raise exception.InvalidInput(reason=msg)
valid_values = {}
valid_values["gid"] = gid
valid_values["network_id"] = unicode(uuid.uuid4())
if not name:
name = "network-" + valid_values["network_id"]
valid_values["display_name"] = name
valid_values["cidr"] = cidr
valid_values["is_admin"] = is_admin
valid_values["gateway"] = gateway
valid_values["ext_router"] = ext_router
valid_values["dns_nameservers"] = dns_nameservers
network_values = {}
network_values["name"] = name
network_values["cidr"] = cidr
network_values["gateway"] = gateway
network_values["ext_router"] = ext_router
network_values["dns_nameservers"] = dns_nameservers
return valid_values, network_values
try:
context = req.environ['rack.context']
values, network_values = _validate(context, body, gid)
db.group_get_by_gid(context, gid)
result_value = self.manager.network_create(
context, **network_values)
values.update(result_value)
values["user_id"] = context.user_id
values["project_id"] = context.project_id
network = db.network_create(context, values)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.create(network)
@wsgi.response(204)
def delete(self, req, gid, network_id):
try:
self._uuid_check(gid, network_id)
context = req.environ['rack.context']
network = db.network_get_by_network_id(context, gid, network_id)
if network["processes"]:
raise exception.NetworkInUse(network_id=network_id)
self.manager.network_delete(context, network["neutron_network_id"],
network["ext_router"])
db.network_delete(context, gid, network_id)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.NetworkInUse as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
def create_resource():
return wsgi.Resource(Controller())

View File

@ -1,447 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo.config import cfg
import base64
import uuid
import webob
from rack import db
from rack import exception
from rack import utils
from rack.api.v1.views import processes as views_processes
from rack.api import wsgi
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import uuidutils
from rack.resourceoperator import manager
LOG = logging.getLogger(__name__)
class Controller(wsgi.Controller):
"""Process controller for RACK API."""
_view_builder_class = views_processes.ViewBuilder
def __init__(self):
super(Controller, self).__init__()
self.manager = manager.ResourceOperator()
def _uuid_check(self, gid=None, pid=None, keypair_id=None,
securitygroup_id=None):
if gid:
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
if pid:
if not uuidutils.is_uuid_like(pid):
raise exception.ProcessNotFound(pid=pid)
if keypair_id:
if not uuidutils.is_uuid_like(keypair_id):
raise exception.KeypairNotFound(keypair_id=keypair_id)
if securitygroup_id:
if not uuidutils.is_uuid_like(securitygroup_id):
raise exception.SecuritygroupNotFound(
securitygroup_id=securitygroup_id)
@wsgi.response(200)
def index(self, req, gid):
try:
self._uuid_check(gid)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
context = req.environ['rack.context']
process_list = db.process_get_all(context, gid)
process_list = self.manager.process_list(context, process_list)
return self._view_builder.index(process_list)
@wsgi.response(200)
def show(self, req, gid, pid):
try:
self._uuid_check(gid, pid)
context = req.environ['rack.context']
process = db.process_get_by_pid(context, gid, pid)
self.manager.process_show(context, process)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.show(process)
@wsgi.response(200)
def show_proxy(self, req, gid):
try:
self._uuid_check(gid)
context = req.environ['rack.context']
process = db.process_get_all(
context, gid, filters={"is_proxy": True})
if not process:
msg = _("Proxy process does not exist in the group %s" % gid)
raise webob.exc.HTTPBadRequest(explanation=msg)
self.manager.process_show(context, process[0])
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.show_proxy(process[0])
@wsgi.response(202)
def create(self, req, body, gid, is_proxy=False):
def _validate(context, body, gid, is_proxy=False):
proxy = db.process_get_all(
context, gid, filters={"is_proxy": True})
if is_proxy:
if len(proxy) > 0:
                msg = _(
                    "Proxy process already exists in the group %s") % gid
raise exception.InvalidInput(reason=msg)
else:
if len(proxy) != 1:
                msg = _(
                    "Proxy process does not exist in the group %s") % gid
raise webob.exc.HTTPBadRequest(explanation=msg)
keyname = "proxy" if is_proxy else "process"
if not self.is_valid_body(body, keyname):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body[keyname]
ppid = values.get("ppid")
name = values.get("name")
keypair_id = values.get("keypair_id")
securitygroup_ids = values.get("securitygroup_ids")
glance_image_id = values.get("glance_image_id")
nova_flavor_id = values.get("nova_flavor_id")
userdata = values.get("userdata")
args = values.get("args")
self._uuid_check(gid, ppid, keypair_id)
pid = unicode(uuid.uuid4())
if not name:
prefix = "proxy-" if is_proxy else "process-"
name = prefix + pid
if ppid:
parent_process = db.process_get_by_pid(context, gid, ppid)
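            # NOTE: keypair resolution order is: an explicit keypair_id
            # in the request wins; otherwise inherit the parent process's
            # keypair; otherwise fall back to the group's default keypair,
            # if one is registered.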
nova_keypair_id = None
if keypair_id:
keypair = db.keypair_get_by_keypair_id(
context, gid, keypair_id)
nova_keypair_id = keypair["nova_keypair_id"]
elif ppid:
keypair_id = parent_process.get("keypair_id")
if keypair_id:
keypair = db.keypair_get_by_keypair_id(
context, gid, keypair_id)
nova_keypair_id = keypair["nova_keypair_id"]
else:
default_keypair = db.keypair_get_all(
context, gid,
filters={"is_default": True})
if default_keypair:
keypair_id = default_keypair[0]["keypair_id"]
nova_keypair_id = default_keypair[0]["nova_keypair_id"]
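            # NOTE: securitygroups resolve the same way (request value,
            # then parent process, then group defaults), except that
            # having no value at all is an error rather than "none".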
if securitygroup_ids is not None and\
not isinstance(securitygroup_ids, list):
msg = _("securitygroupids must be a list")
raise exception.InvalidInput(reason=msg)
elif securitygroup_ids:
neutron_securitygroup_ids = []
for id in securitygroup_ids:
self._uuid_check(securitygroup_id=id)
securitygroup = db.securitygroup_get_by_securitygroup_id(
context, gid, id)
neutron_securitygroup_ids.append(
securitygroup["neutron_securitygroup_id"])
elif ppid:
securitygroups = parent_process.get("securitygroups")
securitygroup_ids =\
[securitygroup["securitygroup_id"]
for securitygroup in securitygroups]
neutron_securitygroup_ids =\
[securitygroup["neutron_securitygroup_id"]
for securitygroup in securitygroups]
else:
default_securitygroups = db.securitygroup_get_all(
context, gid,
filters={"is_default": True})
if default_securitygroups:
securitygroup_ids =\
[securitygroup["securitygroup_id"]
for securitygroup in default_securitygroups]
neutron_securitygroup_ids =\
[securitygroup["neutron_securitygroup_id"]
for securitygroup in default_securitygroups]
else:
                    msg = _(
                        "securitygroup_ids is required. Default "
                        "securitygroup_ids are not registered.")
raise exception.InvalidInput(reason=msg)
if not glance_image_id and ppid:
glance_image_id = parent_process.get("glance_image_id")
if not nova_flavor_id and ppid:
nova_flavor_id = parent_process.get("nova_flavor_id")
if userdata:
try:
base64.b64decode(userdata)
except TypeError:
msg = _("userdadta must be a base64 encoded value.")
raise exception.InvalidInput(reason=msg)
networks = db.network_get_all(context, gid)
if not networks:
msg = _("Netwoks does not exist in the group %s" % gid)
raise webob.exc.HTTPBadRequest(explanation=msg)
network_ids =\
[network["network_id"] for network in networks]
neutron_network_ids =\
[network["neutron_network_id"] for network in networks]
nics = []
for id in neutron_network_ids:
nics.append({"net-id": id})
if args is None:
args = {}
            elif not isinstance(args, dict):
msg = _("args must be a dict.")
raise exception.InvalidInput(reason=msg)
else:
for key in args.keys():
args[key] = str(args[key])
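            # NOTE: RACK identifiers (and, for a proxy, the OpenStack
            # credentials; for a normal process, the proxy's address) are
            # merged into args, which end up as instance metadata via
            # boot_values["meta"], so the booted process can discover who
            # it is and how to reach the API.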
default_args = {
"gid": gid,
"pid": pid,
}
if ppid:
default_args["ppid"] = ppid
if is_proxy:
default_args["rackapi_ip"] = cfg.CONF.my_ip
default_args["os_username"] = cfg.CONF.os_username
default_args["os_password"] = cfg.CONF.os_password
default_args["os_tenant_name"] = cfg.CONF.os_tenant_name
default_args["os_auth_url"] = cfg.CONF.os_auth_url
default_args["os_region_name"] = cfg.CONF.os_region_name
else:
proxy_instance_id = proxy[0]["nova_instance_id"]
default_args["proxy_ip"] = self.manager.get_process_address(
context, proxy_instance_id)
args.update(default_args)
valid_values = {}
valid_values["gid"] = gid
valid_values["ppid"] = ppid
valid_values["pid"] = pid
valid_values["display_name"] = name
valid_values["keypair_id"] = keypair_id
valid_values["securitygroup_ids"] = securitygroup_ids
valid_values["glance_image_id"] = glance_image_id
valid_values["nova_flavor_id"] = nova_flavor_id
valid_values["userdata"] = userdata
valid_values["args"] = json.dumps(args)
valid_values["is_proxy"] = True if is_proxy else False
valid_values["network_ids"] = network_ids
if is_proxy:
ipc_endpoint = values.get("ipc_endpoint")
shm_endpoint = values.get("shm_endpoint")
fs_endpoint = values.get("fs_endpoint")
if ipc_endpoint:
utils.check_string_length(
ipc_endpoint, 'ipc_endpoint', min_length=1,
max_length=255)
if shm_endpoint:
utils.check_string_length(
shm_endpoint, 'shm_endpoint', min_length=1,
max_length=255)
if fs_endpoint:
utils.check_string_length(
fs_endpoint, 'fs_endpoint', min_length=1,
max_length=255)
valid_values["ipc_endpoint"] = ipc_endpoint
valid_values["shm_endpoint"] = shm_endpoint
valid_values["fs_endpoint"] = fs_endpoint
boot_values = {}
boot_values["name"] = name
boot_values["key_name"] = nova_keypair_id
boot_values["security_groups"] = neutron_securitygroup_ids
boot_values["image"] = glance_image_id
boot_values["flavor"] = nova_flavor_id
boot_values["userdata"] = userdata
boot_values["meta"] = args
boot_values["nics"] = nics
return valid_values, boot_values
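        # NOTE: after validation the flow mirrors network create: boot
        # the instance through the resource operator, then persist the
        # process row together with its network and securitygroup
        # associations, reporting the initial instance status.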
try:
context = req.environ['rack.context']
values, boot_values = _validate(context, body, gid, is_proxy)
nova_instance_id, status = self.manager.process_create(
context, **boot_values)
values["nova_instance_id"] = nova_instance_id
values["user_id"] = context.user_id
values["project_id"] = context.project_id
process = db.process_create(context, values,
values.pop("network_ids"),
values.pop("securitygroup_ids"))
process["status"] = status
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.create(process)
@wsgi.response(202)
def create_proxy(self, req, body, gid):
return self.create(req, body, gid, is_proxy=True)
@wsgi.response(200)
def update(self, req, body, gid, pid):
def _validate(body, gid, pid):
self._uuid_check(gid, pid)
if not self.is_valid_body(body, 'process'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body["process"]
app_status = values.get("app_status")
if not app_status:
msg = _("app_status is required")
raise exception.InvalidInput(reason=msg)
valid_values = {"app_status": app_status}
return valid_values
try:
values = _validate(body, gid, pid)
context = req.environ['rack.context']
process = db.process_update(context, gid, pid, values)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.ProcessNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.update(process)
@wsgi.response(200)
def update_proxy(self, req, body, gid):
def _validate(context, body, gid):
self._uuid_check(gid)
process = db.process_get_all(
context, gid, filters={"is_proxy": True})
if not process:
msg = _("Proxy process does not exist in the group %s" % gid)
raise exception.InvalidInput(reason=msg)
if not self.is_valid_body(body, 'proxy'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body["proxy"]
app_status = values.get("app_status")
ipc_endpoint = values.get("ipc_endpoint")
shm_endpoint = values.get("shm_endpoint")
fs_endpoint = values.get("fs_endpoint")
valid_values = {}
if app_status:
utils.check_string_length(
app_status, 'app_status', min_length=1, max_length=255)
valid_values["app_status"] = app_status
if ipc_endpoint:
utils.check_string_length(
ipc_endpoint, 'ipc_endpoint', min_length=1, max_length=255)
valid_values["ipc_endpoint"] = ipc_endpoint
if shm_endpoint:
utils.check_string_length(
shm_endpoint, 'shm_endpoint', min_length=1, max_length=255)
valid_values["shm_endpoint"] = shm_endpoint
if fs_endpoint:
utils.check_string_length(
fs_endpoint, 'fs_endpoint', min_length=1, max_length=255)
valid_values["fs_endpoint"] = fs_endpoint
if not valid_values:
msg = _("No keyword is provided.")
raise exception.InvalidInput(reason=msg)
return process[0]["pid"], valid_values
try:
context = req.environ['rack.context']
pid, values = _validate(context, body, gid)
process = db.process_update(context, gid, pid, values)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.ProcessNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.update(process)
@wsgi.response(204)
def delete(self, req, gid, pid):
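        # NOTE: deletion walks the process tree depth-first, so every
        # descendant is deleted before its parent; DB lookup failures
        # during the walk are logged rather than aborting the cascade.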
def _delete_children(context, gid, pid):
processes = db.process_get_all(context, gid, {"ppid": pid})
for process in processes:
_delete_children(context, gid, process["pid"])
_delete(context, gid, process["pid"],
process["nova_instance_id"])
return
def _delete(context, gid, pid, nova_id):
self.manager.process_delete(context, nova_id)
try:
db.process_delete(context, gid, pid)
except exception.NotFound as e:
LOG.exception(e)
self._uuid_check(gid, pid)
context = req.environ['rack.context']
try:
process = db.process_get_by_pid(context, gid, pid)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
_delete_children(context, gid, pid)
_delete(context, gid, pid, process["nova_instance_id"])
def create_resource():
return wsgi.Resource(Controller())

View File

@ -1,214 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import webob
from rack.api.v1.views import securitygroups as views_securitygroups
from rack.api import wsgi
from rack import db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import strutils
from rack.openstack.common import uuidutils
from rack.resourceoperator import manager
LOG = logging.getLogger(__name__)
class Controller(wsgi.Controller):
"""Securitygroup controller for RACK API."""
_view_builder_class = views_securitygroups.ViewBuilder
def __init__(self):
super(Controller, self).__init__()
self.manager = manager.ResourceOperator()
def _uuid_check(self, gid=None, securitygroup_id=None):
if gid:
if not uuidutils.is_uuid_like(gid):
raise exception.GroupNotFound(gid=gid)
if securitygroup_id:
if not uuidutils.is_uuid_like(securitygroup_id):
raise exception.SecuritygroupNotFound(
securitygroup_id=securitygroup_id)
@wsgi.response(200)
def index(self, req, gid):
try:
self._uuid_check(gid)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
context = req.environ['rack.context']
securitygroup_list = db.securitygroup_get_all(context, gid)
securitygroup_list = self.manager.securitygroup_list(
context, securitygroup_list)
return self._view_builder.index(securitygroup_list)
@wsgi.response(200)
def show(self, req, gid, securitygroup_id):
try:
self._uuid_check(gid, securitygroup_id)
context = req.environ['rack.context']
securitygroup = db.securitygroup_get_by_securitygroup_id(
context, gid, securitygroup_id)
securitygroup = self.manager.securitygroup_show(
context, securitygroup)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
return self._view_builder.show(securitygroup)
@wsgi.response(201)
def create(self, req, body, gid):
def _validate(context, body, gid):
if not self.is_valid_body(body, 'securitygroup'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
self._uuid_check(gid)
db.group_get_by_gid(context, gid)
values = body["securitygroup"]
name = values.get("name")
is_default = values.get("is_default")
if is_default:
try:
is_default = strutils.bool_from_string(
is_default, strict=True)
except ValueError:
msg = _("is_default must be a boolean")
raise exception.InvalidInput(reason=msg)
else:
is_default = False
valid_values = {}
valid_values["gid"] = gid
valid_values["display_name"] = name
valid_values["is_default"] = is_default
rules = values.get("securitygrouprules")
valid_rules = []
if rules is not None:
if not isinstance(rules, list):
msg = _("securitygrouprules must be a list")
raise exception.InvalidInput(reason=msg)
for rule in rules:
valid_rule = {}
valid_rule["protocol"] = rule.get("protocol")
valid_rule["port_range_max"] = rule.get("port_range_max")
valid_rule["port_range_min"] = rule.get("port_range_min")
valid_rule["remote_ip_prefix"] = rule.get(
"remote_ip_prefix")
remote_securitygroup_id = rule.get(
"remote_securitygroup_id")
if remote_securitygroup_id:
ref = db.securitygroup_get_by_securitygroup_id(
context, gid,
remote_securitygroup_id)
valid_rule['remote_neutron_securitygroup_id'] =\
ref['neutron_securitygroup_id']
valid_rules.append(valid_rule)
return valid_values, valid_rules
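        # NOTE: a request body for this endpoint looks roughly like
        # (keys taken from the validation above; values illustrative):
        #   {"securitygroup": {
        #        "name": "sg-example",
        #        "is_default": "false",
        #        "securitygrouprules": [
        #            {"protocol": "tcp",
        #             "port_range_min": 22,
        #             "port_range_max": 22,
        #             "remote_ip_prefix": "0.0.0.0/0"}]}}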
try:
context = req.environ['rack.context']
values, rules = _validate(context, body, gid)
values["securitygroup_id"] = unicode(uuid.uuid4())
if not values["display_name"]:
values["display_name"] = "securitygroup-" + \
values["securitygroup_id"]
result_value = self.manager.securitygroup_create(
context, values["display_name"], rules)
values.update(result_value)
values["user_id"] = context.user_id
values["project_id"] = context.project_id
securitygroup = db.securitygroup_create(context, values)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.GroupNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.create(securitygroup)
@wsgi.response(200)
def update(self, req, body, gid, securitygroup_id):
def _validate(body, gid, securitygroup_id):
if not self.is_valid_body(body, 'securitygroup'):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
self._uuid_check(gid, securitygroup_id)
values = body["securitygroup"]
is_default = values.get("is_default")
if is_default is not None:
try:
is_default = strutils.bool_from_string(
is_default, strict=True)
except ValueError:
msg = _("is_default must be a boolean")
raise exception.InvalidInput(reason=msg)
else:
msg = _("SecurityGroup is_default is required")
raise exception.InvalidInput(reason=msg)
valid_values = {}
valid_values["is_default"] = is_default
return valid_values
try:
values = _validate(body, gid, securitygroup_id)
context = req.environ['rack.context']
securitygroup = db.securitygroup_update(
context, gid, securitygroup_id, values)
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
return self._view_builder.update(securitygroup)
@wsgi.response(204)
def delete(self, req, gid, securitygroup_id):
try:
self._uuid_check(gid, securitygroup_id)
context = req.environ['rack.context']
securitygroup = db.securitygroup_get_by_securitygroup_id(
context, gid, securitygroup_id)
if securitygroup["processes"]:
raise exception.SecuritygroupInUse(
securitygroup_id=securitygroup_id)
self.manager.securitygroup_delete(
context, securitygroup['neutron_securitygroup_id'])
db.securitygroup_delete(context, gid, securitygroup_id)
except exception.SecuritygroupInUse as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(explanation=exc.format_message())
def create_resource():
return wsgi.Resource(Controller())

View File

@ -1,48 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a group API response as a python dictionary."""
def index(self, group_list):
return dict(groups=[self._base_response(group)
for group in group_list])
def show(self, group):
base = self._base_response(group)
return dict(group=base)
def create(self, group):
base = self._base_response(group)
return dict(group=base)
def update(self, group):
base = self._base_response(group)
return dict(group=base)
def _base_response(self, group):
return {
"gid": group["gid"],
"user_id": group["user_id"],
"project_id": group["project_id"],
"name": group["display_name"],
"description": group["display_description"],
"status": group["status"]
}

View File

@ -1,53 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a keypair API response as a python dictionary."""
def index(self, keypair_list):
return dict(keypairs=[self._base_response(keypair)
for keypair in keypair_list])
def show(self, keypair):
base = self._base_response(keypair)
return dict(keypair=base)
def create(self, keypair):
base = self._base_response(keypair)
base.pop('status')
return dict(keypair=base)
def update(self, keypair):
base = self._base_response(keypair)
base.pop("status")
return dict(keypair=base)
def _base_response(self, keypair):
return {
"keypair_id": keypair.get("keypair_id"),
"nova_keypair_id": keypair.get("nova_keypair_id"),
"user_id": keypair.get("user_id"),
"project_id": keypair.get("project_id"),
"gid": keypair.get("gid"),
"name": keypair.get("display_name"),
"private_key": keypair.get("private_key"),
"is_default": keypair.get("is_default"),
"status": keypair.get("status")
}

View File

@ -1,48 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a networks API response as a python dictionary."""
def index(self, network_list):
return dict(networks=[
self._base_response(network) for network in network_list])
def show(self, network):
base = self._base_response(network)
return dict(network=base)
def create(self, network):
base = self._base_response(network)
base.pop("status")
return dict(network=base)
def _base_response(self, network):
return {
"network_id": network.get("network_id"),
"neutron_network_id": network.get("neutron_network_id"),
"gid": network.get("gid"),
"user_id": network.get("user_id"),
"project_id": network.get("project_id"),
"name": network.get("display_name"),
"is_admin": network.get("is_admin"),
"cidr": network.get("cidr"),
"ext_router_id": network.get("ext_router"),
"status": network.get("status")
}

View File

@ -1,83 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from rack.api import common
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a process API response as a python dictionary."""
def index(self, process_list):
return dict(processes=[self._base_response(process)
for process in process_list])
def show(self, process):
base = self._base_response(process)
return dict(process=base)
def show_proxy(self, process):
base = self._base_response(process)
return dict(proxy=base)
def create(self, process):
base = self._base_response(process)
if process.get("is_proxy"):
return dict(proxy=base)
else:
return dict(process=base)
def update(self, process):
base = self._base_response(process)
base.pop("status")
if process.get("is_proxy"):
return dict(proxy=base)
else:
return dict(process=base)
def _base_response(self, process):
base = {
"gid": process.get("gid"),
"pid": process.get("pid"),
"ppid": process.get("ppid"),
"user_id": process.get("user_id"),
"project_id": process.get("project_id"),
"name": process.get("display_name"),
"nova_instance_id": process.get("nova_instance_id"),
"glance_image_id": process.get("glance_image_id"),
"nova_flavor_id": process.get("nova_flavor_id"),
"status": process.get("status"),
"keypair_id": process.get("keypair_id"),
"app_status": process.get("app_status"),
"userdata": process.get("userdata"),
"args": json.loads(process.get("args")),
"networks": [{"network_id": network.get("network_id"),
"fixed": network.get("fixed"),
"floating": network.get("floating")}
for network in process.get("networks")],
"securitygroup_ids": [securitygroup.get("securitygroup_id")
for securitygroup in process
.get("securitygroups")],
}
if process.get("is_proxy"):
proxy_body = {
"ipc_endpoint": process.get("ipc_endpoint"),
"shm_endpoint": process.get("shm_endpoint"),
"fs_endpoint": process.get("fs_endpoint")
}
base.update(proxy_body)
return base

View File

@ -1,53 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.api import common
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a securitygroup API response as a python dictionary."""
def index(self, securitygroup_list):
return dict(securitygroups=[self._base_response(securitygroup)
for securitygroup in securitygroup_list])
def show(self, securitygroup):
base = self._base_response(securitygroup)
return dict(securitygroup=base)
def create(self, securitygroup):
base = self._base_response(securitygroup)
base.pop('status')
return dict(securitygroup=base)
def update(self, securitygroup):
base = self._base_response(securitygroup)
base.pop("status")
return dict(securitygroup=base)
def _base_response(self, securitygroup):
return {
"securitygroup_id": securitygroup.get("securitygroup_id"),
"neutron_securitygroup_id": securitygroup
.get("neutron_securitygroup_id"),
"user_id": securitygroup.get("user_id"),
"project_id": securitygroup.get("project_id"),
"gid": securitygroup.get("gid"),
"name": securitygroup.get("display_name"),
"is_default": securitygroup.get("is_default"),
"status": securitygroup.get("status")
}

View File

@ -1,251 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
from oslo.config import cfg
from rack.api.views import versions as views_versions
from rack.api import wsgi
from rack.api import xmlutil
from rack.openstack.common import timeutils
CONF = cfg.CONF
LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
'api/openstack-compute/2/os-compute-devguide-2.pdf',
'wadl': 'http://docs.openstack.org/'
'api/openstack-compute/2/wadl/os-compute-2.wadl'
},
}
VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "application/pdf",
"href": LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": LINKS['v2.0']['wadl'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
}
],
},
}
class MediaTypesTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return 'media-types' in datum
def make_version(elem):
elem.set('id')
elem.set('status')
elem.set('updated')
mts = MediaTypesTemplateElement('media-types')
elem.append(mts)
mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types')
mt.set('base')
mt.set('type')
xmlutil.make_links(elem, 'links')
version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class VersionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('version', selector='version')
make_version(root)
return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
class VersionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('versions')
elem = xmlutil.SubTemplateElement(root, 'version', selector='versions')
make_version(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
class ChoicesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('choices')
elem = xmlutil.SubTemplateElement(root, 'version', selector='choices')
make_version(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
class AtomSerializer(wsgi.XMLDictSerializer):
NSMAP = {None: xmlutil.XMLNS_ATOM}
def __init__(self, metadata=None, xmlns=None):
self.metadata = metadata or {}
if not xmlns:
self.xmlns = wsgi.XMLNS_ATOM
else:
self.xmlns = xmlns
def _get_most_recent_update(self, versions):
recent = None
for version in versions:
updated = timeutils.parse_strtime(version['updated'],
'%Y-%m-%dT%H:%M:%SZ')
if not recent:
recent = updated
elif updated > recent:
recent = updated
return recent.strftime('%Y-%m-%dT%H:%M:%SZ')
def _get_base_url(self, link_href):
# Make sure no trailing /
link_href = link_href.rstrip('/')
return link_href.rsplit('/', 1)[0] + '/'
def _create_feed(self, versions, feed_title, feed_id):
feed = etree.Element('feed', nsmap=self.NSMAP)
title = etree.SubElement(feed, 'title')
title.set('type', 'text')
title.text = feed_title
# Set this updated to the most recently updated version
recent = self._get_most_recent_update(versions)
etree.SubElement(feed, 'updated').text = recent
etree.SubElement(feed, 'id').text = feed_id
link = etree.SubElement(feed, 'link')
link.set('rel', 'self')
link.set('href', feed_id)
author = etree.SubElement(feed, 'author')
etree.SubElement(author, 'name').text = 'Rackspace'
etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/'
for version in versions:
feed.append(self._create_version_entry(version))
return feed
def _create_version_entry(self, version):
entry = etree.Element('entry')
etree.SubElement(entry, 'id').text = version['links'][0]['href']
title = etree.SubElement(entry, 'title')
title.set('type', 'text')
title.text = 'Version %s' % version['id']
etree.SubElement(entry, 'updated').text = version['updated']
for link in version['links']:
link_elem = etree.SubElement(entry, 'link')
link_elem.set('rel', link['rel'])
link_elem.set('href', link['href'])
if 'type' in link:
link_elem.set('type', link['type'])
content = etree.SubElement(entry, 'content')
content.set('type', 'text')
content.text = 'Version %s %s (%s)' % (version['id'],
version['status'],
version['updated'])
return entry
class VersionsAtomSerializer(AtomSerializer):
def default(self, data):
versions = data['versions']
feed_id = self._get_base_url(versions[0]['links'][0]['href'])
feed = self._create_feed(versions, 'Available API Versions', feed_id)
return self._to_xml(feed)
class VersionAtomSerializer(AtomSerializer):
def default(self, data):
version = data['version']
feed_id = version['links'][0]['href']
feed = self._create_feed([version], 'About This Version', feed_id)
return self._to_xml(feed)
class Versions(wsgi.Resource):
def __init__(self):
super(Versions, self).__init__(None)
@wsgi.serializers(xml=VersionsTemplate,
atom=VersionsAtomSerializer)
def index(self, req):
"""Return all versions."""
builder = views_versions.get_view_builder(req)
return builder.build_versions(VERSIONS)
@wsgi.serializers(xml=ChoicesTemplate)
@wsgi.response(300)
def multi(self, req):
"""Return multiple choices."""
builder = views_versions.get_view_builder(req)
return builder.build_choices(VERSIONS, req)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
args = {}
if request_environment['PATH_INFO'] == '/':
args['action'] = 'index'
else:
args['action'] = 'multi'
return args
class VersionV2(object):
@wsgi.serializers(xml=VersionTemplate,
atom=VersionAtomSerializer)
def show(self, req):
builder = views_versions.get_view_builder(req)
return builder.build_version(VERSIONS['v2.0'])
def create_resource():
return wsgi.Resource(VersionV2())

View File

@ -1,96 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from rack.api import common
def get_view_builder(req):
base_url = req.application_url
return ViewBuilder(base_url)
class ViewBuilder(common.ViewBuilder):
def __init__(self, base_url):
""":param base_url: url of the root wsgi application."""
self.base_url = base_url
def build_choices(self, VERSIONS, req):
version_objs = []
for version in VERSIONS:
version = VERSIONS[version]
version_objs.append({
"id": version['id'],
"status": version['status'],
"links": [
{
"rel": "self",
"href": self.generate_href(version['id'], req.path),
},
],
"media-types": version['media-types'],
})
return dict(choices=version_objs)
def build_versions(self, versions):
version_objs = []
for version in sorted(versions.keys()):
version = versions[version]
version_objs.append({
"id": version['id'],
"status": version['status'],
"updated": version['updated'],
"links": self._build_links(version),
})
return dict(versions=version_objs)
def build_version(self, version):
reval = copy.deepcopy(version)
reval['links'].insert(0, {
"rel": "self",
"href": self.base_url.rstrip('/') + '/',
})
return dict(version=reval)
def _build_links(self, version_data):
"""Generate a container of links that refer to the provided version."""
href = self.generate_href(version_data['id'])
links = [
{
"rel": "self",
"href": href,
},
]
return links
def generate_href(self, version, path=None):
"""Create an url that refers to a specific version_number."""
prefix = self._update_compute_link_prefix(self.base_url)
if version.find('v3.') == 0:
version_number = 'v3'
else:
version_number = 'v2'
if path:
path = path.strip('/')
return os.path.join(prefix, version_number, path)
else:
return os.path.join(prefix, version_number) + '/'

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,33 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
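# NOTE: greendns can only be disabled through the environment before
# eventlet is first imported, hence the hard failure below if eventlet
# is already loaded without EVENTLET_NO_GREENDNS set.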
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before rack/cmd/__init__ '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
from rack import debugger
if debugger.enabled():
# turn off thread patching to enable the remote debugger
eventlet.monkey_patch(os=False, thread=False)
else:
    eventlet.monkey_patch(os=False)

View File

@ -1,37 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for RACK API."""
import sys
from oslo.config import cfg
from rack import config
from rack.openstack.common import log as logging
from rack import service
from rack import utils
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup("rack")
utils.monkey_patch()
launcher = service.process_launcher()
server = service.WSGIService('rackapi')
launcher.launch_service(server, workers=server.workers or 1)
launcher.wait()

View File

@ -1,32 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from rack import debugger
from rack.openstack.common.db import options
from rack import paths
from rack import version
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('rack.sqlite')
def parse_args(argv, default_config_files=None):
options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='rack.sqlite')
debugger.register_cli_opts()
cfg.CONF(argv[1:],
project='rack',
version=version.version_string(),
default_config_files=default_config_files)

View File

@ -1,232 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RequestContext: context for requests that persist through all of rack."""
import copy
import uuid
import six
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import local
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils
from rack import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in (
'identity',
'image',
'network',
'compute')]
else:
# if list is empty or none
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
'instance_lock_checked': self.instance_lock_checked,
'tenant': self.tenant,
'user': self.user}
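    # NOTE: to_dict() serializes the tenant/user shim properties above,
    # so from_dict() has to drop them again before calling __init__,
    # which only accepts project_id/user_id.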
@classmethod
def from_dict(cls, values):
values.pop('user', None)
values.pop('tenant', None)
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Rack version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_admin_context(ctxt):
"""Raise exception.AdminRequired() if context is an admin context."""
if not ctxt.is_admin:
raise exception.AdminRequired()
def require_context(ctxt):
"""Raise exception.NotAuthorized() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.NotAuthorized()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()

View File

@ -1,18 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DB abstraction for RACK
"""
from rack.db.api import * # noqa

View File

@ -1,186 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from rack.openstack.common.db import api as db_api
CONF = cfg.CONF
db_opts = [
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create')
]
CONF.register_opts(db_opts)
CONF.import_opt('backend', 'rack.openstack.common.db.options',
group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'rack.db.sqlalchemy.api'}
IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING)
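# NOTE: every helper below simply delegates to IMPL, which resolves the
# configured database.backend (by default "sqlalchemy") through
# _BACKEND_MAPPING to the rack.db.sqlalchemy.api implementation.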
def group_get_all(context, filters=None):
return IMPL.group_get_all(context, filters)
def group_get_by_gid(context, gid):
return IMPL.group_get_by_gid(context, gid)
def group_create(context, values):
return IMPL.group_create(context, values)
def group_update(context, values):
return IMPL.group_update(context, values)
def group_delete(context, gid):
return IMPL.group_delete(context, gid)
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
def network_create(context, values):
return IMPL.network_create(context, values)
def network_update(context, network_id, values):
IMPL.network_update(context, network_id, values)
def network_get_all(context, gid, filters={}):
return IMPL.network_get_all(context, gid, filters)
def network_get_by_network_id(context, gid, network_id):
return IMPL.network_get_by_network_id(context, gid, network_id)
def network_delete(context, gid, network_id):
return IMPL.network_delete(context, gid, network_id)
def keypair_get_all(context, gid, filters={}):
return IMPL.keypair_get_all(context, gid, filters)
def keypair_get_by_keypair_id(context, gid, keypair_id):
return IMPL.keypair_get_by_keypair_id(context, gid, keypair_id)
def keypair_create(context, values):
return IMPL.keypair_create(context, values)
def keypair_update(context, gid, keypair_id, values):
return IMPL.keypair_update(context, gid, keypair_id, values)
def keypair_delete(context, gid, keypair_id):
return IMPL.keypair_delete(context, gid, keypair_id)
def securitygroup_get_all(context, gid, filters={}):
return IMPL.securitygroup_get_all(context, gid, filters)
def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id):
return IMPL.securitygroup_get_by_securitygroup_id(context, gid,
securitygroup_id)
def securitygroup_create(context, values):
return IMPL.securitygroup_create(context, values)
def securitygroup_update(context, gid, securitygroup_id, values):
return IMPL.securitygroup_update(context, gid, securitygroup_id, values)
def securitygroup_delete(context, gid, securitygroup_id):
return IMPL.securitygroup_delete(context, gid, securitygroup_id)
def process_get_all(context, gid, filters={}):
return IMPL.process_get_all(context, gid, filters)
def process_get_by_pid(context, gid, pid):
return IMPL.process_get_by_pid(context, gid, pid)
def process_get_not_error_status_for_proxy(context, gid):
return IMPL.process_get_not_error_status_for_proxy(context, gid)
def process_create(context, values, network_ids, securitygroup_ids):
return IMPL.process_create(context, values, network_ids,
securitygroup_ids)
def process_update(context, gid, pid, values):
return IMPL.process_update(context, gid, pid, values)
def process_delete(context, gid, pid):
return IMPL.process_delete(context, gid, pid)

View File

@ -1,36 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for classes that need modular database access."""
from oslo.config import cfg
from rack.openstack.common import importutils
db_driver_opt = cfg.StrOpt('db_driver',
default='rack.db',
help='The driver to use for database access')
CONF = cfg.CONF
CONF.register_opt(db_driver_opt)
class Base(object):
"""DB driver is injected in the init method."""
def __init__(self, db_driver=None):
super(Base, self).__init__()
if not db_driver:
db_driver = CONF.db_driver
self.db = importutils.import_module(db_driver) # pylint: disable=C0103

View File

@ -1,37 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database setup and migration commands."""
from rack import utils
IMPL = utils.LazyPluggable('backend',
config_group='database',
sqlalchemy='rack.db.sqlalchemy.migration')
def db_sync(version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version)
def db_version():
"""Display the current database version."""
return IMPL.db_version()
def db_initial_version():
"""The starting version for the database."""
return IMPL.db_initial_version()

View File

@ -1,21 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
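# NOTE: SQLite can only autoincrement plain INTEGER primary keys, so
# BigInteger columns are compiled down to INTEGER on that backend.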
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
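The @compiles hook rewrites the rendered DDL type for the sqlite dialect only; sqlite stores INTEGER values in up to 8 bytes anyway, so nothing is lost. A standalone sketch of the same technique, showing the emitted DDL (table and column names are illustrative):

# Standalone sketch of the same @compiles technique: on the sqlite
# dialect the column renders as INTEGER, elsewhere as BIGINT.
from sqlalchemy import BigInteger, Column, MetaData, Table
from sqlalchemy.dialects import sqlite
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateTable


@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
    return 'INTEGER'

counters = Table('counters', MetaData(), Column('value', BigInteger))
print(CreateTable(counters).compile(dialect=sqlite.dialect()))
# ... value INTEGER ...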

View File

@@ -1,678 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from rack.db.sqlalchemy import models
from rack import exception
from rack.openstack.common import jsonutils
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils
from rack.openstack.common.db import exception as db_exc
from rack.openstack.common.db.sqlalchemy import session as db_session
from rack.openstack.common.gettextutils import _
import functools
import rack.context
import sys
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('connection',
'rack.openstack.common.db.options',
group='database')
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems()))
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
return sys.modules[__name__]
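_create_facade_lazily builds one EngineFacade on first use from CONF.database.connection, so get_engine() and get_session() share a single engine and connection pool. A minimal sketch, assuming a configured connection string and the module paths shown in the imports above:

# Hypothetical sketch: both helpers reuse the same lazily built facade,
# so every session here shares one engine and connection pool.
from rack.db.sqlalchemy import api as db_api
from rack.db.sqlalchemy import models

session = db_api.get_session()
groups = session.query(models.Group).filter_by(deleted=0).all()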
def group_get_all(context, filters=None):
session = get_session()
filters = filters or {}
query = session.query(models.Group).filter_by(user_id=context.user_id)\
.filter_by(deleted=0)
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'name' in filters:
query = query.filter_by(display_name=filters['name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
response_groups = query.all()
return [dict(group) for group in response_groups]
def group_get_by_gid(context, gid):
session = get_session()
group = session.query(models.Group)\
.filter_by(user_id=context.user_id)\
.filter_by(gid=gid)\
.filter_by(deleted=0)\
.first()
if not group:
raise exception.GroupNotFound(gid=gid)
return dict(group)
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
rack.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
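The decorator only validates; it passes the context through unchanged. A sketch of the contract, reusing this module's own get_session and models helpers: the wrapped function's first positional argument must be the request context, and a non-admin context makes rack.context.require_admin_context raise (typically an AdminRequired-style error, as in nova).

# Hypothetical usage sketch: the admin check runs before the function
# body, so a non-admin context never reaches the query below.
@require_admin_context
def service_count(context):
    session = get_session()
    return session.query(models.Service).filter_by(deleted=0).count()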
def group_create(context, values):
session = get_session()
group_ref = models.Group()
group_ref.update(values)
group_ref.save(session)
return dict(group_ref)
def group_update(context, values):
session = get_session()
group_ref = session.query(models.Group). \
filter(models.Group.gid == values["gid"]).first()
if group_ref is None:
raise exception.GroupNotFound(gid=values["gid"])
group_ref.update(values)
group_ref.save(session)
return dict(group_ref)
def group_delete(context, gid):
session = get_session()
group_ref = session.query(models.Group)\
.filter_by(deleted=0)\
.filter_by(gid=gid)\
.first()
if group_ref is None:
raise exception.GroupNotFound(gid=gid)
values = {
"status": "DELETING",
"deleted": 1,
"deleted_at": timeutils.utcnow()
}
group_ref.update(values)
group_ref.save(session)
return dict(group_ref)
def service_model_query(context, model, *args, **kwargs):
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
query = session.query(model, *args)
default_deleted_value = model.__mapper__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(model.deleted != default_deleted_value)
else:
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
count = service_model_query(context, models.Service,
session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.ServiceNotFound(service_id=service_id)
@require_admin_context
def service_get(context, service_id):
session = get_session()
service_ref = service_model_query(context, models.Service,
session=session).\
filter_by(id=service_id).\
first()
if not service_ref:
raise exception.ServiceNotFound(service_id=service_id)
return jsonutils.to_primitive(service_ref)
@require_admin_context
def service_get_all(context, disabled=None):
session = get_session()
query = service_model_query(context, models.Service,
session=session)
if disabled is not None:
query = query.filter_by(disabled=disabled)
service_refs = query.all()
return jsonutils.to_primitive(service_refs)
@require_admin_context
def service_get_all_by_topic(context, topic):
session = get_session()
service_refs = service_model_query(context, models.Service,
session=session,
read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
return jsonutils.to_primitive(service_refs)
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
session = get_session()
service_ref = service_model_query(context, models.Service,
session=session,
read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
return jsonutils.to_primitive(service_ref)
@require_admin_context
def service_get_all_by_host(context, host):
session = get_session()
service_refs = service_model_query(context, models.Service,
session=session,
read_deleted="no").\
filter_by(host=host).\
all()
return jsonutils.to_primitive(service_refs)
@require_admin_context
def service_get_by_args(context, host, binary):
session = get_session()
service_ref = service_model_query(context, models.Service,
session=session).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not service_ref:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return jsonutils.to_primitive(service_ref)
@require_admin_context
def service_create(context, values):
session = get_session()
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save(session)
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return jsonutils.to_primitive(service_ref)
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = service_model_query(context, models.Service,
session=session).\
filter_by(id=service_id).\
first()
if not service_ref:
raise exception.ServiceNotFound(service_id=service_id)
service_ref.update(values)
return jsonutils.to_primitive(service_ref)
def network_create(context, values):
session = get_session()
network_ref = models.Network()
network_ref.update(values)
network_ref.save(session)
return dict(network_ref)
def network_update(context, network_id, values):
session = get_session()
network_ref = session.query(models.Network)\
.filter(models.Network.deleted == 0)\
.filter(models.Network.network_id == network_id)\
.first()
if network_ref is None:
raise exception.NetworkNotFound(network_id=network_id)
network_ref.update(values)
network_ref.save(session)
def network_get_all(context, gid, filters):
session = get_session()
query = session.query(models.Network)\
.filter_by(deleted=0)\
.filter_by(gid=gid)
if 'network_id' in filters:
query = query.filter_by(network_id=filters['network_id'])
if 'neutron_network_id' in filters:
query = query.filter_by(
neutron_network_id=filters['neutron_network_id'])
if 'display_name' in filters:
query = query.filter_by(display_name=filters['display_name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
if 'is_admin' in filters:
query = query.filter_by(is_admin=filters['is_admin'])
if 'cidr' in filters:
query = query.filter_by(cidr=filters['cidr'])
if 'ext_router' in filters:
query = query.filter_by(ext_router=filters['ext_router'])
networks = query.all()
return [dict(network) for network in networks]
def network_get_by_network_id(context, gid, network_id):
session = get_session()
network = session.query(models.Network)\
.filter_by(deleted=0)\
.filter_by(gid=gid)\
.filter_by(network_id=network_id)\
.first()
if not network:
raise exception.NetworkNotFound(network_id=network_id)
network_dict = dict(network)
network_dict.update(
dict(processes=[dict(process) for process in network.processes]))
return network_dict
def network_delete(context, gid, network_id):
session = get_session()
network_ref = session.query(models.Network)\
.filter(models.Network.deleted == 0)\
.filter(models.Network.gid == gid)\
.filter(models.Network.network_id == network_id)\
.first()
if network_ref is None:
raise exception.NetworkNotFound(network_id=network_id)
values = {
"status": "DELETING",
"deleted": 1,
"deleted_at": timeutils.utcnow()
}
network_ref.update(values)
network_ref.save(session)
return dict(network_ref)
def keypair_get_all(context, gid, filters=None):
session = get_session()
filters = filters or {}
query = session.query(models.Keypair)\
.filter_by(gid=gid)\
.filter_by(deleted=0)
if 'keypair_id' in filters:
query = query.filter_by(keypair_id=filters['keypair_id'])
if 'nova_keypair_id' in filters:
query = query.filter_by(nova_keypair_id=filters['nova_keypair_id'])
if 'display_name' in filters:
query = query.filter_by(display_name=filters['display_name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
if 'is_default' in filters:
query = query.filter_by(is_default=filters['is_default'])
response_keypairs = query.all()
return [dict(keypair) for keypair in response_keypairs]
def keypair_get_by_keypair_id(context, gid, keypair_id):
session = get_session()
keypair = session.query(models.Keypair)\
.filter_by(gid=gid)\
.filter_by(keypair_id=keypair_id)\
.filter_by(deleted=0)\
.first()
if not keypair:
raise exception.KeypairNotFound(keypair_id=keypair_id)
return dict(keypair)
def keypair_create(context, values):
session = get_session()
keypair_ref = models.Keypair()
keypair_ref.update(values)
keypair_ref.save(session)
return dict(keypair_ref)
def keypair_update(context, gid, keypair_id, values):
session = get_session()
keypair_ref = session.query(models.Keypair)\
.filter_by(gid=gid)\
.filter_by(keypair_id=keypair_id)\
.filter_by(deleted=0)\
.first()
if keypair_ref is None:
raise exception.KeypairNotFound(keypair_id=keypair_id)
keypair_ref.update(values)
keypair_ref.save(session)
return dict(keypair_ref)
def keypair_delete(context, gid, keypair_id):
session = get_session()
keypair_ref = session.query(models.Keypair)\
.filter_by(gid=gid)\
.filter_by(keypair_id=keypair_id)\
.filter_by(deleted=0)\
.first()
if keypair_ref is None:
raise exception.KeypairNotFound(keypair_id=keypair_id)
values = {
"status": "DELETING",
"deleted": 1,
"deleted_at": timeutils.utcnow()
}
keypair_ref.update(values)
keypair_ref.save(session)
return dict(keypair_ref)
def securitygroup_get_all(context, gid, filters=None):
session = get_session()
filters = filters or {}
query = session.query(models.Securitygroup).filter_by(gid=gid, deleted=0)
if 'securitygroup_id' in filters:
query = query.filter_by(securitygroup_id=filters['securitygroup_id'])
if 'name' in filters:
query = query.filter_by(display_name=filters['name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
if 'is_default' in filters:
query = query.filter_by(is_default=filters['is_default'])
securitygroups = query.all()
return [dict(securitygroup) for securitygroup in securitygroups]
def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id):
session = get_session()
securitygroup = session.query(models.Securitygroup)\
.filter_by(deleted=0)\
.filter_by(gid=gid)\
.filter_by(securitygroup_id=securitygroup_id)\
.first()
if not securitygroup:
raise exception.SecuritygroupNotFound(
securitygroup_id=securitygroup_id)
securitygroup_dict = dict(securitygroup)
securitygroup_dict.update(
dict(processes=[dict(process) for process in securitygroup.processes]))
return securitygroup_dict
def securitygroup_create(context, values):
session = get_session()
securitygroup_ref = models.Securitygroup()
securitygroup_ref.update(values)
securitygroup_ref.save(session)
return dict(securitygroup_ref)
def securitygroup_update(context, gid, securitygroup_id, values):
session = get_session()
securitygroup_ref = session.query(models.Securitygroup). \
filter_by(deleted=0). \
filter_by(gid=gid). \
filter_by(securitygroup_id=securitygroup_id). \
first()
if securitygroup_ref is None:
raise exception.SecuritygroupNotFound(
securitygroup_id=securitygroup_id)
securitygroup_ref.update(values)
securitygroup_ref.save(session)
return dict(securitygroup_ref)
def securitygroup_delete(context, gid, securitygroup_id):
session = get_session()
securitygroup_ref = session.query(models.Securitygroup). \
filter_by(deleted=0). \
filter_by(gid=gid). \
filter_by(securitygroup_id=securitygroup_id). \
first()
if securitygroup_ref is None:
raise exception.SecuritygroupNotFound(
securitygroup_id=securitygroup_id)
securitygroup_ref.update({"deleted": 1,
'deleted_at': timeutils.utcnow(),
"status": "DELETING"})
securitygroup_ref.save(session)
return dict(securitygroup_ref)
def process_get_all(context, gid, filters=None):
session = get_session()
filters = filters or {}
query = session.query(models.Process).filter_by(gid=gid, deleted=0)
if 'pid' in filters:
query = query.filter_by(pid=filters['pid'])
if 'ppid' in filters:
query = query.filter_by(ppid=filters['ppid'])
if 'name' in filters:
query = query.filter_by(display_name=filters['name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
if 'glance_image_id' in filters:
query = query.filter_by(glance_image_id=filters['glance_image_id'])
if 'nova_flavor_id' in filters:
query = query.filter_by(nova_flavor_id=filters['nova_flavor_id'])
if 'keypair_id' in filters:
query = query.filter_by(keypair_id=filters['keypair_id'])
if 'securitygroup_id' in filters:
query = query.filter(
models.Process.securitygroups.any(
securitygroup_id=filters["securitygroup_id"]))
if 'network_id' in filters:
query = query.filter(
models.Process.networks.any(
network_id=filters["network_id"]))
if 'is_proxy' in filters:
query = query.filter_by(is_proxy=filters['is_proxy'])
if 'app_status' in filters:
query = query.filter_by(app_status=filters['app_status'])
process_refs = query.all()
return [_get_process_dict(process_ref) for process_ref in process_refs]
def process_get_by_pid(context, gid, pid):
session = get_session()
process_ref = session.query(models.Process)\
.filter_by(gid=gid)\
.filter_by(pid=pid)\
.first()
if not process_ref:
raise exception.ProcessNotFound(pid=pid)
return _get_process_dict(process_ref)
def process_get_not_error_status_for_proxy(context, gid):
session = get_session()
query = session.query(models.Process).filter_by(
gid=gid, deleted=0, is_proxy=True)
process_refs = query.filter(models.Process.status != 'ERROR').all()
return [_get_process_dict(process_ref) for process_ref in process_refs]
def process_create(context, values, network_ids, securitygroup_ids):
session = get_session()
with session.begin():
process_ref = models.Process(**values)
session.add(process_ref)
try:
if network_ids:
for network_id in network_ids:
network_ref = session.query(models.Network)\
.filter_by(deleted=0)\
.filter_by(gid=values["gid"])\
.filter_by(network_id=network_id)\
.first()
if network_ref is None:
raise exception.NetworkNotFound(network_id=network_id)
session.add(
models.ProcessNetwork(pid=values["pid"],
network_id=network_ref
.network_id))
if securitygroup_ids:
for securitygroup_id in securitygroup_ids:
securitygroup_ref = session.query(models.Securitygroup)\
.filter_by(deleted=0)\
.filter_by(gid=values["gid"])\
.filter_by(securitygroup_id=securitygroup_id)\
.first()
if securitygroup_ref is None:
raise exception.SecuritygroupNotFound(
securitygroup_id=securitygroup_id)
session.add(models.ProcessSecuritygroup(
pid=values["pid"],
securitygroup_id=securitygroup_ref.securitygroup_id))
session.flush()
except db_exc.DBDuplicateEntry:
msg = _("securitygroup or network is duplicated")
raise exception.InvalidInput(reason=msg)
return _get_process_dict(process_ref)
def process_update(context, gid, pid, values):
session = get_session()
process_ref = session.query(models.Process). \
filter_by(deleted=0). \
filter_by(gid=gid). \
filter_by(pid=pid). \
first()
if process_ref is None:
raise exception.ProcessNotFound(pid=pid)
process_ref.update(values)
process_ref.save(session)
return _get_process_dict(process_ref)
def process_delete(context, gid, pid):
session = get_session()
process_ref = session.query(models.Process). \
filter_by(deleted=0). \
filter_by(gid=gid). \
filter_by(pid=pid). \
first()
if process_ref is None:
raise exception.ProcessNotFound(pid=pid)
process_ref.update({"deleted": 1,
'deleted_at': timeutils.utcnow(),
"status": "DELETING"})
process_ref.save(session)
return _get_process_dict(process_ref)
def _get_process_dict(process_ref):
process_dict = dict(process_ref)
process_dict.update(dict(securitygroups=[dict(securitygroup)
for securitygroup in process_ref
.securitygroups]))
process_dict.update(dict(networks=[dict(network)
for network in process_ref.networks]))
return process_dict

View File

@@ -1,4 +0,0 @@
This is a database migration repository.
More information at
http://code.google.com/p/sqlalchemy-migrate/

View File

@@ -1,19 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate.versioning.shell import main
if __name__ == '__main__':
main(debug='False', repository='.')

View File

@@ -1,20 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=rack
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]

View File

@@ -1,60 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import MetaData, Table, Column, Integer, String, DateTime
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
groups = Table('groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column(
'gid', String(length=255), primary_key=True,
nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('status', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
try:
groups.create()
except Exception:
LOG.info(repr(groups))
LOG.exception(_('Exception while creating groups table.'))
raise
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
groups.drop()
except Exception:
LOG.info(repr(groups))
LOG.exception(_('Exception while dropping groups table.'))
raise

View File

@@ -1,70 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate.changeset import UniqueConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, String
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
Column('disabled_reason', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
try:
services.create()
except Exception:
LOG.info(repr(services))
LOG.exception(_('Exception while creating services table.'))
raise
UniqueConstraint('host', 'topic', 'deleted',
table=services,
name='uniq_services0host0topic0deleted').create()
UniqueConstraint('host', 'binary', 'deleted',
table=services,
name='uniq_services0host0binary0deleted').create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
services.drop()
except Exception:
LOG.info(repr(services))
LOG.exception(_('Exception while dropping services table.'))
raise

View File

@@ -1,66 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from sqlalchemy import Boolean, DateTime, Integer, String, Text
from sqlalchemy import Column, MetaData, Table
LOG = logging.getLogger(__name__)
meta = MetaData()
keypairs = Table('keypairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('keypair_id', String(length=36),
primary_key=True, nullable=False),
Column('gid', String(length=36), nullable=False),
Column('nova_keypair_id', String(length=255)),
Column('private_key', Text),
Column('display_name', String(length=255)),
Column('is_default', Boolean),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
try:
keypairs.create()
groups = Table("groups", meta, autoload=True)
ForeignKeyConstraint([keypairs.c.gid], [groups.c.gid]).create()
except Exception:
LOG.info(repr(keypairs))
LOG.exception(_('Exception while creating keypairs table.'))
raise
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
keypairs.drop()
except Exception:
LOG.info(repr(keypairs))
LOG.exception(_('Exception while dropping keypairs table.'))
raise

View File

@@ -1,67 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, String
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
securitygroups = Table('securitygroups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('securitygroup_id', String(length=36),
primary_key=True, nullable=False),
Column('gid', String(length=36), nullable=False),
Column('neutron_securitygroup_id', String(length=36)),
Column('is_default', Boolean),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('display_name', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
groups = Table("groups", meta, autoload=True)
try:
securitygroups.create()
except Exception:
LOG.info(repr(securitygroups))
LOG.exception(_('Exception while creating securitygroups table.'))
raise
ForeignKeyConstraint(columns=[securitygroups.c.gid],
refcolumns=[groups.c.gid]).create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
securitygroups.drop()
except Exception:
LOG.info(repr(securitygroups))
LOG.exception(_('Exception while dropping securitygroups table.'))
raise

View File

@@ -1,66 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from sqlalchemy import MetaData, Table, Column, Integer
from sqlalchemy import String, DateTime, Boolean
LOG = logging.getLogger(__name__)
meta = MetaData()
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('network_id', String(length=255),
primary_key=True, nullable=False),
Column('gid', String(length=255), nullable=False),
Column('neutron_network_id', String(length=255)),
Column('is_admin', Boolean),
Column('cidr', String(length=255)),
Column('ext_router', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('display_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
try:
networks.create()
groups = Table("groups", meta, autoload=True)
ForeignKeyConstraint([networks.c.gid], [groups.c.gid]).create()
except Exception:
LOG.info(repr(networks))
LOG.exception(_('Exception while creating networks table.'))
raise
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
networks.drop()
except Exception:
LOG.info(repr(networks))
LOG.exception(_('Exception while dropping networks table.'))
raise

View File

@@ -1,85 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, String, Text
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
processes = Table('processes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, nullable=False),
Column('gid', String(length=36), nullable=False),
Column('keypair_id', String(length=36)),
Column(
'pid', String(length=36), primary_key=True,
nullable=False),
Column('ppid', String(length=36)),
Column('nova_instance_id', String(length=36)),
Column('glance_image_id', String(length=36)),
Column('nova_flavor_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('display_name', String(length=255)),
Column('app_status', Text),
Column('is_proxy', Boolean),
Column('shm_endpoint', Text),
Column('ipc_endpoint', Text),
Column('fs_endpoint', Text),
Column('args', Text),
Column('userdata', Text),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
groups = Table("groups", meta, autoload=True)
keypairs = Table("keypairs", meta, autoload=True)
try:
processes.create()
except Exception:
LOG.info(repr(processes))
LOG.exception(_('Exception while creating processes table.'))
raise
ForeignKeyConstraint(columns=[processes.c.gid],
refcolumns=[groups.c.gid]).create()
ForeignKeyConstraint(columns=[processes.c.keypair_id],
refcolumns=[keypairs.c.keypair_id]).create()
ForeignKeyConstraint(columns=[processes.c.ppid],
refcolumns=[processes.c.pid]).create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
processes.drop()
except Exception:
LOG.info(repr(processes))
LOG.exception(_('Exception while dropping processes table.'))
raise

View File

@@ -1,67 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import String
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
processes_securitygroups = Table('processes_securitygroups', meta,
Column(
'pid', String(length=36), nullable=False,
primary_key=True),
Column(
'securitygroup_id', String(length=36),
nullable=False, primary_key=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
processes = Table("processes", meta, autoload=True)
securitygroups = Table("securitygroups", meta, autoload=True)
try:
processes_securitygroups.create()
except Exception:
LOG.info(repr(processes_securitygroups))
LOG.exception(
_('Exception while creating processes_securitygroups table.'))
raise
ForeignKeyConstraint(columns=[processes_securitygroups.c.pid],
refcolumns=[processes.c.pid]).create()
ForeignKeyConstraint(
columns=[processes_securitygroups.c.securitygroup_id],
refcolumns=[securitygroups.c.securitygroup_id]).create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
processes_securitygroups.drop()
except Exception:
LOG.info(repr(processes_securitygroups))
LOG.exception(
_('Exception while dropping processes_securitygroups table.'))
raise

View File

@@ -1,64 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from migrate import ForeignKeyConstraint
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import String
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
processes_networks = Table('processes_networks', meta,
Column(
'pid', String(length=36), nullable=False,
primary_key=True),
Column(
'network_id', String(length=36),
nullable=False, primary_key=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
processes = Table("processes", meta, autoload=True)
networks = Table("networks", meta, autoload=True)
try:
processes_networks.create()
except Exception:
LOG.info(repr(processes_networks))
LOG.exception(_('Exception while creating processes_networks table.'))
raise
ForeignKeyConstraint(columns=[processes_networks.c.pid],
refcolumns=[processes.c.pid]).create()
ForeignKeyConstraint(columns=[processes_networks.c.network_id],
refcolumns=[networks.c.network_id]).create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
try:
processes_networks.drop()
except Exception:
LOG.info(repr(processes_networks))
LOG.exception(_('Exception while dropping processes_networks table.'))
raise

View File

@@ -1,85 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from rack.db.sqlalchemy import api as db_session
from rack import exception
from rack.openstack.common.gettextutils import _
INIT_VERSION = 0
_REPOSITORY = None
get_engine = db_session.get_engine
def db_sync(version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.RackException(_("version should be an integer"))
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(INIT_VERSION)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.RackException(
_("Upgrade DB using Essex release first."))
def db_initial_version():
return INIT_VERSION
def db_version_control(version=None):
repository = _find_migrate_repo()
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
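db_sync() compares the requested version with the current one and picks upgrade or downgrade accordingly; passing nothing means "latest". A short sketch (the version number 3 is illustrative):

# Hypothetical usage sketch: version=None upgrades to the newest script;
# an explicit lower version triggers a downgrade instead.
from rack.db.sqlalchemy import migration

migration.db_sync()            # upgrade to the most recent version
migration.db_sync(version=3)   # downgrade if the current version is > 3
print(migration.db_version())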

View File

@@ -1,196 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rack.openstack.common.db.sqlalchemy import models
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy import Text, schema
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class Group(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase,
Base):
__tablename__ = 'groups'
securitygroups = relationship("Securitygroup")
processes = relationship("Process")
gid = Column(String(36), primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
display_name = Column(String(255))
display_description = Column(String(255))
status = Column(String(255))
class Service(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase,
Base):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
class Network(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase,
Base):
__tablename__ = 'networks'
network_id = Column(String(255), primary_key=True)
gid = Column(String(255))
neutron_network_id = Column(String(255))
is_admin = Column(Boolean, default=False)
cidr = Column(String(255))
ext_router = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
display_name = Column(String(255))
class Keypair(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase,
Base):
__tablename__ = 'keypairs'
keypair_id = Column(String(36), primary_key=True)
gid = Column(String(36), ForeignKey('groups.gid'), nullable=False)
user_id = Column(String(255))
project_id = Column(String(255))
nova_keypair_id = Column(String(255))
private_key = Column(Text)
display_name = Column(String(255))
is_default = Column(Boolean, default=False)
class Securitygroup(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase,
Base):
__tablename__ = 'securitygroups'
deleted = Column(Integer, nullable=False, default=0)
securitygroup_id = Column(String(36), primary_key=True)
gid = Column(String(36), ForeignKey('groups.gid'))
neutron_securitygroup_id = Column(String(36))
is_default = Column(Boolean, default=False)
user_id = Column(String(255))
project_id = Column(String(255))
display_name = Column(String(255))
group = relationship("Group",
foreign_keys=gid,
primaryjoin='and_('
'Securitygroup.gid == Group.gid,'
'Securitygroup.deleted == 0,'
'Group.deleted == 0)')
class Process(models.SoftDeleteMixin,
models.TimestampMixin,
models.ModelBase,
Base):
__tablename__ = 'processes'
deleted = Column(Integer, nullable=False, default=0)
gid = Column(String(36), ForeignKey('groups.gid'), nullable=False)
keypair_id = Column(String(36), ForeignKey('keypairs.keypair_id'))
pid = Column(String(36), primary_key=True)
ppid = Column(String(36), ForeignKey('processes.pid'))
nova_instance_id = Column(String(36))
glance_image_id = Column(String(36))
nova_flavor_id = Column(Integer)
user_id = Column(String(255))
project_id = Column(String(255))
display_name = Column(String(255))
is_proxy = Column(Boolean(), default=False)
shm_endpoint = Column(Text)
ipc_endpoint = Column(Text)
fs_endpoint = Column(Text)
args = Column(Text)
userdata = Column(Text)
app_status = Column(Text)
group = relationship("Group",
foreign_keys=gid,
primaryjoin='and_('
'Process.gid == Group.gid,'
'Process.deleted == 0,'
'Group.deleted == 0)')
securitygroups = relationship("Securitygroup",
secondary="processes_securitygroups",
primaryjoin='and_('
'Process.pid == ProcessSecuritygroup.pid,'
'Process.deleted == 0)',
secondaryjoin='and_('
'Securitygroup.securitygroup_id == '
'ProcessSecuritygroup.securitygroup_id,'
'Securitygroup.deleted == 0)',
backref="processes")
networks = relationship("Network",
secondary="processes_networks",
primaryjoin='and_('
'Process.pid == ProcessNetwork.pid,'
'Process.deleted == 0)',
secondaryjoin='and_('
'Network.network_id == ProcessNetwork.network_id,'
'Network.deleted == 0)',
backref="processes")
class ProcessSecuritygroup(models.ModelBase, Base):
__tablename__ = 'processes_securitygroups'
pid = Column(String(36), ForeignKey(
'processes.pid'), nullable=False, primary_key=True)
securitygroup_id = Column(String(36), ForeignKey(
'securitygroups.securitygroup_id'), nullable=False, primary_key=True)
class ProcessNetwork(models.ModelBase, Base):
__tablename__ = 'processes_networks'
pid = Column(String(36), ForeignKey(
'processes.pid'), nullable=False, primary_key=True)
network_id = Column(String(36), ForeignKey(
'networks.network_id'), nullable=False, primary_key=True)
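The relationship() declarations make the processes_securitygroups and processes_networks join tables transparent to callers. A minimal query sketch, assuming a configured session from the api module shown earlier:

# Hypothetical sketch: walking the secondary-table relationships declared
# on Process; the primaryjoin/secondaryjoin filters exclude deleted rows.
from rack.db.sqlalchemy import api as db_api
from rack.db.sqlalchemy import models

session = db_api.get_session()
process = session.query(models.Process).filter_by(deleted=0).first()
if process is not None:
    network_names = [net.display_name for net in process.networks]
    group_ids = [sg.securitygroup_id for sg in process.securitygroups]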

View File

@@ -1,61 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom SQLAlchemy types."""
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from rack import utils
class IPAddress(types.TypeDecorator):
"""An SQLAlchemy type representing an IP-address."""
impl = types.String
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgresql.INET())
else:
return dialect.type_descriptor(types.String(39))
def process_bind_param(self, value, dialect):
"""Process/Formats the value before insert it into the db."""
if dialect.name == 'postgresql':
return value
# NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
# form, not validate it.
elif utils.is_valid_ipv6(value):
return utils.get_shortened_ipv6(value)
return value
class CIDR(types.TypeDecorator):
"""An SQLAlchemy type representing a CIDR definition."""
impl = types.String
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgresql.INET())
else:
return dialect.type_descriptor(types.String(43))
def process_bind_param(self, value, dialect):
"""Process/Formats the value before insert it into the db."""
# NOTE(sdague): normalize all the inserts
if utils.is_valid_ipv6_cidr(value):
return utils.get_shortened_ipv6_cidr(value)
return value
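Both TypeDecorators are used like any other column type: postgresql gets a native INET column, every other dialect a fixed-width string. A minimal model sketch, assuming this module is importable as rack.db.sqlalchemy.types; the table and class names are illustrative:

# Hypothetical model sketch: INET on postgresql, String(39)/String(43)
# on other dialects, with IPv6 values shortened on the way in.
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base

from rack.db.sqlalchemy.types import CIDR, IPAddress

ModelBase = declarative_base()


class Endpoint(ModelBase):
    __tablename__ = 'endpoints'
    id = Column(Integer, primary_key=True)
    address = Column(IPAddress())
    subnet = Column(CIDR())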

View File

@@ -1,611 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import schema
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from rack.db.sqlalchemy import api as db
from rack import exception
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils
LOG = logging.getLogger(__name__)
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
class DeleteFromSelect(UpdateBase):
def __init__(self, table, select, column):
self.table = table
self.select = select
self.column = column
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.column),
element.column.name,
compiler.process(element.select))
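InsertFromSelect and DeleteFromSelect compile a SELECT directly into the DML statement, so rows are copied or pruned in one server-side statement; the shadow-table helpers below rely on this. A sketch, assuming this module is importable as rack.db.sqlalchemy.utils and that a matching shadow table already exists (the name is illustrative):

# Hypothetical sketch: copy every row of `groups` into its shadow table
# with a single INSERT INTO ... SELECT, no per-row round trips.
from rack.db.sqlalchemy import api as db_api
from rack.db.sqlalchemy import utils

engine = db_api.get_engine()
src = utils.get_table(engine, 'groups')
dst = utils.get_table(engine, 'shadow_groups')  # illustrative name
engine.execute(utils.InsertFromSelect(dst, src.select()))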
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except Exception:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise exception.RackException(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise exception.RackException(msg % column_name)
return column
def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
sql_data = migrate_engine.execute(
"""
SELECT sql
FROM
sqlite_master
WHERE
type = 'table' AND
name = :table_name;
""",
table_name=table_name
).fetchone()[0]
uniques = set([
schema.UniqueConstraint(
*[getattr(table.c, c.strip(' "'))
for c in cols.split(",")], name=name
)
for name, cols in re.findall(regexp, sql_data)
])
return uniques
def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name)
table.constraints.update(uniques)
constraints = [constraint for constraint in table.constraints
if not constraint.name == uc_name and
not isinstance(constraint, schema.ForeignKeyConstraint)]
new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"],
*column_names,
unique=index["unique"]))
f_keys = []
for fk in insp.get_foreign_keys(table_name):
refcolumns = [fk['referred_table'] + '.' + col
for col in fk['referred_columns']]
f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
refcolumns, table=new_table,
name=fk['name']))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
for index in indexes:
index.create(migrate_engine)
for fkey in f_keys:
fkey.create()
new_table.rename(table_name)
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""This method drops UC from table and works for mysql, postgresql and
sqlite. In mysql and postgresql we are able to use "alter table"
construction. In sqlite is only one way to drop UC:
1) Create new table with same columns, indexes and constraints
(except one that we want to drop).
2) Copy data from old table to new.
3) Drop old table.
4) Rename new table to the name of old table.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
if migrate_engine.name == "sqlite":
_drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance)
else:
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
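A usage sketch, assuming this module is importable as rack.db.sqlalchemy.utils: dropping the host/topic unique constraint that the services migration earlier in this commit created.

# Hypothetical sketch: on mysql/postgresql this is a plain ALTER TABLE;
# on sqlite the table is rebuilt without the constraint, as described above.
from rack.db.sqlalchemy import api as db_api
from rack.db.sqlalchemy import utils

utils.drop_unique_constraint(db_api.get_engine(), 'services',
                             'uniq_services0host0topic0deleted',
                             'host', 'topic', 'deleted')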
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""This method is used to drop all old rows that have the same values for
columns in uc_column_names.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(list(columns_for_group_by))
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def check_shadow_table(migrate_engine, table_name):
"""This method checks that table with ``table_name`` and
corresponding shadow table have same columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
autoload=True)
columns = dict([(c.name, c) for c in table.columns])
shadow_columns = dict([(c.name, c) for c in shadow_table.columns])
for name, column in columns.iteritems():
if name not in shadow_columns:
raise exception.RackException(
_("Missing column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
shadow_column = shadow_columns[name]
if not isinstance(shadow_column.type, type(column.type)):
raise exception.RackException(
_("Different types in %(table)s.%(column)s and shadow table: "
"%(c_type)s %(shadow_c_type)s")
% {'column': name, 'table': table.name,
'c_type': column.type,
'shadow_c_type': shadow_column.type})
for name, column in shadow_columns.iteritems():
if name not in columns:
raise exception.RackException(
_("Extra column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
return True
def create_shadow_table(migrate_engine, table_name=None, table=None,
**col_name_col_instance):
"""This method create shadow table for table with name ``table_name``
or table instance ``table``.
:param table_name: Autoload table with this name and create shadow table
:param table: Autoloaded table, so just create corresponding shadow table.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
:returns: The created shadow_table object.
"""
meta = MetaData(bind=migrate_engine)
if table_name is None and table is None:
raise exception.RackException(_("Specify `table_name` or `table` "
"param"))
if not (table_name is None or table is None):
raise exception.RackException(_("Specify only one param `table_name` "
"`table`"))
if table is None:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
return shadow_table
except (OperationalError, ProgrammingError):
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise exception.ShadowTableExists(name=shadow_table_name)
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise exception.RackException(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict([(index['name'], index['column_names'])
for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = MetaData(bind=migrate_engine)
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
table.update().\
where(table.c.deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
# NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
# NOTE(boris-42): There is no other way to check whether a
# CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
# NOTE(I159): when the type of column `deleted` is changed from boolean
# to int, the corresponding CHECK constraint is dropped too. But
# starting from SQLAlchemy version 0.8.3, those CHECK constraints
# aren't dropped anymore. So despite the fact that column deleted is
# of type int now, we still restrict its values to be either 0 or 1.
constraint_markers = (
"deleted in (0, 1)",
"deleted IN (:deleted_1, :deleted_2)",
"deleted IN (:param_1, :param_2)"
)
return any(sqltext.endswith(marker) for marker in constraint_markers)
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
# NOTE: ``not new_table.c.deleted`` would coerce the clause to a Python
# boolean instead of building a SQL expression, so compare with False.
new_table.update().\
where(new_table.c.deleted == False).\
values(deleted=default_deleted_value).\
execute()
def _index_exists(migrate_engine, table_name, index_name):
inspector = reflection.Inspector.from_engine(migrate_engine)
indexes = inspector.get_indexes(table_name)
index_names = [index['name'] for index in indexes]
return index_name in index_names
def _add_index(migrate_engine, table, index_name, idx_columns):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.create()
def _drop_index(migrate_engine, table, index_name, idx_columns):
if _index_exists(migrate_engine, table.name, index_name):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.drop()
def _change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns):
_drop_index(migrate_engine, table, index_name, old_columns)
_add_index(migrate_engine, table, index_name, new_columns)
def modify_indexes(migrate_engine, data, upgrade=True):
if migrate_engine.name == 'sqlite':
return
meta = MetaData()
meta.bind = migrate_engine
for table_name, indexes in data.iteritems():
table = Table(table_name, meta, autoload=True)
for index_name, old_columns, new_columns in indexes:
if not upgrade:
new_columns, old_columns = old_columns, new_columns
if migrate_engine.name == 'postgresql':
if upgrade:
_add_index(migrate_engine, table, index_name, new_columns)
else:
_drop_index(migrate_engine, table, index_name, old_columns)
elif migrate_engine.name == 'mysql':
_change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns)
else:
raise ValueError('Unsupported DB %s' % migrate_engine.name)
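The shape of the ``data`` argument is easiest to see by example. A hedged sketch with made-up table, index, and column names; each entry maps a table name to (index_name, old_columns, new_columns) triples:

INDEXES = {
    'services': [
        # (index_name, columns before upgrade, columns after upgrade)
        ('services_host_idx', ('host',), ('host', 'deleted')),
    ],
}

def upgrade(migrate_engine):
    modify_indexes(migrate_engine, INDEXES, upgrade=True)

def downgrade(migrate_engine):
    modify_indexes(migrate_engine, INDEXES, upgrade=False)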


@ -1,75 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def enabled():
return ('--remote_debug-host' in sys.argv and
'--remote_debug-port' in sys.argv)
def register_cli_opts():
from oslo.config import cfg
cli_opts = [
cfg.StrOpt('host',
help='Debug host (IP or name) to connect. Note '
'that using the remote debug option changes how '
'Rack uses the eventlet library to support async IO. '
'This could result in failures that do not occur '
'under normal operation. Use at your own risk.'),
cfg.IntOpt('port',
help='Debug port to connect. Note '
'that using the remote debug option changes how '
'Rack uses the eventlet library to support async IO. '
'This could result in failures that do not occur '
'under normal operation. Use at your own risk.')
]
cfg.CONF.register_cli_opts(cli_opts, 'remote_debug')
def init():
from oslo.config import cfg
CONF = cfg.CONF
# NOTE(markmc): gracefully handle the CLI options not being registered
if 'remote_debug' not in CONF:
return
if not (CONF.remote_debug.host and CONF.remote_debug.port):
return
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
LOG.debug(_('Listening on %(host)s:%(port)s for debug connection'),
{'host': CONF.remote_debug.host,
'port': CONF.remote_debug.port})
from pydev import pydevd
pydevd.settrace(host=CONF.remote_debug.host,
port=CONF.remote_debug.port,
stdoutToServer=False,
stderrToServer=False)
LOG.warn(_('WARNING: Using the remote debug option changes how '
'Rack uses the eventlet library to support async IO. This '
'could result in failures that do not occur under normal '
'operation. Use at your own risk.'))
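A small sketch of how the guard behaves (host and port values are made up): enabled() only checks that both option names appear in sys.argv, so both must be passed for init() to attach to the debugger.

import sys

# Simulate a command line that requests remote debugging.
sys.argv += ['--remote_debug-host', '192.168.0.5',
             '--remote_debug-port', '5678']
assert enabled()  # both option names present, so debugging is requested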

File diff suppressed because it is too large


@ -1,58 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from oslo.config import cfg
from rack import utils
CONF = cfg.CONF
def _get_my_ip():
"""Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return utils.get_my_ipv4_address()
netconf_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='IP address of this host'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.BoolOpt('use_ipv6',
default=False,
help='Use IPv6'),
]
CONF.register_opts(netconf_opts)
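Once registered, other modules read these options straight from oslo.config; a minimal sketch (printed values depend on the host, and this assumes the module above has been imported so the options exist):

from oslo.config import cfg

CONF = cfg.CONF
print(CONF.my_ip, CONF.host, CONF.use_ipv6)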


@ -1,13 +0,0 @@
openstack-common
----------------
A number of modules from openstack-common are imported into this project.
These modules are "incubating" in openstack-common and are kept in sync
with the help of openstack-common's update.py script. See:
https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator
The copy of the code should never be directly modified here. Please
always update openstack-common first and then run the script to copy
the changes across.


@ -1,2 +0,0 @@
import six
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
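With this shim registered, test code can import mox through six.moves and transparently get the mox module on Python 2 or mox3.mox on Python 3; a one-line sketch:

from six.moves import mox  # resolves via the add_move() call above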


@ -1,63 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
class MissingArgs(Exception):
def __init__(self, missing):
self.missing = missing
def __str__(self):
if len(self.missing) == 1:
return "An argument is missing"
else:
return ("%(num)d arguments are missing" %
dict(num=len(self.missing)))
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
MissingArgs: An argument is missing
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
MissingArgs: 2 arguments are missing
:param fn: the function to check
:param arg: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, 'im_self', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)


@ -1,302 +0,0 @@
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from rack.openstack.common import gettextutils
from rack.openstack.common import importutils
gettextutils.install('rack')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'rack'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()


@ -1,83 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from rack.openstack.common import uuidutils
def generate_request_id():
return 'req-%s' % uuidutils.generate_uuid()
class RequestContext(object):
"""Helper class to represent useful information about a request context.
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
def get_admin_context(show_deleted=False):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
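A hedged usage sketch (token, user, and tenant values are invented): contexts are built per request, carry a generated request id, and serialize via to_dict().

ctxt = RequestContext(auth_token='token-123', user='alice', tenant='demo')
assert ctxt.request_id.startswith('req-')
payload = ctxt.to_dict()

# Administrative code paths use the preconstructed admin context instead.
admin_ctxt = get_admin_context(show_deleted=True)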


@ -1,162 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Multiple DB API backend support.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""
import functools
import logging
import threading
import time
from rack.openstack.common.db import exception
from rack.openstack.common.gettextutils import _LE
from rack.openstack.common import importutils
LOG = logging.getLogger(__name__)
def safe_for_db_retry(f):
"""Enable db-retry for decorated function, if config option enabled."""
f.__dict__['enable_retry'] = True
return f
class wrap_db_retry(object):
"""Retry db.api methods, if DBConnectionError() raised
Retry decorated db.api methods. If we enabled `use_db_reconnect`
in config, this decorator will be applied to all db.api functions,
marked with @safe_for_db_retry decorator.
Decorator catchs DBConnectionError() and retries function in a
loop until it succeeds, or until maximum retries count will be reached.
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
max_retry_interval):
super(wrap_db_retry, self).__init__()
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
self.max_retry_interval = max_retry_interval
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
while True:
try:
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
time.sleep(next_interval)
if self.inc_retry_interval:
next_interval = min(
next_interval * 2,
self.max_retry_interval
)
return wrapper
class DBAPI(object):
def __init__(self, backend_name, backend_mapping=None, lazy=False,
**kwargs):
"""Initialize the chosen DB API backend.
:param backend_name: name of the backend to load
:type backend_name: str
:param backend_mapping: backend name -> module/class to load mapping
:type backend_mapping: dict
:param lazy: load the DB backend lazily on the first DB API method call
:type lazy: bool
Keyword arguments:
:keyword use_db_reconnect: retry DB transactions on disconnect or not
:type use_db_reconnect: bool
:keyword retry_interval: seconds between transaction retries
:type retry_interval: int
:keyword inc_retry_interval: increase retry interval or not
:type inc_retry_interval: bool
:keyword max_retry_interval: max interval value between retries
:type max_retry_interval: int
:keyword max_retries: max number of retries before an error is raised
:type max_retries: int
"""
self._backend = None
self._backend_name = backend_name
self._backend_mapping = backend_mapping or {}
self._lock = threading.Lock()
if not lazy:
self._load_backend()
self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
self.retry_interval = kwargs.get('retry_interval', 1)
self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
self.max_retry_interval = kwargs.get('max_retry_interval', 10)
self.max_retries = kwargs.get('max_retries', 20)
def _load_backend(self):
with self._lock:
if not self._backend:
# Import the untranslated name if we don't have a mapping
backend_path = self._backend_mapping.get(self._backend_name,
self._backend_name)
backend_mod = importutils.import_module(backend_path)
self._backend = backend_mod.get_backend()
def __getattr__(self, key):
if not self._backend:
self._load_backend()
attr = getattr(self._backend, key)
if not hasattr(attr, '__call__'):
return attr
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
max_retry_interval=self.max_retry_interval)(attr)
return attr
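A minimal sketch of the backend contract described above (the module path and method name are illustrative): a backend module only has to expose get_backend(), and DBAPI then proxies attribute access to it, optionally wrapping retry-safe methods in wrap_db_retry.

# rack/db/sqlalchemy/api.py (hypothetical backend module)
#
#     import sys
#
#     @safe_for_db_retry
#     def service_get(context, service_id):
#         ...
#
#     def get_backend():
#         return sys.modules[__name__]

_BACKEND_MAPPING = {'sqlalchemy': 'rack.db.sqlalchemy.api'}
IMPL = DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING, lazy=True)
# IMPL.service_get(...) resolves to the backend module's attribute and,
# when use_db_reconnect=True is passed, is retried on DBConnectionError.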


@ -1,56 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
import six
from rack.openstack.common.gettextutils import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=None, inner_exception=None):
# NOTE: avoid a mutable default argument; behavior is unchanged.
self.columns = columns or []
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass


@ -1,168 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='rack.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
help='The SQL mode to be used for MySQL sessions '
'(default is empty, meaning do not override '
'any server-side SQL mode setting)'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]
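A hedged startup sketch (the connection string is invented): a service overrides the library defaults via set_defaults() before oslo.config parses its files.

set_defaults(sql_connection='mysql://rack:secret@localhost/rack',
             sqlite_db='rack.sqlite',
             max_pool_size=10)
# After CONF(...) runs, consumers read CONF.database.connection and friends.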


@ -1,268 +0,0 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from rack.openstack.common.db import exception
from rack.openstack.common.gettextutils import _
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
raises a NotImplementedError.
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite's abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E.g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
def db_sync(engine, abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)
def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE "%%utf8%%"')
table_names = [res[0] for res in engine.execute(onlyutf8_sql,
engine.url.database)]
if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names))
def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param init_version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
"The database is not under version control, but has "
"tables. Please stamp the current version of the schema "
"manually."))
def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(engine, repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
return Repository(abs_path)
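A hedged end-to-end sketch (the engine URL and repository path are illustrative): patch_migrate() is applied once before touching an SQLite schema, then db_sync() upgrades and db_version() reports the result.

engine = sqlalchemy.create_engine('sqlite:///rack.sqlite')
patch_migrate()  # work around SQLite ALTER TABLE limitations first
db_sync(engine, '/path/to/migrate_repo')  # upgrade to the latest version
print(db_version(engine, '/path/to/migrate_repo', init_version=0))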


@ -1,115 +0,0 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from rack.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
# NOTE(boris-42): This part of the code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
For reference check tests/db/sqlalchemy/test_models.py
"""
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
self._i = iter(columns)
return self
def next(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
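A hedged model sketch (table and column names are invented; Base comes from SQLAlchemy's declarative extension) showing how the mixins combine:

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Service(Base, ModelBase, TimestampMixin, SoftDeleteMixin):
    __tablename__ = 'services'
    id = Column(Integer, primary_key=True)
    host = Column(String(255))

# service.soft_delete(session) then sets deleted=id and stamps deleted_at
# instead of physically removing the row.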


@ -1,187 +0,0 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import argparse
import os
import random
import string
from six import moves
import sqlalchemy
from rack.openstack.common.db import exception as exc
SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
def _gen_credentials(*names):
"""Generate credentials."""
auth_dict = {}
for name in names:
val = ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10))
auth_dict[name] = val
return auth_dict
def _get_engine(uri=SQL_CONNECTION):
"""Engine creation
By default the uri is SQL_CONNECTION, which holds the admin credentials.
Call the function without arguments to get an admin connection; an admin
connection is required to create the temporary user and database for each
particular test. Otherwise, pass the URI of an existing connection to
reconnect to the temporary database.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
raise exc.DBConnectionError(msg % SQL_CONNECTION)
def create_database(engine):
"""Provide temporary user and database for each particular test."""
driver = engine.name
auth = _gen_credentials('database', 'user', 'passwd')
sqls = {
'mysql': [
"drop database if exists %(database)s;",
"grant all on %(database)s.* to '%(user)s'@'localhost'"
" identified by '%(passwd)s';",
"create database %(database)s;",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
"create user %(user)s with password '%(passwd)s';",
"create database %(database)s owner %(user)s;",
]
}
if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database']
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(engine, sql_query, driver)
params = auth.copy()
params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
def drop_database(engine, current_uri):
"""Drop temporary database and user after each particular test."""
engine = _get_engine(current_uri)
admin_engine = _get_engine()
driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username}
if driver == 'sqlite':
try:
os.remove(auth['database'])
except OSError:
pass
return
sqls = {
'mysql': [
"drop database if exists %(database)s;",
"drop user '%(user)s'@'localhost';",
],
'postgresql': [
"drop database if exists %(database)s;",
"drop user if exists %(user)s;",
]
}
try:
sql_rows = sqls[driver]
except KeyError:
raise ValueError('Unsupported RDBMS %s' % driver)
sql_query = map(lambda x: x % auth, sql_rows)
_execute_sql(admin_engine, sql_query, driver)
def main():
"""Controller to handle commands
::create: Create test user and database with random names.
::drop: Drop user and database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
epilog='Under normal circumstances this is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test '
'databases and users.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases and users.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')
args = parser.parse_args()
engine = _get_engine()
which = args.which
if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)
if __name__ == "__main__":
main()
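A hedged programmatic sketch (admin credentials come from the OS_TEST_DBAPI_ADMIN_CONNECTION environment variable; the returned URI format depends on the backend):

admin_engine = _get_engine()
uri = create_database(admin_engine)  # e.g. mysql://user:pw@localhost/dbname
try:
    test_engine = _get_engine(uri)
    # ... run tests against test_engine ...
finally:
    drop_database(admin_engine, uri)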


@ -1,860 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``, whereas `create_many_foo` is a good example of
when a transaction is needed; it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here we create two duplicate
instances with the same primary key, and the exception must be caught outside
of the context managed by a single session:
.. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the approaches above
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this cannot be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
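As a sketch of that eventual approach (the table and column names are
illustrative, reusing the `bar` example above):
.. code:: sql
INSERT INTO bar (id, bar) VALUES (${id}, ${bar})
ON DUPLICATE KEY UPDATE bar = ${bar};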
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code:: python
def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import logging
import re
import time
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from rack.openstack.common.db import exception
from rack.openstack.common.gettextutils import _LE, _LW, _LI
from rack.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
The foreign key constraints are disabled by default in SQLite,
so the foreign key constraints will be enabled here for every
database connection.
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
In this function will be raised DBDuplicateEntry exception if integrity
error wrap unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
# where `t` is the table name and `c1`, `c2` are the columns in
# the UniqueConstraint.
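# For example, a constraint named "uniq_instances0uuid0project_id"
# (an illustrative name) refers to the columns `uuid` and `project_id`
# of the `instances` table.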
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
for pattern in _DUP_KEY_RE_DB[engine_name]:
match = pattern.match(integrity_error.message)
if match:
break
else:
return
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
# columns so we have to omit that from the DBDuplicateEntry error.
columns = ''
if engine_name != 'ibm_db_sa':
columns = match.group(1)
if engine_name == "sqlite":
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
# TODO(rpodolyaka): in a subsequent commit make this a class decorator to
# ensure it can only be applied to instances of Session subclasses (as we
# use the Session instance bind attribute below)
@functools.wraps(f)
def _wrap(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
except sqla_exc.OperationalError as e:
_raise_if_db_connection_lost(e, self.bind)
_raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also, some tables (for example
# instance_types) have more than one unique constraint. This
# means we should get the names of the columns whose values
# violate the unique constraint from the error message.
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
"""Set engine mode to 'traditional'.
Required to prevent silent truncates at insert or update operations
under MySQL. By default MySQL truncates an inserted string if it is
longer than the declared field, emitting only a warning. That is
fraught with data corruption.
"""
_set_session_sql_mode(dbapi_con, connection_rec,
connection_proxy, 'TRADITIONAL')
def _set_session_sql_mode(dbapi_con, connection_rec,
connection_proxy, sql_mode=None):
"""Set the sql_mode session variable.
MySQL supports several server modes. The default is None, but sessions
may choose to enable server modes like TRADITIONAL, ANSI,
several STRICT_* modes and others.
Note: passing in '' (empty string) for sql_mode clears
the SQL mode for the session, overriding a potentially set
server default. Passing in None (the default) makes this
a no-op, meaning if a server-side SQL mode is set, it still applies.
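For example (an illustrative sketch mirroring how `create_engine` below
wires this listener up):
.. code:: python
mode_callback = functools.partial(_set_session_sql_mode,
sql_mode='TRADITIONAL')
sqlalchemy.event.listen(engine, 'checkout', mode_callback)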
"""
cursor = dbapi_con.cursor()
if sql_mode is not None:
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
# Check against the real effective SQL mode. Even when unset by
# our own config, the server may still be operating in a specific
# SQL mode as set by the server configuration
cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
row = cursor.fetchone()
if row is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
return
realmode = row[1]
LOG.info(_LI('MySQL server mode set to %s') % realmode)
# 'TRADITIONAL' mode enables several other modes, so
# we need a substring match here
if not ('TRADITIONAL' in realmode.upper() or
'STRICT_ALL_TABLES' in realmode.upper()):
LOG.warning(_LW("MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES")
% realmode)
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For DB2, the error code is -30081 when the database is not yet ready
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def _raise_if_db_connection_lost(error, engine):
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
mysql_traditional_mode=False, idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine."""
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": idle_timeout,
'convert_unicode': True,
}
logger = logging.getLogger('sqlalchemy.engine')
# Map SQL debug level to Python log level
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
if engine.name == 'mysql':
if mysql_traditional_mode:
mysql_sql_mode = 'TRADITIONAL'
if mysql_sql_mode:
mode_callback = functools.partial(_set_session_sql_mode,
sql_mode=mysql_sql_mode)
sqlalchemy.event.listen(engine, 'checkout', mode_callback)
elif 'sqlite' in connection_dict.drivername:
if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for filename, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if filename.endswith('session.py') and method == '_do_query':
continue
if filename.endswith('api.py') and method == 'wrapper':
continue
if filename.endswith('utils.py') and method == '_inner':
continue
if filename.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if filename.endswith('db/api.py'):
continue
# only trace inside rack
index = filename.rfind('rack')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (filename[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
class EngineFacade(object):
"""A helper class for removing of global engine instances from rack.db.
As a library, rack.db can't decide where to store/when to create engine
and sessionmaker instances, so this must be left for a target application.
On the other hand, in order to simplify the adoption of rack.db changes,
we'll provide a helper class, which creates engine and sessionmaker
on its instantiation and provides get_engine()/get_session() methods
that are compatible with corresponding utility functions that currently
exist in target projects, e.g. in Nova.
engine/sessionmaker instances will still be global (and they are meant to
be global), but they will be stored in the app context, rather than in the
rack.db context.
Note: using this helper is completely optional, and you are encouraged to
integrate engine/sessionmaker instances into your apps any way you like
(e.g. one might want to bind a session to a request context). Two important
things to remember:
1. An Engine instance is effectively a pool of DB connections, so it's
meant to be shared (and it's thread-safe).
2. A Session instance is not meant to be shared and represents a DB
transactional context (i.e. it's not thread-safe). sessionmaker is
a factory of sessions.
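A minimal usage sketch (the connection string is illustrative):
.. code:: python
facade = EngineFacade('sqlite://')
engine = facade.get_engine()
session = facade.get_session(expire_on_commit=True)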
"""
def __init__(self, sql_connection,
sqlite_fk=False, mysql_sql_mode=None,
autocommit=True, expire_on_commit=False, **kwargs):
"""Initialize engine and sessionmaker instances.
:param sqlite_fk: enable foreign keys in SQLite
:type sqlite_fk: bool
:param mysql_sql_mode: set SQL mode in MySQL
:type mysql_sql_mode: string
:param autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:param expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
Keyword arguments:
:keyword idle_timeout: timeout before idle sql connections are reaped
(defaults to 3600)
:keyword connection_debug: verbosity of SQL debugging information.
0=None, 100=Everything (defaults to 0)
:keyword max_pool_size: maximum number of SQL connections to keep open
in a pool (defaults to SQLAlchemy settings)
:keyword max_overflow: if set, use this value for max_overflow with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword pool_timeout: if set, use this value for pool_timeout with
sqlalchemy (defaults to SQLAlchemy settings)
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
(defaults to True)
:keyword connection_trace: add python stack traces to SQL as comment
strings (defaults to False)
:keyword max_retries: maximum db connection retries during startup.
(setting -1 implies an infinite retry count)
(defaults to 10)
:keyword retry_interval: interval between retries of opening a sql
connection (defaults to 10)
"""
super(EngineFacade, self).__init__()
self._engine = create_engine(
sql_connection=sql_connection,
sqlite_fk=sqlite_fk,
mysql_sql_mode=mysql_sql_mode,
idle_timeout=kwargs.get('idle_timeout', 3600),
connection_debug=kwargs.get('connection_debug', 0),
max_pool_size=kwargs.get('max_pool_size'),
max_overflow=kwargs.get('max_overflow'),
pool_timeout=kwargs.get('pool_timeout'),
sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
connection_trace=kwargs.get('connection_trace', False),
max_retries=kwargs.get('max_retries', 10),
retry_interval=kwargs.get('retry_interval', 10))
self._session_maker = get_maker(
engine=self._engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
def get_engine(self):
"""Get the engine instance (note, that it's shared)."""
return self._engine
def get_session(self, **kwargs):
"""Get a Session instance.
If passed, keyword argument values override the ones used when the
sessionmaker instance was created.
:keyword autocommit: use autocommit mode for created Session instances
:type autocommit: bool
:keyword expire_on_commit: expire session objects on commit
:type expire_on_commit: bool
"""
# Iterate over a copy of the keys; we delete from kwargs inside the loop.
for arg in list(kwargs):
if arg not in ('autocommit', 'expire_on_commit'):
del kwargs[arg]
return self._session_maker(**kwargs)

@@ -1,149 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import os
import fixtures
import six
from rack.openstack.common.db.sqlalchemy import session
from rack.openstack.common.db.sqlalchemy import utils
from rack.openstack.common import test
class DbFixture(fixtures.Fixture):
"""Basic database fixture.
Allows tests to run on various db backends, such as SQLite, MySQL and
PostgreSQL. By default the sqlite backend is used. To override the
default backend uri, set the env variable OS_TEST_DBAPI_CONNECTION to
database admin credentials for the specific backend.
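For example (credentials illustrative):
OS_TEST_DBAPI_CONNECTION=mysql://user:secret@localhost/openstack_citest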
"""
def _get_uri(self):
return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
def __init__(self, test):
super(DbFixture, self).__init__()
self.test = test
def setUp(self):
super(DbFixture, self).setUp()
self.test.engine = session.create_engine(self._get_uri())
self.test.sessionmaker = session.get_maker(self.test.engine)
self.addCleanup(self.test.engine.dispose)
class DbTestCase(test.BaseTestCase):
"""Base class for testing of DB code.
Using `DbFixture`. Intended to be the main database test case to use all
the tests on a given backend with user defined uri. Backend specific
tests should be decorated with `backend_specific` decorator.
"""
FIXTURE = DbFixture
def setUp(self):
super(DbTestCase, self).setUp()
self.useFixture(self.FIXTURE(self))
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
def backend_specific(*dialects):
"""Decorator to skip backend specific tests on inappropriate engines.
:param dialects: list of dialect names under which the test will be launched.
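For example (an illustrative sketch):
.. code:: python
@backend_specific('mysql', 'postgresql')
def test_runs_on_mysql_and_pg_only(self):
pass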
"""
def wrap(f):
@functools.wraps(f)
def ins_wrap(self):
if not set(dialects).issubset(ALLOWED_DIALECTS):
raise ValueError(
"Please use allowed dialects: %s" % ALLOWED_DIALECTS)
if self.engine.name not in dialects:
msg = ('The test "%s" can be run '
'only on %s. Current engine is %s.')
args = (f.__name__, ' '.join(dialects), self.engine.name)
self.skip(msg % args)
else:
return f(self)
return ins_wrap
return wrap
@six.add_metaclass(abc.ABCMeta)
class OpportunisticFixture(DbFixture):
"""Base fixture to use default CI databases.
The databases exist in the OpenStack CI infrastructure, but for
correct functioning in a local environment the databases must be
created manually.
"""
DRIVER = abc.abstractproperty(lambda: None)
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
def _get_uri(self):
return utils.get_connect_string(backend=self.DRIVER,
user=self.USERNAME,
passwd=self.PASSWORD,
database=self.DBNAME)
@six.add_metaclass(abc.ABCMeta)
class OpportunisticTestCase(DbTestCase):
"""Base test case to use default CI databases.
Subclasses of this test case run only when the openstack_citest
database is available; otherwise the tests will be skipped.
"""
FIXTURE = abc.abstractproperty(lambda: None)
def setUp(self):
credentials = {
'backend': self.FIXTURE.DRIVER,
'user': self.FIXTURE.USERNAME,
'passwd': self.FIXTURE.PASSWORD,
'database': self.FIXTURE.DBNAME}
if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
msg = '%s backend is not available.' % self.FIXTURE.DRIVER
return self.skip(msg)
super(OpportunisticTestCase, self).setUp()
class MySQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'mysql'
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
DRIVER = 'postgresql'
class MySQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = MySQLOpportunisticFixture
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
FIXTURE = PostgreSQLOpportunisticFixture

@@ -1,269 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import os
import subprocess
import lockfile
from six import moves
from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc
from rack.openstack.common.db.sqlalchemy import utils
from rack.openstack.common.gettextutils import _LE
from rack.openstack.common import test
LOG = logging.getLogger(__name__)
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='mysql',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='postgres',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
path = lock_path or os.environ.get("NOVA_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug('Lock released "%s"' % f.__name__)
return wrapper
return decorator
class BaseMigrationTestCase(test.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.test_databases = {}
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = moves.configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
except moves.configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = process.communicate()[0]
LOG.debug(output)
self.assertEqual(0, process.returncode,
"Failed to run: %s\n%s" % (cmd, output))
def _reset_pg(self, conn_pieces):
(user,
password,
database,
host) = utils.get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop the database; we can't
# drop the database we are connected to, so for such
# operations there is the special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %s;") % database
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(droptable)
sql = ("create database %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
@_set_db_lock(lock_prefix='migration_tests-')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = parse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
utils.get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
self.migration_api.version_control(engine, self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(engine,
self.REPOSITORY))
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
downgraded = self._migrate_down(
engine, version - 1, with_data=True)
if downgraded:
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine, version - 1)
if snake_walk and downgraded:
self._migrate_up(engine, version)
self._migrate_down(engine, version - 1)
def _migrate_down(self, engine, version, with_data=False):
try:
self.migration_api.downgrade(engine, self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(
version, self.migration_api.db_version(engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self.migration_api.upgrade(engine, self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine))
raise

@@ -1,638 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from rack.openstack.common import context as request_context
from rack.openstack.common.db.sqlalchemy import models
from rack.openstack.common.gettextutils import _, _LI, _LW
from rack.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
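# For example (an illustrative URL), "mysql://user:secret@localhost/db"
# is sanitized to "mysql://****:****@localhost/db".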
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
:param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming it is 'id'
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(len(sort_keys)):
crit_attrs = []
for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
:type read_deleted: bool
Usage:
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
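# Example usage (an illustrative sketch mirroring the calls later in
# this module):
# ins = InsertFromSelect(new_table, table.select())
# migrate_engine.execute(ins)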
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns whose types are
unsupported by sqlite, for example BigInteger.
"""
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
This method drops (or marks as `deleted` if use_soft_delete is True) old
duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
# NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
# NOTE(boris-42): There is no other way to check whether a
# CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
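For example (values illustrative), get_connect_string('mysql', 'test',
'user', 'secret') returns "mysql://user:secret@localhost/test".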
"""
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)

@ -1,146 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
help_for_backdoor_port = 'Acceptable ' + \
    'values are 0, <port> and <start>:<end>, where 0 results in ' + \
    'listening on a random tcp port number, <port> results in ' + \
    'listening on the specified port number and not enabling backdoor ' + \
    'if it is in use, and <start>:<end> results in listening on the ' + \
    'smallest unused port number within the specified range of port ' + \
    'numbers. The chosen port is displayed in the service\'s log file.'
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
default=None,
help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()})
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
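# A hedged usage sketch (the port range is illustrative): the backdoor is
# driven entirely by configuration, so a service only calls
# initialize_if_enabled() once at startup.
def _example_enable_backdoor():
    CONF.set_override('backdoor_port', '9000:9010')
    return initialize_if_enabled()  # None when backdoor_port is unset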

View File

@ -1,99 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
import six
from rack.openstack.common.gettextutils import _ # noqa
class save_and_reraise_exception(object):
"""Save current exception, run some code and then re-raise.
    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when, while running an
    exception handler, code raises and catches another exception. In both
    cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
In some cases the caller may not want to re-raise the exception, and
for those circumstances this context provides a reraise flag that
can be used to suppress the exception. For example:
except Exception:
with save_and_reraise_exception() as ctxt:
decide_if_need_reraise()
if not should_be_reraised:
ctxt.reraise = False
"""
def __init__(self):
self.reraise = True
def __enter__(self):
        self.type_, self.value, self.tb = sys.exc_info()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
six.reraise(self.type_, self.value, self.tb)
def forever_retry_uncaught_exceptions(infunc):
def inner_func(*args, **kwargs):
last_log_time = 0
last_exc_message = None
exc_count = 0
while True:
try:
return infunc(*args, **kwargs)
except Exception as exc:
this_exc_message = six.u(str(exc))
if this_exc_message == last_exc_message:
exc_count += 1
else:
exc_count = 1
# Do not log any more frequently than once a minute unless
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = this_exc_message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
time.sleep(1)
return inner_func
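# A minimal sketch of the decorator; the worker body is hypothetical.
# Any uncaught exception is logged (at most once a minute for repeats of
# the same message) and the function is simply re-entered, so calling
# _example_worker() would retry forever.
@forever_retry_uncaught_exceptions
def _example_worker():
    raise RuntimeError('transient failure')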

View File

@ -1,137 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from rack.openstack.common import excutils
from rack.openstack.common.gettextutils import _ # noqa
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh
or not.
"""
global _FILE_CACHE
if force_reload and filename in _FILE_CACHE:
del _FILE_CACHE[filename]
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
This util is needed for creating temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
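# An illustrative combination of the helpers above: write a scratch file,
# then make sure it disappears again if later processing fails.
def _example_scratch_file():
    path = write_to_tempfile(b'[DEFAULT]\n', suffix='.conf')
    with remove_path_on_error(path):
        with file_open(path) as f:
            return f.read()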

View File

@ -1,85 +0,0 @@
#
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
import six
class Config(fixtures.Fixture):
"""Allows overriding configuration settings for the test.
`conf` will be reset on cleanup.
"""
def __init__(self, conf=cfg.CONF):
self.conf = conf
def setUp(self):
super(Config, self).setUp()
        # NOTE(morganfainberg): unregister must be added to cleanup before
        # reset because cleanup works in reverse order of registered items,
        # and a reset must occur before unregistering options can occur.
self.addCleanup(self._unregister_config_opts)
self.addCleanup(self.conf.reset)
self._registered_config_opts = {}
def config(self, **kw):
"""Override configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a `group` argument is supplied, the overrides are applied to
the specified configuration option group, otherwise the overrides
are applied to the ``default`` group.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
def _unregister_config_opts(self):
for group in self._registered_config_opts:
self.conf.unregister_opts(self._registered_config_opts[group],
group=group)
def register_opt(self, opt, group=None):
"""Register a single option for the test run.
Options registered in this manner will automatically be unregistered
during cleanup.
If a `group` argument is supplied, it will register the new option
to that group, otherwise the option is registered to the ``default``
group.
"""
self.conf.register_opt(opt, group=group)
self._registered_config_opts.setdefault(group, set()).add(opt)
def register_opts(self, opts, group=None):
"""Register multiple options for the test run.
This works in the same manner as register_opt() but takes a list of
options as the first argument. All arguments will be registered to the
same group if the ``group`` argument is supplied, otherwise all options
will be registered to the ``default`` group.
"""
for opt in opts:
self.register_opt(opt, group=group)
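# A hedged sketch of the fixture in a testtools-based test; the 'widget'
# option is hypothetical and registered here purely for the demonstration.
import testtools  # assumed available in the test environment
class _ExampleConfigTest(testtools.TestCase):
    def setUp(self):
        super(_ExampleConfigTest, self).setUp()
        self.cfg = self.useFixture(Config())
        self.cfg.register_opt(cfg.StrOpt('widget', default='a'))
    def test_override(self):
        self.cfg.config(widget='b')
        self.assertEqual('b', self.cfg.conf.widget)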

View File

@ -1,51 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from rack.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
"""External locking fixture.
This fixture is basically an alternative to the synchronized decorator with
the external flag so that tearDowns and addCleanups will be included in
the lock context for locking between tests. The fixture is recommended to
be the first line in a test method, like so::
def test_method(self):
        self.useFixture(LockFixture('lock_name'))
...
or the first line in setUp if all the test methods in the class are
required to be serialized. Something like::
class TestCase(testtools.testcase):
def setUp(self):
            self.useFixture(LockFixture('lock_name'))
super(TestCase, self).setUp()
...
    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits (either by completing or raising an exception).
"""
def __init__(self, name, lock_file_prefix=None):
self.mgr = lockutils.lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()

View File

@ -1,34 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
def get_logging_handle_error_fixture():
"""returns a fixture to make logging raise formatting exceptions.
Usage:
self.useFixture(logging.get_logging_handle_error_fixture())
"""
return fixtures.MonkeyPatch('logging.Handler.handleError',
_handleError)
def _handleError(self, record):
"""Monkey patch for logging.Handler.handleError.
The default handleError just logs the error to stderr but we want
the option of actually raising an exception.
"""
raise

View File

@ -1,51 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
self.new = new
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, new=mock.DEFAULT, **kwargs):
self.obj = obj
self.kwargs = kwargs
self.new = new
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, self.new, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
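# A minimal sketch (the patched target is illustrative): replace
# os.path.exists with a mock for the duration of a single test.
import os  # assumed importable; used only by the sketch below
import testtools
class _ExamplePatchTest(testtools.TestCase):
    def test_exists_is_patched(self):
        self.useFixture(PatchObject(os.path, 'exists', return_value=True))
        self.assertTrue(os.path.exists('/no/such/path'))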

View File

@ -1,32 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from six.moves import mox # noqa
class MoxStubout(fixtures.Fixture):
"""Deal with code around mox and stubout as a fixture."""
def setUp(self):
super(MoxStubout, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = self.mox.stubs
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.mox.VerifyAll)
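# A hedged sketch of typical use inside a test's setUp() (names are
# illustrative); VerifyAll() and UnsetStubs() then run automatically as
# cleanups:
#     mox_fixture = self.useFixture(MoxStubout())
#     self.mox = mox_fixture.mox
#     self.stubs = mox_fixture.stubs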

View File

@ -1,474 +0,0 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from rack.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('rack'.upper() + '_LOCALEDIR')
_t = gettext.translation('rack', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('rack' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='rack')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='rack' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
            Lazy gettext function for a given domain; it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. rack, glance, cinder, etc.).
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='rack', *args):
"""Create a new Message object.
        In order for translation to work, gettext requires a message ID; this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime, use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
        # Skip %% and deal with the case where % is the first character on the
        # line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
            # Fall back to casting to unicode; this will handle the
            # python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
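# A minimal sketch of the lazy-translation flow; which locales actually
# render depends on the message catalogs installed on the system.
def _example_lazy_translation():
    enable_lazy()
    msg = _('Unable to reach %(host)s') % {'host': 'controller'}
    # Rendering is deferred until translate() picks a locale.
    return translate(msg, desired_locale='en_US')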

View File

@ -1,144 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
"""
import re
from rack.openstack.common.gettextutils import _ # noqa
from rack.openstack.common import strutils
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output or '')
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encryption = details.get('encryption')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - ie for usage in kwargs and such...
field = field.lower().strip()
for c in (" ", "-"):
field = field.replace(c, '_')
return field
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if real_size:
details = real_size.group(1)
try:
details = strutils.to_bytes(details)
except TypeError:
pass
return details
def _extract_details(self, root_cmd, root_details, lines_after):
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after[0].startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
del lines_after[0]
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
while lines_after:
line = lines_after[0]
line_pieces = line.split()
if len(line_pieces) != 6:
break
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
del lines_after[0]
return real_details
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
        # Find all lines that begin with some characters followed by a ':',
        # and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
contents = {}
lines = [x for x in cmd_output.splitlines() if x.strip()]
while lines:
line = lines.pop(0)
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
continue
root_details = top_level.group(2).strip()
details = self._extract_details(root, root_details, lines)
contents[root] = details
return contents
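# An illustrative parse of abridged `qemu-img info` text output; the
# numbers are made up.
def _example_qemu_img_parse():
    sample = ("image: disk.qcow2\n"
              "file format: qcow2\n"
              "virtual size: 1.0G (1073741824 bytes)\n"
              "disk size: 196K\n")
    info = QemuImgInfo(sample)
    return info.file_format, info.virtual_size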

View File

@ -1,66 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
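# Illustrative calls; the dotted paths below are standard library, so they
# resolve as written.
def _example_imports():
    ordered_dict_cls = import_class('collections.OrderedDict')
    json_mod = import_module('json')
    yaml_mod = try_import('yaml')  # None if PyYAML is absent
    return ordered_dict_cls, json_mod, yaml_mod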

View File

@ -1,178 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from rack.openstack.common import gettextutils
from rack.openstack.common import importutils
from rack.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on rack project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
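# A minimal sketch of the wrappers above; the datetime value round-trips
# as the string produced by timeutils.strtime().
def _example_roundtrip():
    record = {'when': datetime.datetime.utcnow(), 'count': 1}
    return loads(dumps(record))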

View File

@ -1,45 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
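# A short sketch of the difference between the stores; the context class
# is hypothetical.
def _example_stores():
    class _Context(object):
        pass
    ctx = _Context()
    strong_store.context = ctx   # held until the thread exits
    weak_store.context = ctx     # weakly referenced; None once 'ctx' dies
    return weak_store.context    # the same object while 'ctx' is alive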

View File

@ -1,303 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from rack.openstack.common import fileutils
from rack.openstack.common.gettextutils import _ # noqa
from rack.openstack.common import local
from rack.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("NOVA_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in rack/utils.py)
from rack.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('rack-')
(in rack/foo.py)
from rack import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
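# A hedged sketch of cross-process serialization; the prefix, lock name,
# and lock_path are illustrative.
rack_synchronized = synchronized_with_prefix('rack-')
@rack_synchronized('state-file', external=True, lock_path='/tmp')
def _example_update_state():
    pass  # at most one process executes this at a time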
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["NOVA_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -1,655 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from rack.openstack.common.gettextutils import _
from rack.openstack.common import importutils
from rack.openstack.common import jsonutils
from rack.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add a new key
# to _SANITIZE_KEYS and the regular expressions for XML and JSON are
# generated automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
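# Worked example (illustrative): for the key 'password' the four
# templates above expand to regexes that match, among others:
#     password = 'secret'                 (assignment style)
#     <password>secret</password>         (XML)
#     "password" : "secret"               (JSON)
#     u'password': u'secret'              (dict repr)
# mask_password() below keeps the delimiters and replaces only the
# value, yielding e.g. "password = '***'".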
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and then will be changed in J to honor RFC5424'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Use syslog rfc5424 format for logging. '
'If enabled, will add APP-NAME (RFC5424) before the '
'MSG part of the syslog message. The old format '
'without APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS; if not, just return the message, since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
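# Usage sketch (illustrative): JSONFormatter can be attached to any
# stdlib handler to emit one JSON document per record.
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(JSONFormatter())
#     logging.getLogger('rack').addHandler(handler)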
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
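# Usage sketch (illustrative): a service calls setup() once, after the
# configuration has been parsed; module code then uses getLogger().
#
#     CONF(sys.argv[1:], project='rack')
#     setup('rack')
#     LOG = getLogger(__name__)
#     LOG.audit('service starting')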
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
super(RFCSysLogHandler, self).__init__(*args, **kwargs)
def format(self, record):
msg = super(RFCSysLogHandler, self).format(record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python 2.6 calls the argument strm; in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"rack.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
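# Usage sketch (illustrative): WritableLogger adapts a logger to APIs
# that expect a file-like object, e.g. eventlet's wsgi server log
# argument (assuming eventlet is in use):
#
#     eventlet.wsgi.server(sock, app,
#                          log=WritableLogger(getLogger('rack.wsgi')))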
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))

View File

@ -1,147 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from rack.openstack.common.gettextutils import _
from rack.openstack.common import log as logging
from rack.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
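# Usage sketch (illustrative; task.finished() and task.result are
# hypothetical): poll every 10 seconds until the callback raises
# LoopingCallDone; wait() then returns the exception's retvalue.
#
#     def _poll_task(task):
#         if task.finished():
#             raise LoopingCallDone(retvalue=task.result)
#
#     timer = FixedIntervalLoopingCall(_poll_task, task)
#     timer.start(interval=10, initial_delay=5)
#     result = timer.wait()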
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
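# Usage sketch (illustrative; do_work() and seconds_until_next_event()
# are hypothetical): the callback decides how long to sleep before its
# next invocation, capped by periodic_interval_max.
#
#     def _run_pending():
#         do_work()
#         return seconds_until_next_event()
#
#     DynamicLoopingCall(_run_pending).start(periodic_interval_max=300)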

View File

@ -1,97 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Super simple fake memcache client."""
from oslo.config import cfg
from rack.openstack.common import timeutils
memcache_opts = [
cfg.ListOpt('memcached_servers',
default=None,
help='Memcached servers or None for in process cache.'),
]
CONF = cfg.CONF
CONF.register_opts(memcache_opts)
def get_client(memcached_servers=None):
client_cls = Client
if not memcached_servers:
memcached_servers = CONF.memcached_servers
if memcached_servers:
try:
import memcache
client_cls = memcache.Client
except ImportError:
pass
return client_cls(memcached_servers, debug=0)
class Client(object):
"""Replicates a tiny subset of memcached client interface."""
def __init__(self, *args, **kwargs):
"""Ignores the passed in args."""
self.cache = {}
def get(self, key):
"""Retrieves the value for a key or None.
This expunges expired keys during each get.
"""
now = timeutils.utcnow_ts()
for k in self.cache.keys():
(timeout, _value) = self.cache[k]
if timeout and now >= timeout:
del self.cache[k]
return self.cache.get(key, (0, None))[1]
def set(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key."""
timeout = 0
if time != 0:
timeout = timeutils.utcnow_ts() + time
self.cache[key] = (timeout, value)
return True
def add(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key if it doesn't exist."""
if self.get(key) is not None:
return False
return self.set(key, value, time, min_compress_len)
def incr(self, key, delta=1):
"""Increments the value for a key."""
value = self.get(key)
if value is None:
return None
new_value = int(value) + delta
self.cache[key] = (self.cache[key][0], str(new_value))
return new_value
def delete(self, key, time=0):
"""Deletes the value associated with a key."""
if key in self.cache:
del self.cache[key]
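# Usage sketch (illustrative): without memcached_servers configured,
# get_client() falls back to this in-process fake.
#
#     mc = get_client()
#     mc.set('token', 'abc123', time=60)   # expires 60 seconds from now
#     assert mc.get('token') == 'abc123'
#     mc.incr('hits')                      # returns None for a missing key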

View File

@ -1,55 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class(es) for WSGI Middleware."""
import webob.dec
class Middleware(object):
"""Base WSGI middleware wrapper.
These classes are initialized with the WSGI application they wrap, which
is called next in the stack. By default the middleware simply calls its
wrapped app, or you can override __call__ to customize its behavior.
"""
@classmethod
def factory(cls, global_conf, **local_conf):
"""Factory method for paste.deploy."""
return cls
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
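# Usage sketch (illustrative; AddHeader and inner_wsgi_app are
# hypothetical): subclasses usually override only process_request
# and/or process_response.
#
#     class AddHeader(Middleware):
#         def process_response(self, response):
#             response.headers['X-Handled-By'] = 'rack'
#             return response
#
#     wsgi_app = AddHeader(inner_wsgi_app)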

View File

@ -1,38 +0,0 @@
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that ensures request ID.
It ensures to assign request ID for each API request and set it to
request environment. The request ID is also added to API response.
"""
from rack.openstack.common import context
from rack.openstack.common.middleware import base
ENV_REQUEST_ID = 'openstack.request_id'
HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
class RequestIdMiddleware(base.Middleware):
def process_request(self, req):
self.req_id = context.generate_request_id()
req.environ[ENV_REQUEST_ID] = self.req_id
def process_response(self, response):
response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id)
return response
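# Usage sketch (illustrative; inner_app is hypothetical): wrap any WSGI
# application; handlers can read the ID from the request environment and
# clients from the response header.
#
#     app = RequestIdMiddleware(inner_app)
#     # handler side: req.environ['openstack.request_id']
#     # client side:  response.headers['x-openstack-request-id']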

Some files were not shown because too many files have changed in this diff