diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 3f5dd14..0000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = rack -omit = rack/tests/*,rack/openstack/* - -[report] -ignore-errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index e7ee2db..0000000 --- a/.gitignore +++ /dev/null @@ -1,46 +0,0 @@ -*.DS_Store -*.egg* -*.log -*.mo -*.pyc -*.swo -*.swp -*.sqlite -*~ -.autogenerated -.coverage -.rack-venv -.project -.pydevproject -.ropeproject -.testrepository/ -.settings -.tox -.idea -.venv -AUTHORS -Authors -build-stamp -build/* -bin/* -CA/ -ChangeLog -coverage.xml -cover/* -covhtml -dist/* -doc/source/api/* -doc/build/* -etc/rack.conf -instances -keeper -keys -local_settings.py -MANIFEST -nosetests.xml -rack/tests/cover/* -rack/vcsversion.py -tools/conf/rack.conf* -tools/lintstack.head.py -tools/pylint_exceptions -/bin diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 39817d0..0000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=stackforge/rack.git diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 6982d69..0000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./rack/tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/LICENSE b/LICENSE deleted file mode 100644 index ad410e1..0000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index b077ba1..d98af48 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,10 @@ -# The concept of RACK: "OpenStack Native Application" +This project is no longer maintained. -OpenStack Native Application is the software, which uses OpenStack resource (eg. VM or VNET) directly from application. Recent popular applications are designed before the cloud computing age, so affinity with cloud is not considered. In order to make those applications work on OpenStack, tools such as Chef, Puppet and other tools are required, and it makes systems very complex in design. +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -RACK provides the mechanism to create “After the Cloud” applications. 
Programmer can write codes that are scalable and migratable on OpenStack platform without cooperating with the external systems. - -Concepts of RACK are as follows: - -1. RACK handles VM with "functions" as a single execution binary file. “Functions” here means OS, middleware and programs that are necessary for application to function. The programs here are made in such a way as to call and operate RACK API. -2. When this execution binary is deployed onto OpenStack, the VM will behave like a Linux process and then finish its own task. -3. This process is based on the descriptions in the program. It does things such as forking and generating a child process, communicating between processes. - -Please take a look at our Wiki page to understand RACK more! -**https://wiki.openstack.org/wiki/RACK** +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/etc/api-paste.ini b/etc/api-paste.ini deleted file mode 100644 index 4132e9d..0000000 --- a/etc/api-paste.ini +++ /dev/null @@ -1,33 +0,0 @@ -[composite:rackapi] -use = egg:Paste#urlmap -/ = rackversions -/v1 = rackapi_v1 - -[composite:rackapi_v1] -use = call:rack.api.auth:pipeline_factory -noauth = faultwrap noauth rackapp_v1 -keystone = faultwrap authtoken keystonecontext rackapp_v1 - -[filter:faultwrap] -paste.filter_factory = rack.api:FaultWrapper.factory - -[filter:noauth] -paste.filter_factory = rack.api.auth:NoAuthMiddleware.factory - -[pipeline:rackversions] -pipeline = faultwrap rackversionapp - -[app:rackversionapp] -paste.app_factory = rack.api.versions:Versions.factory - -[app:rackapp_v1] -paste.app_factory = rack.api.v1:APIRouter.factory - -[filter:keystonecontext] -paste.filter_factory = rack.api.auth:RackKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory -auth_port = 35357 -auth_protocol = http -auth_version = v2.0 \ No newline at end of file diff --git a/etc/rack.conf.sample b/etc/rack.conf.sample deleted file mode 100644 index e647ed8..0000000 --- a/etc/rack.conf.sample +++ /dev/null @@ -1,15 +0,0 @@ -[DEFAULT] -#debug = True -#verbose = True -#lock_path = /var/lib/rack/lock -#state_path = /var/lib/rack -#sql_connection = mysql://root:password@127.0.0.1/rack?charset=utf8 -#api_paste_config = /etc/rack/api-paste.ini -#auth_strategy = noauth -#os_username = admin -#os_password = password -#os_tenant_name = demo -#os_auth_url = http://localhost:5000/v2.0 -#os_region_name = RegionOne -#ipc_port = 8888 -#shm_port = 6379 \ No newline at end of file diff --git a/openstack-common.conf b/openstack-common.conf deleted file mode 100644 index adf5107..0000000 --- a/openstack-common.conf +++ /dev/null @@ -1,28 +0,0 @@ -[DEFAULT] - -# The list of modules to copy from oslo-incubator.git -module=config -module=context -module=db -module=db.sqlalchemy -module=eventlet_backdoor -module=excutils -module=fileutils -module=fixture -module=gettextutils -module=importutils -module=jsonutils -module=local -module=lockutils -module=log -module=loopingcall -module=policy -module=processutils -module=service -module=strutils -module=threadgroup -module=timeutils -module=uuidutils - -# The base module to hold the copy of openstack.common -base=rack \ No newline at end of file diff --git a/rack/__init__.py b/rack/__init__.py deleted file mode 100644 index 38c387c..0000000 --- a/rack/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -:mod:`rack` -- Cloud IaaS Platform -=================================== - -.. automodule:: rack - :platform: Unix - :synopsis: Infrastructure-as-a-Service Cloud platform. -""" diff --git a/rack/api/__init__.py b/rack/api/__init__.py deleted file mode 100644 index 0557b2c..0000000 --- a/rack/api/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from rack.api import wsgi -from rack.openstack.common import gettextutils -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack import utils -from rack import wsgi as base_wsgi -import webob.dec -import webob.exc - - -LOG = logging.getLogger(__name__) - - -class FaultWrapper(base_wsgi.Middleware): - - """Calls down the middleware stack, making exceptions into faults.""" - - _status_to_type = {} - - @staticmethod - def status_to_type(status): - if not FaultWrapper._status_to_type: - for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): - FaultWrapper._status_to_type[clazz.code] = clazz - return FaultWrapper._status_to_type.get( - status, webob.exc.HTTPInternalServerError)() - - def _error(self, inner, req): - LOG.exception(_("Caught error: %s"), unicode(inner)) - - headers = getattr(inner, 'headers', None) - status = getattr(inner, 'code', 500) - if status is None: - status = 500 - - msg_dict = dict(url=req.url, status=status) - LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) - outer = self.status_to_type(status) - if headers: - outer.headers = headers - if isinstance(inner.msg_fmt, gettextutils.Message): - user_locale = req.best_match_language() - inner_msg = gettextutils.translate( - inner.msg_fmt, user_locale) - else: - inner_msg = unicode(inner) - outer.explanation = '%s: %s' % (inner.__class__.__name__, - inner_msg) - return wsgi.Fault(outer) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - try: - return req.get_response(self.application) - except Exception as ex: - return self._error(ex, req) diff --git a/rack/api/auth.py b/rack/api/auth.py deleted file mode 100644 index 281999e..0000000 --- a/rack/api/auth.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Common Auth Middleware. - -""" - -from oslo.config import cfg -from rack.api import wsgi -from rack import context -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import jsonutils -from rack.openstack.common import log as logging -from rack import wsgi as base_wsgi -import webob.dec -import webob.exc - - -auth_opts = [ - cfg.BoolOpt('api_rate_limit', - default=False, - help=('Whether to use per-user rate limiting for the api. ')), - cfg.StrOpt('auth_strategy', - default='noauth', - help='The strategy to use for auth: noauth or keystone.'), - cfg.BoolOpt('use_forwarded_for', - default=False, - help='Treat X-Forwarded-For as the canonical remote address. ' - 'Only enable this if you have a sanitizing proxy.'), -] - -CONF = cfg.CONF -CONF.register_opts(auth_opts) - -LOG = logging.getLogger(__name__) - - -def _load_pipeline(loader, pipeline): - filters = [loader.get_filter(n) for n in pipeline[:-1]] - app = loader.get_app(pipeline[-1]) - filters.reverse() - for filter in filters: - app = filter(app) - return app - - -def pipeline_factory(loader, global_conf, **local_conf): - """A paste pipeline replica that keys off of auth_strategy.""" - pipeline = local_conf[CONF.auth_strategy] - if not CONF.api_rate_limit: - limit_name = CONF.auth_strategy + '_nolimit' - pipeline = local_conf.get(limit_name, pipeline) - pipeline = pipeline.split() - return _load_pipeline(loader, pipeline) - - -class InjectContext(base_wsgi.Middleware): - - """Add a 'rack.context' to WSGI environ.""" - - def __init__(self, context, *args, **kwargs): - self.context = context - super(InjectContext, self).__init__(*args, **kwargs) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - req.environ['rack.context'] = self.context - return self.application - - -class RackKeystoneContext(base_wsgi.Middleware): - - """Make a request context from keystone headers.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - user_id = req.headers.get('X_USER') - user_id = req.headers.get('X_USER_ID', user_id) - if user_id is None: - LOG.debug("Neither X_USER_ID nor X_USER found in request") - return webob.exc.HTTPUnauthorized() - - roles = self._get_roles(req) - - if 'X_TENANT_ID' in req.headers: - # This is the new header since Keystone went to ID/Name - project_id = req.headers['X_TENANT_ID'] - else: - # This is for legacy compatibility - project_id = req.headers['X_TENANT'] - project_name = req.headers.get('X_TENANT_NAME') - user_name = req.headers.get('X_USER_NAME') - - # Get the auth token - auth_token = req.headers.get('X_AUTH_TOKEN', - req.headers.get('X_STORAGE_TOKEN')) - - # Build a context, including the auth_token... 
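# The context below is assembled from the Keystone-populated headers
# gathered above; X-Forwarded-For is honored only when the
# use_forwarded_for option is enabled, because that header is
# client-controlled unless a sanitizing proxy fronts the API.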
- remote_address = req.remote_addr - if CONF.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - - service_catalog = None - if req.headers.get('X_SERVICE_CATALOG') is not None: - try: - catalog_header = req.headers.get('X_SERVICE_CATALOG') - service_catalog = jsonutils.loads(catalog_header) - except ValueError: - raise webob.exc.HTTPInternalServerError( - _('Invalid service catalog json.')) - - ctx = context.RequestContext(user_id, - project_id, - user_name=user_name, - project_name=project_name, - roles=roles, - auth_token=auth_token, - remote_address=remote_address, - service_catalog=service_catalog) - - req.environ['rack.context'] = ctx - return self.application - - def _get_roles(self, req): - """Get the list of roles.""" - - if 'X_ROLES' in req.headers: - roles = req.headers.get('X_ROLES', '') - else: - # Fallback to deprecated role header: - roles = req.headers.get('X_ROLE', '') - if roles: - LOG.warn(_("Sourcing roles from deprecated X-Role HTTP " - "header")) - return [r.strip() for r in roles.split(',')] - - -class NoAuthMiddlewareBase(base_wsgi.Middleware): - - def base_call(self, req, project_id_in_path): - remote_address = getattr(req, 'remote_address', '127.0.0.1') - ctx = context.RequestContext(user_id='noauth', - project_id='noauth', - is_admin=True, - remote_address=remote_address) - - req.environ['rack.context'] = ctx - return self.application - - -class NoAuthMiddleware(NoAuthMiddlewareBase): - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - return self.base_call(req, True) diff --git a/rack/api/common.py b/rack/api/common.py deleted file mode 100644 index 6af68d1..0000000 --- a/rack/api/common.py +++ /dev/null @@ -1,455 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import itertools -import os -import re - -from oslo.config import cfg -import six.moves.urllib.parse as urlparse -import webob -from webob import exc - -from rack.api import wsgi -from rack.api import xmlutil -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -osapi_opts = [ - cfg.IntOpt('osapi_max_limit', - default=1000, - help='The maximum number of items returned in a single ' - 'response from a collection resource'), - cfg.StrOpt('osapi_compute_link_prefix', - help='Base URL that will be presented to users in links ' - 'to the OpenStack Compute API'), - cfg.StrOpt('osapi_glance_link_prefix', - help='Base URL that will be presented to users in links ' - 'to glance resources'), -] -CONF = cfg.CONF -CONF.register_opts(osapi_opts) - -LOG = logging.getLogger(__name__) - - -VALID_NAME_REGEX = re.compile("^(?! )[\w. _-]+(?<! )$", re.UNICODE) - - -def get_pagination_params(request): - """Return marker, limit tuple from request. - - :param request: ``wsgi.Request`` possibly containing 'marker' and 'limit' - GET variables. 'marker' is the id of the last element - the client has seen, and 'limit' is the maximum number - of items to return. If 'limit' is not specified, 0, or - > max_limit, we default to max_limit. Negative values - for either marker or limit will cause - exc.HTTPBadRequest() exceptions to be raised.
- - """ - params = {} - if 'limit' in request.GET: - params['limit'] = _get_int_param(request, 'limit') - if 'page_size' in request.GET: - params['page_size'] = _get_int_param(request, 'page_size') - if 'marker' in request.GET: - params['marker'] = _get_marker_param(request) - return params - - -def _get_int_param(request, param): - """Extract integer param from request or fail.""" - try: - int_param = int(request.GET[param]) - except ValueError: - msg = _('%s param must be an integer') % param - raise webob.exc.HTTPBadRequest(explanation=msg) - if int_param < 0: - msg = _('%s param must be positive') % param - raise webob.exc.HTTPBadRequest(explanation=msg) - return int_param - - -def _get_marker_param(request): - """Extract marker id from request or fail.""" - return request.GET['marker'] - - -def limited(items, request, max_limit=CONF.osapi_max_limit): - """Return a slice of items according to requested offset and limit. - - :param items: A sliceable entity - :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' - GET variables. 'offset' is where to start in the list, - and 'limit' is the maximum number of items to return. If - 'limit' is not specified, 0, or > max_limit, we default - to max_limit. Negative values for either offset or limit - will cause exc.HTTPBadRequest() exceptions to be raised. - :kwarg max_limit: The maximum number of items to return from 'items' - """ - try: - offset = int(request.GET.get('offset', 0)) - except ValueError: - msg = _('offset param must be an integer') - raise webob.exc.HTTPBadRequest(explanation=msg) - - try: - limit = int(request.GET.get('limit', max_limit)) - except ValueError: - msg = _('limit param must be an integer') - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _('limit param must be positive') - raise webob.exc.HTTPBadRequest(explanation=msg) - - if offset < 0: - msg = _('offset param must be positive') - raise webob.exc.HTTPBadRequest(explanation=msg) - - limit = min(max_limit, limit or max_limit) - range_end = offset + limit - return items[offset:range_end] - - -def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit): - """get limited parameter from request.""" - params = get_pagination_params(request) - limit = params.get('limit', max_limit) - limit = min(max_limit, limit) - marker = params.get('marker') - - return limit, marker - - -def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit): - """Return a slice of items according to the requested marker and limit.""" - limit, marker = get_limit_and_marker(request, max_limit) - - limit = min(max_limit, limit) - start_index = 0 - if marker: - start_index = -1 - for i, item in enumerate(items): - if 'flavorid' in item: - if item['flavorid'] == marker: - start_index = i + 1 - break - elif item['id'] == marker or item.get('uuid') == marker: - start_index = i + 1 - break - if start_index < 0: - msg = _('marker [%s] not found') % marker - raise webob.exc.HTTPBadRequest(explanation=msg) - range_end = start_index + limit - return items[start_index:range_end] - - -def get_id_from_href(href): - """Return the id or uuid portion of a url. - - Given: 'http://www.foo.com/bar/123?q=4' - Returns: '123' - - Given: 'http://www.foo.com/bar/abc123?q=4' - Returns: 'abc123' - - """ - return urlparse.urlsplit("%s" % href).path.split('/')[-1] - - -def remove_version_from_href(href): - """Removes the first api version from the href. 
- - Given: 'http://www.rack.com/v1.1/123' - Returns: 'http://www.rack.com/123' - - Given: 'http://www.rack.com/v1.1' - Returns: 'http://www.rack.com' - - """ - parsed_url = urlparse.urlsplit(href) - url_parts = parsed_url.path.split('/', 2) - - # NOTE: this should match vX.X or vX - expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') - if expression.match(url_parts[1]): - del url_parts[1] - - new_path = '/'.join(url_parts) - - if new_path == parsed_url.path: - msg = _('href %s does not contain version') % href - LOG.debug(msg) - raise ValueError(msg) - - parsed_url = list(parsed_url) - parsed_url[2] = new_path - return urlparse.urlunsplit(parsed_url) - - -def dict_to_query_str(params): - # TODO(throughnothing): we should just use urllib.urlencode instead of this - # But currently we don't work with urlencoded url's - param_str = "" - for key, val in params.iteritems(): - param_str = param_str + '='.join([str(key), str(val)]) + '&' - - return param_str.rstrip('&') - - -def get_networks_for_instance_from_nw_info(nw_info): - networks = {} - for vif in nw_info: - ips = vif.fixed_ips() - floaters = vif.floating_ips() - label = vif['network']['label'] - if label not in networks: - networks[label] = {'ips': [], 'floating_ips': []} - - networks[label]['ips'].extend(ips) - networks[label]['floating_ips'].extend(floaters) - for ip in itertools.chain(networks[label]['ips'], - networks[label]['floating_ips']): - ip['mac_address'] = vif['address'] - return networks - - -def raise_http_conflict_for_instance_invalid_state(exc, action): - """Raises a webob.exc.HTTPConflict instance containing a message - appropriate to return via the API based on the original - InstanceInvalidState exception. - """ - attr = exc.kwargs.get('attr') - state = exc.kwargs.get('state') - not_launched = exc.kwargs.get('not_launched') - if attr and state: - msg = _("Cannot '%(action)s' while instance is in %(attr)s " - "%(state)s") % {'action': action, 'attr': attr, 'state': state} - elif not_launched: - msg = _("Cannot '%s' an instance which has never been active") % action - else: - # At least give some meaningful message - msg = _("Instance is in an invalid state for '%s'") % action - raise webob.exc.HTTPConflict(explanation=msg) - - -class MetadataDeserializer(wsgi.MetadataXMLDeserializer): - - def deserialize(self, text): - dom = xmlutil.safe_minidom_parse_string(text) - metadata_node = self.find_first_child_named(dom, "metadata") - metadata = self.extract_metadata(metadata_node) - return {'body': {'metadata': metadata}} - - -class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): - - def deserialize(self, text): - dom = xmlutil.safe_minidom_parse_string(text) - metadata_item = self.extract_metadata(dom) - return {'body': {'meta': metadata_item}} - - -class MetadataXMLDeserializer(wsgi.XMLDeserializer): - - def extract_metadata(self, metadata_node): - """Marshal the metadata attribute of a parsed request.""" - if metadata_node is None: - return {} - metadata = {} - for meta_node in self.find_children_named(metadata_node, "meta"): - key = meta_node.getAttribute("key") - metadata[key] = self.extract_text(meta_node) - return metadata - - def _extract_metadata_container(self, datastring): - dom = xmlutil.safe_minidom_parse_string(datastring) - metadata_node = self.find_first_child_named(dom, "metadata") - metadata = self.extract_metadata(metadata_node) - return {'body': {'metadata': metadata}} - - def create(self, datastring): - return self._extract_metadata_container(datastring) - - def update_all(self, datastring): 
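# update_all() handles a full replacement of the collection, so it parses
# the same <metadata> container document as create(), e.g.
#   <metadata><meta key="name">app1</meta></metadata>
#     -> {'body': {'metadata': {'name': 'app1'}}}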
- return self._extract_metadata_container(datastring) - - def update(self, datastring): - dom = xmlutil.safe_minidom_parse_string(datastring) - metadata_item = self.extract_metadata(dom) - return {'body': {'meta': metadata_item}} - - -metadata_nsmap = {None: xmlutil.XMLNS_V11} - - -class MetaItemTemplate(xmlutil.TemplateBuilder): - - def construct(self): - sel = xmlutil.Selector('meta', xmlutil.get_items, 0) - root = xmlutil.TemplateElement('meta', selector=sel) - root.set('key', 0) - root.text = 1 - return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) - - -class MetadataTemplateElement(xmlutil.TemplateElement): - - def will_render(self, datum): - return True - - -class MetadataTemplate(xmlutil.TemplateBuilder): - - def construct(self): - root = MetadataTemplateElement('metadata', selector='metadata') - elem = xmlutil.SubTemplateElement(root, 'meta', - selector=xmlutil.get_items) - elem.set('key', 0) - elem.text = 1 - return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) - - -def check_snapshots_enabled(f): - @functools.wraps(f) - def inner(*args, **kwargs): - if not CONF.allow_instance_snapshots: - LOG.warn(_('Rejecting snapshot request, snapshots currently' - ' disabled')) - msg = _("Instance snapshots are not permitted at this time.") - raise webob.exc.HTTPBadRequest(explanation=msg) - return f(*args, **kwargs) - return inner - - -class ViewBuilder(object): - - """Model API responses as dictionaries.""" - - def _get_project_id(self, request): - """Get project id from request url if present or empty string - otherwise - """ - project_id = request.environ["rack.context"].project_id - if project_id in request.url: - return project_id - return '' - - def _get_links(self, request, identifier, collection_name): - return [ - { - "rel": "self", - "href": self._get_href_link(request, identifier, - collection_name), - }, - { - "rel": "bookmark", - "href": self._get_bookmark_link(request, - identifier, - collection_name), - }] - - def _get_next_link(self, request, identifier, collection_name): - """Return href string with proper limit and marker params.""" - params = request.params.copy() - params["marker"] = identifier - prefix = self._update_compute_link_prefix(request.application_url) - url = os.path.join(prefix, - self._get_project_id(request), - collection_name) - return "%s?%s" % (url, dict_to_query_str(params)) - - def _get_href_link(self, request, identifier, collection_name): - """Return an href string pointing to this object.""" - prefix = self._update_compute_link_prefix(request.application_url) - return os.path.join(prefix, - self._get_project_id(request), - collection_name, - str(identifier)) - - def _get_bookmark_link(self, request, identifier, collection_name): - """Create a URL that refers to a specific resource.""" - base_url = remove_version_from_href(request.application_url) - base_url = self._update_compute_link_prefix(base_url) - return os.path.join(base_url, - self._get_project_id(request), - collection_name, - str(identifier)) - - def _get_collection_links(self, - request, - items, - collection_name, - id_key="uuid"): - """Retrieve 'next' link, if applicable.""" - links = [] - limit = int(request.params.get("limit", 0)) - if limit and limit == len(items): - last_item = items[-1] - if id_key in last_item: - last_item_id = last_item[id_key] - elif 'id' in last_item: - last_item_id = last_item["id"] - else: - last_item_id = last_item["flavorid"] - links.append({ - "rel": "next", - "href": self._get_next_link(request, - last_item_id, - collection_name), - }) - 
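# A 'next' link is emitted only when the page came back full
# (limit == len(items)); an under-full page means the collection is
# exhausted. For a hypothetical full page of 20 items the link would be
#   {"rel": "next", "href": ".../groups?limit=20&marker=<last-uuid>"}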
return links - - def _update_link_prefix(self, orig_url, prefix): - if not prefix: - return orig_url - url_parts = list(urlparse.urlsplit(orig_url)) - prefix_parts = list(urlparse.urlsplit(prefix)) - url_parts[0:2] = prefix_parts[0:2] - return urlparse.urlunsplit(url_parts) - - def _update_glance_link_prefix(self, orig_url): - return self._update_link_prefix(orig_url, - CONF.osapi_glance_link_prefix) - - def _update_compute_link_prefix(self, orig_url): - return self._update_link_prefix(orig_url, - CONF.osapi_compute_link_prefix) - - -def get_instance(compute_api, context, instance_id, want_objects=False, - expected_attrs=None): - """Fetch an instance from the compute API, handling error checking.""" - try: - return compute_api.get(context, instance_id, - want_objects=want_objects, - expected_attrs=expected_attrs) - except exception.InstanceNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - - -def check_cells_enabled(function): - @functools.wraps(function) - def inner(*args, **kwargs): - if not CONF.cells.enable: - msg = _("Cells is not enabled.") - raise webob.exc.HTTPNotImplemented(explanation=msg) - return function(*args, **kwargs) - return inner diff --git a/rack/api/v1/__init__.py b/rack/api/v1/__init__.py deleted file mode 100644 index b2f240f..0000000 --- a/rack/api/v1/__init__.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -WSGI middleware for RACK API controllers. -""" - -from oslo.config import cfg -import routes - -from rack.api.v1 import groups -from rack.api.v1 import keypairs -from rack.api.v1 import networks -from rack.api.v1 import processes -from rack.api.v1 import securitygroups -from rack.api import versions -from rack.openstack.common import log as logging -from rack import wsgi as base_wsgi - - -openstack_client_opts = [ - cfg.StrOpt('sql_connection', - help='Valid sql_connection for Rack'), -] - -CONF = cfg.CONF -CONF.register_opts(openstack_client_opts) - -LOG = logging.getLogger(__name__) - - -class APIMapper(routes.Mapper): - - def routematch(self, url=None, environ=None): - if url == "": - result = self._match("", environ) - return result[0], result[1] - return routes.Mapper.routematch(self, url, environ) - - def connect(self, *args, **kargs): - # NOTE(vish): Default the format part of a route to only accept json - # and xml so it doesn't eat all characters after a '.' - # in the url. - kargs.setdefault('requirements', {}) - if not kargs['requirements'].get('format'): - kargs['requirements']['format'] = 'json|xml' - return routes.Mapper.connect(self, *args, **kargs) - - -class APIRouter(base_wsgi.Router): - - """Routes requests on the RACK API to the appropriate controller - and method. 
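Each resource is wired up in _setup_routes() with the standard
index/show/create/update/delete actions; the /groups/{gid}/proxy
endpoints reuse the processes controller.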
- """ - @classmethod - def factory(cls, global_config, **local_config): - """Simple paste factory, :class:`rack.wsgi.Router` doesn't have one.""" - return cls() - - def __init__(self): - mapper = APIMapper() - self._setup_routes(mapper) - super(APIRouter, self).__init__(mapper) - - def _setup_routes(self, mapper): - versions_resource = versions.create_resource() - mapper.connect("/", - controller=versions_resource, - action="show", - conditions={'method': ['GET']}) - - mapper.redirect("", "/") - - groups_resource = groups.create_resource() - mapper.connect("/groups", - controller=groups_resource, - action="index", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}", - controller=groups_resource, - action="show", - conditions={"method": ["GET"]}) - mapper.connect("/groups", - controller=groups_resource, - action="create", - conditions={"method": ["POST"]}) - mapper.connect("/groups/{gid}", - controller=groups_resource, - action="update", - conditions={"method": ["PUT"]}) - mapper.connect("/groups/{gid}", - controller=groups_resource, - action="delete", - conditions={"method": ["DELETE"]}) - - networks_resource = networks.create_resource() - mapper.connect("/groups/{gid}/networks", - controller=networks_resource, - action="index", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/networks/{network_id}", - controller=networks_resource, - action="show", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/networks", - controller=networks_resource, - action="create", - conditions={"method": ["POST"]}) - mapper.connect("/groups/{gid}/networks/{network_id}", - controller=networks_resource, - action="update", - conditions={"method": ["PUT"]}) - mapper.connect("/groups/{gid}/networks/{network_id}", - controller=networks_resource, - action="delete", - conditions={"method": ["DELETE"]}) - - keypairs_resource = keypairs.create_resource() - mapper.connect("/groups/{gid}/keypairs", - controller=keypairs_resource, - action="index", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/keypairs/{keypair_id}", - controller=keypairs_resource, - action="show", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/keypairs", - controller=keypairs_resource, - action="create", - conditions={"method": ["POST"]}) - mapper.connect("/groups/{gid}/keypairs/{keypair_id}", - controller=keypairs_resource, - action="update", - conditions={"method": ["PUT"]}) - mapper.connect("/groups/{gid}/keypairs/{keypair_id}", - controller=keypairs_resource, - action="delete", - conditions={"method": ["DELETE"]}) - - securitygroups_resource = securitygroups.create_resource() - mapper.connect("/groups/{gid}/securitygroups", - controller=securitygroups_resource, - action="index", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}", - controller=securitygroups_resource, - action="show", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/securitygroups", - controller=securitygroups_resource, - action="create", - conditions={"method": ["POST"]}) - mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}", - controller=securitygroups_resource, - action="update", - conditions={"method": ["PUT"]}) - mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}", - controller=securitygroups_resource, - action="delete", - conditions={"method": ["DELETE"]}) - - processes_resource = processes.create_resource() - mapper.connect("/groups/{gid}/processes", - controller=processes_resource, - action="index", - 
conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/processes/{pid}", - controller=processes_resource, - action="show", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/processes", - controller=processes_resource, - action="create", - conditions={"method": ["POST"]}) - mapper.connect("/groups/{gid}/processes/{pid}", - controller=processes_resource, - action="update", - conditions={"method": ["PUT"]}) - mapper.connect("/groups/{gid}/processes/{pid}", - controller=processes_resource, - action="delete", - conditions={"method": ["DELETE"]}) - - # RACK proxy resources - mapper.connect("/groups/{gid}/proxy", - controller=processes_resource, - action="show_proxy", - conditions={"method": ["GET"]}) - mapper.connect("/groups/{gid}/proxy", - controller=processes_resource, - action="create_proxy", - conditions={"method": ["POST"]}) - mapper.connect("/groups/{gid}/proxy", - controller=processes_resource, - action="update_proxy", - conditions={"method": ["PUT"]}) diff --git a/rack/api/v1/groups.py b/rack/api/v1/groups.py deleted file mode 100644 index a08d3e6..0000000 --- a/rack/api/v1/groups.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import uuid - -import six -import webob - -from rack.api.v1.views import groups as views_groups -from rack.api import wsgi -from rack import db -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import uuidutils -from rack import utils - - -LOG = logging.getLogger(__name__) - - -class Controller(wsgi.Controller): - - """Group controller for RACK API.""" - - _view_builder_class = views_groups.ViewBuilder - - def __init__(self): - super(Controller, self).__init__() - - @wsgi.response(200) - def index(self, req): - filters = {} - project_id = req.params.get('project_id') - name = req.params.get('name') - status = req.params.get('status') - - if project_id: - filters['project_id'] = project_id - if name: - filters['display_name'] = name - if status: - filters['status'] = status - - context = req.environ['rack.context'] - group_list = db.group_get_all(context, filters) - - return self._view_builder.index(group_list) - - @wsgi.response(200) - def show(self, req, gid): - - def _validate(gid): - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - - try: - _validate(gid) - context = req.environ['rack.context'] - group = db.group_get_by_gid(context, gid) - except exception.NotFound: - msg = _("Group could not be found") - raise webob.exc.HTTPNotFound(explanation=msg) - - return self._view_builder.show(group) - - @wsgi.response(201) - def create(self, req, body): - - def _validate(body): - if not self.is_valid_body(body, 'group'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - values = body["group"] - name = values.get("name") - description = values.get("description") - - if not name: - msg = 
_("Group name is required") - raise exception.InvalidInput(reason=msg) - - if isinstance(name, six.string_types): - name = name.strip() - utils.check_string_length(name, 'name', min_length=1, - max_length=255) - - if description: - utils.check_string_length(description, 'description', - min_length=0, max_length=255) - - valid_values = {} - valid_values["display_name"] = name - valid_values["display_description"] = description - return valid_values - - try: - values = _validate(body) - except exception.InvalidInput as exc: - raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) - - context = req.environ['rack.context'] - values["gid"] = unicode(uuid.uuid4()) - values["user_id"] = context.user_id - values["project_id"] = context.project_id - values["status"] = "ACTIVE" - group = db.group_create(context, values) - - return self._view_builder.create(group) - - @wsgi.response(200) - def update(self, req, body, gid): - - def _validate(body, gid): - if not self.is_valid_body(body, 'group'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - values = body["group"] - name = values.get("name") - description = values.get("description") - - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - - if name is None and description is None: - msg = _("Group name or description is required") - raise exception.InvalidInput(reason=msg) - - if name is not None: - if isinstance(name, six.string_types): - name = name.strip() - utils.check_string_length(name, 'name', min_length=1, - max_length=255) - - if description is not None: - utils.check_string_length(description, 'description', - min_length=0, max_length=255) - - valid_values = {} - if name: - valid_values["display_name"] = name - # allow blank string to clear description - if description is not None: - valid_values["display_description"] = description - valid_values["gid"] = gid - return valid_values - - context = req.environ['rack.context'] - - try: - values = _validate(body, gid) - group = db.group_update(context, values) - except exception.InvalidInput as exc: - raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) - except exception.GroupNotFound: - msg = _("Group could not be found") - raise webob.exc.HTTPNotFound(explanation=msg) - - return self._view_builder.update(group) - - @wsgi.response(204) - def delete(self, req, gid): - - def _validate(gid): - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - try: - _validate(gid) - - context = req.environ['rack.context'] - - keypairs = db.keypair_get_all(context, gid) - if keypairs: - raise exception.GroupInUse(gid=gid) - - securitygroups = db.securitygroup_get_all(context, gid) - if securitygroups: - raise exception.GroupInUse(gid=gid) - - networks = db.network_get_all(context, gid) - if networks: - raise exception.GroupInUse(gid=gid) - - processes = db.process_get_all(context, gid) - if processes: - raise exception.GroupInUse(gid=gid) - - db.group_delete(context, gid) - - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - except exception.GroupInUse as e: - raise webob.exc.HTTPConflict(explanation=e.format_message()) - - except Exception as e: - LOG.warn(e) - raise exception.GroupDeleteFailed() - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/rack/api/v1/keypairs.py b/rack/api/v1/keypairs.py deleted file mode 100644 index b0d75b2..0000000 --- a/rack/api/v1/keypairs.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) 2014 
ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import uuid -import webob - -from rack.api.v1.views import keypairs as views_keypairs -from rack.api import wsgi -from rack import db -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import strutils -from rack.openstack.common import uuidutils -from rack.resourceoperator import manager - - -LOG = logging.getLogger(__name__) - - -class Controller(wsgi.Controller): - - """Keypair controller for RACK API.""" - - _view_builder_class = views_keypairs.ViewBuilder - - def __init__(self): - super(Controller, self).__init__() - self.manager = manager.ResourceOperator() - - def _uuid_check(self, gid=None, keypair_id=None): - if gid: - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - if keypair_id: - if not uuidutils.is_uuid_like(keypair_id): - raise exception.KeypairNotFound(keypair_id=keypair_id) - - @wsgi.response(200) - def index(self, req, gid): - try: - self._uuid_check(gid=gid) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - context = req.environ['rack.context'] - keypair_list = db.keypair_get_all(context, gid) - keypair_list = self.manager.keypair_list(context, keypair_list) - return self._view_builder.index(keypair_list) - - @wsgi.response(200) - def show(self, req, gid, keypair_id): - try: - self._uuid_check(gid=gid, keypair_id=keypair_id) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - context = req.environ['rack.context'] - try: - keypair = db.keypair_get_by_keypair_id(context, gid, keypair_id) - self.manager.keypair_show(context, keypair) - except exception.KeypairNotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.show(keypair) - - @wsgi.response(201) - def create(self, req, body, gid): - - def _validate(context, body, gid): - if not self.is_valid_body(body, 'keypair'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - self._uuid_check(gid) - values = body["keypair"] - name = values.get("name") - is_default = values.get("is_default") - - if is_default: - try: - is_default = strutils.bool_from_string( - is_default, strict=True) - keypairs = db.keypair_get_all(context, gid, - filters={"is_default": True}) - if keypairs: - msg = _("Default keypair already exists in the " - "group %s" % gid) - raise exception.InvalidInput(reason=msg) - except ValueError: - msg = _("is_default must be a boolean") - raise exception.InvalidInput(reason=msg) - else: - is_default = False - - valid_values = {} - valid_values["gid"] = gid - valid_values["display_name"] = name - valid_values["is_default"] = is_default - return valid_values - - try: - context = req.environ['rack.context'] - values = _validate(context, body, gid) - db.group_get_by_gid(context, gid) - values["keypair_id"] = unicode(uuid.uuid4()) - 
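# The generated UUID is the RACK-side identifier; when no name was
# supplied it also becomes the basis of the default
# "keypair-<keypair_id>" display name built just below.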
if not values["display_name"]: - values["display_name"] = "keypair-" + values["keypair_id"] - result_value = self.manager.keypair_create( - context, values["display_name"]) - values.update(result_value) - values["user_id"] = context.user_id - values["project_id"] = context.project_id - keypair = db.keypair_create(context, values) - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.GroupNotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.create(keypair) - - @wsgi.response(200) - def update(self, req, body, gid, keypair_id): - - def _validate(body, gid, keypair_id): - if not self.is_valid_body(body, 'keypair'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - self._uuid_check(gid, keypair_id) - values = body["keypair"] - is_default = values.get("is_default") - - if is_default is not None: - try: - is_default = strutils.bool_from_string( - is_default, strict=True) - except ValueError: - msg = _("is_default must be a boolean") - raise exception.InvalidInput(reason=msg) - else: - msg = _("is_default is required") - raise exception.InvalidInput(reason=msg) - - valid_values = {"is_default": is_default} - return valid_values - - context = req.environ['rack.context'] - - try: - values = _validate(body, gid, keypair_id) - keypair = db.keypair_update(context, gid, keypair_id, values) - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.update(keypair) - - @wsgi.response(204) - def delete(self, req, gid, keypair_id): - context = req.environ['rack.context'] - - try: - self._uuid_check(gid=gid, keypair_id=keypair_id) - filters = {"keypair_id": keypair_id} - processes = db.process_get_all(context, gid, filters=filters) - if processes: - raise exception.keypairInUse(keypair_id=keypair_id) - - keypair = db.keypair_get_by_keypair_id(context, gid, keypair_id) - self.manager.keypair_delete(context, keypair["nova_keypair_id"]) - db.keypair_delete(context, gid, keypair_id) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - except exception.keypairInUse as e: - raise webob.exc.HTTPConflict(explanation=e.format_message()) - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/rack/api/v1/networks.py b/rack/api/v1/networks.py deleted file mode 100644 index ea4f941..0000000 --- a/rack/api/v1/networks.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from rack import db -from rack import exception - -from rack.api.v1.views import networks as views_networks -from rack.api import wsgi - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import strutils -from rack.openstack.common import uuidutils - -from rack.resourceoperator import manager - -import uuid -import webob - - -LOG = logging.getLogger(__name__) - - -class Controller(wsgi.Controller): - - """Network controller for RACK API.""" - - _view_builder_class = views_networks.ViewBuilder - - def __init__(self): - super(Controller, self).__init__() - self.manager = manager.ResourceOperator() - - def _uuid_check(self, gid=None, network_id=None): - if gid: - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - if network_id: - if not uuidutils.is_uuid_like(network_id): - raise exception.NetworkNotFound(network_id=network_id) - - @wsgi.response(200) - def index(self, req, gid): - try: - self._uuid_check(gid) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - context = req.environ['rack.context'] - network_list = db.network_get_all(context, gid) - network_list = self.manager.network_list(context, network_list) - - return self._view_builder.index(network_list) - - @wsgi.response(200) - def show(self, req, gid, network_id): - context = req.environ['rack.context'] - try: - self._uuid_check(gid, network_id) - network = db.network_get_by_network_id(context, gid, network_id) - self.manager.network_show(context, network) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.show(network) - - @wsgi.response(201) - def create(self, req, gid, body): - - def _validate(context, body, gid): - self._uuid_check(gid) - if not self.is_valid_body(body, "network"): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - values = body.get("network") - - cidr = values.get("cidr") - name = values.get("name") - - is_admin = values.get("is_admin") - if is_admin: - try: - is_admin = strutils.bool_from_string( - is_admin, strict=True) - except ValueError: - msg = _("is_admin must be a boolean") - raise exception.InvalidInput(reason=msg) - else: - is_admin = False - - gateway = values.get("gateway") - ext_router = values.get("ext_router_id") - dns_nameservers = values.get("dns_nameservers") - if dns_nameservers is not None and not isinstance( - dns_nameservers, list): - msg = _("dns_nameservers must be a list") - raise exception.InvalidInput(reason=msg) - - valid_values = {} - valid_values["gid"] = gid - valid_values["network_id"] = unicode(uuid.uuid4()) - if not name: - name = "network-" + valid_values["network_id"] - valid_values["display_name"] = name - valid_values["cidr"] = cidr - valid_values["is_admin"] = is_admin - valid_values["gateway"] = gateway - valid_values["ext_router"] = ext_router - valid_values["dns_nameservers"] = dns_nameservers - - network_values = {} - network_values["name"] = name - network_values["cidr"] = cidr - network_values["gateway"] = gateway - network_values["ext_router"] = ext_router - network_values["dns_nameservers"] = dns_nameservers - - return valid_values, network_values - - try: - context = req.environ['rack.context'] - values, network_values = _validate(context, body, gid) - db.group_get_by_gid(context, gid) - result_value = self.manager.network_create( - context, **network_values) - values.update(result_value) - 
values["user_id"] = context.user_id - values["project_id"] = context.project_id - network = db.network_create(context, values) - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.create(network) - - @wsgi.response(204) - def delete(self, req, gid, network_id): - try: - self._uuid_check(gid, network_id) - context = req.environ['rack.context'] - network = db.network_get_by_network_id(context, gid, network_id) - if network["processes"]: - raise exception.NetworkInUse(network_id=network_id) - - self.manager.network_delete(context, network["neutron_network_id"], - network["ext_router"]) - db.network_delete(context, gid, network_id) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - except exception.NetworkInUse as e: - raise webob.exc.HTTPConflict(explanation=e.format_message()) - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/rack/api/v1/processes.py b/rack/api/v1/processes.py deleted file mode 100644 index 29e3366..0000000 --- a/rack/api/v1/processes.py +++ /dev/null @@ -1,447 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import json - -from oslo.config import cfg - -import base64 -import uuid -import webob - -from rack import db -from rack import exception -from rack import utils - -from rack.api.v1.views import processes as views_processes -from rack.api import wsgi - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import uuidutils - -from rack.resourceoperator import manager - - -LOG = logging.getLogger(__name__) - - -class Controller(wsgi.Controller): - - """Process controller for RACK API.""" - - _view_builder_class = views_processes.ViewBuilder - - def __init__(self): - super(Controller, self).__init__() - self.manager = manager.ResourceOperator() - - def _uuid_check(self, gid=None, pid=None, keypair_id=None, - securitygroup_id=None): - if gid: - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - if pid: - if not uuidutils.is_uuid_like(pid): - raise exception.ProcessNotFound(pid=pid) - if keypair_id: - if not uuidutils.is_uuid_like(keypair_id): - raise exception.KeypairNotFound(keypair_id=keypair_id) - if securitygroup_id: - if not uuidutils.is_uuid_like(securitygroup_id): - raise exception.SecuritygroupNotFound( - securitygroup_id=securitygroup_id) - - @wsgi.response(200) - def index(self, req, gid): - try: - self._uuid_check(gid) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - context = req.environ['rack.context'] - process_list = db.process_get_all(context, gid) - process_list = self.manager.process_list(context, process_list) - - return self._view_builder.index(process_list) - - @wsgi.response(200) - def show(self, req, gid, pid): - try: - self._uuid_check(gid, pid) - context = req.environ['rack.context'] - process = db.process_get_by_pid(context, gid, pid) - self.manager.process_show(context, process) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.show(process) - - @wsgi.response(200) - def show_proxy(self, req, gid): - try: - self._uuid_check(gid) - context = req.environ['rack.context'] - process = db.process_get_all( - context, gid, filters={"is_proxy": True}) - if not process: - msg = _("Proxy process does not exist in the group %s" % gid) - raise webob.exc.HTTPBadRequest(explanation=msg) - self.manager.process_show(context, process[0]) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.show_proxy(process[0]) - - @wsgi.response(202) - def create(self, req, body, gid, is_proxy=False): - - def _validate(context, body, gid, is_proxy=False): - proxy = db.process_get_all( - context, gid, filters={"is_proxy": True}) - if is_proxy: - if len(proxy) > 0: - msg = _( - "Proxy process already exists in the group %s" % gid) - raise exception.InvalidInput(reason=msg) - else: - if len(proxy) != 1: - msg = _( - "Proxy process does not exist in the group %s" % gid) - raise webob.exc.HTTPBadRequest(explanation=msg) - - keyname = "proxy" if is_proxy else "process" - if not self.is_valid_body(body, keyname): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - values = body[keyname] - ppid = values.get("ppid") - name = values.get("name") - keypair_id = values.get("keypair_id") - securitygroup_ids = values.get("securitygroup_ids") - glance_image_id = values.get("glance_image_id") - nova_flavor_id = values.get("nova_flavor_id") - userdata = values.get("userdata") - args = 
values.get("args") - - self._uuid_check(gid, ppid, keypair_id) - - pid = unicode(uuid.uuid4()) - if not name: - prefix = "proxy-" if is_proxy else "process-" - name = prefix + pid - - if ppid: - parent_process = db.process_get_by_pid(context, gid, ppid) - - nova_keypair_id = None - if keypair_id: - keypair = db.keypair_get_by_keypair_id( - context, gid, keypair_id) - nova_keypair_id = keypair["nova_keypair_id"] - elif ppid: - keypair_id = parent_process.get("keypair_id") - if keypair_id: - keypair = db.keypair_get_by_keypair_id( - context, gid, keypair_id) - nova_keypair_id = keypair["nova_keypair_id"] - else: - default_keypair = db.keypair_get_all( - context, gid, - filters={"is_default": True}) - if default_keypair: - keypair_id = default_keypair[0]["keypair_id"] - nova_keypair_id = default_keypair[0]["nova_keypair_id"] - - if securitygroup_ids is not None and\ - not isinstance(securitygroup_ids, list): - msg = _("securitygroupids must be a list") - raise exception.InvalidInput(reason=msg) - elif securitygroup_ids: - neutron_securitygroup_ids = [] - for id in securitygroup_ids: - self._uuid_check(securitygroup_id=id) - securitygroup = db.securitygroup_get_by_securitygroup_id( - context, gid, id) - neutron_securitygroup_ids.append( - securitygroup["neutron_securitygroup_id"]) - elif ppid: - securitygroups = parent_process.get("securitygroups") - securitygroup_ids =\ - [securitygroup["securitygroup_id"] - for securitygroup in securitygroups] - neutron_securitygroup_ids =\ - [securitygroup["neutron_securitygroup_id"] - for securitygroup in securitygroups] - else: - default_securitygroups = db.securitygroup_get_all( - context, gid, - filters={"is_default": True}) - if default_securitygroups: - securitygroup_ids =\ - [securitygroup["securitygroup_id"] - for securitygroup in default_securitygroups] - neutron_securitygroup_ids =\ - [securitygroup["neutron_securitygroup_id"] - for securitygroup in default_securitygroups] - else: - msg = _( - "securitygroup_ids is required. 
Default \ - securitygroup_ids are not registered.") - raise exception.InvalidInput(reason=msg) - - if not glance_image_id and ppid: - glance_image_id = parent_process.get("glance_image_id") - - if not nova_flavor_id and ppid: - nova_flavor_id = parent_process.get("nova_flavor_id") - - if userdata: - try: - base64.b64decode(userdata) - except TypeError: - msg = _("userdadta must be a base64 encoded value.") - raise exception.InvalidInput(reason=msg) - - networks = db.network_get_all(context, gid) - if not networks: - msg = _("Netwoks does not exist in the group %s" % gid) - raise webob.exc.HTTPBadRequest(explanation=msg) - - network_ids =\ - [network["network_id"] for network in networks] - neutron_network_ids =\ - [network["neutron_network_id"] for network in networks] - nics = [] - for id in neutron_network_ids: - nics.append({"net-id": id}) - - if args is None: - args = {} - elif args is not None and\ - not isinstance(args, dict): - msg = _("args must be a dict.") - raise exception.InvalidInput(reason=msg) - else: - for key in args.keys(): - args[key] = str(args[key]) - - default_args = { - "gid": gid, - "pid": pid, - } - if ppid: - default_args["ppid"] = ppid - - if is_proxy: - default_args["rackapi_ip"] = cfg.CONF.my_ip - default_args["os_username"] = cfg.CONF.os_username - default_args["os_password"] = cfg.CONF.os_password - default_args["os_tenant_name"] = cfg.CONF.os_tenant_name - default_args["os_auth_url"] = cfg.CONF.os_auth_url - default_args["os_region_name"] = cfg.CONF.os_region_name - else: - proxy_instance_id = proxy[0]["nova_instance_id"] - default_args["proxy_ip"] = self.manager.get_process_address( - context, proxy_instance_id) - args.update(default_args) - - valid_values = {} - valid_values["gid"] = gid - valid_values["ppid"] = ppid - valid_values["pid"] = pid - valid_values["display_name"] = name - valid_values["keypair_id"] = keypair_id - valid_values["securitygroup_ids"] = securitygroup_ids - valid_values["glance_image_id"] = glance_image_id - valid_values["nova_flavor_id"] = nova_flavor_id - valid_values["userdata"] = userdata - valid_values["args"] = json.dumps(args) - valid_values["is_proxy"] = True if is_proxy else False - valid_values["network_ids"] = network_ids - - if is_proxy: - ipc_endpoint = values.get("ipc_endpoint") - shm_endpoint = values.get("shm_endpoint") - fs_endpoint = values.get("fs_endpoint") - if ipc_endpoint: - utils.check_string_length( - ipc_endpoint, 'ipc_endpoint', min_length=1, - max_length=255) - if shm_endpoint: - utils.check_string_length( - shm_endpoint, 'shm_endpoint', min_length=1, - max_length=255) - if fs_endpoint: - utils.check_string_length( - fs_endpoint, 'fs_endpoint', min_length=1, - max_length=255) - valid_values["ipc_endpoint"] = ipc_endpoint - valid_values["shm_endpoint"] = shm_endpoint - valid_values["fs_endpoint"] = fs_endpoint - - boot_values = {} - boot_values["name"] = name - boot_values["key_name"] = nova_keypair_id - boot_values["security_groups"] = neutron_securitygroup_ids - boot_values["image"] = glance_image_id - boot_values["flavor"] = nova_flavor_id - boot_values["userdata"] = userdata - boot_values["meta"] = args - boot_values["nics"] = nics - - return valid_values, boot_values - - try: - context = req.environ['rack.context'] - values, boot_values = _validate(context, body, gid, is_proxy) - nova_instance_id, status = self.manager.process_create( - context, **boot_values) - values["nova_instance_id"] = nova_instance_id - values["user_id"] = context.user_id - values["project_id"] = context.project_id - process = 
db.process_create(context, values, - values.pop("network_ids"), - values.pop("securitygroup_ids")) - process["status"] = status - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.create(process) - - @wsgi.response(202) - def create_proxy(self, req, body, gid): - return self.create(req, body, gid, is_proxy=True) - - @wsgi.response(200) - def update(self, req, body, gid, pid): - - def _validate(body, gid, pid): - self._uuid_check(gid, pid) - - if not self.is_valid_body(body, 'process'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - values = body["process"] - app_status = values.get("app_status") - - if not app_status: - msg = _("app_status is required") - raise exception.InvalidInput(reason=msg) - - valid_values = {"app_status": app_status} - - return valid_values - - try: - values = _validate(body, gid, pid) - context = req.environ['rack.context'] - process = db.process_update(context, gid, pid, values) - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.ProcessNotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.update(process) - - @wsgi.response(200) - def update_proxy(self, req, body, gid): - - def _validate(context, body, gid): - self._uuid_check(gid) - process = db.process_get_all( - context, gid, filters={"is_proxy": True}) - if not process: - msg = _("Proxy process does not exist in the group %s" % gid) - raise exception.InvalidInput(reason=msg) - - if not self.is_valid_body(body, 'proxy'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - values = body["proxy"] - app_status = values.get("app_status") - ipc_endpoint = values.get("ipc_endpoint") - shm_endpoint = values.get("shm_endpoint") - fs_endpoint = values.get("fs_endpoint") - - valid_values = {} - if app_status: - utils.check_string_length( - app_status, 'app_status', min_length=1, max_length=255) - valid_values["app_status"] = app_status - if ipc_endpoint: - utils.check_string_length( - ipc_endpoint, 'ipc_endpoint', min_length=1, max_length=255) - valid_values["ipc_endpoint"] = ipc_endpoint - if shm_endpoint: - utils.check_string_length( - shm_endpoint, 'shm_endpoint', min_length=1, max_length=255) - valid_values["shm_endpoint"] = shm_endpoint - if fs_endpoint: - utils.check_string_length( - fs_endpoint, 'fs_endpoint', min_length=1, max_length=255) - valid_values["fs_endpoint"] = fs_endpoint - - if not valid_values: - msg = _("No keyword is provided.") - raise exception.InvalidInput(reason=msg) - - return process[0]["pid"], valid_values - - try: - context = req.environ['rack.context'] - pid, values = _validate(context, body, gid) - process = db.process_update(context, gid, pid, values) - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.ProcessNotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.update(process) - - @wsgi.response(204) - def delete(self, req, gid, pid): - - def _delete_children(context, gid, pid): - processes = db.process_get_all(context, gid, {"ppid": pid}) - for process in processes: - _delete_children(context, gid, process["pid"]) - _delete(context, gid, process["pid"], - process["nova_instance_id"]) - return - - def 
_delete(context, gid, pid, nova_id): - self.manager.process_delete(context, nova_id) - try: - db.process_delete(context, gid, pid) - except exception.NotFound as e: - LOG.exception(e) - - self._uuid_check(gid, pid) - context = req.environ['rack.context'] - try: - process = db.process_get_by_pid(context, gid, pid) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - _delete_children(context, gid, pid) - _delete(context, gid, pid, process["nova_instance_id"]) - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/rack/api/v1/securitygroups.py b/rack/api/v1/securitygroups.py deleted file mode 100644 index 504836a..0000000 --- a/rack/api/v1/securitygroups.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import uuid -import webob - -from rack.api.v1.views import securitygroups as views_securitygroups -from rack.api import wsgi -from rack import db -from rack import exception - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import strutils -from rack.openstack.common import uuidutils -from rack.resourceoperator import manager - - -LOG = logging.getLogger(__name__) - - -class Controller(wsgi.Controller): - - """Securitygroup controller for RACK API.""" - - _view_builder_class = views_securitygroups.ViewBuilder - - def __init__(self): - super(Controller, self).__init__() - self.manager = manager.ResourceOperator() - - def _uuid_check(self, gid=None, securitygroup_id=None): - if gid: - if not uuidutils.is_uuid_like(gid): - raise exception.GroupNotFound(gid=gid) - if securitygroup_id: - if not uuidutils.is_uuid_like(securitygroup_id): - raise exception.SecuritygroupNotFound( - securitygroup_id=securitygroup_id) - - @wsgi.response(200) - def index(self, req, gid): - try: - self._uuid_check(gid) - except exception.SecuritygroupNotFound: - msg = _("Securitygroup could not be found") - raise webob.exc.HTTPNotFound(explanation=msg) - - context = req.environ['rack.context'] - securitygroup_list = db.securitygroup_get_all(context, gid) - securitygroup_list = self.manager.securitygroup_list( - context, securitygroup_list) - - return self._view_builder.index(securitygroup_list) - - @wsgi.response(200) - def show(self, req, gid, securitygroup_id): - try: - self._uuid_check(gid, securitygroup_id) - context = req.environ['rack.context'] - securitygroup = db.securitygroup_get_by_securitygroup_id( - context, gid, securitygroup_id) - securitygroup = self.manager.securitygroup_show( - context, securitygroup) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound(explanation=exc.format_message()) - - return self._view_builder.show(securitygroup) - - @wsgi.response(201) - def create(self, req, body, gid): - - def _validate(context, body, gid): - if not self.is_valid_body(body, 'securitygroup'): - msg = _("Invalid request body") - raise 
exception.InvalidInput(reason=msg) - - self._uuid_check(gid) - db.group_get_by_gid(context, gid) - values = body["securitygroup"] - name = values.get("name") - is_default = values.get("is_default") - - if is_default: - try: - is_default = strutils.bool_from_string( - is_default, strict=True) - except ValueError: - msg = _("is_default must be a boolean") - raise exception.InvalidInput(reason=msg) - else: - is_default = False - - valid_values = {} - valid_values["gid"] = gid - valid_values["display_name"] = name - valid_values["is_default"] = is_default - - rules = values.get("securitygrouprules") - valid_rules = [] - if rules is not None: - if not isinstance(rules, list): - msg = _("securitygrouprules must be a list") - raise exception.InvalidInput(reason=msg) - - for rule in rules: - valid_rule = {} - valid_rule["protocol"] = rule.get("protocol") - valid_rule["port_range_max"] = rule.get("port_range_max") - valid_rule["port_range_min"] = rule.get("port_range_min") - valid_rule["remote_ip_prefix"] = rule.get( - "remote_ip_prefix") - remote_securitygroup_id = rule.get( - "remote_securitygroup_id") - if remote_securitygroup_id: - ref = db.securitygroup_get_by_securitygroup_id( - context, gid, - remote_securitygroup_id) - valid_rule['remote_neutron_securitygroup_id'] =\ - ref['neutron_securitygroup_id'] - valid_rules.append(valid_rule) - - return valid_values, valid_rules - - try: - context = req.environ['rack.context'] - values, rules = _validate(context, body, gid) - values["securitygroup_id"] = unicode(uuid.uuid4()) - if not values["display_name"]: - values["display_name"] = "securitygroup-" + \ - values["securitygroup_id"] - result_value = self.manager.securitygroup_create( - context, values["display_name"], rules) - values.update(result_value) - values["user_id"] = context.user_id - values["project_id"] = context.project_id - securitygroup = db.securitygroup_create(context, values) - except exception.InvalidInput as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except exception.GroupNotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) - - return self._view_builder.create(securitygroup) - - @wsgi.response(200) - def update(self, req, body, gid, securitygroup_id): - - def _validate(body, gid, securitygroup_id): - if not self.is_valid_body(body, 'securitygroup'): - msg = _("Invalid request body") - raise exception.InvalidInput(reason=msg) - - self._uuid_check(gid, securitygroup_id) - values = body["securitygroup"] - is_default = values.get("is_default") - if is_default is not None: - try: - is_default = strutils.bool_from_string( - is_default, strict=True) - except ValueError: - msg = _("is_default must be a boolean") - raise exception.InvalidInput(reason=msg) - else: - msg = _("SecurityGroup is_default is required") - raise exception.InvalidInput(reason=msg) - - valid_values = {} - valid_values["is_default"] = is_default - return valid_values - - try: - values = _validate(body, gid, securitygroup_id) - context = req.environ['rack.context'] - securitygroup = db.securitygroup_update( - context, gid, securitygroup_id, values) - except exception.InvalidInput as exc: - raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound(explanation=exc.format_message()) - - return self._view_builder.update(securitygroup) - - @wsgi.response(204) - def delete(self, req, gid, securitygroup_id): - try: - self._uuid_check(gid, securitygroup_id) - context = req.environ['rack.context'] - 
securitygroup = db.securitygroup_get_by_securitygroup_id( - context, gid, securitygroup_id) - if securitygroup["processes"]: - raise exception.SecuritygroupInUse( - securitygroup_id=securitygroup_id) - self.manager.securitygroup_delete( - context, securitygroup['neutron_securitygroup_id']) - db.securitygroup_delete(context, gid, securitygroup_id) - except exception.SecuritygroupInUse as exc: - raise webob.exc.HTTPConflict(explanation=exc.format_message()) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound(explanation=exc.format_message()) - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/rack/api/v1/views/__init__.py b/rack/api/v1/views/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/api/v1/views/groups.py b/rack/api/v1/views/groups.py deleted file mode 100644 index 3fd3af5..0000000 --- a/rack/api/v1/views/groups.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from rack.api import common -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class ViewBuilder(common.ViewBuilder): - - """Model a group API response as a python dictionary.""" - - def index(self, group_list): - return dict(groups=[self._base_response(group) - for group in group_list]) - - def show(self, group): - base = self._base_response(group) - return dict(group=base) - - def create(self, group): - base = self._base_response(group) - return dict(group=base) - - def update(self, group): - base = self._base_response(group) - return dict(group=base) - - def _base_response(self, group): - return { - "gid": group["gid"], - "user_id": group["user_id"], - "project_id": group["project_id"], - "name": group["display_name"], - "description": group["display_description"], - "status": group["status"] - } diff --git a/rack/api/v1/views/keypairs.py b/rack/api/v1/views/keypairs.py deleted file mode 100644 index 5abfc56..0000000 --- a/rack/api/v1/views/keypairs.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from rack.api import common -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class ViewBuilder(common.ViewBuilder): - - """Model a keypair API response as a python dictionary.""" - - def index(self, keypair_list): - return dict(keypairs=[self._base_response(keypair) - for keypair in keypair_list]) - - def show(self, keypair): - base = self._base_response(keypair) - return dict(keypair=base) - - def create(self, keypair): - base = self._base_response(keypair) - base.pop('status') - return dict(keypair=base) - - def update(self, keypair): - base = self._base_response(keypair) - base.pop("status") - return dict(keypair=base) - - def _base_response(self, keypair): - return { - "keypair_id": keypair.get("keypair_id"), - "nova_keypair_id": keypair.get("nova_keypair_id"), - "user_id": keypair.get("user_id"), - "project_id": keypair.get("project_id"), - "gid": keypair.get("gid"), - "name": keypair.get("display_name"), - "private_key": keypair.get("private_key"), - "is_default": keypair.get("is_default"), - "status": keypair.get("status") - } diff --git a/rack/api/v1/views/networks.py b/rack/api/v1/views/networks.py deleted file mode 100644 index 9a3b81a..0000000 --- a/rack/api/v1/views/networks.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from rack.api import common -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class ViewBuilder(common.ViewBuilder): - """Model a networks API response as a python dictionary.""" - - def index(self, network_list): - return dict(networks=[ - self._base_response(network) for network in network_list]) - - def show(self, network): - base = self._base_response(network) - return dict(network=base) - - def create(self, network): - base = self._base_response(network) - base.pop("status") - return dict(network=base) - - def _base_response(self, network): - return { - "network_id": network.get("network_id"), - "neutron_network_id": network.get("neutron_network_id"), - "gid": network.get("gid"), - "user_id": network.get("user_id"), - "project_id": network.get("project_id"), - "name": network.get("display_name"), - "is_admin": network.get("is_admin"), - "cidr": network.get("cidr"), - "ext_router_id": network.get("ext_router"), - "status": network.get("status") - } diff --git a/rack/api/v1/views/processes.py b/rack/api/v1/views/processes.py deleted file mode 100644 index 51bd09e..0000000 --- a/rack/api/v1/views/processes.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import json -from rack.api import common -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class ViewBuilder(common.ViewBuilder): - - """Model a process API response as a python dictionary.""" - - def index(self, process_list): - return dict(processes=[self._base_response(process) - for process in process_list]) - - def show(self, process): - base = self._base_response(process) - return dict(process=base) - - def show_proxy(self, process): - base = self._base_response(process) - return dict(proxy=base) - - def create(self, process): - base = self._base_response(process) - if process.get("is_proxy"): - return dict(proxy=base) - else: - return dict(process=base) - - def update(self, process): - base = self._base_response(process) - base.pop("status") - if process.get("is_proxy"): - return dict(proxy=base) - else: - return dict(process=base) - - def _base_response(self, process): - base = { - "gid": process.get("gid"), - "pid": process.get("pid"), - "ppid": process.get("ppid"), - "user_id": process.get("user_id"), - "project_id": process.get("project_id"), - "name": process.get("display_name"), - "nova_instance_id": process.get("nova_instance_id"), - "glance_image_id": process.get("glance_image_id"), - "nova_flavor_id": process.get("nova_flavor_id"), - "status": process.get("status"), - "keypair_id": process.get("keypair_id"), - "app_status": process.get("app_status"), - "userdata": process.get("userdata"), - "args": json.loads(process.get("args")), - "networks": [{"network_id": network.get("network_id"), - "fixed": network.get("fixed"), - "floating": network.get("floating")} - for network in process.get("networks")], - "securitygroup_ids": [securitygroup.get("securitygroup_id") - for securitygroup in process - .get("securitygroups")], - } - if process.get("is_proxy"): - proxy_body = { - "ipc_endpoint": process.get("ipc_endpoint"), - "shm_endpoint": process.get("shm_endpoint"), - "fs_endpoint": process.get("fs_endpoint") - } - base.update(proxy_body) - return base diff --git a/rack/api/v1/views/securitygroups.py b/rack/api/v1/views/securitygroups.py deleted file mode 100644 index d53ba0c..0000000 --- a/rack/api/v1/views/securitygroups.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from rack.api import common -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class ViewBuilder(common.ViewBuilder): - - """Model a securitygroup API response as a python dictionary.""" - - def index(self, securitygroup_list): - return dict(securitygroups=[self._base_response(securitygroup) - for securitygroup in securitygroup_list]) - - def show(self, securitygroup): - base = self._base_response(securitygroup) - return dict(securitygroup=base) - - def create(self, securitygroup): - base = self._base_response(securitygroup) - base.pop('status') - return dict(securitygroup=base) - - def update(self, securitygroup): - base = self._base_response(securitygroup) - base.pop("status") - return dict(securitygroup=base) - - def _base_response(self, securitygroup): - return { - "securitygroup_id": securitygroup.get("securitygroup_id"), - "neutron_securitygroup_id": securitygroup - .get("neutron_securitygroup_id"), - "user_id": securitygroup.get("user_id"), - "project_id": securitygroup.get("project_id"), - "gid": securitygroup.get("gid"), - "name": securitygroup.get("display_name"), - "is_default": securitygroup.get("is_default"), - "status": securitygroup.get("status") - } diff --git a/rack/api/versions.py b/rack/api/versions.py deleted file mode 100644 index b1a66a4..0000000 --- a/rack/api/versions.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
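-
-# Sketch of the JSON returned by Versions.index() below (illustrative; the
-# actual link hrefs are derived from the request URL at runtime):
-#
-#   {"versions": [{"id": "v2.0",
-#                  "status": "CURRENT",
-#                  "updated": "2011-01-21T11:33:21Z",
-#                  "links": [{"rel": "self",
-#                             "href": "http://<host>/v2/"}]}]}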
- -from lxml import etree -from oslo.config import cfg - -from rack.api.views import versions as views_versions -from rack.api import wsgi -from rack.api import xmlutil -from rack.openstack.common import timeutils - - -CONF = cfg.CONF - -LINKS = { - 'v2.0': { - 'pdf': 'http://docs.openstack.org/' - 'api/openstack-compute/2/os-compute-devguide-2.pdf', - 'wadl': 'http://docs.openstack.org/' - 'api/openstack-compute/2/wadl/os-compute-2.wadl' - }, -} - - -VERSIONS = { - "v2.0": { - "id": "v2.0", - "status": "CURRENT", - "updated": "2011-01-21T11:33:21Z", - "links": [ - { - "rel": "describedby", - "type": "application/pdf", - "href": LINKS['v2.0']['pdf'], - }, - { - "rel": "describedby", - "type": "application/vnd.sun.wadl+xml", - "href": LINKS['v2.0']['wadl'], - }, - ], - "media-types": [ - { - "base": "application/xml", - "type": "application/vnd.openstack.compute+xml;version=2", - }, - { - "base": "application/json", - "type": "application/vnd.openstack.compute+json;version=2", - } - ], - }, -} - - -class MediaTypesTemplateElement(xmlutil.TemplateElement): - - def will_render(self, datum): - return 'media-types' in datum - - -def make_version(elem): - elem.set('id') - elem.set('status') - elem.set('updated') - - mts = MediaTypesTemplateElement('media-types') - elem.append(mts) - - mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') - mt.set('base') - mt.set('type') - - xmlutil.make_links(elem, 'links') - - -version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} - - -class VersionTemplate(xmlutil.TemplateBuilder): - - def construct(self): - root = xmlutil.TemplateElement('version', selector='version') - make_version(root) - return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) - - -class VersionsTemplate(xmlutil.TemplateBuilder): - - def construct(self): - root = xmlutil.TemplateElement('versions') - elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') - make_version(elem) - return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) - - -class ChoicesTemplate(xmlutil.TemplateBuilder): - - def construct(self): - root = xmlutil.TemplateElement('choices') - elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') - make_version(elem) - return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) - - -class AtomSerializer(wsgi.XMLDictSerializer): - - NSMAP = {None: xmlutil.XMLNS_ATOM} - - def __init__(self, metadata=None, xmlns=None): - self.metadata = metadata or {} - if not xmlns: - self.xmlns = wsgi.XMLNS_ATOM - else: - self.xmlns = xmlns - - def _get_most_recent_update(self, versions): - recent = None - for version in versions: - updated = timeutils.parse_strtime(version['updated'], - '%Y-%m-%dT%H:%M:%SZ') - if not recent: - recent = updated - elif updated > recent: - recent = updated - - return recent.strftime('%Y-%m-%dT%H:%M:%SZ') - - def _get_base_url(self, link_href): - # Make sure no trailing / - link_href = link_href.rstrip('/') - return link_href.rsplit('/', 1)[0] + '/' - - def _create_feed(self, versions, feed_title, feed_id): - feed = etree.Element('feed', nsmap=self.NSMAP) - title = etree.SubElement(feed, 'title') - title.set('type', 'text') - title.text = feed_title - - # Set this updated to the most recently updated version - recent = self._get_most_recent_update(versions) - etree.SubElement(feed, 'updated').text = recent - - etree.SubElement(feed, 'id').text = feed_id - - link = etree.SubElement(feed, 'link') - link.set('rel', 'self') - link.set('href', feed_id) - - author = etree.SubElement(feed, 
'author') - etree.SubElement(author, 'name').text = 'Rackspace' - etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' - - for version in versions: - feed.append(self._create_version_entry(version)) - - return feed - - def _create_version_entry(self, version): - entry = etree.Element('entry') - etree.SubElement(entry, 'id').text = version['links'][0]['href'] - title = etree.SubElement(entry, 'title') - title.set('type', 'text') - title.text = 'Version %s' % version['id'] - etree.SubElement(entry, 'updated').text = version['updated'] - - for link in version['links']: - link_elem = etree.SubElement(entry, 'link') - link_elem.set('rel', link['rel']) - link_elem.set('href', link['href']) - if 'type' in link: - link_elem.set('type', link['type']) - - content = etree.SubElement(entry, 'content') - content.set('type', 'text') - content.text = 'Version %s %s (%s)' % (version['id'], - version['status'], - version['updated']) - return entry - - -class VersionsAtomSerializer(AtomSerializer): - - def default(self, data): - versions = data['versions'] - feed_id = self._get_base_url(versions[0]['links'][0]['href']) - feed = self._create_feed(versions, 'Available API Versions', feed_id) - return self._to_xml(feed) - - -class VersionAtomSerializer(AtomSerializer): - - def default(self, data): - version = data['version'] - feed_id = version['links'][0]['href'] - feed = self._create_feed([version], 'About This Version', feed_id) - return self._to_xml(feed) - - -class Versions(wsgi.Resource): - - def __init__(self): - super(Versions, self).__init__(None) - - @wsgi.serializers(xml=VersionsTemplate, - atom=VersionsAtomSerializer) - def index(self, req): - """Return all versions.""" - builder = views_versions.get_view_builder(req) - return builder.build_versions(VERSIONS) - - @wsgi.serializers(xml=ChoicesTemplate) - @wsgi.response(300) - def multi(self, req): - """Return multiple choices.""" - builder = views_versions.get_view_builder(req) - return builder.build_choices(VERSIONS, req) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - args = {} - if request_environment['PATH_INFO'] == '/': - args['action'] = 'index' - else: - args['action'] = 'multi' - - return args - - -class VersionV2(object): - - @wsgi.serializers(xml=VersionTemplate, - atom=VersionAtomSerializer) - def show(self, req): - builder = views_versions.get_view_builder(req) - return builder.build_version(VERSIONS['v2.0']) - - -def create_resource(): - return wsgi.Resource(VersionV2()) diff --git a/rack/api/views/__init__.py b/rack/api/views/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/api/views/versions.py b/rack/api/views/versions.py deleted file mode 100644 index cdb780b..0000000 --- a/rack/api/views/versions.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
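-
-# Behavior sketch for ViewBuilder.generate_href() below (illustrative, and
-# assuming no link-prefix override is configured): with a base_url of
-# "http://host/", generate_href('v2.0') returns "http://host/v2/", while
-# any 'v3.*' version id maps to "http://host/v3/".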
- -import copy -import os - -from rack.api import common - - -def get_view_builder(req): - base_url = req.application_url - return ViewBuilder(base_url) - - -class ViewBuilder(common.ViewBuilder): - - def __init__(self, base_url): - """:param base_url: url of the root wsgi application.""" - self.base_url = base_url - - def build_choices(self, VERSIONS, req): - version_objs = [] - for version in VERSIONS: - version = VERSIONS[version] - version_objs.append({ - "id": version['id'], - "status": version['status'], - "links": [ - { - "rel": "self", - "href": self.generate_href(version['id'], req.path), - }, - ], - "media-types": version['media-types'], - }) - - return dict(choices=version_objs) - - def build_versions(self, versions): - version_objs = [] - for version in sorted(versions.keys()): - version = versions[version] - version_objs.append({ - "id": version['id'], - "status": version['status'], - "updated": version['updated'], - "links": self._build_links(version), - }) - - return dict(versions=version_objs) - - def build_version(self, version): - reval = copy.deepcopy(version) - reval['links'].insert(0, { - "rel": "self", - "href": self.base_url.rstrip('/') + '/', - }) - return dict(version=reval) - - def _build_links(self, version_data): - """Generate a container of links that refer to the provided version.""" - href = self.generate_href(version_data['id']) - - links = [ - { - "rel": "self", - "href": href, - }, - ] - - return links - - def generate_href(self, version, path=None): - """Create an url that refers to a specific version_number.""" - prefix = self._update_compute_link_prefix(self.base_url) - if version.find('v3.') == 0: - version_number = 'v3' - else: - version_number = 'v2' - - if path: - path = path.strip('/') - return os.path.join(prefix, version_number, path) - else: - return os.path.join(prefix, version_number) + '/' diff --git a/rack/api/wsgi.py b/rack/api/wsgi.py deleted file mode 100644 index 09f30cd..0000000 --- a/rack/api/wsgi.py +++ /dev/null @@ -1,1312 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
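-
-# Usage sketch (illustrative, mirroring the v1 controllers earlier in this
-# tree): a controller binds its success status code with @wsgi.response(),
-# optionally attaches serializers with @wsgi.serializers(), and is exposed
-# through wsgi.Resource, e.g.:
-#
-#   class Controller(wsgi.Controller):
-#       @wsgi.response(200)
-#       def index(self, req, gid):
-#           ...
-#
-#   def create_resource():
-#       return wsgi.Resource(Controller())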
- -import inspect -import math -import time -from xml.dom import minidom - -from lxml import etree -import six -import webob - -from rack.api import xmlutil -from rack import exception -from rack.openstack.common import gettextutils -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import jsonutils -from rack.openstack.common import log as logging -from rack import utils -from rack import wsgi - - -XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' -XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' - -XMLNS_ATOM = 'http://www.w3.org/2005/Atom' - -LOG = logging.getLogger(__name__) - -SUPPORTED_CONTENT_TYPES = ( - 'application/json', - 'application/vnd.openstack.compute+json', - 'application/xml', - 'application/vnd.openstack.compute+xml', -) - -_MEDIA_TYPE_MAP = { - 'application/vnd.openstack.compute+json': 'json', - 'application/json': 'json', - 'application/vnd.openstack.compute+xml': 'xml', - 'application/xml': 'xml', - 'application/atom+xml': 'atom', -} - -_ROUTES_METHODS = [ - 'create', - 'delete', - 'show', - 'update', -] - -_METHODS_WITH_BODY = [ - 'POST', - 'PUT', -] - - -class Request(webob.Request): - - """Add some OpenStack API-specific logic to the base webob.Request.""" - - def __init__(self, *args, **kwargs): - super(Request, self).__init__(*args, **kwargs) - self._extension_data = {'db_items': {}} - - def cache_db_items(self, key, items, item_key='id'): - """Allow API methods to store objects from a DB query to be - used by API extensions within the same API request. - - An instance of this class only lives for the lifetime of a - single API request, so there's no need to implement full - cache management. - """ - db_items = self._extension_data['db_items'].setdefault(key, {}) - for item in items: - db_items[item[item_key]] = item - - def get_db_items(self, key): - """Allow an API extension to get previously stored objects within - the same API request. - - Note that the object data will be slightly stale. - """ - return self._extension_data['db_items'][key] - - def get_db_item(self, key, item_key): - """Allow an API extension to get a previously stored object - within the same API request. - - Note that the object data will be slightly stale. 
- """ - return self.get_db_items(key).get(item_key) - - def cache_db_instances(self, instances): - self.cache_db_items('instances', instances, 'uuid') - - def cache_db_instance(self, instance): - self.cache_db_items('instances', [instance], 'uuid') - - def get_db_instances(self): - return self.get_db_items('instances') - - def get_db_instance(self, instance_uuid): - return self.get_db_item('instances', instance_uuid) - - def cache_db_flavors(self, flavors): - self.cache_db_items('flavors', flavors, 'flavorid') - - def cache_db_flavor(self, flavor): - self.cache_db_items('flavors', [flavor], 'flavorid') - - def get_db_flavors(self): - return self.get_db_items('flavors') - - def get_db_flavor(self, flavorid): - return self.get_db_item('flavors', flavorid) - - def cache_db_compute_nodes(self, compute_nodes): - self.cache_db_items('compute_nodes', compute_nodes, 'id') - - def cache_db_compute_node(self, compute_node): - self.cache_db_items('compute_nodes', [compute_node], 'id') - - def get_db_compute_nodes(self): - return self.get_db_items('compute_nodes') - - def get_db_compute_node(self, id): - return self.get_db_item('compute_nodes', id) - - def best_match_content_type(self): - """Determine the requested response content-type.""" - if 'rack.best_content_type' not in self.environ: - # Calculate the best MIME type - content_type = None - - # Check URL path suffix - parts = self.path.rsplit('.', 1) - if len(parts) > 1: - possible_type = 'application/' + parts[1] - if possible_type in SUPPORTED_CONTENT_TYPES: - content_type = possible_type - - if not content_type: - content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) - - self.environ['rack.best_content_type'] = (content_type or - 'application/json') - - return self.environ['rack.best_content_type'] - - def get_content_type(self): - """Determine content type of the request body. - - Does not do any body introspection, only checks header - - """ - if "Content-Type" not in self.headers: - return None - - content_type = self.content_type - - # NOTE(markmc): text/plain is the default for eventlet and - # other webservers which use mimetools.Message.gettype() - # whereas twisted defaults to ''. - if not content_type or content_type == 'text/plain': - return None - - if content_type not in SUPPORTED_CONTENT_TYPES: - raise exception.InvalidContentType(content_type=content_type) - - return content_type - - def best_match_language(self): - """Determine the best available language for the request. - - :returns: the best language match or None if the 'Accept-Language' - header was not available in the request. 
- """ - if not self.accept_language: - return None - return self.accept_language.best_match( - gettextutils.get_available_languages('rack')) - - -class ActionDispatcher(object): - - """Maps method name to local methods through action name.""" - - def dispatch(self, *args, **kwargs): - """Find and call local method.""" - action = kwargs.pop('action', 'default') - action_method = getattr(self, str(action), self.default) - return action_method(*args, **kwargs) - - def default(self, data): - raise NotImplementedError() - - -class TextDeserializer(ActionDispatcher): - - """Default request body deserialization.""" - - def deserialize(self, datastring, action='default'): - return self.dispatch(datastring, action=action) - - def default(self, datastring): - return {} - - -class JSONDeserializer(TextDeserializer): - - def _from_json(self, datastring): - try: - return jsonutils.loads(datastring) - except ValueError: - msg = _("cannot understand JSON") - raise exception.MalformedRequestBody(reason=msg) - - def default(self, datastring): - return {'body': self._from_json(datastring)} - - -class XMLDeserializer(TextDeserializer): - - def __init__(self, metadata=None): - """:param metadata: information needed to deserialize xml into - a dictionary. - """ - super(XMLDeserializer, self).__init__() - self.metadata = metadata or {} - - def _from_xml(self, datastring): - plurals = set(self.metadata.get('plurals', {})) - node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} - - def _from_xml_node(self, node, listnames): - """Convert a minidom node to a simple Python type. - - :param listnames: list of XML node names whose subnodes should - be considered list items. - - """ - if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: - return node.childNodes[0].nodeValue - elif node.nodeName in listnames: - return [self._from_xml_node(n, listnames) for n in node.childNodes] - else: - result = dict() - for attr in node.attributes.keys(): - if not attr.startswith("xmlns"): - result[attr] = node.attributes[attr].nodeValue - for child in node.childNodes: - if child.nodeType != node.TEXT_NODE: - result[child.nodeName] = self._from_xml_node(child, - listnames) - return result - - def find_first_child_named_in_namespace(self, parent, namespace, name): - """Search a nodes children for the first child with a given name.""" - for node in parent.childNodes: - if (node.localName == name and - node.namespaceURI and - node.namespaceURI == namespace): - return node - return None - - def find_first_child_named(self, parent, name): - """Search a nodes children for the first child with a given name.""" - for node in parent.childNodes: - if node.localName == name: - return node - return None - - def find_children_named(self, parent, name): - """Return all of a nodes children who have the given name.""" - for node in parent.childNodes: - if node.localName == name: - yield node - - def extract_text(self, node): - """Get the text field contained by the given node.""" - ret_val = "" - for child in node.childNodes: - if child.nodeType == child.TEXT_NODE: - ret_val += child.nodeValue - return ret_val - - def extract_elements(self, node): - """Get only Element type childs from node.""" - elements = [] - for child in node.childNodes: - if child.nodeType == child.ELEMENT_NODE: - elements.append(child) - return elements - - def find_attribute_or_element(self, parent, name): - """Get an attribute value; fallback to an element if not found.""" - if 
parent.hasAttribute(name): - return parent.getAttribute(name) - - node = self.find_first_child_named(parent, name) - if node: - return self.extract_text(node) - - return None - - def default(self, datastring): - return {'body': self._from_xml(datastring)} - - -class MetadataXMLDeserializer(XMLDeserializer): - - def extract_metadata(self, metadata_node): - """Marshal the metadata attribute of a parsed request.""" - metadata = {} - if metadata_node is not None: - for meta_node in self.find_children_named(metadata_node, "meta"): - key = meta_node.getAttribute("key") - metadata[key] = self.extract_text(meta_node) - return metadata - - -class DictSerializer(ActionDispatcher): - - """Default request body serialization.""" - - def serialize(self, data, action='default'): - return self.dispatch(data, action=action) - - def default(self, data): - return "" - - -class JSONDictSerializer(DictSerializer): - - """Default JSON request body serialization.""" - - def default(self, data): - return jsonutils.dumps(data) - - -class XMLDictSerializer(DictSerializer): - - def __init__(self, metadata=None, xmlns=None): - """:param metadata: information needed to deserialize xml into - a dictionary. - :param xmlns: XML namespace to include with serialized xml - """ - super(XMLDictSerializer, self).__init__() - self.metadata = metadata or {} - self.xmlns = xmlns - - def default(self, data): - # We expect data to contain a single key which is the XML root. - root_key = data.keys()[0] - doc = minidom.Document() - node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) - - return self.to_xml_string(node) - - def to_xml_string(self, node, has_atom=False): - self._add_xmlns(node, has_atom) - return node.toxml('UTF-8') - - # NOTE (ameade): the has_atom should be removed after all of the - # xml serializers and view builders have been updated to the current - # spec that required all responses include the xmlns:atom, the has_atom - # flag is to prevent current tests from breaking - def _add_xmlns(self, node, has_atom=False): - if self.xmlns is not None: - node.setAttribute('xmlns', self.xmlns) - if has_atom: - node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") - - def _to_xml_node(self, doc, metadata, nodename, data): - """Recursive method to convert data members to XML nodes.""" - result = doc.createElement(nodename) - - # Set the xml namespace if one is specified - # TODO(justinsb): We could also use prefixes on the keys - xmlns = metadata.get('xmlns', None) - if xmlns: - result.setAttribute('xmlns', xmlns) - - # TODO(bcwaldon): accomplish this without a type-check - if isinstance(data, list): - collections = metadata.get('list_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for item in data: - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(item)) - result.appendChild(node) - return result - singular = metadata.get('plurals', {}).get(nodename, None) - if singular is None: - if nodename.endswith('s'): - singular = nodename[:-1] - else: - singular = 'item' - for item in data: - node = self._to_xml_node(doc, metadata, singular, item) - result.appendChild(node) - # TODO(bcwaldon): accomplish this without a type-check - elif isinstance(data, dict): - collections = metadata.get('dict_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for k, v in data.items(): - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(k)) - text = 
doc.createTextNode(str(v)) - node.appendChild(text) - result.appendChild(node) - return result - attrs = metadata.get('attributes', {}).get(nodename, {}) - for k, v in data.items(): - if k in attrs: - result.setAttribute(k, str(v)) - else: - if k == "deleted": - v = str(bool(v)) - node = self._to_xml_node(doc, metadata, k, v) - result.appendChild(node) - else: - # Type is atom - node = doc.createTextNode(str(data)) - result.appendChild(node) - return result - - def _create_link_nodes(self, xml_doc, links): - link_nodes = [] - for link in links: - link_node = xml_doc.createElement('atom:link') - link_node.setAttribute('rel', link['rel']) - link_node.setAttribute('href', link['href']) - if 'type' in link: - link_node.setAttribute('type', link['type']) - link_nodes.append(link_node) - return link_nodes - - def _to_xml(self, root): - """Convert the xml object to an xml string.""" - return etree.tostring(root, encoding='UTF-8', xml_declaration=True) - - -def serializers(**serializers): - """Attaches serializers to a method. - - This decorator associates a dictionary of serializers with a - method. Note that the function attributes are directly - manipulated; the method is not wrapped. - """ - - def decorator(func): - if not hasattr(func, 'wsgi_serializers'): - func.wsgi_serializers = {} - func.wsgi_serializers.update(serializers) - return func - return decorator - - -def deserializers(**deserializers): - """Attaches deserializers to a method. - - This decorator associates a dictionary of deserializers with a - method. Note that the function attributes are directly - manipulated; the method is not wrapped. - """ - - def decorator(func): - if not hasattr(func, 'wsgi_deserializers'): - func.wsgi_deserializers = {} - func.wsgi_deserializers.update(deserializers) - return func - return decorator - - -def response(code): - """Attaches response code to a method. - - This decorator associates a response code with a method. Note - that the function attributes are directly manipulated; the method - is not wrapped. - """ - - def decorator(func): - func.wsgi_code = code - return func - return decorator - - -class ResponseObject(object): - - """Bundles a response object with appropriate serializers. - - Object that app methods may return in order to bind alternate - serializers with a response object to be serialized. Its use is - optional. - """ - - def __init__(self, obj, code=None, headers=None, **serializers): - """Binds serializers with an object. - - Takes keyword arguments akin to the @serializer() decorator - for specifying serializers. Serializers specified will be - given preference over default serializers or method-specific - serializers on return. - """ - - self.obj = obj - self.serializers = serializers - self._default_code = 200 - self._code = code - self._headers = headers or {} - self.serializer = None - self.media_type = None - - def __getitem__(self, key): - """Retrieves a header with the given name.""" - - return self._headers[key.lower()] - - def __setitem__(self, key, value): - """Sets a header with the given name to the given value.""" - - self._headers[key.lower()] = value - - def __delitem__(self, key): - """Deletes the header with the given name.""" - - del self._headers[key.lower()] - - def _bind_method_serializers(self, meth_serializers): - """Binds method serializers with the response object. - - Binds the method serializers with the response object. - Serializers specified to the constructor will take precedence - over serializers specified to this method. 
- - :param meth_serializers: A dictionary with keys mapping to - response types and values containing - serializer objects. - """ - - # We can't use update because that would be the wrong - # precedence - for mtype, serializer in meth_serializers.items(): - self.serializers.setdefault(mtype, serializer) - - def get_serializer(self, content_type, default_serializers=None): - """Returns the serializer for the wrapped object. - - Returns the serializer for the wrapped object subject to the - indicated content type. If no serializer matching the content - type is attached, an appropriate serializer drawn from the - default serializers will be used. If no appropriate - serializer is available, raises InvalidContentType. - """ - - default_serializers = default_serializers or {} - - try: - mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) - if mtype in self.serializers: - return mtype, self.serializers[mtype] - else: - return mtype, default_serializers[mtype] - except (KeyError, TypeError): - raise exception.InvalidContentType(content_type=content_type) - - def preserialize(self, content_type, default_serializers=None): - """Prepares the serializer that will be used to serialize. - - Determines the serializer that will be used and prepares an - instance of it for later call. This allows the serializer to - be accessed by extensions for, e.g., template extension. - """ - - mtype, serializer = self.get_serializer(content_type, - default_serializers) - self.media_type = mtype - self.serializer = serializer() - - def attach(self, **kwargs): - """Attach slave templates to serializers.""" - - if self.media_type in kwargs: - self.serializer.attach(kwargs[self.media_type]) - - def serialize(self, request, content_type, default_serializers=None): - """Serializes the wrapped object. - - Utility method for serializing the wrapped object. Returns a - webob.Response object. - """ - - if self.serializer: - serializer = self.serializer - else: - _mtype, _serializer = self.get_serializer(content_type, - default_serializers) - serializer = _serializer() - - response = webob.Response() - response.status_int = self.code - for hdr, value in self._headers.items(): - response.headers[hdr] = utils.utf8(str(value)) - response.headers['Content-Type'] = utils.utf8(content_type) - if self.obj is not None: - response.body = serializer.serialize(self.obj) - - return response - - @property - def code(self): - """Retrieve the response status.""" - - return self._code or self._default_code - - @property - def headers(self): - """Retrieve the headers.""" - - return self._headers.copy() - - -def action_peek_json(body): - """Determine action to invoke.""" - - try: - decoded = jsonutils.loads(body) - except ValueError: - msg = _("cannot understand JSON") - raise exception.MalformedRequestBody(reason=msg) - - # Make sure there's exactly one key... - if len(decoded) != 1: - msg = _("too many body keys") - raise exception.MalformedRequestBody(reason=msg) - - # Return the action and the decoded body... - return decoded.keys()[0] - - -def action_peek_xml(body): - """Determine action to invoke.""" - - dom = xmlutil.safe_minidom_parse_string(body) - action_node = dom.childNodes[0] - - return action_node.tagName - - -class ResourceExceptionHandler(object): - - """Context manager to handle Resource exceptions. - - Used when processing exceptions generated by API implementation - methods (or their extensions). Converts most exceptions to Fault - exceptions, with the appropriate logging. 
- """ - - def __enter__(self): - return None - - def __exit__(self, ex_type, ex_value, ex_traceback): - if not ex_value: - return True - - if isinstance(ex_value, exception.NotAuthorized): - raise Fault(webob.exc.HTTPForbidden( - explanation=ex_value.format_message())) - elif isinstance(ex_value, exception.Invalid): - raise Fault(exception.ConvertedException( - code=ex_value.code, - explanation=ex_value.format_message())) - - # Under python 2.6, TypeError's exception value is actually a string, - # so test # here via ex_type instead: - # http://bugs.python.org/issue7853 - elif issubclass(ex_type, TypeError): - exc_info = (ex_type, ex_value, ex_traceback) - LOG.error(_('Exception handling resource: %s') % ex_value, - exc_info=exc_info) - raise Fault(webob.exc.HTTPBadRequest()) - elif isinstance(ex_value, Fault): - LOG.info(_("Fault thrown: %s"), unicode(ex_value)) - raise ex_value - elif isinstance(ex_value, webob.exc.HTTPException): - LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) - raise Fault(ex_value) - - # We didn't handle the exception - return False - - -class Resource(wsgi.Application): - - """WSGI app that handles (de)serialization and controller dispatch. - - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon its controller. All - controller action methods must accept a 'req' argument, which is the - incoming wsgi.Request. If the operation is a PUT or POST, the controller - method must also accept a 'body' argument (the deserialized request body). - They may raise a webob.exc exception or return a dict, which will be - serialized by requested content type. - - Exceptions derived from webob.exc.HTTPException will be automatically - wrapped in Fault() to provide API friendly error responses. - - """ - - def __init__(self, controller, action_peek=None, inherits=None, - **deserializers): - """:param controller: object that implement methods created by routes - lib - :param action_peek: dictionary of routines for peeking into an - action request body to determine the - desired action - :param inherits: another resource object that this resource should - inherit extensions from. Any action extensions that - are applied to the parent resource will also apply - to this resource. 
- """ - - self.controller = controller - - default_deserializers = dict(xml=XMLDeserializer, - json=JSONDeserializer) - default_deserializers.update(deserializers) - - self.default_deserializers = default_deserializers - self.default_serializers = dict(xml=XMLDictSerializer, - json=JSONDictSerializer) - - self.action_peek = dict(xml=action_peek_xml, - json=action_peek_json) - self.action_peek.update(action_peek or {}) - - # Copy over the actions dictionary - self.wsgi_actions = {} - if controller: - self.register_actions(controller) - - # Save a mapping of extensions - self.wsgi_extensions = {} - self.wsgi_action_extensions = {} - self.inherits = inherits - - def register_actions(self, controller): - """Registers controller actions with this resource.""" - - actions = getattr(controller, 'wsgi_actions', {}) - for key, method_name in actions.items(): - self.wsgi_actions[key] = getattr(controller, method_name) - - def register_extensions(self, controller): - """Registers controller extensions with this resource.""" - - extensions = getattr(controller, 'wsgi_extensions', []) - for method_name, action_name in extensions: - # Look up the extending method - extension = getattr(controller, method_name) - - if action_name: - # Extending an action... - if action_name not in self.wsgi_action_extensions: - self.wsgi_action_extensions[action_name] = [] - self.wsgi_action_extensions[action_name].append(extension) - else: - # Extending a regular method - if method_name not in self.wsgi_extensions: - self.wsgi_extensions[method_name] = [] - self.wsgi_extensions[method_name].append(extension) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - - # NOTE(Vek): Check for get_action_args() override in the - # controller - if hasattr(self.controller, 'get_action_args'): - return self.controller.get_action_args(request_environment) - - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except (KeyError, IndexError, AttributeError): - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args - - def get_body(self, request): - try: - content_type = request.get_content_type() - except exception.InvalidContentType: - LOG.debug(_("Unrecognized Content-Type provided in request")) - return None, '' - - return content_type, request.body - - def deserialize(self, meth, content_type, body): - meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) - try: - mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) - if mtype in meth_deserializers: - deserializer = meth_deserializers[mtype] - else: - deserializer = self.default_deserializers[mtype] - except (KeyError, TypeError): - raise exception.InvalidContentType(content_type=content_type) - - if (hasattr(deserializer, 'want_controller') - and deserializer.want_controller): - return deserializer(self.controller).deserialize(body) - else: - return deserializer().deserialize(body) - - def pre_process_extensions(self, extensions, request, action_args): - # List of callables for post-processing extensions - post = [] - - for ext in extensions: - if inspect.isgeneratorfunction(ext): - response = None - - # If it's a generator function, the part before the - # yield is the preprocessing stage - try: - with ResourceExceptionHandler(): - gen = ext(req=request, **action_args) - response = gen.next() - except Fault as ex: - response = ex - - # We had a response... 
- if response: - return response, [] - - # No response, queue up generator for post-processing - post.append(gen) - else: - # Regular functions only perform post-processing - post.append(ext) - - # Run post-processing in the reverse order - return None, reversed(post) - - def post_process_extensions(self, extensions, resp_obj, request, - action_args): - for ext in extensions: - response = None - if inspect.isgenerator(ext): - # If it's a generator, run the second half of - # processing - try: - with ResourceExceptionHandler(): - response = ext.send(resp_obj) - except StopIteration: - # Normal exit of generator - continue - except Fault as ex: - response = ex - else: - # Regular functions get post-processing... - try: - with ResourceExceptionHandler(): - response = ext(req=request, resp_obj=resp_obj, - **action_args) - except Fault as ex: - response = ex - - # We had a response... - if response: - return response - - return None - - def _should_have_body(self, request): - return request.method in _METHODS_WITH_BODY - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """WSGI method that controls (de)serialization and method dispatch.""" - - # Identify the action, its arguments, and the requested - # content type - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) - content_type, body = self.get_body(request) - accept = request.best_match_content_type() - - # NOTE(Vek): Splitting the function up this way allows for - # auditing by external tools that wrap the existing - # function. If we try to audit __call__(), we can - # run into troubles due to the @webob.dec.wsgify() - # decorator. - return self._process_stack(request, action, action_args, - content_type, body, accept) - - def _process_stack(self, request, action, action_args, - content_type, body, accept): - """Implement the processing stack.""" - - # Get the implementing method - try: - meth, extensions = self.get_method(request, action, - content_type, body) - except (AttributeError, TypeError): - return Fault(webob.exc.HTTPNotFound()) - except KeyError as ex: - msg = _("There is no such action: %s") % ex.args[0] - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except exception.MalformedRequestBody: - msg = _("Malformed request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - if body: - msg = _("Action: '%(action)s', body: " - "%(body)s") % {'action': action, - 'body': unicode(body, 'utf-8')} - LOG.debug(logging.mask_password(msg)) - LOG.debug(_("Calling method '%(meth)s' (Content-type='%(ctype)s', " - "Accept='%(accept)s')"), - {'meth': str(meth), - 'ctype': content_type, - 'accept': accept}) - - # Now, deserialize the request body... 
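For illustration, this is roughly how the routing args stripped by get_action_args() above become keyword arguments on a controller method; the controller name and values are invented:

    class GroupController(object):
        def show(self, req, id):
            return {'group': {'id': id}}

    environ = {'wsgiorg.routing_args': (None, {'controller': 'group',
                                               'action': 'show',
                                               'id': 'abc123',
                                               'format': 'json'})}
    args = environ['wsgiorg.routing_args'][1].copy()
    args.pop('controller', None)    # stripped by get_action_args()
    args.pop('format', None)
    action = args.pop('action')     # 'show'
    meth = getattr(GroupController(), action)
    print(meth(req=None, **args))   # {'group': {'id': 'abc123'}}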
- try: - contents = {} - if self._should_have_body(request): - # allow empty body with PUT and POST - if request.content_length == 0: - contents = {'body': None} - else: - contents = self.deserialize(meth, content_type, body) - except exception.InvalidContentType: - msg = _("Unsupported Content-Type") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except exception.MalformedRequestBody: - msg = _("Malformed request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Update the action args - action_args.update(contents) - - project_id = action_args.pop("project_id", None) - context = request.environ.get('rack.context') - if (context and project_id and (project_id != context.project_id)): - msg = _("Malformed request URL: URL's project_id '%(project_id)s'" - " doesn't match Context's project_id" - " '%(context_project_id)s'") % \ - {'project_id': project_id, - 'context_project_id': context.project_id} - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Run pre-processing extensions - response, post = self.pre_process_extensions(extensions, - request, action_args) - - if not response: - try: - with ResourceExceptionHandler(): - action_result = self.dispatch(meth, request, action_args) - except Fault as ex: - response = ex - - if not response: - # No exceptions; convert action_result into a - # ResponseObject - resp_obj = None - if type(action_result) is dict or action_result is None: - resp_obj = ResponseObject(action_result) - elif isinstance(action_result, ResponseObject): - resp_obj = action_result - else: - response = action_result - - # Run post-processing extensions - if resp_obj: - # Do a preserialize to set up the response object - serializers = getattr(meth, 'wsgi_serializers', {}) - resp_obj._bind_method_serializers(serializers) - if hasattr(meth, 'wsgi_code'): - resp_obj._default_code = meth.wsgi_code - resp_obj.preserialize(accept, self.default_serializers) - - # Process post-processing extensions - response = self.post_process_extensions(post, resp_obj, - request, action_args) - - if resp_obj and not response: - response = resp_obj.serialize(request, accept, - self.default_serializers) - - if hasattr(response, 'headers'): - if context: - response.headers.add('x-compute-request-id', - context.request_id) - - for hdr, val in response.headers.items(): - # Headers must be utf-8 strings - response.headers[hdr] = utils.utf8(str(val)) - - return response - - def get_method(self, request, action, content_type, body): - meth, extensions = self._get_method(request, - action, - content_type, - body) - if self.inherits: - _meth, parent_ext = self.inherits.get_method(request, - action, - content_type, - body) - extensions.extend(parent_ext) - return meth, extensions - - def _get_method(self, request, action, content_type, body): - """Look up the action-specific method and its extensions.""" - - # Look up the method - try: - if not self.controller: - meth = getattr(self, action) - else: - meth = getattr(self.controller, action) - except AttributeError: - if (not self.wsgi_actions or - action not in _ROUTES_METHODS + ['action']): - # Propagate the error - raise - else: - return meth, self.wsgi_extensions.get(action, []) - - if action == 'action': - # OK, it's an action; figure out which action... 
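The action_peek step that follows expects the request body to carry exactly one key naming the sub-action, as action_peek_json() above enforces. A standalone sketch, with the stdlib json module standing in for jsonutils and an invented 'resize' payload:

    import json

    def peek(body):
        decoded = json.loads(body)
        # Make sure there's exactly one key, as action_peek_json() does.
        if len(decoded) != 1:
            raise ValueError('too many body keys')
        return list(decoded.keys())[0]

    print(peek('{"resize": {"flavorRef": "2"}}'))  # -> 'resize'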
- mtype = _MEDIA_TYPE_MAP.get(content_type) - action_name = self.action_peek[mtype](body) - else: - action_name = action - - # Look up the action method - return (self.wsgi_actions[action_name], - self.wsgi_action_extensions.get(action_name, [])) - - def dispatch(self, method, request, action_args): - """Dispatch a call to the action-specific method.""" - - return method(req=request, **action_args) - - -def action(name): - """Mark a function as an action. - - The given name will be taken as the action key in the body. - - This is also overloaded to allow extensions to provide - non-extending definitions of create and delete operations. - """ - - def decorator(func): - func.wsgi_action = name - return func - return decorator - - -def extends(*args, **kwargs): - """Indicate a function extends an operation. - - Can be used as either:: - - @extends - def index(...): - pass - - or as:: - - @extends(action='resize') - def _action_resize(...): - pass - """ - - def decorator(func): - # Store enough information to find what we're extending - func.wsgi_extends = (func.__name__, kwargs.get('action')) - return func - - # If we have positional arguments, call the decorator - if args: - return decorator(*args) - - # OK, return the decorator instead - return decorator - - -class ControllerMetaclass(type): - - """Controller metaclass. - - This metaclass automates the task of assembling a dictionary - mapping action keys to method names. - """ - - def __new__(mcs, name, bases, cls_dict): - """Adds the wsgi_actions dictionary to the class.""" - - # Find all actions - actions = {} - extensions = [] - # start with wsgi actions from base classes - for base in bases: - actions.update(getattr(base, 'wsgi_actions', {})) - for key, value in cls_dict.items(): - if not callable(value): - continue - if getattr(value, 'wsgi_action', None): - actions[value.wsgi_action] = key - elif getattr(value, 'wsgi_extends', None): - extensions.append(value.wsgi_extends) - - # Add the actions and extensions to the class dict - cls_dict['wsgi_actions'] = actions - cls_dict['wsgi_extensions'] = extensions - - return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, - cls_dict) - - -@six.add_metaclass(ControllerMetaclass) -class Controller(object): - - """Default controller.""" - - _view_builder_class = None - - def __init__(self, view_builder=None): - """Initialize controller with a view builder instance.""" - if view_builder: - self._view_builder = view_builder - elif self._view_builder_class: - self._view_builder = self._view_builder_class() - else: - self._view_builder = None - - @staticmethod - def is_valid_body(body, entity_name): - if not (body and entity_name in body): - return False - - def is_dict(d): - try: - d.get(None) - return True - except AttributeError: - return False - - if not is_dict(body[entity_name]): - return False - - return True - - -class Fault(webob.exc.HTTPException): - - """Wrap webob.exc.HTTPException to provide API friendly response.""" - - _fault_names = { - 400: "badRequest", - 401: "unauthorized", - 403: "forbidden", - 404: "itemNotFound", - 405: "badMethod", - 409: "conflictingRequest", - 413: "overLimit", - 415: "badMediaType", - 429: "overLimit", - 501: "notImplemented", - 503: "serviceUnavailable"} - - def __init__(self, exception): - """Create a Fault for the given webob.exc.exception.""" - self.wrapped_exc = exception - for key, value in self.wrapped_exc.headers.items(): - self.wrapped_exc.headers[key] = str(value) - self.status_int = exception.status_int - - 
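Tying the pieces above together: assuming this module's Controller base class and action() decorator, the metaclass collects each decorated method into wsgi_actions, keyed by action name (the controller and method names here are made up):

    class ServerController(Controller):
        @action('resize')
        def _action_resize(self, req, id, body):
            return {'resized': id}

    # ControllerMetaclass mapped the action key to the method name.
    print(ServerController.wsgi_actions)  # {'resize': '_action_resize'}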
@webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Generate a WSGI response based on the exception passed to ctor.""" - - user_locale = req.best_match_language() - # Replace the body with fault details. - code = self.wrapped_exc.status_int - fault_name = self._fault_names.get(code, "computeFault") - explanation = self.wrapped_exc.explanation - LOG.debug(_("Returning %(code)s to user: %(explanation)s"), - {'code': code, 'explanation': explanation}) - - explanation = gettextutils.translate(explanation, - user_locale) - fault_data = { - fault_name: { - 'code': code, - 'message': explanation}} - if code == 413 or code == 429: - retry = self.wrapped_exc.headers.get('Retry-After', None) - if retry: - fault_data[fault_name]['retryAfter'] = retry - - # 'code' is an attribute on the fault tag itself - metadata = {'attributes': {fault_name: 'code'}} - - xml_serializer = XMLDictSerializer(metadata, XMLNS_V11) - - content_type = req.best_match_content_type() - serializer = { - 'application/xml': xml_serializer, - 'application/json': JSONDictSerializer(), - }[content_type] - - self.wrapped_exc.body = serializer.serialize(fault_data) - self.wrapped_exc.content_type = content_type - _set_request_id_header(req, self.wrapped_exc.headers) - - return self.wrapped_exc - - def __str__(self): - return self.wrapped_exc.__str__() - - -class RateLimitFault(webob.exc.HTTPException): - - """Rate-limited request response.""" - - def __init__(self, message, details, retry_time): - """Initialize new `RateLimitFault` with relevant information.""" - hdrs = RateLimitFault._retry_after(retry_time) - self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs) - self.content = { - "overLimit": { - "code": self.wrapped_exc.status_int, - "message": message, - "details": details, - "retryAfter": hdrs['Retry-After'], - }, - } - - @staticmethod - def _retry_after(retry_time): - delay = int(math.ceil(retry_time - time.time())) - retry_after = delay if delay > 0 else 0 - headers = {'Retry-After': '%d' % retry_after} - return headers - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """Return the wrapped exception with a serialized body conforming - to our error format. - """ - user_locale = request.best_match_language() - content_type = request.best_match_content_type() - metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}} - - self.content['overLimit']['message'] = \ - gettextutils.translate( - self.content['overLimit']['message'], user_locale) - self.content['overLimit']['details'] = \ - gettextutils.translate( - self.content['overLimit']['details'], user_locale) - - xml_serializer = XMLDictSerializer(metadata, XMLNS_V11) - serializer = { - 'application/xml': xml_serializer, - 'application/json': JSONDictSerializer(), - }[content_type] - - content = serializer.serialize(self.content) - self.wrapped_exc.body = content - self.wrapped_exc.content_type = content_type - - return self.wrapped_exc - - -def _set_request_id_header(req, headers): - context = req.environ.get('rack.context') - if context: - headers['x-compute-request-id'] = context.request_id diff --git a/rack/api/xmlutil.py b/rack/api/xmlutil.py deleted file mode 100644 index d87f6d6..0000000 --- a/rack/api/xmlutil.py +++ /dev/null @@ -1,1003 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os.path - -from lxml import etree -import six -from xml.dom import minidom -from xml.parsers import expat -from xml import sax -from xml.sax import expatreader - -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack import utils - - -XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' -XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' -XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' -XMLNS_ATOM = 'http://www.w3.org/2005/Atom' - - -def validate_schema(xml, schema_name, version='v1.1'): - if isinstance(xml, str): - xml = etree.fromstring(xml) - base_path = 'rack/api/openstack/compute/schemas/' - if schema_name not in ('atom', 'atom-link'): - base_path += '%s/' % version - schema_path = os.path.join(utils.rackdir(), - '%s%s.rng' % (base_path, schema_name)) - schema_doc = etree.parse(schema_path) - relaxng = etree.RelaxNG(schema_doc) - relaxng.assertValid(xml) - - -class Selector(object): - - """Selects datum to operate on from an object.""" - - def __init__(self, *chain): - """Initialize the selector. - - Each argument is a subsequent index into the object. - """ - - self.chain = chain - - def __repr__(self): - """Return a representation of the selector.""" - - return "Selector" + repr(self.chain) - - def __call__(self, obj, do_raise=False): - """Select a datum to operate on. - - Selects the relevant datum within the object. - - :param obj: The object from which to select the datum. - :param do_raise: If False (the default), return None if the - indexed datum does not exist. Otherwise, - raise a KeyError. - """ - - # Walk the selector list - for elem in self.chain: - # If it's callable, call it - if callable(elem): - obj = elem(obj) - else: - if obj == '': - return '' - # Use indexing - try: - obj = obj[elem] - except (KeyError, IndexError): - # No sense going any further - if do_raise: - # Convert to a KeyError, for consistency - raise KeyError(elem) - return None - - # Return the finally-selected object - return obj - - -def get_items(obj): - """Get items in obj.""" - - return list(obj.items()) - - -def get_items_without_dict(obj): - """Get items in obj but omit any items whose value is a dict.""" - - # NOTE: iterate over a copy; removing items from the list being - # iterated would skip elements. - obj_list = list(obj.items()) - for item in list(obj_list): - if isinstance(item[1], dict): - obj_list.remove(item) - return obj_list - - -class EmptyStringSelector(Selector): - - """Returns the empty string if Selector would return None.""" - - def __call__(self, obj, do_raise=False): - """Returns empty string if the selected value does not exist.""" - - try: - return super(EmptyStringSelector, self).__call__(obj, True) - except KeyError: - return "" - - -class ConstantSelector(object): - - """Returns a constant.""" - - def __init__(self, value): - """Initialize the selector. - - :param value: The value to return. - """ - - self.value = value - - def __repr__(self): - """Return a representation of the selector.""" - - return repr(self.value) - - def __call__(self, _obj, _do_raise=False): - """Select a datum to operate on. - - Returns a constant value. Compatible with - Selector.__call__(). 
- """ - - return self.value - - -class TemplateElement(object): - - """Represent an element in the template.""" - - def __init__(self, tag, attrib=None, selector=None, subselector=None, - colon_ns=False, **extra): - """Initialize an element. - - Initializes an element in the template. Keyword arguments - specify attributes to be set on the element; values must be - callables. See TemplateElement.set() for more information. - - :param tag: The name of the tag to create. - :param attrib: An optional dictionary of element attributes. - :param selector: An optional callable taking an object and - optional boolean do_raise indicator and - returning the object bound to the element. - :param subselector: An optional callable taking an object and - optional boolean do_raise indicator and - returning the object bound to the element. - This is used to further refine the datum - object returned by selector in the event - that it is a list of objects. - :colon_ns: An optional flag indicating whether to support k:v - type tagname, if True the k:v type tagname will - be supported by adding the k into the namespace. - """ - - # Convert selector into a Selector - if selector is None: - selector = Selector() - elif not callable(selector): - selector = Selector(selector) - - # Convert subselector into a Selector - if subselector is not None and not callable(subselector): - subselector = Selector(subselector) - - self.tag = tag - self.selector = selector - self.subselector = subselector - self.attrib = {} - self._text = None - self._children = [] - self._childmap = {} - self.colon_ns = colon_ns - - # Run the incoming attributes through set() so that they - # become selectorized - if not attrib: - attrib = {} - attrib.update(extra) - for k, v in attrib.items(): - self.set(k, v) - - def __repr__(self): - """Return a representation of the template element.""" - - return ('<%s.%s %r at %#x>' % - (self.__class__.__module__, self.__class__.__name__, - self.tag, id(self))) - - def __len__(self): - """Return the number of child elements.""" - - return len(self._children) - - def __contains__(self, key): - """Determine whether a child node named by key exists.""" - - return key in self._childmap - - def __getitem__(self, idx): - """Retrieve a child node by index or name.""" - - if isinstance(idx, six.string_types): - # Allow access by node name - return self._childmap[idx] - else: - return self._children[idx] - - def append(self, elem): - """Append a child to the element.""" - - # Unwrap templates... - elem = elem.unwrap() - - # Avoid duplications - if elem.tag in self._childmap: - raise KeyError(elem.tag) - - self._children.append(elem) - self._childmap[elem.tag] = elem - - def extend(self, elems): - """Append children to the element.""" - - # Pre-evaluate the elements - elemmap = {} - elemlist = [] - for elem in elems: - # Unwrap templates... - elem = elem.unwrap() - - # Avoid duplications - if elem.tag in self._childmap or elem.tag in elemmap: - raise KeyError(elem.tag) - - elemmap[elem.tag] = elem - elemlist.append(elem) - - # Update the children - self._children.extend(elemlist) - self._childmap.update(elemmap) - - def insert(self, idx, elem): - """Insert a child element at the given index.""" - - # Unwrap templates... - elem = elem.unwrap() - - # Avoid duplications - if elem.tag in self._childmap: - raise KeyError(elem.tag) - - self._children.insert(idx, elem) - self._childmap[elem.tag] = elem - - def remove(self, elem): - """Remove a child element.""" - - # Unwrap templates... 
- elem = elem.unwrap() - - # Check if element exists - if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: - raise ValueError(_('element is not a child')) - - self._children.remove(elem) - del self._childmap[elem.tag] - - def get(self, key): - """Get an attribute. - - Returns a callable which performs datum selection. - - :param key: The name of the attribute to get. - """ - - return self.attrib[key] - - def set(self, key, value=None): - """Set an attribute. - - :param key: The name of the attribute to set. - - :param value: A callable taking an object and optional boolean - do_raise indicator and returning the datum bound - to the attribute. If None, a Selector() will be - constructed from the key. If a string, a - Selector() will be constructed from the string. - """ - - # Convert value to a selector - if value is None: - value = Selector(key) - elif not callable(value): - value = Selector(value) - - self.attrib[key] = value - - def keys(self): - """Return the attribute names.""" - - return self.attrib.keys() - - def items(self): - """Return the attribute names and values.""" - - return self.attrib.items() - - def unwrap(self): - """Unwraps a template to return a template element.""" - - # We are a template element - return self - - def wrap(self): - """Wraps a template element to return a template.""" - - # Wrap in a basic Template - return Template(self) - - def apply(self, elem, obj): - """Apply text and attributes to an etree.Element. - - Applies the text and attribute instructions in the template - element to an etree.Element instance. - - :param elem: An etree.Element instance. - :param obj: The base object associated with this template - element. - """ - - # Start with the text... - if self.text is not None: - elem.text = unicode(self.text(obj)) - - # Now set up all the attributes... - for key, value in self.attrib.items(): - try: - elem.set(key, unicode(value(obj, True))) - except KeyError: - # Attribute has no value, so don't include it - pass - - def _render(self, parent, datum, patches, nsmap): - """Internal rendering. - - Renders the template node into an etree.Element object. - Returns the etree.Element object. - - :param parent: The parent etree.Element instance. - :param datum: The datum associated with this template element. - :param patches: A list of other template elements that must - also be applied. - :param nsmap: An optional namespace dictionary to be - associated with the etree.Element instance. - """ - - # Allocate a node - if callable(self.tag): - tagname = self.tag(datum) - else: - tagname = self.tag - - if self.colon_ns: - if ':' in tagname: - if nsmap is None: - nsmap = {} - colon_key, colon_name = tagname.split(':') - nsmap[colon_key] = colon_key - tagname = '{%s}%s' % (colon_key, colon_name) - - elem = etree.Element(tagname, nsmap=nsmap) - - # If we have a parent, append the node to the parent - if parent is not None: - parent.append(elem) - - # If the datum is None, do nothing else - if datum is None: - return elem - - # Apply this template element to the element - self.apply(elem, datum) - - # Additionally, apply the patches - for patch in patches: - patch.apply(elem, datum) - - # We have fully rendered the element; return it - return elem - - def render(self, parent, obj, patches=[], nsmap=None): - """Render an object. - - Renders an object against this template node. Returns a list - of two-item tuples, where the first item is an etree.Element - instance and the second item is the datum associated with that - instance. 
- - :param parent: The parent for the etree.Element instances. - :param obj: The object to render this template element - against. - :param patches: A list of other template elements to apply - when rendering this template element. - :param nsmap: An optional namespace dictionary to attach to - the etree.Element instances. - """ - - # First, get the datum we're rendering - data = None if obj is None else self.selector(obj) - - # Check if we should render at all - if not self.will_render(data): - return [] - elif data is None: - return [(self._render(parent, None, patches, nsmap), None)] - - # Make the data into a list if it isn't already - if not isinstance(data, list): - data = [data] - elif parent is None: - raise ValueError(_('root element selecting a list')) - - # Render all the elements - elems = [] - for datum in data: - if self.subselector is not None: - datum = self.subselector(datum) - elems.append((self._render(parent, datum, patches, nsmap), datum)) - - # Return all the elements rendered, as well as the - # corresponding datum for the next step down the tree - return elems - - def will_render(self, datum): - """Hook method. - - An overridable hook method to determine whether this template - element will be rendered at all. By default, returns False - (inhibiting rendering) if the datum is None. - - :param datum: The datum associated with this template element. - """ - - # Don't render if datum is None - return datum is not None - - def _text_get(self): - """Template element text. - - Either None or a callable taking an object and optional - boolean do_raise indicator and returning the datum bound to - the text of the template element. - """ - - return self._text - - def _text_set(self, value): - # Convert value to a selector - if value is not None and not callable(value): - value = Selector(value) - - self._text = value - - def _text_del(self): - self._text = None - - text = property(_text_get, _text_set, _text_del) - - def tree(self): - """Return string representation of the template tree. - - Returns a representation of the template rooted at this - element as a string, suitable for inclusion in debug logs. - """ - - # Build the inner contents of the tag... - contents = [self.tag, '!selector=%r' % self.selector] - - # Add the text... - if self.text is not None: - contents.append('!text=%r' % self.text) - - # Add all the other attributes - for key, value in self.attrib.items(): - contents.append('%s=%r' % (key, value)) - - # If there are no children, return it as a closed tag - if len(self) == 0: - return '<%s/>' % ' '.join([str(i) for i in contents]) - - # OK, recurse to our children - children = [c.tree() for c in self] - - # Return the result - return ('<%s>%s</%s>' % - (' '.join(contents), ''.join(children), self.tag)) - - -def SubTemplateElement(parent, tag, attrib=None, selector=None, - subselector=None, colon_ns=False, **extra): - """Create a template element as a child of another. - - Corresponds to the etree.SubElement interface. Parameters are as - for TemplateElement, with the addition of the parent. - """ - - # Convert attributes - attrib = attrib or {} - attrib.update(extra) - - # Get a TemplateElement - elem = TemplateElement(tag, attrib=attrib, selector=selector, - subselector=subselector, colon_ns=colon_ns) - - # Append to the parent safely - if parent is not None: - parent.append(elem) - - return elem - - -class Template(object): - - """Represent a template.""" - - def __init__(self, root, nsmap=None): - """Initialize a template. 
- - :param root: The root element of the template. - :param nsmap: An optional namespace dictionary to be - associated with the root element of the - template. - """ - - self.root = root.unwrap() if root is not None else None - self.nsmap = nsmap or {} - self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) - - def _serialize(self, parent, obj, siblings, nsmap=None): - """Internal serialization. - - Recursive routine to build a tree of etree.Element instances - from an object based on the template. Returns the first - etree.Element instance rendered, or None. - - :param parent: The parent etree.Element instance. Can be - None. - :param obj: The object to render. - :param siblings: The TemplateElement instances against which - to render the object. - :param nsmap: An optional namespace dictionary to be - associated with the etree.Element instance - rendered. - """ - - # First step, render the element - elems = siblings[0].render(parent, obj, siblings[1:], nsmap) - - # Now, recurse to all child elements - seen = set() - for idx, sibling in enumerate(siblings): - for child in sibling: - # Have we handled this child already? - if child.tag in seen: - continue - seen.add(child.tag) - - # Determine the child's siblings - nieces = [child] - for sib in siblings[idx + 1:]: - if child.tag in sib: - nieces.append(sib[child.tag]) - - # Now we recurse for every data element - for elem, datum in elems: - self._serialize(elem, datum, nieces) - - # Return the first element; at the top level, this will be the - # root element - if elems: - return elems[0][0] - - def serialize(self, obj, *args, **kwargs): - """Serialize an object. - - Serializes an object against the template. Returns a string - with the serialized XML. Positional and keyword arguments are - passed to etree.tostring(). - - :param obj: The object to serialize. - """ - - elem = self.make_tree(obj) - if elem is None: - return '' - - for k, v in self.serialize_options.items(): - kwargs.setdefault(k, v) - - # Serialize it into XML - return etree.tostring(elem, *args, **kwargs) - - def make_tree(self, obj): - """Create a tree. - - Serializes an object against the template. Returns an Element - node with appropriate children. - - :param obj: The object to serialize. - """ - - # If the template is empty, return the empty string - if self.root is None: - return None - - # Get the siblings and nsmap of the root element - siblings = self._siblings() - nsmap = self._nsmap() - - # Form the element tree - return self._serialize(None, obj, siblings, nsmap) - - def _siblings(self): - """Hook method for computing root siblings. - - An overridable hook method to return the siblings of the root - element. By default, this is the root element itself. - """ - - return [self.root] - - def _nsmap(self): - """Hook method for computing the namespace dictionary. - - An overridable hook method to return the namespace dictionary. - """ - - return self.nsmap.copy() - - def unwrap(self): - """Unwraps a template to return a template element.""" - - # Return the root element - return self.root - - def wrap(self): - """Wraps a template element to return a template.""" - - # We are a template - return self - - def apply(self, master): - """Hook method for determining slave applicability. - - An overridable hook method used to determine if this template - is applicable as a slave to a given master template. - - :param master: The master template to test. - """ - - return True - - def tree(self): - """Return string representation of the template tree. 
- - Returns a representation of the template as a string, suitable - for inclusion in debug logs. - """ - - return "%r: %s" % (self, self.root.tree()) - - -class MasterTemplate(Template): - - """Represent a master template. - - Master templates are versioned derivatives of templates that - additionally allow slave templates to be attached. Slave - templates allow modification of the serialized result without - directly changing the master. - """ - - def __init__(self, root, version, nsmap=None): - """Initialize a master template. - - :param root: The root element of the template. - :param version: The version number of the template. - :param nsmap: An optional namespace dictionary to be - associated with the root element of the - template. - """ - - super(MasterTemplate, self).__init__(root, nsmap) - self.version = version - self.slaves = [] - - def __repr__(self): - """Return string representation of the template.""" - - return ("<%s.%s object version %s at %#x>" % - (self.__class__.__module__, self.__class__.__name__, - self.version, id(self))) - - def _siblings(self): - """Hook method for computing root siblings. - - An overridable hook method to return the siblings of the root - element. This is the root element plus the root elements of - all the slave templates. - """ - - return [self.root] + [slave.root for slave in self.slaves] - - def _nsmap(self): - """Hook method for computing the namespace dictionary. - - An overridable hook method to return the namespace dictionary. - The namespace dictionary is computed by taking the master - template's namespace dictionary and updating it from all the - slave templates. - """ - - nsmap = self.nsmap.copy() - for slave in self.slaves: - nsmap.update(slave._nsmap()) - return nsmap - - def attach(self, *slaves): - """Attach one or more slave templates. - - Attaches one or more slave templates to the master template. - Slave templates must have a root element with the same tag as - the master template. The slave template's apply() method will - be called to determine if the slave should be applied to this - master; if it returns False, that slave will be skipped. - (This allows filtering of slaves based on the version of the - master template.) - """ - - slave_list = [] - for slave in slaves: - slave = slave.wrap() - - # Make sure we have a tree match - if slave.root.tag != self.root.tag: - msg = _("Template tree mismatch; adding slave %(slavetag)s to " - "master %(mastertag)s") % {'slavetag': slave.root.tag, - 'mastertag': self.root.tag} - raise ValueError(msg) - - # Make sure slave applies to this template - if not slave.apply(self): - continue - - slave_list.append(slave) - - # Add the slaves - self.slaves.extend(slave_list) - - def copy(self): - """Return a copy of this master template.""" - - # Return a copy of the MasterTemplate - tmp = self.__class__(self.root, self.version, self.nsmap) - tmp.slaves = self.slaves[:] - return tmp - - -class SlaveTemplate(Template): - - """Represent a slave template. - - Slave templates are versioned derivatives of templates. Each - slave has a minimum version and optional maximum version of the - master template to which they can be attached. - """ - - def __init__(self, root, min_vers, max_vers=None, nsmap=None): - """Initialize a slave template. - - :param root: The root element of the template. - :param min_vers: The minimum permissible version of the master - template for this slave template to apply. - :param max_vers: An optional upper bound for the master - template version. 
- :param nsmap: An optional namespace dictionary to be - associated with the root element of the - template. - """ - - super(SlaveTemplate, self).__init__(root, nsmap) - self.min_vers = min_vers - self.max_vers = max_vers - - def __repr__(self): - """Return string representation of the template.""" - - return ("<%s.%s object versions %s-%s at %#x>" % - (self.__class__.__module__, self.__class__.__name__, - self.min_vers, self.max_vers, id(self))) - - def apply(self, master): - """Hook method for determining slave applicability. - - An overridable hook method used to determine if this template - is applicable as a slave to a given master template. This - version requires the master template to have a version number - between min_vers and max_vers. - - :param master: The master template to test. - """ - - # Does the master meet our minimum version requirement? - if master.version < self.min_vers: - return False - - # How about our maximum version requirement? - if self.max_vers is not None and master.version > self.max_vers: - return False - - return True - - -class TemplateBuilder(object): - - """Template builder. - - This class exists to allow templates to be lazily built without - having to build them each time they are needed. It must be - subclassed, and the subclass must implement the construct() - method, which must return a Template (or subclass) instance. The - constructor will always return the template returned by - construct(), or, if it has a copy() method, a copy of that - template. - """ - - _tmpl = None - - def __new__(cls, copy=True): - """Construct and return a template. - - :param copy: If True (the default), a copy of the template - will be constructed and returned, if possible. - """ - - # Do we need to construct the template? - if cls._tmpl is None: - tmp = super(TemplateBuilder, cls).__new__(cls) - - # Construct the template - cls._tmpl = tmp.construct() - - # If the template has a copy attribute, return the result of - # calling it - if copy and hasattr(cls._tmpl, 'copy'): - return cls._tmpl.copy() - - # Return the template - return cls._tmpl - - def construct(self): - """Construct a template. - - Called to construct a template instance, which it must return. - Only called once. - """ - - raise NotImplementedError(_("subclasses must implement construct()!")) - - -def make_links(parent, selector=None): - """Attach an Atom element to the parent.""" - - elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, - selector=selector) - elem.set('rel') - elem.set('type') - elem.set('href') - - # Just for completeness... - return elem - - -def make_flat_dict(name, selector=None, subselector=None, - ns=None, colon_ns=False, root=None, - ignore_sub_dicts=False): - """Utility for simple XML templates that traditionally used - XMLDictSerializer with no metadata. Returns a template element - where the top-level element has the given tag name, and where - sub-elements have tag names derived from the object's keys and - text derived from the object's values. - - :param root: if None, this will create the root. - :param ignore_sub_dicts: If True, ignores any dict objects inside the - object. If False, causes an error if there is a - dict object present. - """ - - # Set up the names we need... 
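Before the implementation continues below, this is the intended use of make_flat_dict(), assuming the Template machinery defined above; the tag and data are invented and the output shape is approximate:

    tmpl = Template(make_flat_dict('metadata'))
    print(tmpl.serialize({'metadata': {'foo': 'bar'}}))
    # Roughly: <?xml version='1.0' encoding='UTF-8'?>
    #          <metadata><foo>bar</foo></metadata>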
- if ns is None: - elemname = name - tagname = Selector(0) - else: - elemname = '{%s}%s' % (ns, name) - tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0]) - - if selector is None: - selector = name - if not root: - # Build the root element - root = TemplateElement(elemname, selector=selector, - subselector=subselector, colon_ns=colon_ns) - choice = get_items if ignore_sub_dicts is False else get_items_without_dict - # Build an element to represent all the keys and values - elem = SubTemplateElement(root, tagname, selector=choice, - colon_ns=colon_ns) - elem.text = 1 - - # Return the template - return root - - -class ProtectedExpatParser(expatreader.ExpatParser): - - """An expat parser which disables DTDs and entities by default.""" - - def __init__(self, forbid_dtd=True, forbid_entities=True, - *args, **kwargs): - # Python 2.x old style class - expatreader.ExpatParser.__init__(self, *args, **kwargs) - self.forbid_dtd = forbid_dtd - self.forbid_entities = forbid_entities - - def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): - raise ValueError("Inline DTD forbidden") - - def entity_decl(self, entityName, is_parameter_entity, value, base, - systemId, publicId, notationName): - raise ValueError("<!ENTITY> entity declaration forbidden") - - def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): - # expat 1.2 - raise ValueError("<!ENTITY> unparsed entity forbidden") - - def external_entity_ref(self, context, base, systemId, publicId): - raise ValueError("<!ENTITY> external entity forbidden") - - def notation_decl(self, name, base, sysid, pubid): - raise ValueError("<!ENTITY> notation forbidden") - - def reset(self): - expatreader.ExpatParser.reset(self) - if self.forbid_dtd: - self._parser.StartDoctypeDeclHandler = self.start_doctype_decl - self._parser.EndDoctypeDeclHandler = None - if self.forbid_entities: - self._parser.EntityDeclHandler = self.entity_decl - self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl - self._parser.ExternalEntityRefHandler = self.external_entity_ref - self._parser.NotationDeclHandler = self.notation_decl - try: - self._parser.SkippedEntityHandler = None - except AttributeError: - # some pyexpat versions do not support SkippedEntity - pass - - -def safe_minidom_parse_string(xml_string): - """Parse an XML string using minidom safely.""" - try: - return minidom.parseString(xml_string, parser=ProtectedExpatParser()) - except (sax.SAXParseException, ValueError, - expat.ExpatError, LookupError) as e: - # NOTE(Vijaya Erukala): XML input such as - # <?xml version="1.0" encoding="TF-8"?> - # raises LookupError: unknown encoding: TF-8 - raise exception.MalformedRequestBody(reason=str(e)) diff --git a/rack/cmd/__init__.py b/rack/cmd/__init__.py deleted file mode 100644 index 51d6433..0000000 --- a/rack/cmd/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys - -if ('eventlet' in sys.modules and - os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): - raise ImportError('eventlet imported before rack/cmd/__init__ ' - '(env var set to %s)' - % os.environ.get('EVENTLET_NO_GREENDNS')) - -os.environ['EVENTLET_NO_GREENDNS'] = 'yes' - -import eventlet -from rack import debugger - -if debugger.enabled(): - # turn off thread patching to enable the remote debugger - eventlet.monkey_patch(os=False, thread=False) -else: - eventlet.monkey_patch(os=False) diff --git a/rack/cmd/api.py b/rack/cmd/api.py deleted file mode 100644 index 3bd2d8c..0000000 --- a/rack/cmd/api.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Starter script for RACK API.""" - -import sys - -from oslo.config import cfg - -from rack import config -from rack.openstack.common import log as logging -from rack import service -from rack import utils - -CONF = cfg.CONF - - -def main(): - config.parse_args(sys.argv) - logging.setup("rack") - utils.monkey_patch() - - launcher = service.process_launcher() - server = service.WSGIService('rackapi') - launcher.launch_service(server, workers=server.workers or 1) - launcher.wait() diff --git a/rack/config.py b/rack/config.py deleted file mode 100644 index 8964457..0000000 --- a/rack/config.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo.config import cfg - -from rack import debugger -from rack.openstack.common.db import options -from rack import paths -from rack import version - -_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('rack.sqlite') - - -def parse_args(argv, default_config_files=None): - options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, - sqlite_db='rack.sqlite') - debugger.register_cli_opts() - cfg.CONF(argv[1:], - project='rack', - version=version.version_string(), - default_config_files=default_config_files) diff --git a/rack/context.py b/rack/context.py deleted file mode 100644 index 90ca91d..0000000 --- a/rack/context.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""RequestContext: context for requests that persist through all of rack.""" - -import copy -import uuid - -import six - -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import local -from rack.openstack.common import log as logging -from rack.openstack.common import timeutils -from rack import policy - - -LOG = logging.getLogger(__name__) - - -def generate_request_id(): - return 'req-' + str(uuid.uuid4()) - - -class RequestContext(object): - - """Security context and request information. - - Represents the user taking a given action within the system. - - """ - - def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", - roles=None, remote_address=None, timestamp=None, - request_id=None, auth_token=None, overwrite=True, - quota_class=None, user_name=None, project_name=None, - service_catalog=None, instance_lock_checked=False, **kwargs): - """:param read_deleted: 'no' indicates deleted records are hidden, - 'yes' indicates deleted records are visible, - 'only' indicates that *only* deleted records are visible. - - - :param overwrite: Set to False to ensure that the greenthread local - copy of the index is not overwritten. - - :param kwargs: Extra arguments that might be present, but we ignore - because they possibly came in from older rpc messages. - """ - if kwargs: - LOG.warn(_('Arguments dropped when creating context: %s') % - str(kwargs)) - - self.user_id = user_id - self.project_id = project_id - self.roles = roles or [] - self.read_deleted = read_deleted - self.remote_address = remote_address - if not timestamp: - timestamp = timeutils.utcnow() - if isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_strtime(timestamp) - self.timestamp = timestamp - if not request_id: - request_id = generate_request_id() - self.request_id = request_id - self.auth_token = auth_token - - if service_catalog: - # Only include required parts of service_catalog - self.service_catalog = [s for s in service_catalog - if s.get('type') in ( - 'identity', - 'image', - 'network', - 'compute')] - else: - # if list is empty or none - self.service_catalog = [] - - self.instance_lock_checked = instance_lock_checked - - # NOTE(markmc): this attribute is currently only used by the - # rs_limits turnstile pre-processor. 
- # See https://lists.launchpad.net/openstack/msg12200.html -        self.quota_class = quota_class -        self.user_name = user_name -        self.project_name = project_name -        self.is_admin = is_admin -        if self.is_admin is None: -            self.is_admin = policy.check_is_admin(self) -        if overwrite or not hasattr(local.store, 'context'): -            self.update_store() - -    def _get_read_deleted(self): -        return self._read_deleted - -    def _set_read_deleted(self, read_deleted): -        if read_deleted not in ('no', 'yes', 'only'): -            raise ValueError(_("read_deleted can only be one of 'no', " -                               "'yes' or 'only', not %r") % read_deleted) -        self._read_deleted = read_deleted - -    def _del_read_deleted(self): -        del self._read_deleted - -    read_deleted = property(_get_read_deleted, _set_read_deleted, -                            _del_read_deleted) - -    def update_store(self): -        local.store.context = self - -    def to_dict(self): -        return {'user_id': self.user_id, -                'project_id': self.project_id, -                'is_admin': self.is_admin, -                'read_deleted': self.read_deleted, -                'roles': self.roles, -                'remote_address': self.remote_address, -                'timestamp': timeutils.strtime(self.timestamp), -                'request_id': self.request_id, -                'auth_token': self.auth_token, -                'quota_class': self.quota_class, -                'user_name': self.user_name, -                'service_catalog': self.service_catalog, -                'project_name': self.project_name, -                'instance_lock_checked': self.instance_lock_checked, -                'tenant': self.tenant, -                'user': self.user} - -    @classmethod -    def from_dict(cls, values): -        values.pop('user', None) -        values.pop('tenant', None) -        return cls(**values) - -    def elevated(self, read_deleted=None, overwrite=False): -        """Return a version of this context with admin flag set.""" -        context = copy.copy(self) -        # copy.copy is shallow, so take our own roles list before appending -        # 'admin'; otherwise the original context gains the role as well -        context.roles = list(self.roles) -        context.is_admin = True - -        if 'admin' not in context.roles: -            context.roles.append('admin') - -        if read_deleted is not None: -            context.read_deleted = read_deleted - -        return context - -    # NOTE(sirp): the openstack/common version of RequestContext uses -    # tenant/user whereas the Rack version uses project_id/user_id. We need -    # this shim in order to use context-aware code from openstack/common, like -    # logging, until we make the switch to using openstack/common's version of -    # RequestContext. -    @property -    def tenant(self): -        return self.project_id - -    @property -    def user(self): -        return self.user_id - - -def get_admin_context(read_deleted="no"): -    return RequestContext(user_id=None, -                          project_id=None, -                          is_admin=True, -                          read_deleted=read_deleted, -                          overwrite=False) - - -def is_user_context(context): -    """Indicates if the request context is a normal user.""" -    if not context: -        return False -    if context.is_admin: -        return False -    if not context.user_id or not context.project_id: -        return False -    return True - - -def require_admin_context(ctxt): -    """Raise exception.AdminRequired() if context is not an admin context.""" -    if not ctxt.is_admin: -        raise exception.AdminRequired() - - -def require_context(ctxt): -    """Raise exception.NotAuthorized() if context is not a user or an -    admin context. 
- """ - if not ctxt.is_admin and not is_user_context(ctxt): - raise exception.NotAuthorized() - - -def authorize_project_context(context, project_id): - """Ensures a request has permission to access the given project.""" - if is_user_context(context): - if not context.project_id: - raise exception.NotAuthorized() - elif context.project_id != project_id: - raise exception.NotAuthorized() - - -def authorize_user_context(context, user_id): - """Ensures a request has permission to access the given user.""" - if is_user_context(context): - if not context.user_id: - raise exception.NotAuthorized() - elif context.user_id != user_id: - raise exception.NotAuthorized() - - -def authorize_quota_class_context(context, class_name): - """Ensures a request has permission to access the given quota class.""" - if is_user_context(context): - if not context.quota_class: - raise exception.NotAuthorized() - elif context.quota_class != class_name: - raise exception.NotAuthorized() diff --git a/rack/db/__init__.py b/rack/db/__init__.py deleted file mode 100644 index 8efc164..0000000 --- a/rack/db/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -DB abstraction for Nova -""" - -from rack.db.api import * # noqa diff --git a/rack/db/api.py b/rack/db/api.py deleted file mode 100644 index a7e7d9c..0000000 --- a/rack/db/api.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from oslo.config import cfg -from rack.openstack.common.db import api as db_api - - -CONF = cfg.CONF -db_opts = [ - cfg.BoolOpt('enable_new_services', - default=True, - help='Services to be added to the available pool on create') -] -CONF.register_opts(db_opts) -CONF.import_opt('backend', 'rack.openstack.common.db.options', - group='database') - -_BACKEND_MAPPING = {'sqlalchemy': 'rack.db.sqlalchemy.api'} - - -IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING) - - -def group_get_all(context, filters=None): - return IMPL.group_get_all(context, filters) - - -def group_get_by_gid(context, gid): - return IMPL.group_get_by_gid(context, gid) - - -def group_create(context, values): - return IMPL.group_create(context, values) - - -def group_update(context, values): - return IMPL.group_update(context, values) - - -def group_delete(context, gid): - return IMPL.group_delete(context, gid) - - -def service_destroy(context, service_id): - """Destroy the service or raise if it does not exist.""" - return IMPL.service_destroy(context, service_id) - - -def service_get(context, service_id): - """Get a service or raise if it does not exist.""" - return IMPL.service_get(context, service_id) - - -def service_get_by_host_and_topic(context, host, topic): - """Get a service by host it's on and topic it listens to.""" - return IMPL.service_get_by_host_and_topic(context, host, topic) - - -def service_get_all(context, disabled=None): - """Get all services.""" - return IMPL.service_get_all(context, disabled) - - -def service_get_all_by_topic(context, topic): - """Get all services for a given topic.""" - return IMPL.service_get_all_by_topic(context, topic) - - -def service_get_all_by_host(context, host): - """Get all services for a given host.""" - return IMPL.service_get_all_by_host(context, host) - - -def service_get_by_args(context, host, binary): - """Get the state of a service by node name and binary.""" - return IMPL.service_get_by_args(context, host, binary) - - -def service_create(context, values): - """Create a service from the values dictionary.""" - return IMPL.service_create(context, values) - - -def service_update(context, service_id, values): - """Set the given properties on a service and update it. - - Raises NotFound if service does not exist. 
- - """ - return IMPL.service_update(context, service_id, values) - - -def network_create(context, values): - return IMPL.network_create(context, values) - - -def network_update(context, network_id, values): - IMPL.network_update(context, network_id, values) - - -def network_get_all(context, gid, filters={}): - return IMPL.network_get_all(context, gid, filters) - - -def network_get_by_network_id(context, gid, network_id): - return IMPL.network_get_by_network_id(context, gid, network_id) - - -def network_delete(context, gid, network_id): - return IMPL.network_delete(context, gid, network_id) - - -def keypair_get_all(context, gid, filters={}): - return IMPL.keypair_get_all(context, gid, filters) - - -def keypair_get_by_keypair_id(context, gid, keypair_id): - return IMPL.keypair_get_by_keypair_id(context, gid, keypair_id) - - -def keypair_create(context, values): - return IMPL.keypair_create(context, values) - - -def keypair_update(context, gid, keypair_id, values): - return IMPL.keypair_update(context, gid, keypair_id, values) - - -def keypair_delete(context, gid, keypair_id): - return IMPL.keypair_delete(context, gid, keypair_id) - - -def securitygroup_get_all(context, gid, filters={}): - return IMPL.securitygroup_get_all(context, gid, filters) - - -def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id): - return IMPL.securitygroup_get_by_securitygroup_id(context, gid, - securitygroup_id) - - -def securitygroup_create(context, values): - return IMPL.securitygroup_create(context, values) - - -def securitygroup_update(context, gid, securitygroup_id, values): - return IMPL.securitygroup_update(context, gid, securitygroup_id, values) - - -def securitygroup_delete(context, gid, securitygroup_id): - return IMPL.securitygroup_delete(context, gid, securitygroup_id) - - -def process_get_all(context, gid, filters={}): - return IMPL.process_get_all(context, gid, filters) - - -def process_get_by_pid(context, gid, pid): - return IMPL.process_get_by_pid(context, gid, pid) - - -def process_get_not_error_status_for_proxy(context, gid): - return IMPL.process_get_not_error_status_for_proxy(context, gid) - - -def process_create(context, values, network_ids, securitygroup_ids): - return IMPL.process_create(context, values, network_ids, - securitygroup_ids) - - -def process_update(context, gid, pid, values): - return IMPL.process_update(context, gid, pid, values) - - -def process_delete(context, gid, pid): - return IMPL.process_delete(context, gid, pid) diff --git a/rack/db/base.py b/rack/db/base.py deleted file mode 100644 index 55bdd00..0000000 --- a/rack/db/base.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Base class for classes that need modular database access.""" - -from oslo.config import cfg - -from rack.openstack.common import importutils - -db_driver_opt = cfg.StrOpt('db_driver', - default='rack.db', - help='The driver to use for database access') - -CONF = cfg.CONF -CONF.register_opt(db_driver_opt) - - -class Base(object): - """DB driver is injected in the init method.""" - - def __init__(self, db_driver=None): - super(Base, self).__init__() - if not db_driver: - db_driver = CONF.db_driver - self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/rack/db/migration.py b/rack/db/migration.py deleted file mode 100644 index 1092e15..0000000 --- a/rack/db/migration.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Database setup and migration commands.""" - -from rack import utils - - -IMPL = utils.LazyPluggable('backend', - config_group='database', - sqlalchemy='rack.db.sqlalchemy.migration') - - -def db_sync(version=None): - """Migrate the database to `version` or the most recent version.""" - return IMPL.db_sync(version=version) - - -def db_version(): - """Display the current database version.""" - return IMPL.db_version() - - -def db_initial_version(): - """The starting version for the database.""" - return IMPL.db_initial_version() diff --git a/rack/db/sqlalchemy/__init__.py b/rack/db/sqlalchemy/__init__.py deleted file mode 100644 index a1c0b9a..0000000 --- a/rack/db/sqlalchemy/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sqlalchemy import BigInteger -from sqlalchemy.ext.compiler import compiles - - -@compiles(BigInteger, 'sqlite') -def compile_big_int_sqlite(type_, compiler, **kw): - return 'INTEGER' diff --git a/rack/db/sqlalchemy/api.py b/rack/db/sqlalchemy/api.py deleted file mode 100644 index 5149dff..0000000 --- a/rack/db/sqlalchemy/api.py +++ /dev/null @@ -1,678 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo.config import cfg - -from rack.db.sqlalchemy import models -from rack import exception - -from rack.openstack.common import jsonutils -from rack.openstack.common import log as logging -from rack.openstack.common import timeutils - -from rack.openstack.common.db import exception as db_exc -from rack.openstack.common.db.sqlalchemy import session as db_session -from rack.openstack.common.gettextutils import _ - -import functools -import rack.context -import sys - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.import_opt('connection', -                'rack.openstack.common.db.options', -                group='database') - -_FACADE = None - - -def _create_facade_lazily(): -    global _FACADE -    if _FACADE is None: -        _FACADE = db_session.EngineFacade( -            CONF.database.connection, -            **dict(CONF.database.iteritems())) -    return _FACADE - - -def get_engine(): -    facade = _create_facade_lazily() -    return facade.get_engine() - - -def get_session(**kwargs): -    facade = _create_facade_lazily() -    return facade.get_session(**kwargs) - - -def get_backend(): -    return sys.modules[__name__] - - -def group_get_all(context, filters=None): -    session = get_session() -    filters = filters or {} -    query = session.query(models.Group).filter_by(user_id=context.user_id)\ -        .filter_by(deleted=0) -    if 'project_id' in filters: -        query = query.filter_by(project_id=filters['project_id']) -    if 'name' in filters: -        query = query.filter_by(display_name=filters['name']) -    if 'status' in filters: -        query = query.filter_by(status=filters['status']) -    response_groups = query.all() - -    return [dict(group) for group in response_groups] - - -def group_get_by_gid(context, gid): -    session = get_session() -    group = session.query(models.Group)\ -        .filter_by(user_id=context.user_id)\ -        .filter_by(gid=gid)\ -        .filter_by(deleted=0)\ -        .first() - -    if not group: -        raise exception.GroupNotFound(gid=gid) -    return dict(group) - - -def require_admin_context(f): -    """Decorator to require admin request context. - -    The first argument to the wrapped function must be the context. - -    """ - -    @functools.wraps(f) -    def wrapper(*args, **kwargs): -        rack.context.require_admin_context(args[0]) -        return f(*args, **kwargs) -    return wrapper - - -def group_create(context, values): -    session = get_session() -    group_ref = models.Group() -    group_ref.update(values) -    group_ref.save(session) - -    return dict(group_ref) - - -def group_update(context, values): -    session = get_session() -    group_ref = session.query(models.Group). 
\ - filter(models.Group.gid == values["gid"]).first() - if group_ref is None: - raise exception.GroupNotFound(gid=values["gid"]) - - group_ref.update(values) - group_ref.save(session) - - return dict(group_ref) - - -def group_delete(context, gid): - session = get_session() - group_ref = session.query(models.Group)\ - .filter_by(deleted=0)\ - .filter_by(gid=gid)\ - .first() - if group_ref is None: - raise exception.GroupNotFound(gid=gid) - - values = { - "status": "DELETING", - "deleted": 1, - "deleted_at": timeutils.utcnow() - } - group_ref.update(values) - group_ref.save(session) - - return dict(group_ref) - - -def service_model_query(context, model, *args, **kwargs): - session = kwargs.get('session') or get_session() - read_deleted = kwargs.get('read_deleted') or context.read_deleted - query = session.query(model, *args) - - default_deleted_value = model.__mapper__.c.deleted.default.arg - if read_deleted == 'no': - query = query.filter(model.deleted == default_deleted_value) - elif read_deleted == 'yes': - pass # omit the filter to include deleted and active - elif read_deleted == 'only': - query = query.filter(model.deleted != default_deleted_value) - else: - raise Exception(_("Unrecognized read_deleted value '%s'") - % read_deleted) - - return query - - -@require_admin_context -def service_destroy(context, service_id): - session = get_session() - with session.begin(): - count = service_model_query(context, models.Service, - session=session).\ - filter_by(id=service_id).\ - soft_delete(synchronize_session=False) - - if count == 0: - raise exception.ServiceNotFound(service_id=service_id) - - -@require_admin_context -def service_get(context, service_id): - session = get_session() - service_ref = service_model_query(context, models.Service, - session=session).\ - filter_by(id=service_id).\ - first() - - if not service_ref: - raise exception.ServiceNotFound(service_id=service_id) - - return jsonutils.to_primitive(service_ref) - - -@require_admin_context -def service_get_all(context, disabled=None): - session = get_session() - query = service_model_query(context, models.Service, - session=session) - - if disabled is not None: - query = query.filter_by(disabled=disabled) - - service_refs = query.all() - return jsonutils.to_primitive(service_refs) - - -@require_admin_context -def service_get_all_by_topic(context, topic): - session = get_session() - service_refs = service_model_query(context, models.Service, - session=session, - read_deleted="no").\ - filter_by(disabled=False).\ - filter_by(topic=topic).\ - all() - - return jsonutils.to_primitive(service_refs) - - -@require_admin_context -def service_get_by_host_and_topic(context, host, topic): - session = get_session() - service_ref = service_model_query(context, models.Service, - session=session, - read_deleted="no").\ - filter_by(disabled=False).\ - filter_by(host=host).\ - filter_by(topic=topic).\ - first() - - return jsonutils.to_primitive(service_ref) - - -@require_admin_context -def service_get_all_by_host(context, host): - session = get_session() - service_refs = service_model_query(context, models.Service, - session=session, - read_deleted="no").\ - filter_by(host=host).\ - all() - - return jsonutils.to_primitive(service_refs) - - -@require_admin_context -def service_get_by_args(context, host, binary): - session = get_session() - service_ref = service_model_query(context, models.Service, - session=session).\ - filter_by(host=host).\ - filter_by(binary=binary).\ - first() - - if not service_ref: - raise 
exception.HostBinaryNotFound(host=host, binary=binary) - -    return jsonutils.to_primitive(service_ref) - - -@require_admin_context -def service_create(context, values): -    session = get_session() -    service_ref = models.Service() -    service_ref.update(values) -    if not CONF.enable_new_services: -        service_ref.disabled = True -    try: -        service_ref.save(session) -    except db_exc.DBDuplicateEntry as e: -        if 'binary' in e.columns: -            raise exception.ServiceBinaryExists(host=values.get('host'), -                                                binary=values.get('binary')) -        raise exception.ServiceTopicExists(host=values.get('host'), -                                           topic=values.get('topic')) - -    return jsonutils.to_primitive(service_ref) - - -@require_admin_context -def service_update(context, service_id, values): -    session = get_session() -    with session.begin(): -        service_ref = service_model_query(context, models.Service, -                                          session=session).\ -            filter_by(id=service_id).\ -            first() - -        if not service_ref: -            raise exception.ServiceNotFound(service_id=service_id) - -        service_ref.update(values) - -    return jsonutils.to_primitive(service_ref) - - -def network_create(context, values): -    session = get_session() -    network_ref = models.Network() -    network_ref.update(values) -    network_ref.save(session) - -    return dict(network_ref) - - -def network_update(context, network_id, values): -    session = get_session() -    network_ref = session.query(models.Network)\ -        .filter(models.Network.deleted == 0)\ -        .filter(models.Network.network_id == network_id)\ -        .first() -    if network_ref is None: -        raise exception.NetworkNotFound(network_id=network_id) - -    network_ref.update(values) -    network_ref.save(session) - - -def network_get_all(context, gid, filters): -    session = get_session() -    query = session.query(models.Network)\ -        .filter_by(deleted=0)\ -        .filter_by(gid=gid) - -    if 'network_id' in filters: -        query = query.filter_by(network_id=filters['network_id']) -    if 'neutron_network_id' in filters: -        query = query.filter_by( -            neutron_network_id=filters['neutron_network_id']) -    if 'display_name' in filters: -        query = query.filter_by(display_name=filters['display_name']) -    if 'status' in filters: -        query = query.filter_by(status=filters['status']) -    if 'is_admin' in filters: -        query = query.filter_by(is_admin=filters['is_admin']) -    if 'cidr' in filters: -        query = query.filter_by(cidr=filters['cidr']) -    if 'ext_router' in filters: -        query = query.filter_by(ext_router=filters['ext_router']) - -    networks = query.all() - -    return [dict(network) for network in networks] - - -def network_get_by_network_id(context, gid, network_id): -    session = get_session() -    network = session.query(models.Network)\ -        .filter_by(deleted=0)\ -        .filter_by(gid=gid)\ -        .filter_by(network_id=network_id)\ -        .first() -    if not network: -        raise exception.NetworkNotFound(network_id=network_id) - -    network_dict = dict(network) -    network_dict.update( -        dict(processes=[dict(process) for process in network.processes])) - -    return network_dict - - -def network_delete(context, gid, network_id): -    session = get_session() -    network_ref = session.query(models.Network)\ -        .filter(models.Network.deleted == 0)\ -        .filter(models.Network.gid == gid)\ -        .filter(models.Network.network_id == network_id)\ -        .first() -    if network_ref is None: -        raise exception.NetworkNotFound(network_id=network_id) -    values = {} -    values["deleted"] = 1 -    values["deleted_at"] = timeutils.utcnow() -    values["status"] = "DELETING" -    network_ref.update(values) -    network_ref.save(session) -    return dict(network_ref) - - -def keypair_get_all(context, gid, filters={}): -    session = get_session() -    query = session.query(models.Keypair)\ -        .filter_by(gid=gid)\ -        .filter_by(deleted=0) -    if 'keypair_id' in filters: -        query = 
query.filter_by(keypair_id=filters['keypair_id']) -    if 'nova_keypair_id' in filters: -        query = query.filter_by(nova_keypair_id=filters['nova_keypair_id']) -    if 'display_name' in filters: -        query = query.filter_by(display_name=filters['display_name']) -    if 'status' in filters: -        query = query.filter_by(status=filters['status']) -    if 'is_default' in filters: -        query = query.filter_by(is_default=filters['is_default']) - -    response_keypairs = query.all() - -    return [dict(keypair) for keypair in response_keypairs] - - -def keypair_get_by_keypair_id(context, gid, keypair_id): -    session = get_session() -    keypair = session.query(models.Keypair)\ -        .filter_by(gid=gid)\ -        .filter_by(keypair_id=keypair_id)\ -        .filter_by(deleted=0)\ -        .first() - -    if not keypair: -        raise exception.KeypairNotFound(keypair_id=keypair_id) - -    return dict(keypair) - - -def keypair_create(context, values): -    session = get_session() -    keypair_ref = models.Keypair() -    keypair_ref.update(values) -    keypair_ref.save(session) -    return dict(keypair_ref) - - -def keypair_update(context, gid, keypair_id, values): -    session = get_session() -    keypair_ref = session.query(models.Keypair)\ -        .filter_by(gid=gid)\ -        .filter_by(keypair_id=keypair_id)\ -        .filter_by(deleted=0)\ -        .first() -    if keypair_ref is None: -        raise exception.KeypairNotFound(keypair_id=keypair_id) - -    keypair_ref.update(values) -    keypair_ref.save(session) - -    return dict(keypair_ref) - - -def keypair_delete(context, gid, keypair_id): -    session = get_session() -    keypair_ref = session.query(models.Keypair)\ -        .filter_by(gid=gid)\ -        .filter_by(keypair_id=keypair_id)\ -        .filter_by(deleted=0)\ -        .first() -    if keypair_ref is None: -        raise exception.KeypairNotFound(keypair_id=keypair_id) - -    values = { -        "status": "DELETING", -        "deleted": 1, -        "deleted_at": timeutils.utcnow() -    } -    keypair_ref.update(values) -    keypair_ref.save(session) - -    return dict(keypair_ref) - - -def securitygroup_get_all(context, gid, filters={}): -    session = get_session() -    query = session.query(models.Securitygroup).filter_by(gid=gid, deleted=0) - -    if 'securitygroup_id' in filters: -        query = query.filter_by(securitygroup_id=filters['securitygroup_id']) -    if 'name' in filters: -        query = query.filter_by(display_name=filters['name']) -    if 'status' in filters: -        query = query.filter_by(status=filters['status']) -    if 'is_default' in filters: -        query = query.filter_by(is_default=filters['is_default']) -    securitygroups = query.all() - -    return [dict(securitygroup) for securitygroup in securitygroups] - - -def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id): -    session = get_session() -    securitygroup = session.query(models.Securitygroup)\ -        .filter_by(deleted=0)\ -        .filter_by(gid=gid)\ -        .filter_by(securitygroup_id=securitygroup_id)\ -        .first() - -    if not securitygroup: -        raise exception.SecuritygroupNotFound( -            securitygroup_id=securitygroup_id) - -    securitygroup_dict = dict(securitygroup) -    securitygroup_dict.update( -        dict(processes=[dict(process) for process in securitygroup.processes])) -    return securitygroup_dict - - -def securitygroup_create(context, values): -    session = get_session() -    securitygroup_ref = models.Securitygroup() -    securitygroup_ref.update(values) -    securitygroup_ref.save(session) - -    return dict(securitygroup_ref) - - -def securitygroup_update(context, gid, securitygroup_id, values): -    session = get_session() -    securitygroup_ref = session.query(models.Securitygroup). \ -        filter_by(deleted=0). \ -        filter_by(gid=gid). 
\ - filter_by(securitygroup_id=securitygroup_id). \ - first() - if securitygroup_ref is None: - raise exception.SecuritygroupNotFound( - securitygroup_id=securitygroup_id) - - securitygroup_ref.update(values) - securitygroup_ref.save(session) - - return dict(securitygroup_ref) - - -def securitygroup_delete(context, gid, securitygroup_id): - session = get_session() - securitygroup_ref = session.query(models.Securitygroup). \ - filter_by(deleted=0). \ - filter_by(gid=gid). \ - filter_by(securitygroup_id=securitygroup_id). \ - first() - if securitygroup_ref is None: - raise exception.SecuritygroupNotFound( - securitygroup_id=securitygroup_id) - - securitygroup_ref.update({"deleted": 1, - 'deleted_at': timeutils.utcnow(), - "status": "DELETING"}) - securitygroup_ref.save(session) - - return dict(securitygroup_ref) - - -def process_get_all(context, gid, filters={}): - session = get_session() - query = session.query(models.Process).filter_by(gid=gid, deleted=0) - - if 'pid' in filters: - query = query.filter_by(pid=filters['pid']) - if 'ppid' in filters: - query = query.filter_by(ppid=filters['ppid']) - if 'name' in filters: - query = query.filter_by(display_name=filters['name']) - if 'status' in filters: - query = query.filter_by(status=filters['status']) - if 'glance_image_id' in filters: - query = query.filter_by(glance_image_id=filters['glance_image_id']) - if 'nova_flavor_id' in filters: - query = query.filter_by(nova_flavor_id=filters['nova_flavor_id']) - if 'keypair_id' in filters: - query = query.filter_by(keypair_id=filters['keypair_id']) - if 'securitygroup_id' in filters: - query = query.filter( - models.Process.securitygroups.any( - securitygroup_id=filters["securitygroup_id"])) - if 'network_id' in filters: - query = query.filter( - models.Process.networks.any( - network_id=filters["network_id"])) - if 'is_proxy' in filters: - query = query.filter_by(is_proxy=filters['is_proxy']) - if 'app_status' in filters: - query = query.filter_by(app_status=filters['app_status']) - - process_refs = query.all() - return [_get_process_dict(process_ref) for process_ref in process_refs] - - -def process_get_by_pid(context, gid, pid): - session = get_session() - process_ref = session.query(models.Process)\ - .filter_by(gid=gid)\ - .filter_by(pid=pid)\ - .first() - - if not process_ref: - raise exception.ProcessNotFound(pid=pid) - return _get_process_dict(process_ref) - - -def process_get_not_error_status_for_proxy(context, gid): - session = get_session() - query = session.query(models.Process).filter_by( - gid=gid, deleted=0, is_proxy=True) - process_refs = query.filter(models.Process.status != 'ERROR').all() - - return [_get_process_dict(process_ref) for process_ref in process_refs] - - -def process_create(context, values, network_ids, securitygroup_ids): - session = get_session() - with session.begin(): - process_ref = models.Process(**values) - session.add(process_ref) - - try: - if network_ids: - for network_id in network_ids: - network_ref = session.query(models.Network)\ - .filter_by(deleted=0)\ - .filter_by(gid=values["gid"])\ - .filter_by(network_id=network_id)\ - .first() - if network_ref is None: - raise exception.NetworkNotFound(network_id=network_id) - session.add( - models.ProcessNetwork(pid=values["pid"], - network_id=network_ref - .network_id)) - - if securitygroup_ids: - for securitygroup_id in securitygroup_ids: - securitygroup_ref = session.query(models.Securitygroup)\ - .filter_by(deleted=0)\ - .filter_by(gid=values["gid"])\ - .filter_by(securitygroup_id=securitygroup_id)\ - 
.first() - if securitygroup_ref is None: - raise exception.SecuritygroupNotFound( - securitygroup_id=securitygroup_id) - session.add(models.ProcessSecuritygroup( - pid=values["pid"], - securitygroup_id=securitygroup_ref.securitygroup_id)) - - session.flush() - except db_exc.DBDuplicateEntry: - msg = _("securitygroup or network is duplicated") - raise exception.InvalidInput(reason=msg) - - return _get_process_dict(process_ref) - - -def process_update(context, gid, pid, values): - session = get_session() - process_ref = session.query(models.Process). \ - filter_by(deleted=0). \ - filter_by(gid=gid). \ - filter_by(pid=pid). \ - first() - if process_ref is None: - raise exception.ProcessNotFound(pid=pid) - - process_ref.update(values) - process_ref.save(session) - - return _get_process_dict(process_ref) - - -def process_delete(context, gid, pid): - session = get_session() - process_ref = session.query(models.Process). \ - filter_by(deleted=0). \ - filter_by(gid=gid). \ - filter_by(pid=pid). \ - first() - if process_ref is None: - raise exception.ProcessNotFound(pid=pid) - - process_ref.update({"deleted": 1, - 'deleted_at': timeutils.utcnow(), - "status": "DELETING"}) - process_ref.save(session) - - return _get_process_dict(process_ref) - - -def _get_process_dict(process_ref): - process_dict = dict(process_ref) - process_dict.update(dict(securitygroups=[dict(securitygroup) - for securitygroup in process_ref - .securitygroups])) - process_dict.update(dict(networks=[dict(network) - for network in process_ref.networks])) - return process_dict diff --git a/rack/db/sqlalchemy/migrate_repo/README b/rack/db/sqlalchemy/migrate_repo/README deleted file mode 100644 index 6218f8c..0000000 --- a/rack/db/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. - -More information at -http://code.google.com/p/sqlalchemy-migrate/ diff --git a/rack/db/sqlalchemy/migrate_repo/__init__.py b/rack/db/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/db/sqlalchemy/migrate_repo/manage.py b/rack/db/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index b379141..0000000 --- a/rack/db/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from migrate.versioning.shell import main - - -if __name__ == '__main__': - main(debug='False', repository='.') diff --git a/rack/db/sqlalchemy/migrate_repo/migrate.cfg b/rack/db/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index e17cc8a..0000000 --- a/rack/db/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=rack - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. 
-# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py b/rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py deleted file mode 100644 index 3526125..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from sqlalchemy import MetaData, Table, Column, Integer, String, DateTime - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -meta = MetaData() - -groups = Table('groups', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Integer), - Column( - 'gid', String(length=255), primary_key=True, - nullable=False), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - Column('display_name', String(length=255)), - Column('display_description', String(length=255)), - Column('status', String(length=255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - try: - groups.create() - except Exception: - LOG.info(repr(groups)) - LOG.exception(_('Exception while creating groups table.')) - raise - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - groups.drop() - except Exception: - LOG.info(repr(groups)) - LOG.exception(_('Exception while dropping groups table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py b/rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py deleted file mode 100644 index 02e9a99..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from migrate.changeset import UniqueConstraint -from sqlalchemy import Column, MetaData, Table -from sqlalchemy import Boolean, DateTime, Integer, String - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - -meta = MetaData() - - -services = Table('services', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('id', Integer, primary_key=True, nullable=False), - Column('host', String(length=255)), - Column('binary', String(length=255)), - Column('topic', String(length=255)), - Column('report_count', Integer, nullable=False), - Column('disabled', Boolean), - Column('deleted', Integer), - Column('disabled_reason', String(length=255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - try: - services.create() - except Exception: - LOG.info(repr(services)) - LOG.exception(_('Exception while creating services table.')) - raise - - UniqueConstraint('host', 'topic', 'deleted', - table=services, - name='uniq_services0host0topic0deleted').create() - UniqueConstraint('host', 'binary', 'deleted', - table=services, - name='uniq_services0host0binary0deleted').create() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - services.drop() - except Exception: - LOG.info(repr(services)) - LOG.exception(_('Exception while dropping services table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py b/rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py deleted file mode 100644 index 0d5911d..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
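A note on the services migration above: 'deleted' is part of both unique constraints so that a soft-deleted row does not block re-registering the same host/topic or host/binary pair. Under oslo's convention a soft delete sets deleted to the row's id while live rows keep 0, so the triples never collide; a pure-Python illustration:

rows = [
    {'id': 1, 'host': 'node1', 'topic': 'rackapi', 'deleted': 1},  # soft-deleted
    {'id': 2, 'host': 'node1', 'topic': 'rackapi', 'deleted': 0},  # live
]
keys = {(r['host'], r['topic'], r['deleted']) for r in rows}
assert len(keys) == len(rows)  # same host/topic, yet no constraint violation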
-from migrate import ForeignKeyConstraint - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -from sqlalchemy import Boolean, DateTime, Integer, String, Text -from sqlalchemy import Column, MetaData, Table - -LOG = logging.getLogger(__name__) - -meta = MetaData() - -keypairs = Table('keypairs', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Integer), - Column('keypair_id', String(length=36), - primary_key=True, nullable=False), - Column('gid', String(length=36), nullable=False), - Column('nova_keypair_id', String(length=255)), - Column('private_key', Text), - Column('display_name', String(length=255)), - Column('is_default', Boolean), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - try: - keypairs.create() - groups = Table("groups", meta, autoload=True) - ForeignKeyConstraint([keypairs.c.gid], [groups.c.gid]).create() - except Exception: - LOG.info(repr(keypairs)) - LOG.exception(_('Exception while creating keypairs table.')) - raise - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - keypairs.drop() - except Exception: - LOG.info(repr(keypairs)) - LOG.exception(_('Exception while dropping keypairs table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py b/rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py deleted file mode 100644 index c895726..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
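The migration scripts in this repository all follow the same contract: a module-level upgrade()/downgrade() pair taking a bound engine. That makes a single script easy to smoke-test against throwaway SQLite, a sketch of which follows (assumes rack is importable and a pre-2.0 SQLAlchemy, which this code targets; the numeric module names force importlib):

import importlib

import sqlalchemy

v001 = importlib.import_module(
    'rack.db.sqlalchemy.migrate_repo.versions.001_Add_groups_table')

engine = sqlalchemy.create_engine('sqlite://')
v001.upgrade(engine)     # creates the groups table
v001.downgrade(engine)   # and drops it again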
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column, MetaData, Table -from sqlalchemy import Boolean, DateTime, Integer, String - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -meta = MetaData() - - -securitygroups = Table('securitygroups', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Integer), - Column('securitygroup_id', String(length=36), - primary_key=True, nullable=False), - Column('gid', String(length=36), nullable=False), - Column('neutron_securitygroup_id', String(length=36)), - Column('is_default', Boolean), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - Column('display_name', String(length=255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - groups = Table("groups", meta, autoload=True) - - try: - securitygroups.create() - except Exception: - LOG.info(repr(securitygroups)) - LOG.exception(_('Exception while creating securitygroups table.')) - raise - - ForeignKeyConstraint(columns=[securitygroups.c.gid], - refcolumns=[groups.c.gid]).create() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - securitygroups.drop() - except Exception: - LOG.info(repr(securitygroups)) - LOG.exception(_('Exception while dropping securitygroups table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py b/rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py deleted file mode 100644 index f0d55d5..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from migrate import ForeignKeyConstraint -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from sqlalchemy import MetaData, Table, Column, Integer -from sqlalchemy import String, DateTime, Boolean - - -LOG = logging.getLogger(__name__) - -meta = MetaData() - -networks = Table('networks', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('network_id', String(length=255), - primary_key=True, nullable=False), - Column('gid', String(length=255), nullable=False), - Column('neutron_network_id', String(length=255)), - Column('is_admin', Boolean), - Column('cidr', String(length=255)), - Column('ext_router', String(length=255)), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - Column('display_name', String(length=255)), - Column('deleted', Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - try: - networks.create() - groups = Table("groups", meta, autoload=True) - ForeignKeyConstraint([networks.c.gid], [groups.c.gid]).create() - except Exception: - LOG.info(repr(networks)) - LOG.exception(_('Exception while creating networks table.')) - raise - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - networks.drop() - except Exception: - LOG.info(repr(networks)) - LOG.exception(_('Exception while dropping networks table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py b/rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py deleted file mode 100644 index 9d0a253..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column, MetaData, Table -from sqlalchemy import Boolean, DateTime, Integer, String, Text - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -meta = MetaData() - - -processes = Table('processes', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Integer, nullable=False), - Column('gid', String(length=36), nullable=False), - Column('keypair_id', String(length=36)), - Column( - 'pid', String(length=36), primary_key=True, - nullable=False), - Column('ppid', String(length=36)), - Column('nova_instance_id', String(length=36)), - Column('glance_image_id', String(length=36)), - Column('nova_flavor_id', Integer), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - Column('display_name', String(length=255)), - Column('app_status', Text), - Column('is_proxy', Boolean), - Column('shm_endpoint', Text), - Column('ipc_endpoint', Text), - Column('fs_endpoint', Text), - Column('args', Text), - Column('userdata', Text), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - groups = Table("groups", meta, autoload=True) - keypairs = Table("keypairs", meta, autoload=True) - - try: - processes.create() - except Exception: - LOG.info(repr(processes)) - LOG.exception(_('Exception while creating processes table.')) - raise - - ForeignKeyConstraint(columns=[processes.c.gid], - refcolumns=[groups.c.gid]).create() - - ForeignKeyConstraint(columns=[processes.c.keypair_id], - refcolumns=[keypairs.c.keypair_id]).create() - - ForeignKeyConstraint(columns=[processes.c.ppid], - refcolumns=[processes.c.pid]).create() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - processes.drop() - except Exception: - LOG.info(repr(processes)) - LOG.exception(_('Exception while dropping processes table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py b/rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py deleted file mode 100644 index 8cbad12..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
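The ppid -> pid foreign key in the processes migration above is self-referential: rows form a process tree, which is also why the deleted api module accepts a 'ppid' filter. A trivial sketch of walking one level of that tree over fetched rows:

def children_of(processes, pid):
    """processes: list of dicts as returned by process_get_all()."""
    return [p for p in processes if p['ppid'] == pid]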
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column, MetaData, Table -from sqlalchemy import String - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -meta = MetaData() - - -processes_securitygroups = Table('processes_securitygroups', meta, - Column( - 'pid', String(length=36), nullable=False, - primary_key=True), - Column( - 'securitygroup_id', String(length=36), - nullable=False, primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - processes = Table("processes", meta, autoload=True) - securitygroups = Table("securitygroups", meta, autoload=True) - - try: - processes_securitygroups.create() - except Exception: - LOG.info(repr(processes_securitygroups)) - LOG.exception( - _('Exception while creating processes_securitygroups table.')) - raise - - ForeignKeyConstraint(columns=[processes_securitygroups.c.pid], - refcolumns=[processes.c.pid]).create() - ForeignKeyConstraint( - columns=[processes_securitygroups.c.securitygroup_id], - refcolumns=[securitygroups.c.securitygroup_id]).create() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - processes_securitygroups.drop() - except Exception: - LOG.info(repr(processes_securitygroups)) - LOG.exception( - _('Exception while dropping processes_securitygroups table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py b/rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py deleted file mode 100644 index 9cf28e8..0000000 --- a/rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column, MetaData, Table -from sqlalchemy import String - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -meta = MetaData() - - -processes_networks = Table('processes_networks', meta, - Column( - 'pid', String(length=36), nullable=False, - primary_key=True), - Column( - 'network_id', String(length=36), - nullable=False, primary_key=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - processes = Table("processes", meta, autoload=True) - networks = Table("networks", meta, autoload=True) - - try: - processes_networks.create() - except Exception: - LOG.info(repr(processes_networks)) - LOG.exception(_('Exception while creating processes_networks table.')) - raise - - ForeignKeyConstraint(columns=[processes_networks.c.pid], - refcolumns=[processes.c.pid]).create() - ForeignKeyConstraint(columns=[processes_networks.c.network_id], - refcolumns=[networks.c.network_id]).create() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - try: - processes_networks.drop() - except Exception: - LOG.info(repr(processes_networks)) - LOG.exception(_('Exception while dropping processes_networks table.')) - raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/__init__.py b/rack/db/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/db/sqlalchemy/migration.py b/rack/db/sqlalchemy/migration.py deleted file mode 100644 index dbd72c7..0000000 --- a/rack/db/sqlalchemy/migration.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
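processes_securitygroups and processes_networks above are plain join tables; the ORM exposes them as relationships, which is what lets the deleted api module filter with .any(). A usage sketch (assumes a configured session and that the rack models are importable):

from rack.db.sqlalchemy import models


def processes_on_network(session, gid, network_id):
    # mirrors the 'network_id' filter branch in process_get_all()
    return (session.query(models.Process)
            .filter_by(gid=gid, deleted=0)
            .filter(models.Process.networks.any(network_id=network_id))
            .all())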
- -import os - -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository -import sqlalchemy - -from rack.db.sqlalchemy import api as db_session -from rack import exception -from rack.openstack.common.gettextutils import _ - -INIT_VERSION = 0 -_REPOSITORY = None - -get_engine = db_session.get_engine - - -def db_sync(version=None): - if version is not None: - try: - version = int(version) - except ValueError: - raise exception.RackException(_("version should be an integer")) - - current_version = db_version() - repository = _find_migrate_repo() - if version is None or version > current_version: - return versioning_api.upgrade(get_engine(), repository, version) - else: - return versioning_api.downgrade(get_engine(), repository, - version) - - -def db_version(): - repository = _find_migrate_repo() - try: - return versioning_api.db_version(get_engine(), repository) - except versioning_exceptions.DatabaseNotControlledError: - meta = sqlalchemy.MetaData() - engine = get_engine() - meta.reflect(bind=engine) - tables = meta.tables - if len(tables) == 0: - db_version_control(INIT_VERSION) - return versioning_api.db_version(get_engine(), repository) - else: - # Some pre-Essex DB's may not be version controlled. - # Require them to upgrade using Essex first. - raise exception.RackException( - _("Upgrade DB using Essex release first.")) - - -def db_initial_version(): - return INIT_VERSION - - -def db_version_control(version=None): - repository = _find_migrate_repo() - versioning_api.version_control(get_engine(), repository, version) - return version - - -def _find_migrate_repo(): - """Get the path for the migrate repository.""" - global _REPOSITORY - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'migrate_repo') - assert os.path.exists(path) - if _REPOSITORY is None: - _REPOSITORY = Repository(path) - return _REPOSITORY diff --git a/rack/db/sqlalchemy/models.py b/rack/db/sqlalchemy/models.py deleted file mode 100644 index 1b89422..0000000 --- a/rack/db/sqlalchemy/models.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
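A minimal usage sketch for the migration API defined in migration.py above, assuming the engine configuration is already in place; the explicit version number is illustrative:

    from rack.db.sqlalchemy import migration

    # Stamp an empty schema, then walk it to the newest version.
    migration.db_version_control(migration.db_initial_version())
    migration.db_sync()

    # db_sync() also downgrades when given a version below the current one.
    migration.db_sync(4)
    print(migration.db_version())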
-from rack.openstack.common.db.sqlalchemy import models - -from sqlalchemy import Boolean, Column, ForeignKey, Integer, String -from sqlalchemy import Text, schema -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import relationship - -Base = declarative_base() - - -class Group(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase, - Base): - - __tablename__ = 'groups' - securitygroups = relationship("Securitygroup") - processes = relationship("Process") - - gid = Column(String(36), primary_key=True) - user_id = Column(String(255)) - project_id = Column(String(255)) - display_name = Column(String(255)) - display_description = Column(String(255)) - status = Column(String(255)) - - -class Service(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase, - Base): - - """Represents a running service on a host.""" - - __tablename__ = 'services' - __table_args__ = ( - schema.UniqueConstraint("host", "topic", "deleted", - name="uniq_services0host0topic0deleted"), - schema.UniqueConstraint("host", "binary", "deleted", - name="uniq_services0host0binary0deleted") - ) - - id = Column(Integer, primary_key=True) - host = Column(String(255)) - binary = Column(String(255)) - topic = Column(String(255)) - report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=False) - disabled_reason = Column(String(255)) - - -class Network(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase, - Base): - - __tablename__ = 'networks' - - network_id = Column(String(255), primary_key=True) - gid = Column(String(255)) - neutron_network_id = Column(String(255)) - is_admin = Column(Boolean, default=False) - cidr = Column(String(255)) - ext_router = Column(String(255)) - user_id = Column(String(255)) - project_id = Column(String(255)) - display_name = Column(String(255)) - - -class Keypair(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase, - Base): - - __tablename__ = 'keypairs' - - keypair_id = Column(String(36), primary_key=True) - gid = Column(String(36), ForeignKey('groups.gid'), nullable=False) - user_id = Column(String(255)) - project_id = Column(String(255)) - nova_keypair_id = Column(String(255)) - private_key = Column(Text) - display_name = Column(String(255)) - is_default = Column(Boolean, default=False) - - -class Securitygroup(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase, - Base): - - __tablename__ = 'securitygroups' - - deleted = Column(Integer, nullable=False, default=0) - securitygroup_id = Column(String(36), primary_key=True) - gid = Column(String(36), ForeignKey('groups.gid')) - neutron_securitygroup_id = Column(String(36)) - is_default = Column(Boolean, default=False) - user_id = Column(String(255)) - project_id = Column(String(255)) - display_name = Column(String(255)) - - group = relationship("Group", - foreign_keys=gid, - primaryjoin='and_(' - 'Securitygroup.gid == Group.gid,' - 'Securitygroup.deleted == 0,' - 'Group.deleted == 0)') - - -class Process(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase, - Base): - - __tablename__ = 'processes' - - deleted = Column(Integer, nullable=False, default=0) - gid = Column(String(36), ForeignKey('groups.gid'), nullable=False) - keypair_id = Column(String(36), ForeignKey('keypairs.keypair_id')) - pid = Column(String(36), primary_key=True) - ppid = Column(String(36), ForeignKey('processes.pid')) - nova_instance_id = Column(String(36)) - glance_image_id = Column(String(36)) - nova_flavor_id = 
Column(Integer) - user_id = Column(String(255)) - project_id = Column(String(255)) - display_name = Column(String(255)) - is_proxy = Column(Boolean(), default=False) - shm_endpoint = Column(Text) - ipc_endpoint = Column(Text) - fs_endpoint = Column(Text) - args = Column(Text) - userdata = Column(Text) - app_status = Column(Text) - - group = relationship("Group", - foreign_keys=gid, - primaryjoin='and_(' - 'Process.gid == Group.gid,' - 'Process.deleted == 0,' - 'Group.deleted == 0)') - - securitygroups = relationship("Securitygroup", - secondary="processes_securitygroups", - primaryjoin='and_(' - 'Process.pid == ProcessSecuritygroup.pid,' - 'Process.deleted == 0)', - secondaryjoin='and_(' - 'Securitygroup.securitygroup_id == ' - 'ProcessSecuritygroup.securitygroup_id,' - 'Securitygroup.deleted == 0)', - backref="processes") - - networks = relationship("Network", - secondary="processes_networks", - primaryjoin='and_(' - 'Process.pid == ProcessNetwork.pid,' - 'Process.deleted == 0)', - secondaryjoin='and_(' - 'Network.network_id == ProcessNetwork.network_id,' - 'Network.deleted == 0)', - backref="processes") - - -class ProcessSecuritygroup(models.ModelBase, Base): - - __tablename__ = 'processes_securitygroups' - - pid = Column(String(36), ForeignKey( - 'processes.pid'), nullable=False, primary_key=True) - securitygroup_id = Column(String(36), ForeignKey( - 'securitygroups.securitygroup_id'), nullable=False, primary_key=True) - - -class ProcessNetwork(models.ModelBase, Base): - - __tablename__ = 'processes_networks' - - pid = Column(String(36), ForeignKey( - 'processes.pid'), nullable=False, primary_key=True) - network_id = Column(String(36), ForeignKey( - 'networks.network_id'), nullable=False, primary_key=True) diff --git a/rack/db/sqlalchemy/types.py b/rack/db/sqlalchemy/types.py deleted file mode 100644 index 4e8cb74..0000000 --- a/rack/db/sqlalchemy/types.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Custom SQLAlchemy types.""" - -from sqlalchemy.dialects import postgresql -from sqlalchemy import types - -from rack import utils - - -class IPAddress(types.TypeDecorator): - """An SQLAlchemy type representing an IP-address.""" - - impl = types.String - - def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': - return dialect.type_descriptor(postgresql.INET()) - else: - return dialect.type_descriptor(types.String(39)) - - def process_bind_param(self, value, dialect): - """Process/Formats the value before insert it into the db.""" - if dialect.name == 'postgresql': - return value - # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened - # form, not validate it. 
- elif utils.is_valid_ipv6(value): - return utils.get_shortened_ipv6(value) - return value - - -class CIDR(types.TypeDecorator): - """An SQLAlchemy type representing a CIDR definition.""" - - impl = types.String - - def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': - return dialect.type_descriptor(postgresql.INET()) - else: - return dialect.type_descriptor(types.String(43)) - - def process_bind_param(self, value, dialect): - """Process/Formats the value before insert it into the db.""" - # NOTE(sdague): normalize all the inserts - if utils.is_valid_ipv6_cidr(value): - return utils.get_shortened_ipv6_cidr(value) - return value diff --git a/rack/db/sqlalchemy/utils.py b/rack/db/sqlalchemy/utils.py deleted file mode 100644 index 7800a7b..0000000 --- a/rack/db/sqlalchemy/utils.py +++ /dev/null @@ -1,611 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from migrate.changeset import UniqueConstraint, ForeignKeyConstraint -from sqlalchemy import Boolean -from sqlalchemy import CheckConstraint -from sqlalchemy import Column -from sqlalchemy.engine import reflection -from sqlalchemy.exc import OperationalError -from sqlalchemy.exc import ProgrammingError -from sqlalchemy.ext.compiler import compiles -from sqlalchemy import func -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import schema -from sqlalchemy.sql.expression import literal_column -from sqlalchemy.sql.expression import UpdateBase -from sqlalchemy.sql import select -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy.types import NullType - -from rack.db.sqlalchemy import api as db -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import timeutils - - -LOG = logging.getLogger(__name__) - - -def get_table(engine, name): - """Returns an sqlalchemy table dynamically from db. - - Needed because the models don't work for us in migrations - as models will be far out of sync with the current data. 
- """ - metadata = MetaData() - metadata.bind = engine - return Table(name, metadata, autoload=True) - - -class InsertFromSelect(UpdateBase): - - def __init__(self, table, select): - self.table = table - self.select = select - - -@compiles(InsertFromSelect) -def visit_insert_from_select(element, compiler, **kw): - return "INSERT INTO %s %s" % ( - compiler.process(element.table, asfrom=True), - compiler.process(element.select)) - - -class DeleteFromSelect(UpdateBase): - - def __init__(self, table, select, column): - self.table = table - self.select = select - self.column = column - - -@compiles(DeleteFromSelect) -def visit_delete_from_select(element, compiler, **kw): - return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % ( - compiler.process(element.table, asfrom=True), - compiler.process(element.column), - element.column.name, - compiler.process(element.select)) - - -def _get_not_supported_column(col_name_col_instance, column_name): - try: - column = col_name_col_instance[column_name] - except Exception: - msg = _("Please specify column %s in col_name_col_instance " - "param. It is required because column has unsupported " - "type by sqlite).") - raise exception.RackException(msg % column_name) - - if not isinstance(column, Column): - msg = _("col_name_col_instance param has wrong type of " - "column instance for column %s It should be instance " - "of sqlalchemy.Column.") - raise exception.RackException(msg % column_name) - return column - - -def _get_unique_constraints_in_sqlite(migrate_engine, table_name): - regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" - - meta = MetaData(bind=migrate_engine) - table = Table(table_name, meta, autoload=True) - - sql_data = migrate_engine.execute( - """ - SELECT sql - FROM - sqlite_master - WHERE - type = 'table' AND - name = :table_name; - """, - table_name=table_name - ).fetchone()[0] - - uniques = set([ - schema.UniqueConstraint( - *[getattr(table.c, c.strip(' "')) - for c in cols.split(",")], name=name - ) - for name, cols in re.findall(regexp, sql_data) - ]) - - return uniques - - -def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, - **col_name_col_instance): - insp = reflection.Inspector.from_engine(migrate_engine) - meta = MetaData(bind=migrate_engine) - - table = Table(table_name, meta, autoload=True) - columns = [] - for column in table.columns: - if isinstance(column.type, NullType): - new_column = _get_not_supported_column(col_name_col_instance, - column.name) - columns.append(new_column) - else: - columns.append(column.copy()) - - uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name) - table.constraints.update(uniques) - - constraints = [constraint for constraint in table.constraints - if not constraint.name == uc_name and - not isinstance(constraint, schema.ForeignKeyConstraint)] - - new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) - new_table.create() - - indexes = [] - for index in insp.get_indexes(table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], - *column_names, - unique=index["unique"])) - f_keys = [] - for fk in insp.get_foreign_keys(table_name): - refcolumns = [fk['referred_table'] + '.' 
+ col - for col in fk['referred_columns']] - f_keys.append(ForeignKeyConstraint(fk['constrained_columns'], - refcolumns, table=new_table, - name=fk['name'])) - - ins = InsertFromSelect(new_table, table.select()) - migrate_engine.execute(ins) - table.drop() - - [index.create(migrate_engine) for index in indexes] - for fkey in f_keys: - fkey.create() - new_table.rename(table_name) - - -def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, - **col_name_col_instance): - """This method drops UC from table and works for mysql, postgresql and - sqlite. In mysql and postgresql we are able to use "alter table" - construction. In sqlite is only one way to drop UC: - 1) Create new table with same columns, indexes and constraints - (except one that we want to drop). - 2) Copy data from old table to new. - 3) Drop old table. - 4) Rename new table to the name of old table. - - :param migrate_engine: sqlalchemy engine - :param table_name: name of table that contains uniq constraint. - :param uc_name: name of uniq constraint that will be dropped. - :param columns: columns that are in uniq constraint. - :param col_name_col_instance: contains pair column_name=column_instance. - column_instance is instance of Column. These params - are required only for columns that have unsupported - types by sqlite. For example BigInteger. - """ - if migrate_engine.name == "sqlite": - _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, - **col_name_col_instance) - else: - meta = MetaData() - meta.bind = migrate_engine - t = Table(table_name, meta, autoload=True) - uc = UniqueConstraint(*columns, table=t, name=uc_name) - uc.drop() - - -def drop_old_duplicate_entries_from_table(migrate_engine, table_name, - use_soft_delete, *uc_column_names): - """This method is used to drop all old rows that have the same values for - columns in uc_columns. - """ - meta = MetaData() - meta.bind = migrate_engine - - table = Table(table_name, meta, autoload=True) - columns_for_group_by = [table.c[name] for name in uc_column_names] - - columns_for_select = [func.max(table.c.id)] - columns_for_select.extend(list(columns_for_group_by)) - - duplicated_rows_select = select(columns_for_select, - group_by=columns_for_group_by, - having=func.count(table.c.id) > 1) - - for row in migrate_engine.execute(duplicated_rows_select): - # NOTE(boris-42): Do not remove row that has the biggest ID. - delete_condition = table.c.id != row[0] - for name in uc_column_names: - delete_condition &= table.c[name] == row[name] - - rows_to_delete_select = select([table.c.id]).where(delete_condition) - for row in migrate_engine.execute(rows_to_delete_select).fetchall(): - LOG.info(_("Deleted duplicated row with id: %(id)s from table: " - "%(table)s") % dict(id=row[0], table=table_name)) - - if use_soft_delete: - delete_statement = table.update().\ - where(delete_condition).\ - values({ - 'deleted': literal_column('id'), - 'updated_at': literal_column('updated_at'), - 'deleted_at': timeutils.utcnow() - }) - else: - delete_statement = table.delete().where(delete_condition) - migrate_engine.execute(delete_statement) - - -def check_shadow_table(migrate_engine, table_name): - """This method checks that table with ``table_name`` and - corresponding shadow table have same columns. 
- """ - meta = MetaData() - meta.bind = migrate_engine - - table = Table(table_name, meta, autoload=True) - shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, - autoload=True) - - columns = dict([(c.name, c) for c in table.columns]) - shadow_columns = dict([(c.name, c) for c in shadow_table.columns]) - - for name, column in columns.iteritems(): - if name not in shadow_columns: - raise exception.RackException( - _("Missing column %(table)s.%(column)s in shadow table") - % {'column': name, 'table': shadow_table.name}) - shadow_column = shadow_columns[name] - - if not isinstance(shadow_column.type, type(column.type)): - raise exception.RackException( - _("Different types in %(table)s.%(column)s and shadow table: " - "%(c_type)s %(shadow_c_type)s") - % {'column': name, 'table': table.name, - 'c_type': column.type, - 'shadow_c_type': shadow_column.type}) - - for name, column in shadow_columns.iteritems(): - if name not in columns: - raise exception.RackException( - _("Extra column %(table)s.%(column)s in shadow table") - % {'column': name, 'table': shadow_table.name}) - return True - - -def create_shadow_table(migrate_engine, table_name=None, table=None, - **col_name_col_instance): - """This method create shadow table for table with name ``table_name`` - or table instance ``table``. - :param table_name: Autoload table with this name and create shadow table - :param table: Autoloaded table, so just create corresponding shadow table. - :param col_name_col_instance: contains pair column_name=column_instance. - column_instance is instance of Column. These params - are required only for columns that have unsupported - types by sqlite. For example BigInteger. - - :returns: The created shadow_table object. - """ - meta = MetaData(bind=migrate_engine) - - if table_name is None and table is None: - raise exception.RackException(_("Specify `table_name` or `table` " - "param")) - if not (table_name is None or table is None): - raise exception.RackException(_("Specify only one param `table_name` " - "`table`")) - - if table is None: - table = Table(table_name, meta, autoload=True) - - columns = [] - for column in table.columns: - if isinstance(column.type, NullType): - new_column = _get_not_supported_column(col_name_col_instance, - column.name) - columns.append(new_column) - else: - columns.append(column.copy()) - - shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name - shadow_table = Table(shadow_table_name, meta, *columns, - mysql_engine='InnoDB') - try: - shadow_table.create() - return shadow_table - except (OperationalError, ProgrammingError): - LOG.info(repr(shadow_table)) - LOG.exception(_('Exception while creating table.')) - raise exception.ShadowTableExists(name=shadow_table_name) - except Exception: - LOG.info(repr(shadow_table)) - LOG.exception(_('Exception while creating table.')) - - -def _get_default_deleted_value(table): - if isinstance(table.c.id.type, Integer): - return 0 - if isinstance(table.c.id.type, String): - return "" - raise exception.RackException(_("Unsupported id columns type")) - - -def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): - table = get_table(migrate_engine, table_name) - - insp = reflection.Inspector.from_engine(migrate_engine) - real_indexes = insp.get_indexes(table_name) - existing_index_names = dict([(index['name'], index['column_names']) - for index in real_indexes]) - - # NOTE(boris-42): Restore indexes on `deleted` column - for index in indexes: - if 'deleted' not in index['column_names']: - continue - name = 
index['name'] - if name in existing_index_names: - column_names = [table.c[c] for c in existing_index_names[name]] - old_index = Index(name, *column_names, unique=index["unique"]) - old_index.drop(migrate_engine) - - column_names = [table.c[c] for c in index['column_names']] - new_index = Index(index["name"], *column_names, unique=index["unique"]) - new_index.create(migrate_engine) - - -def change_deleted_column_type_to_boolean(migrate_engine, table_name, - **col_name_col_instance): - if migrate_engine.name == "sqlite": - return _change_deleted_column_type_to_boolean_sqlite( - migrate_engine, - table_name, - **col_name_col_instance) - insp = reflection.Inspector.from_engine(migrate_engine) - indexes = insp.get_indexes(table_name) - - table = get_table(migrate_engine, table_name) - - old_deleted = Column('old_deleted', Boolean, default=False) - old_deleted.create(table, populate_default=False) - - table.update().\ - where(table.c.deleted == table.c.id).\ - values(old_deleted=True).\ - execute() - - table.c.deleted.drop() - table.c.old_deleted.alter(name="deleted") - - _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) - - -def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, - **col_name_col_instance): - insp = reflection.Inspector.from_engine(migrate_engine) - table = get_table(migrate_engine, table_name) - - columns = [] - for column in table.columns: - column_copy = None - if column.name != "deleted": - if isinstance(column.type, NullType): - column_copy = _get_not_supported_column(col_name_col_instance, - column.name) - else: - column_copy = column.copy() - else: - column_copy = Column('deleted', Boolean, default=0) - columns.append(column_copy) - - constraints = [constraint.copy() for constraint in table.constraints] - - meta = MetaData(bind=migrate_engine) - new_table = Table(table_name + "__tmp__", meta, - *(columns + constraints)) - new_table.create() - - indexes = [] - for index in insp.get_indexes(table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], *column_names, - unique=index["unique"])) - - c_select = [] - for c in table.c: - if c.name != "deleted": - c_select.append(c) - else: - c_select.append(table.c.deleted == table.c.id) - - ins = InsertFromSelect(new_table, select(c_select)) - migrate_engine.execute(ins) - - table.drop() - [index.create(migrate_engine) for index in indexes] - - new_table.rename(table_name) - new_table.update().\ - where(new_table.c.deleted == new_table.c.id).\ - values(deleted=True).\ - execute() - - -def change_deleted_column_type_to_id_type(migrate_engine, table_name, - **col_name_col_instance): - if migrate_engine.name == "sqlite": - return _change_deleted_column_type_to_id_type_sqlite( - migrate_engine, - table_name, - **col_name_col_instance) - insp = reflection.Inspector.from_engine(migrate_engine) - indexes = insp.get_indexes(table_name) - - table = get_table(migrate_engine, table_name) - - new_deleted = Column('new_deleted', table.c.id.type, - default=_get_default_deleted_value(table)) - new_deleted.create(table, populate_default=True) - - table.update().\ - where(table.c.deleted).\ - values(new_deleted=table.c.id).\ - execute() - table.c.deleted.drop() - table.c.new_deleted.alter(name="deleted") - - _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) - - -def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, - **col_name_col_instance): - # NOTE(boris-42): sqlaclhemy-migrate can't drop column 
with check - # constraints in sqlite DB and our `deleted` column has - # 2 check constraints. So there is only one way to remove - # these constraints: - # 1) Create new table with the same columns, constraints - # and indexes. (except deleted column). - # 2) Copy all data from old to new table. - # 3) Drop old table. - # 4) Rename new table to old table name. - insp = reflection.Inspector.from_engine(migrate_engine) - meta = MetaData(bind=migrate_engine) - table = Table(table_name, meta, autoload=True) - default_deleted_value = _get_default_deleted_value(table) - - columns = [] - for column in table.columns: - column_copy = None - if column.name != "deleted": - if isinstance(column.type, NullType): - column_copy = _get_not_supported_column(col_name_col_instance, - column.name) - else: - column_copy = column.copy() - else: - column_copy = Column('deleted', table.c.id.type, - default=default_deleted_value) - columns.append(column_copy) - - def is_deleted_column_constraint(constraint): - # NOTE(boris-42): There is no other way to check is CheckConstraint - # associated with deleted column. - if not isinstance(constraint, CheckConstraint): - return False - sqltext = str(constraint.sqltext) - # NOTE(I159): when the type of column `deleted` is changed from boolean - # to int, the corresponding CHECK constraint is dropped too. But - # starting from SQLAlchemy version 0.8.3, those CHECK constraints - # aren't dropped anymore. So despite the fact that column deleted is - # of type int now, we still restrict its values to be either 0 or 1. - constraint_markers = ( - "deleted in (0, 1)", - "deleted IN (:deleted_1, :deleted_2)", - "deleted IN (:param_1, :param_2)" - ) - return any(sqltext.endswith(marker) for marker in constraint_markers) - - constraints = [] - for constraint in table.constraints: - if not is_deleted_column_constraint(constraint): - constraints.append(constraint.copy()) - - new_table = Table(table_name + "__tmp__", meta, - *(columns + constraints)) - new_table.create() - - indexes = [] - for index in insp.get_indexes(table_name): - column_names = [new_table.c[c] for c in index['column_names']] - indexes.append(Index(index["name"], *column_names, - unique=index["unique"])) - - ins = InsertFromSelect(new_table, table.select()) - migrate_engine.execute(ins) - - table.drop() - [index.create(migrate_engine) for index in indexes] - - new_table.rename(table_name) - new_table.update().\ - where(new_table.c.deleted).\ - values(deleted=new_table.c.id).\ - execute() - - # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. 
-    # NOTE: `not new_table.c.deleted` would be evaluated by Python, not by
-    # SQLAlchemy, and always yields a constant; compare with == False so
-    # the condition is rendered into the WHERE clause.
-    new_table.update().\
-        where(new_table.c.deleted == False).\
-        values(deleted=default_deleted_value).\
-        execute()
-
-
-def _index_exists(migrate_engine, table_name, index_name):
-    inspector = reflection.Inspector.from_engine(migrate_engine)
-    indexes = inspector.get_indexes(table_name)
-    index_names = [index['name'] for index in indexes]
-
-    return index_name in index_names
-
-
-def _add_index(migrate_engine, table, index_name, idx_columns):
-    index = Index(
-        index_name, *[getattr(table.c, col) for col in idx_columns]
-    )
-    index.create()
-
-
-def _drop_index(migrate_engine, table, index_name, idx_columns):
-    if _index_exists(migrate_engine, table.name, index_name):
-        index = Index(
-            index_name, *[getattr(table.c, col) for col in idx_columns]
-        )
-        index.drop()
-
-
-def _change_index_columns(migrate_engine, table, index_name,
-                          new_columns, old_columns):
-    _drop_index(migrate_engine, table, index_name, old_columns)
-    _add_index(migrate_engine, table, index_name, new_columns)
-
-
-def modify_indexes(migrate_engine, data, upgrade=True):
-    if migrate_engine.name == 'sqlite':
-        return
-
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    for table_name, indexes in data.iteritems():
-        table = Table(table_name, meta, autoload=True)
-
-        for index_name, old_columns, new_columns in indexes:
-            if not upgrade:
-                new_columns, old_columns = old_columns, new_columns
-
-            if migrate_engine.name == 'postgresql':
-                if upgrade:
-                    _add_index(migrate_engine, table, index_name, new_columns)
-                else:
-                    _drop_index(migrate_engine, table, index_name, old_columns)
-            elif migrate_engine.name == 'mysql':
-                _change_index_columns(migrate_engine, table, index_name,
-                                      new_columns, old_columns)
-            else:
-                raise ValueError('Unsupported DB %s' % migrate_engine.name)
diff --git a/rack/debugger.py b/rack/debugger.py
deleted file mode 100644
index 80f7018..0000000
--- a/rack/debugger.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import sys
-
-
-def enabled():
-    return ('--remote_debug-host' in sys.argv and
-            '--remote_debug-port' in sys.argv)
-
-
-def register_cli_opts():
-    from oslo.config import cfg
-
-    cli_opts = [
-        cfg.StrOpt('host',
-                   help='Debug host (IP or name) to connect to. Note '
-                        'that using the remote debug option changes how '
-                        'Rack uses the eventlet library to support async IO. '
-                        'This could result in failures that do not occur '
-                        'under normal operation. Use at your own risk.'),
-
-        cfg.IntOpt('port',
-                   help='Debug port to connect to. Note '
-                        'that using the remote debug option changes how '
-                        'Rack uses the eventlet library to support async IO. '
-                        'This could result in failures that do not occur '
-                        'under normal operation. 
Use at your own risk.') - - ] - - cfg.CONF.register_cli_opts(cli_opts, 'remote_debug') - - -def init(): - from oslo.config import cfg - CONF = cfg.CONF - - # NOTE(markmc): gracefully handle the CLI options not being registered - if 'remote_debug' not in CONF: - return - - if not (CONF.remote_debug.host and CONF.remote_debug.port): - return - - from rack.openstack.common.gettextutils import _ - from rack.openstack.common import log as logging - LOG = logging.getLogger(__name__) - - LOG.debug(_('Listening on %(host)s:%(port)s for debug connection'), - {'host': CONF.remote_debug.host, - 'port': CONF.remote_debug.port}) - - from pydev import pydevd - pydevd.settrace(host=CONF.remote_debug.host, - port=CONF.remote_debug.port, - stdoutToServer=False, - stderrToServer=False) - - LOG.warn(_('WARNING: Using the remote debug option changes how ' - 'Rack uses the eventlet library to support async IO. This ' - 'could result in failures that do not occur under normal ' - 'operation. Use at your own risk.')) diff --git a/rack/exception.py b/rack/exception.py deleted file mode 100644 index 2b1be96..0000000 --- a/rack/exception.py +++ /dev/null @@ -1,1636 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import functools -import sys - -from oslo.config import cfg -import webob.exc - -from rack.openstack.common import excutils -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack import safe_utils - -LOG = logging.getLogger(__name__) - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal'), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - - -class ConvertedException(webob.exc.WSGIHTTPException): - - def __init__(self, code=0, title="", explanation=""): - self.code = code - self.title = title - self.explanation = explanation - super(ConvertedException, self).__init__() - - -def _cleanse_dict(original): - """Strip all admin_password, new_pass, rescue_pass keys from a dict.""" - return dict((k, v) for k, v in original.iteritems() if not "_pass" in k) - - -def wrap_exception(notifier=None, get_notifier=None): - """This decorator wraps a method to catch any exceptions that may - get thrown. It logs the exception as well as optionally sending - it to the notification system. - """ - def inner(f): - def wrapped(self, context, *args, **kw): - # Don't store self or context in the payload, it now seems to - # contain confidential information. - try: - return f(self, context, *args, **kw) - except Exception as e: - with excutils.save_and_reraise_exception(): - if notifier or get_notifier: - payload = dict(exception=e) - call_dict = safe_utils.getcallargs(f, context, - *args, **kw) - cleansed = _cleanse_dict(call_dict) - payload.update({'args': cleansed}) - - # If f has multiple decorators, they must use - # functools.wraps to ensure the name is - # propagated. 
- event_type = f.__name__ - - (notifier or get_notifier()).error(context, - event_type, - payload) - - return functools.wraps(f)(wrapped) - return inner - - -class RackException(Exception): - - """Base Rack Exception - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. - - """ - msg_fmt = _("An unknown exception occurred.") - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - if not message: - try: - message = self.msg_fmt % kwargs - - except Exception: - exc_info = sys.exc_info() - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception(_('Exception in string format operation')) - for name, value in kwargs.iteritems(): - LOG.error("%s: %s" % (name, value)) - - if CONF.fatal_exception_format_errors: - raise exc_info[0], exc_info[1], exc_info[2] - else: - # at least get the core message out if something happened - message = self.msg_fmt - - super(RackException, self).__init__(message) - - def format_message(self): - # NOTE(mrodden): use the first argument to the python Exception object - # which should be our full RackException message, (see __init__) - return self.args[0] - - -class EncryptionFailure(RackException): - msg_fmt = _("Failed to encrypt text: %(reason)s") - - -class DecryptionFailure(RackException): - msg_fmt = _("Failed to decrypt text: %(reason)s") - - -class VirtualInterfaceCreateException(RackException): - msg_fmt = _("Virtual Interface creation failed") - - -class VirtualInterfaceMacAddressException(RackException): - msg_fmt = _("Creation of virtual interface with " - "unique mac address failed") - - -class GlanceConnectionFailed(RackException): - msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: " - "%(reason)s") - - -class NotAuthorized(RackException): - ec2_code = 'AuthFailure' - msg_fmt = _("Not authorized.") - code = 403 - - -class AdminRequired(NotAuthorized): - msg_fmt = _("User does not have admin privileges") - - -class PolicyNotAuthorized(NotAuthorized): - msg_fmt = _("Policy doesn't allow %(action)s to be performed.") - - -class ImageNotActive(RackException): - # NOTE(jruzicka): IncorrectState is used for volumes only in EC2, - # but it still seems like the most appropriate option. 
- ec2_code = 'IncorrectState' - msg_fmt = _("Image %(image_id)s is not active.") - - -class ImageNotAuthorized(RackException): - msg_fmt = _("Not authorized for image %(image_id)s.") - - -class Invalid(RackException): - msg_fmt = _("Unacceptable parameters.") - code = 400 - - -class InvalidBDM(Invalid): - msg_fmt = _("Block Device Mapping is Invalid.") - - -class InvalidBDMSnapshot(InvalidBDM): - msg_fmt = _("Block Device Mapping is Invalid: " - "failed to get snapshot %(id)s.") - - -class InvalidBDMVolume(InvalidBDM): - msg_fmt = _("Block Device Mapping is Invalid: " - "failed to get volume %(id)s.") - - -class InvalidBDMImage(InvalidBDM): - msg_fmt = _("Block Device Mapping is Invalid: " - "failed to get image %(id)s.") - - -class InvalidBDMBootSequence(InvalidBDM): - msg_fmt = _("Block Device Mapping is Invalid: " - "Boot sequence for the instance " - "and image/block device mapping " - "combination is not valid.") - - -class InvalidBDMLocalsLimit(InvalidBDM): - msg_fmt = _("Block Device Mapping is Invalid: " - "You specified more local devices than the " - "limit allows") - - -class InvalidBDMEphemeralSize(InvalidBDM): - msg_fmt = _("Ephemeral disks requested are larger than " - "the instance type allows.") - - -class InvalidBDMSwapSize(InvalidBDM): - msg_fmt = _("Swap drive requested is larger than instance type allows.") - - -class InvalidBDMFormat(InvalidBDM): - msg_fmt = _("Block Device Mapping is Invalid: " - "%(details)s") - - -class InvalidBDMForLegacy(InvalidBDM): - msg_fmt = _("Block Device Mapping cannot " - "be converted to legacy format. ") - - -class InvalidAttribute(Invalid): - msg_fmt = _("Attribute not supported: %(attr)s") - - -class ValidationError(Invalid): - msg_fmt = "%(detail)s" - - -class VolumeUnattached(Invalid): - ec2_code = 'IncorrectState' - msg_fmt = _("Volume %(volume_id)s is not attached to anything") - - -class VolumeNotCreated(RackException): - msg_fmt = _("Volume %(volume_id)s did not finish being created" - " even after we waited %(seconds)s seconds or %(attempts)s" - " attempts.") - - -class InvalidKeypair(Invalid): - ec2_code = 'InvalidKeyPair.Format' - msg_fmt = _("Keypair data is invalid: %(reason)s") - - -class InvalidRequest(Invalid): - msg_fmt = _("The request is invalid.") - - -class InvalidInput(Invalid): - msg_fmt = _("Invalid input received: %(reason)s") - - -class InvalidVolume(Invalid): - ec2_code = 'UnsupportedOperation' - msg_fmt = _("Invalid volume: %(reason)s") - - -class InvalidVolumeAccessMode(Invalid): - msg_fmt = _("Invalid volume access mode") + ": %(access_mode)s" - - -class InvalidMetadata(Invalid): - msg_fmt = _("Invalid metadata: %(reason)s") - - -class InvalidMetadataSize(Invalid): - msg_fmt = _("Invalid metadata size: %(reason)s") - - -class InvalidPortRange(Invalid): - ec2_code = 'InvalidParameterValue' - msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") - - -class InvalidIpProtocol(Invalid): - msg_fmt = _("Invalid IP protocol %(protocol)s.") - - -class InvalidContentType(Invalid): - msg_fmt = _("Invalid content type %(content_type)s.") - - -class InvalidCidr(Invalid): - msg_fmt = _("Invalid cidr %(cidr)s.") - - -class InvalidUnicodeParameter(Invalid): - msg_fmt = _("Invalid Parameter: " - "Unicode is not supported by the current database.") - - -class InvalidParameterValue(Invalid): - ec2_code = 'InvalidParameterValue' - msg_fmt = _("%(err)s") - - -class InvalidAggregateAction(Invalid): - msg_fmt = _("Cannot perform action '%(action)s' on aggregate " - "%(aggregate_id)s. 
Reason: %(reason)s.") - - -class InvalidGroup(Invalid): - msg_fmt = _("Group not valid. Reason: %(reason)s") - - -class InvalidSortKey(Invalid): - msg_fmt = _("Sort key supplied was not valid.") - - -class InstanceInvalidState(Invalid): - msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " - "%(method)s while the instance is in this state.") - - -class InstanceNotRunning(Invalid): - msg_fmt = _("Instance %(instance_id)s is not running.") - - -class InstanceNotInRescueMode(Invalid): - msg_fmt = _("Instance %(instance_id)s is not in rescue mode") - - -class InstanceNotRescuable(Invalid): - msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s") - - -class InstanceNotReady(Invalid): - msg_fmt = _("Instance %(instance_id)s is not ready") - - -class InstanceSuspendFailure(Invalid): - msg_fmt = _("Failed to suspend instance: %(reason)s") - - -class InstanceResumeFailure(Invalid): - msg_fmt = _("Failed to resume instance: %(reason)s") - - -class InstancePowerOnFailure(Invalid): - msg_fmt = _("Failed to power on instance: %(reason)s") - - -class InstancePowerOffFailure(Invalid): - msg_fmt = _("Failed to power off instance: %(reason)s") - - -class InstanceRebootFailure(Invalid): - msg_fmt = _("Failed to reboot instance: %(reason)s") - - -class InstanceTerminationFailure(Invalid): - msg_fmt = _("Failed to terminate instance: %(reason)s") - - -class InstanceDeployFailure(Invalid): - msg_fmt = _("Failed to deploy instance: %(reason)s") - - -class MultiplePortsNotApplicable(Invalid): - msg_fmt = _("Failed to launch instances: %(reason)s") - - -class ServiceUnavailable(Invalid): - msg_fmt = _("Service is unavailable at this time.") - - -class ComputeResourcesUnavailable(ServiceUnavailable): - msg_fmt = _("Insufficient compute resources: %(reason)s.") - - -class HypervisorUnavailable(RackException): - msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s") - - -class ComputeServiceUnavailable(ServiceUnavailable): - msg_fmt = _("Compute service of %(host)s is unavailable at this time.") - - -class ComputeServiceInUse(RackException): - msg_fmt = _("Compute service of %(host)s is still in use.") - - -class UnableToMigrateToSelf(Invalid): - msg_fmt = _("Unable to migrate instance (%(instance_id)s) " - "to current host (%(host)s).") - - -class InvalidHypervisorType(Invalid): - msg_fmt = _("The supplied hypervisor type of is invalid.") - - -class DestinationHypervisorTooOld(Invalid): - msg_fmt = _("The instance requires a newer hypervisor version than " - "has been provided.") - - -class DestinationDiskExists(Invalid): - msg_fmt = _("The supplied disk path (%(path)s) already exists, " - "it is expected not to exist.") - - -class InvalidDevicePath(Invalid): - msg_fmt = _("The supplied device path (%(path)s) is invalid.") - - -class DevicePathInUse(Invalid): - msg_fmt = _("The supplied device path (%(path)s) is in use.") - code = 409 - - -class DeviceIsBusy(Invalid): - msg_fmt = _("The supplied device (%(device)s) is busy.") - - -class InvalidCPUInfo(Invalid): - msg_fmt = _("Unacceptable CPU info: %(reason)s") - - -class InvalidIpAddressError(Invalid): - msg_fmt = _("%(address)s is not a valid IP v4/6 address.") - - -class InvalidVLANTag(Invalid): - msg_fmt = _("VLAN tag is not appropriate for the port group " - "%(bridge)s. 
Expected VLAN tag is %(tag)s, " - "but the one associated with the port group is %(pgroup)s.") - - -class InvalidVLANPortGroup(Invalid): - msg_fmt = _("vSwitch which contains the port group %(bridge)s is " - "not associated with the desired physical adapter. " - "Expected vSwitch is %(expected)s, but the one associated " - "is %(actual)s.") - - -class InvalidDiskFormat(Invalid): - msg_fmt = _("Disk format %(disk_format)s is not acceptable") - - -class InvalidDiskInfo(Invalid): - msg_fmt = _("Disk info file is invalid: %(reason)s") - - -class DiskInfoReadWriteFail(Invalid): - msg_fmt = _("Failed to read or write disk info file: %(reason)s") - - -class ImageUnacceptable(Invalid): - msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s") - - -class InstanceUnacceptable(Invalid): - msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s") - - -class InvalidEc2Id(Invalid): - msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.") - - -class InvalidUUID(Invalid): - msg_fmt = _("Expected a uuid but received %(uuid)s.") - - -class InvalidID(Invalid): - msg_fmt = _("Invalid ID received %(id)s.") - - -class ConstraintNotMet(RackException): - msg_fmt = _("Constraint not met.") - code = 412 - - -class NotFound(RackException): - msg_fmt = _("Resource could not be found.") - code = 404 - - -class AgentBuildNotFound(NotFound): - msg_fmt = _("No agent-build associated with id %(id)s.") - - -class AgentBuildExists(RackException): - msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s " - "architecture %(architecture)s exists.") - - -class VolumeNotFound(NotFound): - ec2_code = 'InvalidVolumeID.NotFound' - msg_fmt = _("Volume %(volume_id)s could not be found.") - - -class VolumeBDMNotFound(NotFound): - msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.") - - -class SnapshotNotFound(NotFound): - ec2_code = 'InvalidSnapshotID.NotFound' - msg_fmt = _("Snapshot %(snapshot_id)s could not be found.") - - -class DiskNotFound(NotFound): - msg_fmt = _("No disk at %(location)s") - - -class VolumeDriverNotFound(NotFound): - msg_fmt = _("Could not find a handler for %(driver_type)s volume.") - - -class InvalidImageRef(Invalid): - msg_fmt = _("Invalid image href %(image_href)s.") - - -class AutoDiskConfigDisabledByImage(Invalid): - msg_fmt = _("Requested image %(image)s " - "has automatic disk resize disabled.") - - -class ImageNotFound(NotFound): - msg_fmt = _("Image %(image_id)s could not be found.") - - -class PreserveEphemeralNotSupported(Invalid): - msg_fmt = _("The current driver does not support " - "preserving ephemeral partitions.") - - -class ImageNotFoundEC2(ImageNotFound): - msg_fmt = _("Image %(image_id)s could not be found. The rack EC2 API " - "assigns image ids dynamically when they are listed for the " - "first time. 
Have you listed image ids since adding this " - "image?") - - -class ProjectNotFound(NotFound): - msg_fmt = _("Project %(project_id)s could not be found.") - - -class StorageRepositoryNotFound(NotFound): - msg_fmt = _("Cannot find SR to read/write VDI.") - - -class NetworkDuplicated(Invalid): - msg_fmt = _("Network %(network_id)s is duplicated.") - - -class NetworkInUse(RackException): - msg_fmt = _("Network %(network_id)s is still in use.") - - -class NetworkNotCreated(RackException): - msg_fmt = _("%(req)s is required to create a network.") - - -class NetworkNotFound(NotFound): - msg_fmt = _("Network %(network_id)s could not be found.") - - -class PortNotFound(NotFound): - msg_fmt = _("Port id %(port_id)s could not be found.") - - -class NetworkNotFoundForBridge(NetworkNotFound): - msg_fmt = _("Network could not be found for bridge %(bridge)s") - - -class NetworkNotFoundForUUID(NetworkNotFound): - msg_fmt = _("Network could not be found for uuid %(uuid)s") - - -class NetworkNotFoundForCidr(NetworkNotFound): - msg_fmt = _("Network could not be found with cidr %(cidr)s.") - - -class NetworkNotFoundForInstance(NetworkNotFound): - msg_fmt = _("Network could not be found for instance %(instance_id)s.") - - -class NoMoreNetworks(RackException): - msg_fmt = _("No more available networks.") - - -class NetworkNotFoundForProject(NotFound): - msg_fmt = _("Either network uuid %(network_uuid)s is not present or " - "is not assigned to the project %(project_id)s.") - - -class NetworkAmbiguous(Invalid): - msg_fmt = _("More than one possible network found. Specify " - "network ID(s) to select which one(s) to connect to,") - - -class NetworkRequiresSubnet(Invalid): - msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot" - " instances on.") - - -class ExternalNetworkAttachForbidden(NotAuthorized): - msg_fmt = _("It is not allowed to create an interface on " - "external network %(network_uuid)s") - - -class DatastoreNotFound(NotFound): - msg_fmt = _("Could not find the datastore reference(s) which the VM uses.") - - -class PortInUse(Invalid): - msg_fmt = _("Port %(port_id)s is still in use.") - - -class PortRequiresFixedIP(Invalid): - msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.") - - -class PortNotUsable(Invalid): - msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.") - - -class PortNotFree(Invalid): - msg_fmt = _("No free port available for instance %(instance)s.") - - -class FixedIpExists(RackException): - msg_fmt = _("Fixed ip %(address)s already exists.") - - -class FixedIpNotFound(NotFound): - msg_fmt = _("No fixed IP associated with id %(id)s.") - - -class FixedIpNotFoundForAddress(FixedIpNotFound): - msg_fmt = _("Fixed ip not found for address %(address)s.") - - -class FixedIpNotFoundForInstance(FixedIpNotFound): - msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.") - - -class FixedIpNotFoundForNetworkHost(FixedIpNotFound): - msg_fmt = _("Network host %(host)s has zero fixed ips " - "in network %(network_id)s.") - - -class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): - msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") - - -class FixedIpNotFoundForNetwork(FixedIpNotFound): - msg_fmt = _("Fixed IP address (%(address)s) does not exist in " - "network (%(network_uuid)s).") - - -class FixedIpAlreadyInUse(RackException): - msg_fmt = _("Fixed IP address %(address)s is already in use on instance " - "%(instance_uuid)s.") - - -class FixedIpAssociatedWithMultipleInstances(RackException): - msg_fmt = 
_("More than one instance is associated with fixed ip address " - "'%(address)s'.") - - -class FixedIpInvalid(Invalid): - msg_fmt = _("Fixed IP address %(address)s is invalid.") - - -class NoMoreFixedIps(RackException): - ec2_code = 'UnsupportedOperation' - msg_fmt = _("Zero fixed ips available.") - - -class NoFixedIpsDefined(NotFound): - msg_fmt = _("Zero fixed ips could be found.") - - -class FloatingIpExists(RackException): - msg_fmt = _("Floating ip %(address)s already exists.") - - -class FloatingIpNotFound(NotFound): - ec2_code = "UnsupportedOperation" - msg_fmt = _("Floating ip not found for id %(id)s.") - - -class FloatingIpDNSExists(Invalid): - msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.") - - -class FloatingIpNotFoundForAddress(FloatingIpNotFound): - msg_fmt = _("Floating ip not found for address %(address)s.") - - -class FloatingIpNotFoundForHost(FloatingIpNotFound): - msg_fmt = _("Floating ip not found for host %(host)s.") - - -class FloatingIpMultipleFoundForAddress(RackException): - msg_fmt = _("Multiple floating ips are found for address %(address)s.") - - -class FloatingIpPoolNotFound(NotFound): - msg_fmt = _("Floating ip pool not found.") - safe = True - - -class NoMoreFloatingIps(FloatingIpNotFound): - msg_fmt = _("Zero floating ips available.") - safe = True - - -class FloatingIpAssociated(RackException): - ec2_code = "UnsupportedOperation" - msg_fmt = _("Floating ip %(address)s is associated.") - - -class FloatingIpNotAssociated(RackException): - msg_fmt = _("Floating ip %(address)s is not associated.") - - -class NoFloatingIpsDefined(NotFound): - msg_fmt = _("Zero floating ips exist.") - - -class NoFloatingIpInterface(NotFound): - ec2_code = "UnsupportedOperation" - msg_fmt = _("Interface %(interface)s not found.") - - -class CannotDisassociateAutoAssignedFloatingIP(RackException): - ec2_code = "UnsupportedOperation" - msg_fmt = _("Cannot disassociate auto assigned floating ip") - - -class ServiceNotFound(NotFound): - msg_fmt = _("Service %(service_id)s could not be found.") - - -class ServiceBinaryExists(RackException): - msg_fmt = _("Service with host %(host)s binary %(binary)s exists.") - - -class ServiceTopicExists(RackException): - msg_fmt = _("Service with host %(host)s topic %(topic)s exists.") - - -class HostNotFound(NotFound): - msg_fmt = _("Host %(host)s could not be found.") - - -class ComputeHostNotFound(HostNotFound): - msg_fmt = _("Compute host %(host)s could not be found.") - - -class HostBinaryNotFound(NotFound): - msg_fmt = _("Could not find binary %(binary)s on host %(host)s.") - - -class InvalidReservationExpiration(Invalid): - msg_fmt = _("Invalid reservation expiration %(expire)s.") - - -class InvalidQuotaValue(Invalid): - msg_fmt = _("Change would make usage less than 0 for the following " - "resources: %(unders)s") - - -class QuotaNotFound(NotFound): - msg_fmt = _("Quota could not be found") - - -class QuotaExists(RackException): - msg_fmt = _("Quota exists for project %(project_id)s, " - "resource %(resource)s") - - -class QuotaResourceUnknown(QuotaNotFound): - msg_fmt = _("Unknown quota resources %(unknown)s.") - - -class ProjectUserQuotaNotFound(QuotaNotFound): - msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s " - "could not be found.") - - -class ProjectQuotaNotFound(QuotaNotFound): - msg_fmt = _("Quota for project %(project_id)s could not be found.") - - -class QuotaClassNotFound(QuotaNotFound): - msg_fmt = _("Quota class %(class_name)s could not be found.") - - -class 
QuotaUsageNotFound(QuotaNotFound):
-    msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
-
-
-class ReservationNotFound(QuotaNotFound):
-    msg_fmt = _("Quota reservation %(uuid)s could not be found.")
-
-
-class OverQuota(RackException):
-    msg_fmt = _("Quota exceeded for resources: %(overs)s")
-
-
-class SecurityGroupExists(Invalid):
-    ec2_code = 'InvalidGroup.Duplicate'
-    msg_fmt = _("Security group %(security_group_name)s already exists "
                "for project %(project_id)s.")
-
-
-class SecurityGroupExistsForInstance(Invalid):
-    msg_fmt = _("Security group %(security_group_id)s is already associated"
-                " with the instance %(instance_id)s.")
-
-
-class SecurityGroupNotExistsForInstance(Invalid):
-    msg_fmt = _("Security group %(security_group_id)s is not associated with"
-                " the instance %(instance_id)s.")
-
-
-class SecurityGroupDefaultRuleNotFound(Invalid):
-    msg_fmt = _("Security group default rule (%(rule_id)s) not found.")
-
-
-class SecurityGroupCannotBeApplied(Invalid):
-    msg_fmt = _("Network requires port_security_enabled and subnet associated"
-                " in order to apply security groups.")
-
-
-class SecurityGroupRuleExists(Invalid):
-    ec2_code = 'InvalidPermission.Duplicate'
-    msg_fmt = _("Rule already exists in group: %(rule)s")
-
-
-class NoUniqueMatch(RackException):
-    msg_fmt = _("No Unique Match Found.")
-    code = 409
-
-
-class MigrationNotFound(NotFound):
-    msg_fmt = _("Migration %(migration_id)s could not be found.")
-
-
-class MigrationNotFoundByStatus(MigrationNotFound):
-    msg_fmt = _("Migration not found for instance %(instance_id)s "
-                "with status %(status)s.")
-
-
-class ConsolePoolNotFound(NotFound):
-    msg_fmt = _("Console pool %(pool_id)s could not be found.")
-
-
-class ConsolePoolExists(RackException):
-    msg_fmt = _("Console pool with host %(host)s, console_type "
-                "%(console_type)s and compute_host %(compute_host)s "
-                "already exists.")
-
-
-class ConsolePoolNotFoundForHostType(NotFound):
-    msg_fmt = _("Console pool of type %(console_type)s "
-                "for compute host %(compute_host)s "
-                "on proxy host %(host)s not found.")
-
-
-class ConsoleNotFound(NotFound):
-    msg_fmt = _("Console %(console_id)s could not be found.")
-
-
-class ConsoleNotFoundForInstance(ConsoleNotFound):
-    msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
-
-
-class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
-    msg_fmt = _("Console for instance %(instance_uuid)s "
-                "in pool %(pool_id)s could not be found.")
-
-
-class ConsoleTypeInvalid(Invalid):
-    msg_fmt = _("Invalid console type %(console_type)s")
-
-
-class ConsoleTypeUnavailable(Invalid):
-    msg_fmt = _("Unavailable console type %(console_type)s.")
-
-
-class ConsolePortRangeExhausted(RackException):
-    msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
-                "exhausted.")
-
-
-class FlavorNotFound(NotFound):
-    msg_fmt = _("Flavor %(flavor_id)s could not be found.")
-
-
-class FlavorNotFoundByName(FlavorNotFound):
-    msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
-
-
-class FlavorAccessNotFound(NotFound):
-    msg_fmt = _("Flavor access not found for %(flavor_id)s / "
-                "%(project_id)s combination.")
-
-
-class CellNotFound(NotFound):
-    msg_fmt = _("Cell %(cell_name)s doesn't exist.")
-
-
-class CellExists(RackException):
-    msg_fmt = _("Cell with name %(name)s already exists.")
-
-
-class CellRoutingInconsistency(RackException):
-    msg_fmt = _("Inconsistency in cell routing: %(reason)s")
-
-
-class CellServiceAPIMethodNotFound(NotFound):
-    msg_fmt = _("Service API method not 
found: %(detail)s") - - -class CellTimeout(NotFound): - msg_fmt = _("Timeout waiting for response from cell") - - -class CellMaxHopCountReached(RackException): - msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s") - - -class NoCellsAvailable(RackException): - msg_fmt = _("No cells available matching scheduling criteria.") - - -class CellsUpdateUnsupported(RackException): - msg_fmt = _("Cannot update cells configuration file.") - - -class InstanceUnknownCell(NotFound): - msg_fmt = _("Cell is not known for instance %(instance_uuid)s") - - -class SchedulerHostFilterNotFound(NotFound): - msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.") - - -class FlavorExtraSpecsNotFound(NotFound): - msg_fmt = _("Flavor %(flavor_id)s has no extra specs with " - "key %(extra_specs_key)s.") - - -class ComputeHostMetricNotFound(NotFound): - msg_fmt = _("Metric %(name)s could not be found on the compute " - "host node %(host)s.%(node)s.") - - -class FileNotFound(NotFound): - msg_fmt = _("File %(file_path)s could not be found.") - - -class NoFilesFound(NotFound): - msg_fmt = _("Zero files could be found.") - - -class SwitchNotFoundForNetworkAdapter(NotFound): - msg_fmt = _("Virtual switch associated with the " - "network adapter %(adapter)s not found.") - - -class NetworkAdapterNotFound(NotFound): - msg_fmt = _("Network adapter %(adapter)s could not be found.") - - -class ClassNotFound(NotFound): - msg_fmt = _("Class %(class_name)s could not be found: %(exception)s") - - -class NotAllowed(RackException): - msg_fmt = _("Action not allowed.") - - -class ImageRotationNotAllowed(RackException): - msg_fmt = _("Rotation is not allowed for snapshots") - - -class RotationRequiredForBackup(RackException): - msg_fmt = _("Rotation param is required for backup image_type") - - -class KeyPairExists(RackException): - ec2_code = 'InvalidKeyPair.Duplicate' - msg_fmt = _("Key pair '%(key_name)s' already exists.") - - -class InstanceExists(RackException): - msg_fmt = _("Instance %(name)s already exists.") - - -class FlavorExists(RackException): - msg_fmt = _("Flavor with name %(name)s already exists.") - - -class FlavorIdExists(RackException): - msg_fmt = _("Flavor with ID %(flavor_id)s already exists.") - - -class FlavorAccessExists(RackException): - msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s " - "and project %(project_id)s combination.") - - -class InvalidSharedStorage(RackException): - msg_fmt = _("%(path)s is not on shared storage: %(reason)s") - - -class InvalidLocalStorage(RackException): - msg_fmt = _("%(path)s is not on local storage: %(reason)s") - - -class MigrationError(RackException): - msg_fmt = _("Migration error: %(reason)s") - - -class MigrationPreCheckError(MigrationError): - msg_fmt = _("Migration pre-check error: %(reason)s") - - -class MalformedRequestBody(RackException): - msg_fmt = _("Malformed message body: %(reason)s") - - -class ConfigNotFound(RackException): - msg_fmt = _("Could not find config at %(path)s") - - -class PasteAppNotFound(RackException): - msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") - - -class CannotResizeToSameFlavor(RackException): - msg_fmt = _("When resizing, instances must change flavor!") - - -class ResizeError(RackException): - msg_fmt = _("Resize error: %(reason)s") - - -class CannotResizeDisk(RackException): - msg_fmt = _("Server disk was unable to be resized because: %(reason)s") - - -class FlavorMemoryTooSmall(RackException): - msg_fmt = _("Flavor's memory is too small for requested image.") - 
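Every class in this deleted exception module follows the same convention: a subclass overrides only msg_fmt, and the final message comes from interpolating the keyword arguments passed at raise time into the named %(...)s placeholders. Below is a minimal, self-contained sketch of that mechanism, assuming a base class along the lines of the RackException defined earlier in this file; the *Sketch names are hypothetical and exist only for illustration.

class RackExceptionSketch(Exception):
    # Hypothetical stand-in for the real RackException base class; the
    # actual base also adds logging and safer handling of format errors.
    msg_fmt = "An unknown exception occurred."

    def __init__(self, message=None, **kwargs):
        if not message:
            # Fill the named placeholders in msg_fmt from the kwargs
            # supplied at raise time.
            message = self.msg_fmt % kwargs
        super(RackExceptionSketch, self).__init__(message)


class FlavorNotFoundSketch(RackExceptionSketch):
    msg_fmt = "Flavor %(flavor_id)s could not be found."


try:
    raise FlavorNotFoundSketch(flavor_id='m1.tiny')
except RackExceptionSketch as exc:
    print(exc)  # -> Flavor m1.tiny could not be found.

This is why a misspelled placeholder such as (%rule_id)s is a real bug: the interpolation raises at raise time instead of producing the intended message.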
- -class FlavorDiskTooSmall(RackException): - msg_fmt = _("Flavor's disk is too small for requested image.") - - -class InsufficientFreeMemory(RackException): - msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.") - - -class NoValidHost(RackException): - msg_fmt = _("No valid host was found. %(reason)s") - - -class QuotaError(RackException): - ec2_code = 'ResourceLimitExceeded' - msg_fmt = _("Quota exceeded: code=%(code)s") - code = 413 - headers = {'Retry-After': 0} - safe = True - - -class TooManyInstances(QuotaError): - msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s," - " but already used %(used)d of %(allowed)d %(resource)s") - - -class FloatingIpLimitExceeded(QuotaError): - msg_fmt = _("Maximum number of floating ips exceeded") - - -class FixedIpLimitExceeded(QuotaError): - msg_fmt = _("Maximum number of fixed ips exceeded") - - -class MetadataLimitExceeded(QuotaError): - msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d") - - -class OnsetFileLimitExceeded(QuotaError): - msg_fmt = _("Personality file limit exceeded") - - -class OnsetFilePathLimitExceeded(QuotaError): - msg_fmt = _("Personality file path too long") - - -class OnsetFileContentLimitExceeded(QuotaError): - msg_fmt = _("Personality file content too long") - - -class KeypairLimitExceeded(QuotaError): - msg_fmt = _("Maximum number of key pairs exceeded") - - -class SecurityGroupLimitExceeded(QuotaError): - ec2_code = 'SecurityGroupLimitExceeded' - msg_fmt = _("Maximum number of security groups or rules exceeded") - - -class PortLimitExceeded(QuotaError): - msg_fmt = _("Maximum number of ports exceeded") - - -class AggregateError(RackException): - msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' " - "caused an error: %(reason)s.") - - -class AggregateNotFound(NotFound): - msg_fmt = _("Aggregate %(aggregate_id)s could not be found.") - - -class AggregateNameExists(RackException): - msg_fmt = _("Aggregate %(aggregate_name)s already exists.") - - -class AggregateHostNotFound(NotFound): - msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.") - - -class AggregateMetadataNotFound(NotFound): - msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with " - "key %(metadata_key)s.") - - -class AggregateHostExists(RackException): - msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.") - - -class FlavorCreateFailed(RackException): - msg_fmt = _("Unable to create flavor") - - -class InstancePasswordSetFailed(RackException): - msg_fmt = _("Failed to set admin password on %(instance)s " - "because %(reason)s") - safe = True - - -class DuplicateVlan(RackException): - msg_fmt = _("Detected existing vlan with id %(vlan)d") - - -class CidrConflict(RackException): - msg_fmt = _("There was a conflict when trying to complete your request.") - code = 409 - - -class InstanceNotFound(NotFound): - ec2_code = 'InvalidInstanceID.NotFound' - msg_fmt = _("Instance %(instance_id)s could not be found.") - - -class InstanceInfoCacheNotFound(NotFound): - msg_fmt = _("Info cache for instance %(instance_uuid)s could not be " - "found.") - - -class NodeNotFound(NotFound): - msg_fmt = _("Node %(node_id)s could not be found.") - - -class NodeNotFoundByUUID(NotFound): - msg_fmt = _("Node with UUID %(node_uuid)s could not be found.") - - -class MarkerNotFound(NotFound): - msg_fmt = _("Marker %(marker)s could not be found.") - - -class InvalidInstanceIDMalformed(Invalid): - ec2_code = 'InvalidInstanceID.Malformed' - msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").") - - 
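Besides msg_fmt, QuotaError above also carries HTTP-facing attributes: code, headers, and safe. An API fault-handling layer can read these to build a response, exposing the exception message only when safe is True. The sketch below is hypothetical glue code under that assumption, not RACK's actual middleware; only the attribute names (code, headers, safe) come from the definitions in this file.

def fault_response(exc):
    # Hypothetical helper: map an exception from this module onto a
    # (status, headers, body) triple for an HTTP fault response.
    status = getattr(exc, 'code', 500)           # e.g. 413 for QuotaError
    headers = dict(getattr(exc, 'headers', {}))  # e.g. {'Retry-After': 0}
    if getattr(exc, 'safe', False):
        # Only exceptions marked safe=True may expose their message to users.
        body = str(exc)
    else:
        body = 'An unknown error occurred.'
    return status, headers, body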
-class CouldNotFetchImage(RackException): - msg_fmt = _("Could not fetch image %(image_id)s") - - -class CouldNotUploadImage(RackException): - msg_fmt = _("Could not upload image %(image_id)s") - - -class TaskAlreadyRunning(RackException): - msg_fmt = _("Task %(task_name)s is already running on host %(host)s") - - -class TaskNotRunning(RackException): - msg_fmt = _("Task %(task_name)s is not running on host %(host)s") - - -class InstanceIsLocked(InstanceInvalidState): - msg_fmt = _("Instance %(instance_uuid)s is locked") - - -class ConfigDriveInvalidValue(Invalid): - msg_fmt = _("Invalid value for Config Drive option: %(option)s") - - -class ConfigDriveMountFailed(RackException): - msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. " - "Error: %(error)s") - - -class ConfigDriveUnknownFormat(RackException): - msg_fmt = _("Unknown config drive format %(format)s. Select one of " - "iso9660 or vfat.") - - -class InterfaceAttachFailed(Invalid): - msg_fmt = _("Failed to attach network adapter device to %(instance)s") - - -class InterfaceDetachFailed(Invalid): - msg_fmt = _("Failed to detach network adapter device from %(instance)s") - - -class InstanceUserDataTooLarge(RackException): - msg_fmt = _("User data too large. User data must be no larger than " - "%(maxsize)s bytes once base64 encoded. Your data is " - "%(length)d bytes") - - -class InstanceUserDataMalformed(RackException): - msg_fmt = _("User data needs to be valid base 64.") - - -class UnexpectedTaskStateError(RackException): - msg_fmt = _("Unexpected task state: expecting %(expected)s but " - "the actual state is %(actual)s") - - -class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError): - pass - - -class InstanceActionNotFound(RackException): - msg_fmt = _("Action for request_id %(request_id)s on instance" - " %(instance_uuid)s not found") - - -class InstanceActionEventNotFound(RackException): - msg_fmt = _("Event %(event)s not found for action id %(action_id)s") - - -class UnexpectedVMStateError(RackException): - msg_fmt = _("Unexpected VM state: expecting %(expected)s but " - "the actual state is %(actual)s") - - -class CryptoCAFileNotFound(FileNotFound): - msg_fmt = _("The CA file for %(project)s could not be found") - - -class CryptoCRLFileNotFound(FileNotFound): - msg_fmt = _("The CRL file for %(project)s could not be found") - - -class InstanceRecreateNotSupported(Invalid): - msg_fmt = _('Instance recreate is not supported.') - - -class ServiceGroupUnavailable(RackException): - msg_fmt = _("The service from servicegroup driver %(driver)s is " - "temporarily unavailable.") - - -class DBNotAllowed(RackException): - msg_fmt = _('%(binary)s attempted direct database access which is ' - 'not allowed by policy') - - -class UnsupportedVirtType(Invalid): - msg_fmt = _("Virtualization type '%(virt)s' is not supported by " - "this compute driver") - - -class UnsupportedHardware(Invalid): - msg_fmt = _("Requested hardware '%(model)s' is not supported by " - "the '%(virt)s' virt driver") - - -class Base64Exception(RackException): - msg_fmt = _("Invalid Base 64 data for file %(path)s") - - -class BuildAbortException(RackException): - msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s") - - -class RescheduledException(RackException): - msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: " - "%(reason)s") - - -class ShadowTableExists(RackException): - msg_fmt = _("Shadow table with name %(name)s already exists.") - - -class InstanceFaultRollback(RackException): - - def 
__init__(self, inner_exception=None): - message = _("Instance rollback performed due to: %s") - self.inner_exception = inner_exception - super(InstanceFaultRollback, self).__init__(message % inner_exception) - - -class UnsupportedObjectError(RackException): - msg_fmt = _('Unsupported object type %(objtype)s') - - -class OrphanedObjectError(RackException): - msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object') - - -class IncompatibleObjectVersion(RackException): - msg_fmt = _('Version %(objver)s of %(objname)s is not supported') - - -class ObjectActionError(RackException): - msg_fmt = _('Object action %(action)s failed because: %(reason)s') - - -class CoreAPIMissing(RackException): - msg_fmt = _("Core API extensions are missing: %(missing_apis)s") - - -class AgentError(RackException): - msg_fmt = _('Error during following call to agent: %(method)s') - - -class AgentTimeout(AgentError): - msg_fmt = _('Unable to contact guest agent. ' - 'The following call timed out: %(method)s') - - -class AgentNotImplemented(AgentError): - msg_fmt = _('Agent does not support the call: %(method)s') - - -class InstanceGroupNotFound(NotFound): - msg_fmt = _("Instance group %(group_uuid)s could not be found.") - - -class InstanceGroupIdExists(RackException): - msg_fmt = _("Instance group %(group_uuid)s already exists.") - - -class InstanceGroupMetadataNotFound(NotFound): - msg_fmt = _("Instance group %(group_uuid)s has no metadata with " - "key %(metadata_key)s.") - - -class InstanceGroupMemberNotFound(NotFound): - msg_fmt = _("Instance group %(group_uuid)s has no member with " - "id %(instance_id)s.") - - -class InstanceGroupPolicyNotFound(NotFound): - msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.") - - -class PluginRetriesExceeded(RackException): - msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.") - - -class ImageDownloadModuleError(RackException): - msg_fmt = _("There was an error with the download module %(module)s. " - "%(reason)s") - - -class ImageDownloadModuleMetaDataError(ImageDownloadModuleError): - msg_fmt = _("The metadata for this location will not work with this " - "module %(module)s. 
%(reason)s.") - - -class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError): - msg_fmt = _("The method %(method_name)s is not implemented.") - - -class ImageDownloadModuleConfigurationError(ImageDownloadModuleError): - msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.") - - -class ResourceMonitorError(RackException): - msg_fmt = _("Error when creating resource monitor: %(monitor)s") - - -class PciDeviceWrongAddressFormat(RackException): - msg_fmt = _("The PCI address %(address)s has an incorrect format.") - - -class PciDeviceNotFoundById(NotFound): - msg_fmt = _("PCI device %(id)s not found") - - -class PciDeviceNotFound(RackException): - msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.") - - -class PciDeviceInvalidStatus(RackException): - msg_fmt = _( - "PCI device %(compute_node_id)s:%(address)s is %(status)s " - "instead of %(hopestatus)s") - - -class PciDeviceInvalidOwner(RackException): - msg_fmt = _( - "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s " - "instead of %(hopeowner)s") - - -class PciDeviceRequestFailed(RackException): - msg_fmt = _( - "PCI device request (%requests)s failed") - - -class PciDevicePoolEmpty(RackException): - msg_fmt = _( - "Attempt to consume PCI device %(compute_node_id)s:%(address)s " - "from empty pool") - - -class PciInvalidAlias(RackException): - msg_fmt = _("Invalid PCI alias definition: %(reason)s") - - -class PciRequestAliasNotDefined(RackException): - msg_fmt = _("PCI alias %(alias)s is not defined") - - -class MissingParameter(RackException): - ec2_code = 'MissingParameter' - msg_fmt = _("Not enough parameters: %(reason)s") - code = 400 - - -class PciConfigInvalidWhitelist(Invalid): - msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s") - - -class PciTrackerInvalidNodeId(RackException): - msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s") - - -class InternalError(RackException): - ec2_code = 'InternalError' - msg_fmt = "%(err)s" - - -class PciDevicePrepareFailed(RackException): - msg_fmt = _("Failed to prepare PCI device %(id)s for instance " - "%(instance_uuid)s: %(reason)s") - - -class PciDeviceDetachFailed(RackException): - msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s") - - -class PciDeviceUnsupportedHypervisor(RackException): - msg_fmt = _("%(type)s hypervisor does not support PCI devices") - - -class KeyManagerError(RackException): - msg_fmt = _("Key manager error: %(reason)s") - - -class InvalidVideoMode(Invalid): - msg_fmt = _("Provided video model (%(model)s) is not supported.") - - -class RngDeviceNotExist(Invalid): - msg_fmt = _("The provided RNG device path: (%(path)s) is not " - "present on the host.") - - -class RequestedVRamTooHigh(RackException): - msg_fmt = _("The requested amount of video memory %(req_vram)d is higher " - "than the maximum allowed by flavor %(max_vram)d.") - - -class InvalidWatchdogAction(Invalid): - msg_fmt = _("Provided watchdog action (%(action)s) is not supported.") - - -class NoBlockMigrationForConfigDriveInLibVirt(RackException): - msg_fmt = _("Block migration of instances with config drives is not " - "supported in libvirt.") - - -class ServiceCatalogException(RackException): - msg_fmt = _("Invalid service catalog service: %(service_type)s'") - - -class GroupCreateFailed(RackException): - msg_fmt = _("Unable to create group") - - -class GroupIndexFailed(RackException): - msg_fmt = _("Unable to index group") - - -class GroupNotFound(NotFound): - msg_fmt = _("Group %(gid)s could not be found.") - - -class 
GroupInUse(RackException): - msg_fmt = _("Group %(gid)s is still in use.") - - -class GroupDeleteFailed(RackException): - msg_fmt = _('Unable to delete Group') - - -class NetworkCreateFailed(RackException): - msg_fmt = _("Unable to create network") - - -class NetworkIndexFailed(RackException): - msg_fmt = _("Unable to index network") - - -class NetworkShowFailed(RackException): - msg_fmt = _("Unable to show network") - - -class NetworkDeleteFailed(RackException): - msg_fmt = _("Unable to delete network") - - -class KeypairNotFound(NotFound): - msg_fmt = _("Keypair %(keypair_id)s could not be found.") - - -class KeypairCreateFailed(RackException): - msg_fmt = _("Unable to create keypair") - - -class KeypairDeleteFailed(RackException): - msg_fmt = _("Unable to delete keypair") - - -class keypairInUse(RackException): - msg_fmt = _("Keypair %(keypair_id)s is still in use.") - - -class InvalidOpenStackCredential(Invalid): - msg_fmt = _("OpenStack credential %(credential)s is required.") - - -class SecuritygroupNotFound(NotFound): - msg_fmt = _("Security group %(securitygroup_id)s not found.") - - -class SecuritygroupCreateFailed(RackException): - msg_fmt = _("Unable to create Securitygroup") - - -class SecuritygroupDeleteFailed(RackException): - msg_fmt = _("Unable to delete Securitygroup") - - -class SecuritygroupruleNotFound(NotFound): - msg_fmt = _("Securitygrouprule %(rule_id)s could not be found.") - - -class SecuritygroupruleCreateFailed(RackException): - msg_fmt = _("Unable to create Securitygrouprule") - - -class SecuritygroupruleDeleteFailed(RackException): - msg_fmt = _("Unable to delete Securitygrouprule") - - -class SecuritygroupInUse(RackException): - msg_fmt = _("Securitygroup %(securitygroup_id)s is still in use.") - - -class ProcessCreateFailed(RackException): - msg_fmt = _("Unable to create Process") - - -class ProcessDeleteFailed(RackException): - msg_fmt = _("Unable to delete Process") - - -class ProcessNotFound(NotFound): - msg_fmt = _("Process %(pid)s could not be found.") - - -class ProxyCreateFailed(RackException): - msg_fmt = _("Unable to create Proxy") - - -class ProxyDeleteFailed(RackException): - msg_fmt = _("Unable to delete Proxy") - - -class ProxyNotFound(NotFound): - msg_fmt = _("Proxy instance could not be found.") - - -class NoNetworksFound(NotFound): - msg_fmt = _("No networks defined for gid %(gid)s.") - - -class OpenStackException(RackException): - def __init__(self, code, message): - super(RackException, self).__init__(message) - self.code = code - self.msg_fmt = message diff --git a/rack/netconf.py b/rack/netconf.py deleted file mode 100644 index a8139d9..0000000 --- a/rack/netconf.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from oslo.config import cfg - -from rack import utils - -CONF = cfg.CONF - - -def _get_my_ip(): - """Returns the actual ip of the local machine. 
- - This code figures out what source address would be used if some traffic - were to be sent out to some well known address on the Internet. In this - case, a Google DNS server is used, but the specific address does not - matter much. No traffic is actually sent. - """ - try: - csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - csock.connect(('8.8.8.8', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr - except socket.error: - return utils.get_my_ipv4_address() - - -netconf_opts = [ - cfg.StrOpt('my_ip', - default=_get_my_ip(), - help='IP address of this host'), - cfg.StrOpt('host', - default=socket.gethostname(), - help='Name of this node. This can be an opaque identifier. ' - 'It is not necessarily a hostname, FQDN, or IP address. ' - 'However, the node name must be valid within ' - 'an AMQP key, and if using ZeroMQ, a valid ' - 'hostname, FQDN, or IP address'), - cfg.BoolOpt('use_ipv6', - default=False, - help='Use IPv6'), -] - -CONF.register_opts(netconf_opts) diff --git a/rack/openstack/__init__.py b/rack/openstack/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/openstack/common/README b/rack/openstack/common/README deleted file mode 100644 index 0700c72..0000000 --- a/rack/openstack/common/README +++ /dev/null @@ -1,13 +0,0 @@ -openstack-common ----------------- - -A number of modules from openstack-common are imported into this project. - -These modules are "incubating" in openstack-common and are kept in sync -with the help of openstack-common's update.py script. See: - - https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator - -The copy of the code should never be directly modified here. Please -always update openstack-common first and then run the script to copy -the changes across. diff --git a/rack/openstack/common/__init__.py b/rack/openstack/common/__init__.py deleted file mode 100644 index 2a00f3b..0000000 --- a/rack/openstack/common/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -import six -six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/rack/openstack/common/cliutils.py b/rack/openstack/common/cliutils.py deleted file mode 100644 index 411bd58..0000000 --- a/rack/openstack/common/cliutils.py +++ /dev/null @@ -1,63 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect - - -class MissingArgs(Exception): - - def __init__(self, missing): - self.missing = missing - - def __str__(self): - if len(self.missing) == 1: - return "An argument is missing" - else: - return ("%(num)d arguments are missing" % - dict(num=len(self.missing))) - - -def validate_args(fn, *args, **kwargs): - """Check that the supplied args are sufficient for calling a function. - - >>> validate_args(lambda a: None) - Traceback (most recent call last): - ... - MissingArgs: An argument is missing - >>> validate_args(lambda a, b, c, d: None, 0, c=1) - Traceback (most recent call last): - ... 
- MissingArgs: 2 arguments are missing - - :param fn: the function to check - :param args: the positional arguments supplied - :param kwargs: the keyword arguments supplied - """ - argspec = inspect.getargspec(fn) - - num_defaults = len(argspec.defaults or []) - required_args = argspec.args[:len(argspec.args) - num_defaults] - - def isbound(method): - return getattr(method, 'im_self', None) is not None - - if isbound(fn): - required_args.pop(0) - - missing = [arg for arg in required_args if arg not in kwargs] - missing = missing[len(args):] - if missing: - raise MissingArgs(missing) diff --git a/rack/openstack/common/config/__init__.py b/rack/openstack/common/config/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/openstack/common/config/generator.py b/rack/openstack/common/config/generator.py deleted file mode 100644 index 335ddb6..0000000 --- a/rack/openstack/common/config/generator.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright 2012 SINA Corporation -# Copyright 2014 Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Extracts OpenStack config option info from module(s).""" - -from __future__ import print_function - -import argparse -import imp -import os -import re -import socket -import sys -import textwrap - -from oslo.config import cfg -import six -import stevedore.named - -from rack.openstack.common import gettextutils -from rack.openstack.common import importutils - -gettextutils.install('rack') - -STROPT = "StrOpt" -BOOLOPT = "BoolOpt" -INTOPT = "IntOpt" -FLOATOPT = "FloatOpt" -LISTOPT = "ListOpt" -DICTOPT = "DictOpt" -MULTISTROPT = "MultiStrOpt" - -OPT_TYPES = { - STROPT: 'string value', - BOOLOPT: 'boolean value', - INTOPT: 'integer value', - FLOATOPT: 'floating point value', - LISTOPT: 'list value', - DICTOPT: 'dict value', - MULTISTROPT: 'multi valued', -} - -OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, - FLOATOPT, LISTOPT, DICTOPT, - MULTISTROPT])) - -PY_EXT = ".py" -BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), - "../../../../")) -WORDWRAP_WIDTH = 60 - - -def generate(argv): - parser = argparse.ArgumentParser( - description='generate sample configuration file', - ) - parser.add_argument('-m', dest='modules', action='append') - parser.add_argument('-l', dest='libraries', action='append') - parser.add_argument('srcfiles', nargs='*') - parsed_args = parser.parse_args(argv) - - mods_by_pkg = dict() - for filepath in parsed_args.srcfiles: - pkg_name = filepath.split(os.sep)[1] - mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), - os.path.basename(filepath).split('.')[0]]) - mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) - # NOTE(lzyeval): place top level modules before packages - pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT)) - ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names) - pkg_names.extend(ext_names) - - # opts_by_group is a mapping of group name to an options list - # The options list is a 
list of (module, options) tuples - opts_by_group = {'DEFAULT': []} - - if parsed_args.modules: - for module_name in parsed_args.modules: - module = _import_module(module_name) - if module: - for group, opts in _list_opts(module): - opts_by_group.setdefault(group, []).append((module_name, - opts)) - - # Look for entry points defined in libraries (or applications) for - # option discovery, and include their return values in the output. - # - # Each entry point should be a function returning an iterable - # of pairs with the group name (or None for the default group) - # and the list of Opt instances for that group. - if parsed_args.libraries: - loader = stevedore.named.NamedExtensionManager( - 'oslo.config.opts', - names=list(set(parsed_args.libraries)), - invoke_on_load=False, - ) - for ext in loader: - for group, opts in ext.plugin(): - opt_list = opts_by_group.setdefault(group or 'DEFAULT', []) - opt_list.append((ext.name, opts)) - - for pkg_name in pkg_names: - mods = mods_by_pkg.get(pkg_name) - mods.sort() - for mod_str in mods: - if mod_str.endswith('.__init__'): - mod_str = mod_str[:mod_str.rfind(".")] - - mod_obj = _import_module(mod_str) - if not mod_obj: - raise RuntimeError("Unable to import module %s" % mod_str) - - for group, opts in _list_opts(mod_obj): - opts_by_group.setdefault(group, []).append((mod_str, opts)) - - print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', [])) - for group in sorted(opts_by_group.keys()): - print_group_opts(group, opts_by_group[group]) - - -def _import_module(mod_str): - try: - if mod_str.startswith('bin.'): - imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:])) - return sys.modules[mod_str[4:]] - else: - return importutils.import_module(mod_str) - except Exception as e: - sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e))) - return None - - -def _is_in_group(opt, group): - "Check if opt is in group." - for value in group._opts.values(): - # NOTE(llu): Temporary workaround for bug #1262148, wait until - # newly released oslo.config support '==' operator. - if not(value['opt'] != opt): - return True - return False - - -def _guess_groups(opt, mod_obj): - # is it in the DEFAULT group? - if _is_in_group(opt, cfg.CONF): - return 'DEFAULT' - - # what other groups is it in? - for value in cfg.CONF.values(): - if isinstance(value, cfg.CONF.GroupAttr): - if _is_in_group(opt, value._group): - return value._group.name - - raise RuntimeError( - "Unable to find group for option %s, " - "maybe it's defined twice in the same group?" 
- % opt.name - ) - - -def _list_opts(obj): - def is_opt(o): - return (isinstance(o, cfg.Opt) and - not isinstance(o, cfg.SubCommandOpt)) - - opts = list() - for attr_str in dir(obj): - attr_obj = getattr(obj, attr_str) - if is_opt(attr_obj): - opts.append(attr_obj) - elif (isinstance(attr_obj, list) and - all(map(lambda x: is_opt(x), attr_obj))): - opts.extend(attr_obj) - - ret = {} - for opt in opts: - ret.setdefault(_guess_groups(opt, obj), []).append(opt) - return ret.items() - - -def print_group_opts(group, opts_by_module): - print("[%s]" % group) - print('') - for mod, opts in opts_by_module: - print('#') - print('# Options defined in %s' % mod) - print('#') - print('') - for opt in opts: - _print_opt(opt) - print('') - - -def _get_my_ip(): - try: - csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - csock.connect(('8.8.8.8', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr - except socket.error: - return None - - -def _sanitize_default(name, value): - """Set up a reasonably sensible default for pybasedir, my_ip and host.""" - if value.startswith(sys.prefix): - # NOTE(jd) Don't use os.path.join, because it is likely to think the - # second part is an absolute pathname and therefore drop the first - # part. - value = os.path.normpath("/usr/" + value[len(sys.prefix):]) - elif value.startswith(BASEDIR): - return value.replace(BASEDIR, '/usr/lib/python/site-packages') - elif BASEDIR in value: - return value.replace(BASEDIR, '') - elif value == _get_my_ip(): - return '10.0.0.1' - elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name: - return 'rack' - elif value.strip() != value: - return '"%s"' % value - return value - - -def _print_opt(opt): - opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help - if not opt_help: - sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name) - opt_help = "" - opt_type = None - try: - opt_type = OPTION_REGEX.search(str(type(opt))).group(0) - except (ValueError, AttributeError) as err: - sys.stderr.write("%s\n" % str(err)) - sys.exit(1) - opt_help = u'%s (%s)' % (opt_help, - OPT_TYPES[opt_type]) - print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))) - if opt.deprecated_opts: - for deprecated_opt in opt.deprecated_opts: - if deprecated_opt.name: - deprecated_group = (deprecated_opt.group if - deprecated_opt.group else "DEFAULT") - print('# Deprecated group/name - [%s]/%s' % - (deprecated_group, - deprecated_opt.name)) - try: - if opt_default is None: - print('#%s=' % opt_name) - elif opt_type == STROPT: - assert(isinstance(opt_default, six.string_types)) - print('#%s=%s' % (opt_name, _sanitize_default(opt_name, - opt_default))) - elif opt_type == BOOLOPT: - assert(isinstance(opt_default, bool)) - print('#%s=%s' % (opt_name, str(opt_default).lower())) - elif opt_type == INTOPT: - assert(isinstance(opt_default, int) and - not isinstance(opt_default, bool)) - print('#%s=%s' % (opt_name, opt_default)) - elif opt_type == FLOATOPT: - assert(isinstance(opt_default, float)) - print('#%s=%s' % (opt_name, opt_default)) - elif opt_type == LISTOPT: - assert(isinstance(opt_default, list)) - print('#%s=%s' % (opt_name, ','.join(opt_default))) - elif opt_type == DICTOPT: - assert(isinstance(opt_default, dict)) - opt_default_strlist = [str(key) + ':' + str(value) - for (key, value) in opt_default.items()] - print('#%s=%s' % (opt_name, ','.join(opt_default_strlist))) - elif opt_type == MULTISTROPT: - assert(isinstance(opt_default, list)) - if not opt_default: - opt_default = [''] - for 
default in opt_default: - print('#%s=%s' % (opt_name, default)) - print('') - except Exception: - sys.stderr.write('Error in option "%s"\n' % opt_name) - sys.exit(1) - - -def main(): - generate(sys.argv[1:]) - -if __name__ == '__main__': - main() diff --git a/rack/openstack/common/context.py b/rack/openstack/common/context.py deleted file mode 100644 index fe073d6..0000000 --- a/rack/openstack/common/context.py +++ /dev/null @@ -1,83 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Simple class that stores security context information in the web request. - -Projects should subclass this class if they wish to enhance the request -context or provide additional information in their specific WSGI pipeline. -""" - -import itertools - -from rack.openstack.common import uuidutils - - -def generate_request_id(): - return 'req-%s' % uuidutils.generate_uuid() - - -class RequestContext(object): - - """Helper class to represent useful information about a request context. - - Stores information about the security context under which the user - accesses the system, as well as additional request information. - """ - - def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, - read_only=False, show_deleted=False, request_id=None): - self.auth_token = auth_token - self.user = user - self.tenant = tenant - self.is_admin = is_admin - self.read_only = read_only - self.show_deleted = show_deleted - if not request_id: - request_id = generate_request_id() - self.request_id = request_id - - def to_dict(self): - return {'user': self.user, - 'tenant': self.tenant, - 'is_admin': self.is_admin, - 'read_only': self.read_only, - 'show_deleted': self.show_deleted, - 'auth_token': self.auth_token, - 'request_id': self.request_id} - - -def get_admin_context(show_deleted=False): - context = RequestContext(None, - tenant=None, - is_admin=True, - show_deleted=show_deleted) - return context - - -def get_context_from_function_and_args(function, args, kwargs): - """Find an arg of type RequestContext and return it. - - This is useful in a couple of decorators where we don't - know much about the function we're wrapping. - """ - - for arg in itertools.chain(kwargs.values(), args): - if isinstance(arg, RequestContext): - return arg - - return None diff --git a/rack/openstack/common/db/__init__.py b/rack/openstack/common/db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/openstack/common/db/api.py b/rack/openstack/common/db/api.py deleted file mode 100644 index bcea694..0000000 --- a/rack/openstack/common/db/api.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Multiple DB API backend support. - -A DB backend module should implement a method named 'get_backend' which -takes no arguments. The method can return any object that implements DB -API methods. -""" - -import functools -import logging -import threading -import time - -from rack.openstack.common.db import exception -from rack.openstack.common.gettextutils import _LE -from rack.openstack.common import importutils - - -LOG = logging.getLogger(__name__) - - -def safe_for_db_retry(f): - """Enable db-retry for decorated function, if config option enabled.""" - f.__dict__['enable_retry'] = True - return f - - -class wrap_db_retry(object): - """Retry db.api methods if DBConnectionError() is raised - - Retry decorated db.api methods. If `use_db_reconnect` is enabled - in config, this decorator is applied to all db.api functions - marked with the @safe_for_db_retry decorator. - The decorator catches DBConnectionError() and retries the function in a - loop until it succeeds, or until the maximum retry count is reached. - """ - - def __init__(self, retry_interval, max_retries, inc_retry_interval, - max_retry_interval): - super(wrap_db_retry, self).__init__() - - self.retry_interval = retry_interval - self.max_retries = max_retries - self.inc_retry_interval = inc_retry_interval - self.max_retry_interval = max_retry_interval - - def __call__(self, f): - @functools.wraps(f) - def wrapper(*args, **kwargs): - next_interval = self.retry_interval - remaining = self.max_retries - - while True: - try: - return f(*args, **kwargs) - except exception.DBConnectionError as e: - if remaining == 0: - LOG.exception(_LE('DB exceeded retry limit.')) - raise exception.DBError(e) - if remaining != -1: - remaining -= 1 - LOG.exception(_LE('DB connection error.')) - # NOTE(vsergeyev): We are using patched time module, so - # this effectively yields the execution - # context to another green thread. - time.sleep(next_interval) - if self.inc_retry_interval: - next_interval = min( - next_interval * 2, - self.max_retry_interval - ) - return wrapper - - -class DBAPI(object): - def __init__(self, backend_name, backend_mapping=None, lazy=False, - **kwargs): - """Initialize the chosen DB API backend. 
- - :param backend_name: name of the backend to load - :type backend_name: str - - :param backend_mapping: backend name -> module/class to load mapping - :type backend_mapping: dict - - :param lazy: load the DB backend lazily on the first DB API method call - :type lazy: bool - - Keyword arguments: - - :keyword use_db_reconnect: retry DB transactions on disconnect or not - :type use_db_reconnect: bool - - :keyword retry_interval: seconds between transaction retries - :type retry_interval: int - - :keyword inc_retry_interval: increase retry interval or not - :type inc_retry_interval: bool - - :keyword max_retry_interval: max interval value between retries - :type max_retry_interval: int - - :keyword max_retries: max number of retries before an error is raised - :type max_retries: int - - """ - - self._backend = None - self._backend_name = backend_name - self._backend_mapping = backend_mapping or {} - self._lock = threading.Lock() - - if not lazy: - self._load_backend() - - self.use_db_reconnect = kwargs.get('use_db_reconnect', False) - self.retry_interval = kwargs.get('retry_interval', 1) - self.inc_retry_interval = kwargs.get('inc_retry_interval', True) - self.max_retry_interval = kwargs.get('max_retry_interval', 10) - self.max_retries = kwargs.get('max_retries', 20) - - def _load_backend(self): - with self._lock: - if not self._backend: - # Import the untranslated name if we don't have a mapping - backend_path = self._backend_mapping.get(self._backend_name, - self._backend_name) - backend_mod = importutils.import_module(backend_path) - self._backend = backend_mod.get_backend() - - def __getattr__(self, key): - if not self._backend: - self._load_backend() - - attr = getattr(self._backend, key) - if not hasattr(attr, '__call__'): - return attr - # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry - # DB API methods, decorated with @safe_for_db_retry - # on disconnect. - if self.use_db_reconnect and hasattr(attr, 'enable_retry'): - attr = wrap_db_retry( - retry_interval=self.retry_interval, - max_retries=self.max_retries, - inc_retry_interval=self.inc_retry_interval, - max_retry_interval=self.max_retry_interval)(attr) - - return attr diff --git a/rack/openstack/common/db/exception.py b/rack/openstack/common/db/exception.py deleted file mode 100644 index e1d28e0..0000000 --- a/rack/openstack/common/db/exception.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""DB related custom exceptions.""" - -import six - -from rack.openstack.common.gettextutils import _ - - -class DBError(Exception): - """Wraps an implementation specific exception.""" - def __init__(self, inner_exception=None): - self.inner_exception = inner_exception - super(DBError, self).__init__(six.text_type(inner_exception)) - - -class DBDuplicateEntry(DBError): - """Wraps an implementation specific exception.""" - def __init__(self, columns=[], inner_exception=None): - self.columns = columns - super(DBDuplicateEntry, self).__init__(inner_exception) - - -class DBDeadlock(DBError): - def __init__(self, inner_exception=None): - super(DBDeadlock, self).__init__(inner_exception) - - -class DBInvalidUnicodeParameter(Exception): - message = _("Invalid Parameter: " - "Unicode is not supported by the current database.") - - -class DbMigrationError(DBError): - """Wraps migration specific exception.""" - def __init__(self, message=None): - super(DbMigrationError, self).__init__(message) - - -class DBConnectionError(DBError): - """Wraps connection specific exception.""" - pass diff --git a/rack/openstack/common/db/options.py b/rack/openstack/common/db/options.py deleted file mode 100644 index 9109774..0000000 --- a/rack/openstack/common/db/options.py +++ /dev/null @@ -1,168 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -from oslo.config import cfg - - -database_opts = [ - cfg.StrOpt('sqlite_db', - deprecated_group='DEFAULT', - default='rack.sqlite', - help='The file name to use with SQLite'), - cfg.BoolOpt('sqlite_synchronous', - deprecated_group='DEFAULT', - default=True, - help='If True, SQLite uses synchronous mode'), - cfg.StrOpt('backend', - default='sqlalchemy', - deprecated_name='db_backend', - deprecated_group='DEFAULT', - help='The backend to use for db'), - cfg.StrOpt('connection', - help='The SQLAlchemy connection string used to connect to the ' - 'database', - secret=True, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_connection', - group='DATABASE'), - cfg.DeprecatedOpt('connection', - group='sql'), ]), - cfg.StrOpt('mysql_sql_mode', - help='The SQL mode to be used for MySQL sessions ' - '(default is empty, meaning do not override ' - 'any server-side SQL mode setting)'), - cfg.IntOpt('idle_timeout', - default=3600, - deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_idle_timeout', - group='DATABASE'), - cfg.DeprecatedOpt('idle_timeout', - group='sql')], - help='Timeout before idle sql connections are reaped'), - cfg.IntOpt('min_pool_size', - default=1, - deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_min_pool_size', - group='DATABASE')], - help='Minimum number of SQL connections to keep open in a ' - 'pool'), - cfg.IntOpt('max_pool_size', - default=None, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_max_pool_size', - group='DATABASE')], - help='Maximum number of SQL connections to keep open in a ' - 'pool'), - cfg.IntOpt('max_retries', - default=10, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', - group='DEFAULT'), - cfg.DeprecatedOpt('sql_max_retries', - group='DATABASE')], - help='Maximum db connection retries during startup. ' - '(setting -1 implies an infinite retry count)'), - cfg.IntOpt('retry_interval', - default=10, - deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', - group='DEFAULT'), - cfg.DeprecatedOpt('reconnect_interval', - group='DATABASE')], - help='Interval between retries of opening a sql connection'), - cfg.IntOpt('max_overflow', - default=None, - deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', - group='DEFAULT'), - cfg.DeprecatedOpt('sqlalchemy_max_overflow', - group='DATABASE')], - help='If set, use this value for max_overflow with sqlalchemy'), - cfg.IntOpt('connection_debug', - default=0, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', - group='DEFAULT')], - help='Verbosity of SQL debugging information. 
0=None, ' - '100=Everything'), - cfg.BoolOpt('connection_trace', - default=False, - deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', - group='DEFAULT')], - help='Add python stack traces to SQL as comment strings'), - cfg.IntOpt('pool_timeout', - default=None, - deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', - group='DATABASE')], - help='If set, use this value for pool_timeout with sqlalchemy'), - cfg.BoolOpt('use_db_reconnect', - default=False, - help='Enable the experimental use of database reconnect ' - 'on connection lost'), - cfg.IntOpt('db_retry_interval', - default=1, - help='seconds between db connection retries'), - cfg.BoolOpt('db_inc_retry_interval', - default=True, - help='Whether to increase interval between db connection ' - 'retries, up to db_max_retry_interval'), - cfg.IntOpt('db_max_retry_interval', - default=10, - help='max seconds between db connection retries, if ' - 'db_inc_retry_interval is enabled'), - cfg.IntOpt('db_max_retries', - default=20, - help='maximum db connection retries before error is raised. ' - '(setting -1 implies an infinite retry count)'), -] - -CONF = cfg.CONF -CONF.register_opts(database_opts, 'database') - - -def set_defaults(sql_connection, sqlite_db, max_pool_size=None, - max_overflow=None, pool_timeout=None): - """Set defaults for configuration variables.""" - cfg.set_defaults(database_opts, - connection=sql_connection, - sqlite_db=sqlite_db) - # Update the QueuePool defaults - if max_pool_size is not None: - cfg.set_defaults(database_opts, - max_pool_size=max_pool_size) - if max_overflow is not None: - cfg.set_defaults(database_opts, - max_overflow=max_overflow) - if pool_timeout is not None: - cfg.set_defaults(database_opts, - pool_timeout=pool_timeout) - - -def list_opts(): - """Returns a list of oslo.config options available in the library. - - The returned list includes all oslo.config options which may be registered - at runtime by the library. - - Each element of the list is a tuple. The first element is the name of the - group under which the list of elements in the second element will be - registered. A group name of None corresponds to the [DEFAULT] group in - config files. - - The purpose of this is to allow tools like the Oslo sample config file - generator to discover the options exposed to users by this library. - - :returns: a list of (group_name, opts) tuples - """ - return [('database', copy.deepcopy(database_opts))] diff --git a/rack/openstack/common/db/sqlalchemy/__init__.py b/rack/openstack/common/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/openstack/common/db/sqlalchemy/migration.py b/rack/openstack/common/db/sqlalchemy/migration.py deleted file mode 100644 index 6b63d0f..0000000 --- a/rack/openstack/common/db/sqlalchemy/migration.py +++ /dev/null @@ -1,268 +0,0 @@ -# coding: utf-8 -# -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Based on code in migrate/changeset/databases/sqlite.py which is under -# the following license: -# -# The MIT License -# -# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -import os -import re - -from migrate.changeset import ansisql -from migrate.changeset.databases import sqlite -from migrate import exceptions as versioning_exceptions -from migrate.versioning import api as versioning_api -from migrate.versioning.repository import Repository -import sqlalchemy -from sqlalchemy.schema import UniqueConstraint - -from rack.openstack.common.db import exception -from rack.openstack.common.gettextutils import _ - - -def _get_unique_constraints(self, table): - """Retrieve information about existing unique constraints of the table - - This feature is needed for _recreate_table() to work properly. - Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x. - - """ - - data = table.metadata.bind.execute( - """SELECT sql - FROM sqlite_master - WHERE - type='table' AND - name=:table_name""", - table_name=table.name - ).fetchone()[0] - - UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" - return [ - UniqueConstraint( - *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")], - name=name - ) - for name, cols in re.findall(UNIQUE_PATTERN, data) - ] - - -def _recreate_table(self, table, column=None, delta=None, omit_uniques=None): - """Recreate the table properly - - Unlike the corresponding original method of sqlalchemy-migrate this one - doesn't drop existing unique constraints when creating a new one. 
- - """ - - table_name = self.preparer.format_table(table) - - # we remove all indexes so as not to have - # problems during copy and re-create - for index in table.indexes: - index.drop() - - # reflect existing unique constraints - for uc in self._get_unique_constraints(table): - table.append_constraint(uc) - # omit given unique constraints when creating a new table if required - table.constraints = set([ - cons for cons in table.constraints - if omit_uniques is None or cons.name not in omit_uniques - ]) - - self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name) - self.execute() - - insertion_string = self._modify_table(table, column, delta) - - table.create(bind=self.connection) - self.append(insertion_string % {'table_name': table_name}) - self.execute() - self.append('DROP TABLE migration_tmp') - self.execute() - - -def _visit_migrate_unique_constraint(self, *p, **k): - """Drop the given unique constraint - - The corresponding original method of sqlalchemy-migrate just - raises NotImplemented error - - """ - - self.recreate_table(p[0].table, omit_uniques=[p[0].name]) - - -def patch_migrate(): - """A workaround for SQLite's inability to alter things - - SQLite abilities to alter tables are very limited (please read - http://www.sqlite.org/lang_altertable.html for more details). - E. g. one can't drop a column or a constraint in SQLite. The - workaround for this is to recreate the original table omitting - the corresponding constraint (or column). - - sqlalchemy-migrate library has recreate_table() method that - implements this workaround, but it does it wrong: - - - information about unique constraints of a table - is not retrieved. So if you have a table with one - unique constraint and a migration adding another one - you will end up with a table that has only the - latter unique constraint, and the former will be lost - - - dropping of unique constraints is not supported at all - - The proper way to fix this is to provide a pull-request to - sqlalchemy-migrate, but the project seems to be dead. So we - can go on with monkey-patching of the lib at least for now. - - """ - - # this patch is needed to ensure that recreate_table() doesn't drop - # existing unique constraints of the table when creating a new one - helper_cls = sqlite.SQLiteHelper - helper_cls.recreate_table = _recreate_table - helper_cls._get_unique_constraints = _get_unique_constraints - - # this patch is needed to be able to drop existing unique constraints - constraint_cls = sqlite.SQLiteConstraintDropper - constraint_cls.visit_migrate_unique_constraint = \ - _visit_migrate_unique_constraint - constraint_cls.__bases__ = (ansisql.ANSIColumnDropper, - sqlite.SQLiteConstraintGenerator) - - -def db_sync(engine, abs_path, version=None, init_version=0): - """Upgrade or downgrade a database. - - Function runs the upgrade() or downgrade() functions in change scripts. - - :param engine: SQLAlchemy engine instance for a given database - :param abs_path: Absolute path to migrate repository. - :param version: Database will upgrade/downgrade until this version. - If None - database will update to the latest - available version. 
- :param init_version: Initial database version - """ - if version is not None: - try: - version = int(version) - except ValueError: - raise exception.DbMigrationError( - message=_("version should be an integer")) - - current_version = db_version(engine, abs_path, init_version) - repository = _find_migrate_repo(abs_path) - _db_schema_sanity_check(engine) - if version is None or version > current_version: - return versioning_api.upgrade(engine, repository, version) - else: - return versioning_api.downgrade(engine, repository, - version) - - -def _db_schema_sanity_check(engine): - """Ensure all database tables were created with required parameters. - - :param engine: SQLAlchemy engine instance for a given database - - """ - - if engine.name == 'mysql': - onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' - 'from information_schema.TABLES ' - 'where TABLE_SCHEMA=%s and ' - 'TABLE_COLLATION NOT LIKE "%%utf8%%"') - - table_names = [res[0] for res in engine.execute(onlyutf8_sql, - engine.url.database)] - if len(table_names) > 0: - raise ValueError(_('Tables "%s" have non utf8 collation, ' - 'please make sure all tables are CHARSET=utf8' - ) % ','.join(table_names)) - - -def db_version(engine, abs_path, init_version): - """Show the current version of the repository. - - :param engine: SQLAlchemy engine instance for a given database - :param abs_path: Absolute path to migrate repository - :param init_version: Initial database version - """ - repository = _find_migrate_repo(abs_path) - try: - return versioning_api.db_version(engine, repository) - except versioning_exceptions.DatabaseNotControlledError: - meta = sqlalchemy.MetaData() - meta.reflect(bind=engine) - tables = meta.tables - if len(tables) == 0 or 'alembic_version' in tables: - db_version_control(engine, abs_path, version=init_version) - return versioning_api.db_version(engine, repository) - else: - raise exception.DbMigrationError( - message=_( - "The database is not under version control, but has " - "tables. Please stamp the current version of the schema " - "manually.")) - - -def db_version_control(engine, abs_path, version=None): - """Mark a database as under this repository's version control. - - Once a database is under version control, schema changes should - only be done via change scripts in this repository. - - :param engine: SQLAlchemy engine instance for a given database - :param abs_path: Absolute path to migrate repository - :param version: Initial database version - """ - repository = _find_migrate_repo(abs_path) - versioning_api.version_control(engine, repository, version) - return version - - -def _find_migrate_repo(abs_path): - """Get the project's change script repository - - :param abs_path: Absolute path to migrate repository - """ - if not os.path.exists(abs_path): - raise exception.DbMigrationError("Path %s not found" % abs_path) - return Repository(abs_path) diff --git a/rack/openstack/common/db/sqlalchemy/models.py b/rack/openstack/common/db/sqlalchemy/models.py deleted file mode 100644 index 4a8c9f6..0000000 --- a/rack/openstack/common/db/sqlalchemy/models.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Piston Cloud Computing, Inc. -# Copyright 2012 Cloudscaling Group, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -SQLAlchemy models. -""" - -import six - -from sqlalchemy import Column, Integer -from sqlalchemy import DateTime -from sqlalchemy.orm import object_mapper - -from rack.openstack.common import timeutils - - -class ModelBase(object): - """Base class for models.""" - __table_initialized__ = False - - def save(self, session): - """Save this object.""" - - # NOTE(boris-42): This part of code should be look like: - # session.add(self) - # session.flush() - # But there is a bug in sqlalchemy and eventlet that - # raises NoneType exception if there is no running - # transaction and rollback is called. As long as - # sqlalchemy has this bug we have to create transaction - # explicitly. - with session.begin(subtransactions=True): - session.add(self) - session.flush() - - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default=None): - return getattr(self, key, default) - - @property - def _extra_keys(self): - """Specifies custom fields - - Subclasses can override this property to return a list - of custom fields that should be included in their dict - representation. - - For reference check tests/db/sqlalchemy/test_models.py - """ - return [] - - def __iter__(self): - columns = dict(object_mapper(self).columns).keys() - # NOTE(russellb): Allow models to specify other keys that can be looked - # up, beyond the actual db columns. An example would be the 'name' - # property for an Instance. - columns.extend(self._extra_keys) - self._i = iter(columns) - return self - - def next(self): - n = six.advance_iterator(self._i) - return n, getattr(self, n) - - def update(self, values): - """Make the model object behave like a dict.""" - for k, v in six.iteritems(values): - setattr(self, k, v) - - def iteritems(self): - """Make the model object behave like a dict. - - Includes attributes from joins. - """ - local = dict(self) - joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) - if not k[0] == '_']) - local.update(joined) - return six.iteritems(local) - - -class TimestampMixin(object): - created_at = Column(DateTime, default=lambda: timeutils.utcnow()) - updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) - - -class SoftDeleteMixin(object): - deleted_at = Column(DateTime) - deleted = Column(Integer, default=0) - - def soft_delete(self, session): - """Mark this object as deleted.""" - self.deleted = self.id - self.deleted_at = timeutils.utcnow() - self.save(session=session) diff --git a/rack/openstack/common/db/sqlalchemy/provision.py b/rack/openstack/common/db/sqlalchemy/provision.py deleted file mode 100644 index 14f8020..0000000 --- a/rack/openstack/common/db/sqlalchemy/provision.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2013 Mirantis.inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provision test environment for specific DB backends""" - -import argparse -import os -import random -import string - -from six import moves -import sqlalchemy - -from rack.openstack.common.db import exception as exc - - -SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') - - -def _gen_credentials(*names): - """Generate credentials.""" - auth_dict = {} - for name in names: - val = ''.join(random.choice(string.ascii_lowercase) - for i in moves.range(10)) - auth_dict[name] = val - return auth_dict - - -def _get_engine(uri=SQL_CONNECTION): - """Engine creation - - By default the uri is SQL_CONNECTION which is admin credentials. - Call the function without arguments to get admin connection. Admin - connection required to create temporary user and database for each - particular test. Otherwise use existing connection to recreate connection - to the temporary database. - """ - return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool) - - -def _execute_sql(engine, sql, driver): - """Initialize connection, execute sql query and close it.""" - try: - with engine.connect() as conn: - if driver == 'postgresql': - conn.connection.set_isolation_level(0) - for s in sql: - conn.execute(s) - except sqlalchemy.exc.OperationalError: - msg = ('%s does not match database admin ' - 'credentials or database does not exist.') - raise exc.DBConnectionError(msg % SQL_CONNECTION) - - -def create_database(engine): - """Provide temporary user and database for each particular test.""" - driver = engine.name - - auth = _gen_credentials('database', 'user', 'passwd') - - sqls = { - 'mysql': [ - "drop database if exists %(database)s;", - "grant all on %(database)s.* to '%(user)s'@'localhost'" - " identified by '%(passwd)s';", - "create database %(database)s;", - ], - 'postgresql': [ - "drop database if exists %(database)s;", - "drop user if exists %(user)s;", - "create user %(user)s with password '%(passwd)s';", - "create database %(database)s owner %(user)s;", - ] - } - - if driver == 'sqlite': - return 'sqlite:////tmp/%s' % auth['database'] - - try: - sql_rows = sqls[driver] - except KeyError: - raise ValueError('Unsupported RDBMS %s' % driver) - sql_query = map(lambda x: x % auth, sql_rows) - - _execute_sql(engine, sql_query, driver) - - params = auth.copy() - params['backend'] = driver - return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params - - -def drop_database(engine, current_uri): - """Drop temporary database and user after each particular test.""" - engine = _get_engine(current_uri) - admin_engine = _get_engine() - driver = engine.name - auth = {'database': engine.url.database, 'user': engine.url.username} - - if driver == 'sqlite': - try: - os.remove(auth['database']) - except OSError: - pass - return - - sqls = { - 'mysql': [ - "drop database if exists %(database)s;", - "drop user '%(user)s'@'localhost';", - ], - 'postgresql': [ - "drop database if exists %(database)s;", - "drop user if exists %(user)s;", - ] - } - - try: - sql_rows = sqls[driver] - except KeyError: - raise ValueError('Unsupported RDBMS %s' % driver) - sql_query = 
map(lambda x: x % auth, sql_rows) - - _execute_sql(admin_engine, sql_query, driver) - - -def main(): - """Controller to handle commands - - ::create: Create test user and database with random names. - ::drop: Drop user and database created by previous command. - """ - parser = argparse.ArgumentParser( - description='Controller to handle database creation and dropping' - ' commands.', - epilog='Under normal circumstances is not used directly.' - ' Used in .testr.conf to automate test database creation' - ' and dropping processes.') - subparsers = parser.add_subparsers( - help='Subcommands to manipulate temporary test databases.') - - create = subparsers.add_parser( - 'create', - help='Create temporary test ' - 'databases and users.') - create.set_defaults(which='create') - create.add_argument( - 'instances_count', - type=int, - help='Number of databases to create.') - - drop = subparsers.add_parser( - 'drop', - help='Drop temporary test databases and users.') - drop.set_defaults(which='drop') - drop.add_argument( - 'instances', - nargs='+', - help='List of databases uri to be dropped.') - - args = parser.parse_args() - - engine = _get_engine() - which = args.which - - if which == "create": - for i in range(int(args.instances_count)): - print(create_database(engine)) - elif which == "drop": - for db in args.instances: - drop_database(engine, db) - - -if __name__ == "__main__": - main() diff --git a/rack/openstack/common/db/sqlalchemy/session.py b/rack/openstack/common/db/sqlalchemy/session.py deleted file mode 100644 index 3ec9bd8..0000000 --- a/rack/openstack/common/db/sqlalchemy/session.py +++ /dev/null @@ -1,860 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Session Handling for SQLAlchemy backend. - -Recommended ways to use sessions within this framework: - -* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. - `model_query()` will implicitly use a session when called without one - supplied. This is the ideal situation because it will allow queries - to be automatically retried if the database connection is interrupted. - - .. note:: Automatic retry will be enabled in a future patch. - - It is generally fine to issue several queries in a row like this. Even though - they may be run in separate transactions and/or separate sessions, each one - will see the data from the prior calls. If needed, undo- or rollback-like - functionality should be handled at a logical level. For an example, look at - the code around quotas and `reservation_rollback()`. - - Examples: - - .. code:: python - - def get_foo(context, foo): - return (model_query(context, models.Foo). - filter_by(foo=foo). - first()) - - def update_foo(context, id, newfoo): - (model_query(context, models.Foo). - filter_by(id=id). 
-           update({'foo': newfoo}))
-
-      def create_foo(context, values):
-          foo_ref = models.Foo()
-          foo_ref.update(values)
-          foo_ref.save()
-          return foo_ref
-
-
-* Within the scope of a single method, keep all the reads and writes within
-  the context managed by a single session. In this way, the session's
-  `__exit__` handler will take care of calling `flush()` and `commit()` for
-  you. If using this approach, you should not explicitly call `flush()` or
-  `commit()`. Any error within the context of the session will cause the
-  session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
-  raised in `session`'s `__exit__` handler, and any try/except within the
-  context managed by `session` will not be triggered. And catching other
-  non-database errors in the session will not trigger the ROLLBACK, so
-  exception handlers should always be outside the session, unless the
-  developer wants to do a partial commit on purpose. If the connection is
-  dropped before this is possible, the database will implicitly roll back the
-  transaction.
-
-  .. note:: Statements in the session scope will not be automatically retried.
-
-  If you create models within the session, they need to be added, but you
-  do not need to call `model.save()`:
-
-  .. code:: python
-
-      def create_many_foo(context, foos):
-          session = sessionmaker()
-          with session.begin():
-              for foo in foos:
-                  foo_ref = models.Foo()
-                  foo_ref.update(foo)
-                  session.add(foo_ref)
-
-      def update_bar(context, foo_id, newbar):
-          session = sessionmaker()
-          with session.begin():
-              foo_ref = (model_query(context, models.Foo, session).
-                         filter_by(id=foo_id).
-                         first())
-              (model_query(context, models.Bar, session).
-               filter_by(id=foo_ref['bar_id']).
-               update({'bar': newbar}))
-
-  .. note:: `update_bar` is a trivially simple example of using
-     ``with session.begin``. While `create_many_foo` is a good example of
-     when a transaction is needed, it is always best to use as few queries
-     as possible.
-
-  The two queries in `update_bar` can be better expressed using a single query
-  which avoids the need for an explicit transaction. It can be expressed like
-  so:
-
-  .. code:: python
-
-      def update_bar(context, foo_id, newbar):
-          subq = (model_query(context, models.Foo.bar_id).
-                  filter_by(id=foo_id).
-                  limit(1).
-                  subquery())
-          (model_query(context, models.Bar).
-           filter_by(id=subq.as_scalar()).
-           update({'bar': newbar}))
-
-  For reference, this emits approximately the following SQL statement:
-
-  .. code:: sql
-
-      UPDATE bar SET bar = ${newbar}
-          WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
-
-  .. note:: `create_duplicate_foo` is a trivially simple example of catching
-     an exception while using ``with session.begin``. Here we create two
-     duplicate instances with the same primary key; the exception must be
-     caught outside of the context managed by a single session:
-
-  .. code:: python
-
-      def create_duplicate_foo(context):
-          foo1 = models.Foo()
-          foo2 = models.Foo()
-          foo1.id = foo2.id = 1
-          session = sessionmaker()
-          try:
-              with session.begin():
-                  session.add(foo1)
-                  session.add(foo2)
-          except exception.DBDuplicateEntry as e:
-              handle_error(e)
-
-* Passing an active session between methods. Sessions should only be passed
-  to private methods. The private method must use a subtransaction; otherwise
-  SQLAlchemy will throw an error when you call `session.begin()` on an
-  existing transaction. Public methods should not accept a session parameter
-  and should not be involved in sessions within the caller's scope.
- - Note that this incurs more overhead in SQLAlchemy than the above means - due to nesting transactions, and it is not possible to implicitly retry - failed database operations when using this approach. - - This also makes code somewhat more difficult to read and debug, because a - single database transaction spans more than one method. Error handling - becomes less clear in this situation. When this is needed for code clarity, - it should be clearly documented. - - .. code:: python - - def myfunc(foo): - session = sessionmaker() - with session.begin(): - # do some database things - bar = _private_func(foo, session) - return bar - - def _private_func(foo, session=None): - if not session: - session = sessionmaker() - with session.begin(subtransaction=True): - # do some other database things - return bar - - -There are some things which it is best to avoid: - -* Don't keep a transaction open any longer than necessary. - - This means that your ``with session.begin()`` block should be as short - as possible, while still containing all the related calls for that - transaction. - -* Avoid ``with_lockmode('UPDATE')`` when possible. - - In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match - any rows, it will take a gap-lock. This is a form of write-lock on the - "gap" where no rows exist, and prevents any other writes to that space. - This can effectively prevent any INSERT into a table by locking the gap - at the end of the index. Similar problems will occur if the SELECT FOR UPDATE - has an overly broad WHERE clause, or doesn't properly use an index. - - One idea proposed at ODS Fall '12 was to use a normal SELECT to test the - number of rows matching a query, and if only one row is returned, - then issue the SELECT FOR UPDATE. - - The better long-term solution is to use - ``INSERT .. ON DUPLICATE KEY UPDATE``. - However, this can not be done until the "deleted" columns are removed and - proper UNIQUE constraints are added to the tables. - - -Enabling soft deletes: - -* To use/enable soft-deletes, the `SoftDeleteMixin` must be added - to your model class. For example: - - .. code:: python - - class NovaBase(models.SoftDeleteMixin, models.ModelBase): - pass - - -Efficient use of soft deletes: - -* There are two possible ways to mark a record as deleted: - `model.soft_delete()` and `query.soft_delete()`. - - The `model.soft_delete()` method works with a single already-fetched entry. - `query.soft_delete()` makes only one db request for all entries that - correspond to the query. - -* In almost all cases you should use `query.soft_delete()`. Some examples: - - .. code:: python - - def soft_delete_bar(): - count = model_query(BarModel).find(some_condition).soft_delete() - if count == 0: - raise Exception("0 entries were soft deleted") - - def complex_soft_delete_with_synchronization_bar(session=None): - if session is None: - session = sessionmaker() - with session.begin(subtransactions=True): - count = (model_query(BarModel). - find(some_condition). - soft_delete(synchronize_session=True)) - # Here synchronize_session is required, because we - # don't know what is going on in outer session. - if count == 0: - raise Exception("0 entries were soft deleted") - -* There is only one situation where `model.soft_delete()` is appropriate: when - you fetch a single record, work with it, and mark it as deleted in the same - transaction. - - .. 
code:: python - - def soft_delete_bar_model(): - session = sessionmaker() - with session.begin(): - bar_ref = model_query(BarModel).find(some_condition).first() - # Work with bar_ref - bar_ref.soft_delete(session=session) - - However, if you need to work with all entries that correspond to query and - then soft delete them you should use the `query.soft_delete()` method: - - .. code:: python - - def soft_delete_multi_models(): - session = sessionmaker() - with session.begin(): - query = (model_query(BarModel, session=session). - find(some_condition)) - model_refs = query.all() - # Work with model_refs - query.soft_delete(synchronize_session=False) - # synchronize_session=False should be set if there is no outer - # session and these entries are not used after this. - - When working with many rows, it is very important to use query.soft_delete, - which issues a single query. Using `model.soft_delete()`, as in the following - example, is very inefficient. - - .. code:: python - - for bar_ref in bar_refs: - bar_ref.soft_delete(session=session) - # This will produce count(bar_refs) db requests. - -""" - -import functools -import logging -import re -import time - -import six -from sqlalchemy import exc as sqla_exc -from sqlalchemy.interfaces import PoolListener -import sqlalchemy.orm -from sqlalchemy.pool import NullPool, StaticPool -from sqlalchemy.sql.expression import literal_column - -from rack.openstack.common.db import exception -from rack.openstack.common.gettextutils import _LE, _LW, _LI -from rack.openstack.common import timeutils - - -LOG = logging.getLogger(__name__) - - -class SqliteForeignKeysListener(PoolListener): - """Ensures that the foreign key constraints are enforced in SQLite. - - The foreign key constraints are disabled by default in SQLite, - so the foreign key constraints will be enabled here for every - database connection - """ - def connect(self, dbapi_con, con_record): - dbapi_con.execute('pragma foreign_keys=ON') - - -# note(boris-42): In current versions of DB backends unique constraint -# violation messages follow the structure: -# -# sqlite: -# 1 column - (IntegrityError) column c1 is not unique -# N columns - (IntegrityError) column c1, c2, ..., N are not unique -# -# sqlite since 3.7.16: -# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1 -# -# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2 -# -# postgres: -# 1 column - (IntegrityError) duplicate key value violates unique -# constraint "users_c1_key" -# N columns - (IntegrityError) duplicate key value violates unique -# constraint "name_of_our_constraint" -# -# mysql: -# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key -# 'c1'") -# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined -# with -' for key 'name_of_our_constraint'") -# -# ibm_db_sa: -# N columns - (IntegrityError) SQL0803N One or more values in the INSERT -# statement, UPDATE statement, or foreign key update caused by a -# DELETE statement are not valid because the primary key, unique -# constraint or unique index identified by "2" constrains table -# "NOVA.KEY_PAIRS" from having duplicate values for the index -# key. 
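To make the message formats catalogued above concrete, here is a small self-contained sketch (the sample IntegrityError text is invented) of how the sqlite >= 3.7.16 pattern, defined just below, recovers the violated column names:

.. code:: python

    import re

    # Pattern for sqlite >= 3.7.16, mirroring the table of formats above.
    pattern = re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")

    # A hypothetical IntegrityError message from sqlite.
    message = "(IntegrityError) UNIQUE constraint failed: users.name, users.extra"

    match = pattern.match(message)
    if match:
        # Strip the "table." prefix from each column, as the handler does.
        columns = [c.split('.')[-1] for c in match.group(1).strip().split(", ")]
        print(columns)  # ['name', 'extra']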
-_DUP_KEY_RE_DB = {
-    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
-               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
-    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
-    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
-    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
-}
-
-
-def _raise_if_duplicate_entry_error(integrity_error, engine_name):
-    """Raise exception if two entries are duplicated.
-
-    Raises a DBDuplicateEntry exception if the integrity error wraps a
-    unique constraint violation.
-    """
-
-    def get_columns_from_uniq_cons_or_name(columns):
-        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
-        #                  where `t` is the table name and `c1`, `c2` are
-        #                  the columns in the UniqueConstraint.
-        uniqbase = "uniq_"
-        if not columns.startswith(uniqbase):
-            if engine_name == "postgresql":
-                return [columns[columns.index("_") + 1:columns.rindex("_")]]
-            return [columns]
-        return columns[len(uniqbase):].split("0")[1:]
-
-    if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
-        return
-
-    # FIXME(johannes): The usage of the .message attribute has been
-    # deprecated since Python 2.6. However, the exceptions raised by
-    # SQLAlchemy can differ when using unicode() and accessing .message.
-    # An audit across all three supported engines will be necessary to
-    # ensure there are no regressions.
-    for pattern in _DUP_KEY_RE_DB[engine_name]:
-        match = pattern.match(integrity_error.message)
-        if match:
-            break
-    else:
-        return
-
-    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
-    # columns so we have to omit that from the DBDuplicateEntry error.
-    columns = ''
-
-    if engine_name != 'ibm_db_sa':
-        columns = match.group(1)
-
-    if engine_name == "sqlite":
-        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
-    else:
-        columns = get_columns_from_uniq_cons_or_name(columns)
-    raise exception.DBDuplicateEntry(columns, integrity_error)
-
-
-# NOTE(comstud): In current versions of DB backends, Deadlock violation
-# messages follow the structure:
-#
-# mysql:
-# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
-#                     'restarting transaction')
-_DEADLOCK_RE_DB = {
-    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
-}
-
-
-def _raise_if_deadlock_error(operational_error, engine_name):
-    """Raise exception on deadlock condition.
-
-    Raise DBDeadlock exception if OperationalError contains a Deadlock
-    condition.
-    """
-    re = _DEADLOCK_RE_DB.get(engine_name)
-    if re is None:
-        return
-    # FIXME(johannes): The usage of the .message attribute has been
-    # deprecated since Python 2.6. However, the exceptions raised by
-    # SQLAlchemy can differ when using unicode() and accessing .message.
-    # An audit across all three supported engines will be necessary to
-    # ensure there are no regressions.
- m = re.match(operational_error.message) - if not m: - return - raise exception.DBDeadlock(operational_error) - - -def _wrap_db_error(f): - #TODO(rpodolyaka): in a subsequent commit make this a class decorator to - # ensure it can only applied to Session subclasses instances (as we use - # Session instance bind attribute below) - - @functools.wraps(f) - def _wrap(self, *args, **kwargs): - try: - return f(self, *args, **kwargs) - except UnicodeEncodeError: - raise exception.DBInvalidUnicodeParameter() - except sqla_exc.OperationalError as e: - _raise_if_db_connection_lost(e, self.bind) - _raise_if_deadlock_error(e, self.bind.dialect.name) - # NOTE(comstud): A lot of code is checking for OperationalError - # so let's not wrap it for now. - raise - # note(boris-42): We should catch unique constraint violation and - # wrap it by our own DBDuplicateEntry exception. Unique constraint - # violation is wrapped by IntegrityError. - except sqla_exc.IntegrityError as e: - # note(boris-42): SqlAlchemy doesn't unify errors from different - # DBs so we must do this. Also in some tables (for example - # instance_types) there are more than one unique constraint. This - # means we should get names of columns, which values violate - # unique constraint, from error message. - _raise_if_duplicate_entry_error(e, self.bind.dialect.name) - raise exception.DBError(e) - except Exception as e: - LOG.exception(_LE('DB exception wrapped.')) - raise exception.DBError(e) - return _wrap - - -def _synchronous_switch_listener(dbapi_conn, connection_rec): - """Switch sqlite connections to non-synchronous mode.""" - dbapi_conn.execute("PRAGMA synchronous = OFF") - - -def _add_regexp_listener(dbapi_con, con_record): - """Add REGEXP function to sqlite connections.""" - - def regexp(expr, item): - reg = re.compile(expr) - return reg.search(six.text_type(item)) is not None - dbapi_con.create_function('regexp', 2, regexp) - - -def _thread_yield(dbapi_con, con_record): - """Ensure other greenthreads get a chance to be executed. - - If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will - execute instead of time.sleep(0). - Force a context switch. With common database backends (eg MySQLdb and - sqlite), there is no implicit yield caused by network I/O since they are - implemented by C libraries that eventlet cannot monkey patch. - """ - time.sleep(0) - - -def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): - """Ensures that MySQL and DB2 connections are alive. - - Borrowed from: - http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f - """ - cursor = dbapi_conn.cursor() - try: - ping_sql = 'select 1' - if engine.name == 'ibm_db_sa': - # DB2 requires a table expression - ping_sql = 'select 1 from (values (1)) AS t1' - cursor.execute(ping_sql) - except Exception as ex: - if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): - msg = _LW('Database server has gone away: %s') % ex - LOG.warning(msg) - raise sqla_exc.DisconnectionError(msg) - else: - raise - - -def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): - """Set engine mode to 'traditional'. - - Required to prevent silent truncates at insert or update operations - under MySQL. By default MySQL truncates inserted string if it longer - than a declared field just with warning. That is fraught with data - corruption. 
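As a hedged illustration (the connection URL and credentials are placeholders, and the call assumes the create_engine() wrapper defined later in this module rather than sqlalchemy.create_engine), TRADITIONAL mode would typically be requested roughly like so:

.. code:: python

    # Sketch only: URL and credentials are invented.
    from rack.openstack.common.db.sqlalchemy.session import create_engine

    engine = create_engine('mysql://user:secret@localhost/rack',
                           mysql_sql_mode='TRADITIONAL')

The deprecated spelling ``mysql_traditional_mode=True`` has the same effect, since the wrapper rewrites it to ``mysql_sql_mode='TRADITIONAL'``.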
- """ - _set_session_sql_mode(dbapi_con, connection_rec, - connection_proxy, 'TRADITIONAL') - - -def _set_session_sql_mode(dbapi_con, connection_rec, - connection_proxy, sql_mode=None): - """Set the sql_mode session variable. - - MySQL supports several server modes. The default is None, but sessions - may choose to enable server modes like TRADITIONAL, ANSI, - several STRICT_* modes and others. - - Note: passing in '' (empty string) for sql_mode clears - the SQL mode for the session, overriding a potentially set - server default. Passing in None (the default) makes this - a no-op, meaning if a server-side SQL mode is set, it still applies. - """ - cursor = dbapi_con.cursor() - if sql_mode is not None: - cursor.execute("SET SESSION sql_mode = %s", [sql_mode]) - - # Check against the real effective SQL mode. Even when unset by - # our own config, the server may still be operating in a specific - # SQL mode as set by the server configuration - cursor.execute("SHOW VARIABLES LIKE 'sql_mode'") - row = cursor.fetchone() - if row is None: - LOG.warning(_LW('Unable to detect effective SQL mode')) - return - realmode = row[1] - LOG.info(_LI('MySQL server mode set to %s') % realmode) - # 'TRADITIONAL' mode enables several other modes, so - # we need a substring match here - if not ('TRADITIONAL' in realmode.upper() or - 'STRICT_ALL_TABLES' in realmode.upper()): - LOG.warning(_LW("MySQL SQL mode is '%s', " - "consider enabling TRADITIONAL or STRICT_ALL_TABLES") - % realmode) - - -def _is_db_connection_error(args): - """Return True if error in connecting to db.""" - # NOTE(adam_g): This is currently MySQL specific and needs to be extended - # to support Postgres and others. - # For the db2, the error code is -30081 since the db2 is still not ready - conn_err_codes = ('2002', '2003', '2006', '2013', '-30081') - for err_code in conn_err_codes: - if args.find(err_code) != -1: - return True - return False - - -def _raise_if_db_connection_lost(error, engine): - # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor) - # requires connection and cursor in incoming parameters, - # but we have no possibility to create connection if DB - # is not available, so in such case reconnect fails. - # But is_disconnect() ignores these parameters, so it - # makes sense to pass to function None as placeholder - # instead of connection and cursor. 
- if engine.dialect.is_disconnect(error, None, None): - raise exception.DBConnectionError(error) - - -def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, - mysql_traditional_mode=False, idle_timeout=3600, - connection_debug=0, max_pool_size=None, max_overflow=None, - pool_timeout=None, sqlite_synchronous=True, - connection_trace=False, max_retries=10, retry_interval=10): - """Return a new SQLAlchemy engine.""" - - connection_dict = sqlalchemy.engine.url.make_url(sql_connection) - - engine_args = { - "pool_recycle": idle_timeout, - 'convert_unicode': True, - } - - logger = logging.getLogger('sqlalchemy.engine') - - # Map SQL debug level to Python log level - if connection_debug >= 100: - logger.setLevel(logging.DEBUG) - elif connection_debug >= 50: - logger.setLevel(logging.INFO) - else: - logger.setLevel(logging.WARNING) - - if "sqlite" in connection_dict.drivername: - if sqlite_fk: - engine_args["listeners"] = [SqliteForeignKeysListener()] - engine_args["poolclass"] = NullPool - - if sql_connection == "sqlite://": - engine_args["poolclass"] = StaticPool - engine_args["connect_args"] = {'check_same_thread': False} - else: - if max_pool_size is not None: - engine_args['pool_size'] = max_pool_size - if max_overflow is not None: - engine_args['max_overflow'] = max_overflow - if pool_timeout is not None: - engine_args['pool_timeout'] = pool_timeout - - engine = sqlalchemy.create_engine(sql_connection, **engine_args) - - sqlalchemy.event.listen(engine, 'checkin', _thread_yield) - - if engine.name in ['mysql', 'ibm_db_sa']: - ping_callback = functools.partial(_ping_listener, engine) - sqlalchemy.event.listen(engine, 'checkout', ping_callback) - if engine.name == 'mysql': - if mysql_traditional_mode: - mysql_sql_mode = 'TRADITIONAL' - if mysql_sql_mode: - mode_callback = functools.partial(_set_session_sql_mode, - sql_mode=mysql_sql_mode) - sqlalchemy.event.listen(engine, 'checkout', mode_callback) - elif 'sqlite' in connection_dict.drivername: - if not sqlite_synchronous: - sqlalchemy.event.listen(engine, 'connect', - _synchronous_switch_listener) - sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) - - if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb': - _patch_mysqldb_with_stacktrace_comments() - - try: - engine.connect() - except sqla_exc.OperationalError as e: - if not _is_db_connection_error(e.args[0]): - raise - - remaining = max_retries - if remaining == -1: - remaining = 'infinite' - while True: - msg = _LW('SQL connection failed. 
%s attempts left.') - LOG.warning(msg % remaining) - if remaining != 'infinite': - remaining -= 1 - time.sleep(retry_interval) - try: - engine.connect() - break - except sqla_exc.OperationalError as e: - if (remaining != 'infinite' and remaining == 0) or \ - not _is_db_connection_error(e.args[0]): - raise - return engine - - -class Query(sqlalchemy.orm.query.Query): - """Subclass of sqlalchemy.query with soft_delete() method.""" - def soft_delete(self, synchronize_session='evaluate'): - return self.update({'deleted': literal_column('id'), - 'updated_at': literal_column('updated_at'), - 'deleted_at': timeutils.utcnow()}, - synchronize_session=synchronize_session) - - -class Session(sqlalchemy.orm.session.Session): - """Custom Session class to avoid SqlAlchemy Session monkey patching.""" - @_wrap_db_error - def query(self, *args, **kwargs): - return super(Session, self).query(*args, **kwargs) - - @_wrap_db_error - def flush(self, *args, **kwargs): - return super(Session, self).flush(*args, **kwargs) - - @_wrap_db_error - def execute(self, *args, **kwargs): - return super(Session, self).execute(*args, **kwargs) - - -def get_maker(engine, autocommit=True, expire_on_commit=False): - """Return a SQLAlchemy sessionmaker using the given engine.""" - return sqlalchemy.orm.sessionmaker(bind=engine, - class_=Session, - autocommit=autocommit, - expire_on_commit=expire_on_commit, - query_cls=Query) - - -def _patch_mysqldb_with_stacktrace_comments(): - """Adds current stack trace as a comment in queries. - - Patches MySQLdb.cursors.BaseCursor._do_query. - """ - import MySQLdb.cursors - import traceback - - old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query - - def _do_query(self, q): - stack = '' - for filename, line, method, function in traceback.extract_stack(): - # exclude various common things from trace - if filename.endswith('session.py') and method == '_do_query': - continue - if filename.endswith('api.py') and method == 'wrapper': - continue - if filename.endswith('utils.py') and method == '_inner': - continue - if filename.endswith('exception.py') and method == '_wrap': - continue - # db/api is just a wrapper around db/sqlalchemy/api - if filename.endswith('db/api.py'): - continue - # only trace inside rack - index = filename.rfind('rack') - if index == -1: - continue - stack += "File:%s:%s Method:%s() Line:%s | " \ - % (filename[index:], line, method, function) - - # strip trailing " | " from stack - if stack: - stack = stack[:-3] - qq = "%s /* %s */" % (q, stack) - else: - qq = q - old_mysql_do_query(self, qq) - - setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) - - -class EngineFacade(object): - """A helper class for removing of global engine instances from rack.db. - - As a library, rack.db can't decide where to store/when to create engine - and sessionmaker instances, so this must be left for a target application. - - On the other hand, in order to simplify the adoption of rack.db changes, - we'll provide a helper class, which creates engine and sessionmaker - on its instantiation and provides get_engine()/get_session() methods - that are compatible with corresponding utility functions that currently - exist in target projects, e.g. in Nova. - - engine/sessionmaker instances will still be global (and they are meant to - be global), but they will be stored in the app context, rather that in the - rack.db context. - - Note: using of this helper is completely optional and you are encouraged to - integrate engine/sessionmaker instances into your apps any way you like - (e.g. 
one might want to bind a session to a request context). Two important - things to remember: - 1. An Engine instance is effectively a pool of DB connections, so it's - meant to be shared (and it's thread-safe). - 2. A Session instance is not meant to be shared and represents a DB - transactional context (i.e. it's not thread-safe). sessionmaker is - a factory of sessions. - - """ - - def __init__(self, sql_connection, - sqlite_fk=False, mysql_sql_mode=None, - autocommit=True, expire_on_commit=False, **kwargs): - """Initialize engine and sessionmaker instances. - - :param sqlite_fk: enable foreign keys in SQLite - :type sqlite_fk: bool - - :param mysql_sql_mode: set SQL mode in MySQL - :type mysql_sql_mode: string - - :param autocommit: use autocommit mode for created Session instances - :type autocommit: bool - - :param expire_on_commit: expire session objects on commit - :type expire_on_commit: bool - - Keyword arguments: - - :keyword idle_timeout: timeout before idle sql connections are reaped - (defaults to 3600) - :keyword connection_debug: verbosity of SQL debugging information. - 0=None, 100=Everything (defaults to 0) - :keyword max_pool_size: maximum number of SQL connections to keep open - in a pool (defaults to SQLAlchemy settings) - :keyword max_overflow: if set, use this value for max_overflow with - sqlalchemy (defaults to SQLAlchemy settings) - :keyword pool_timeout: if set, use this value for pool_timeout with - sqlalchemy (defaults to SQLAlchemy settings) - :keyword sqlite_synchronous: if True, SQLite uses synchronous mode - (defaults to True) - :keyword connection_trace: add python stack traces to SQL as comment - strings (defaults to False) - :keyword max_retries: maximum db connection retries during startup. - (setting -1 implies an infinite retry count) - (defaults to 10) - :keyword retry_interval: interval between retries of opening a sql - connection (defaults to 10) - - """ - - super(EngineFacade, self).__init__() - - self._engine = create_engine( - sql_connection=sql_connection, - sqlite_fk=sqlite_fk, - mysql_sql_mode=mysql_sql_mode, - idle_timeout=kwargs.get('idle_timeout', 3600), - connection_debug=kwargs.get('connection_debug', 0), - max_pool_size=kwargs.get('max_pool_size'), - max_overflow=kwargs.get('max_overflow'), - pool_timeout=kwargs.get('pool_timeout'), - sqlite_synchronous=kwargs.get('sqlite_synchronous', True), - connection_trace=kwargs.get('connection_trace', False), - max_retries=kwargs.get('max_retries', 10), - retry_interval=kwargs.get('retry_interval', 10)) - self._session_maker = get_maker( - engine=self._engine, - autocommit=autocommit, - expire_on_commit=expire_on_commit) - - def get_engine(self): - """Get the engine instance (note, that it's shared).""" - - return self._engine - - def get_session(self, **kwargs): - """Get a Session instance. - - If passed, keyword arguments values override the ones used when the - sessionmaker instance was created. 
-
-        :keyword autocommit: use autocommit mode for created Session instances
-        :type autocommit: bool
-
-        :keyword expire_on_commit: expire session objects on commit
-        :type expire_on_commit: bool
-
-        """
-
-        # NOTE: iterate over a copy of the keys, since deleting from
-        # kwargs while iterating over it directly raises a RuntimeError.
-        for arg in list(kwargs):
-            if arg not in ('autocommit', 'expire_on_commit'):
-                del kwargs[arg]
-
-        return self._session_maker(**kwargs)
diff --git a/rack/openstack/common/db/sqlalchemy/test_base.py b/rack/openstack/common/db/sqlalchemy/test_base.py
deleted file mode 100644
index a129da4..0000000
--- a/rack/openstack/common/db/sqlalchemy/test_base.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import functools
-import os
-
-import fixtures
-import six
-
-from rack.openstack.common.db.sqlalchemy import session
-from rack.openstack.common.db.sqlalchemy import utils
-from rack.openstack.common import test
-
-
-class DbFixture(fixtures.Fixture):
-    """Basic database fixture.
-
-    Allows running tests on various db backends, such as SQLite, MySQL
-    and PostgreSQL. The sqlite backend is used by default. To override
-    it, set the OS_TEST_DBAPI_CONNECTION environment variable to a
-    database URI with admin credentials for the desired backend.
-    """
-
-    def _get_uri(self):
-        return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
-
-    def __init__(self, test):
-        super(DbFixture, self).__init__()
-
-        self.test = test
-
-    def setUp(self):
-        super(DbFixture, self).setUp()
-
-        self.test.engine = session.create_engine(self._get_uri())
-        self.test.sessionmaker = session.get_maker(self.test.engine)
-        self.addCleanup(self.test.engine.dispose)
-
-
-class DbTestCase(test.BaseTestCase):
-    """Base class for testing DB code.
-
-    Uses `DbFixture`. Intended to be the main database test case,
-    running all tests on a given backend with a user-defined uri.
-    Backend-specific tests should be decorated with the
-    `backend_specific` decorator.
-    """
-
-    FIXTURE = DbFixture
-
-    def setUp(self):
-        super(DbTestCase, self).setUp()
-        self.useFixture(self.FIXTURE(self))
-
-
-ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
-
-
-def backend_specific(*dialects):
-    """Decorator to skip backend-specific tests on inappropriate engines.
-
-    ::dialects: list of dialect names under which the test will be launched.
-    """
-    def wrap(f):
-        @functools.wraps(f)
-        def ins_wrap(self):
-            if not set(dialects).issubset(ALLOWED_DIALECTS):
-                raise ValueError(
-                    "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
-            if self.engine.name not in dialects:
-                msg = ('The test "%s" can be run '
-                       'only on %s. Current engine is %s.')
-                args = (f.__name__, ' '.join(dialects), self.engine.name)
-                self.skip(msg % args)
-            else:
-                return f(self)
-        return ins_wrap
-    return wrap
-
-
-@six.add_metaclass(abc.ABCMeta)
-class OpportunisticFixture(DbFixture):
-    """Base fixture to use default CI databases.
-
-    These databases exist in the OpenStack CI infrastructure, but for
-    correct functioning in a local environment they must be created
-    manually.
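For illustration, a minimal sketch of how `DbTestCase` and `backend_specific` might be combined (the test class and assertions are invented; it assumes the module is importable from the tree as shown):

.. code:: python

    from rack.openstack.common.db.sqlalchemy import test_base


    class DbApiTestCase(test_base.DbTestCase):

        def test_works_everywhere(self):
            # Runs against whatever backend OS_TEST_DBAPI_CONNECTION selects.
            self.assertIsNotNone(self.engine)

        @test_base.backend_specific('mysql', 'postgresql')
        def test_server_side_feature(self):
            # Skipped automatically when the current engine is sqlite.
            self.assertIn(self.engine.name, ('mysql', 'postgresql'))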
- """ - - DRIVER = abc.abstractproperty(lambda: None) - DBNAME = PASSWORD = USERNAME = 'openstack_citest' - - def _get_uri(self): - return utils.get_connect_string(backend=self.DRIVER, - user=self.USERNAME, - passwd=self.PASSWORD, - database=self.DBNAME) - - -@six.add_metaclass(abc.ABCMeta) -class OpportunisticTestCase(DbTestCase): - """Base test case to use default CI databases. - - The subclasses of the test case are running only when openstack_citest - database is available otherwise a tests will be skipped. - """ - - FIXTURE = abc.abstractproperty(lambda: None) - - def setUp(self): - credentials = { - 'backend': self.FIXTURE.DRIVER, - 'user': self.FIXTURE.USERNAME, - 'passwd': self.FIXTURE.PASSWORD, - 'database': self.FIXTURE.DBNAME} - - if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials): - msg = '%s backend is not available.' % self.FIXTURE.DRIVER - return self.skip(msg) - - super(OpportunisticTestCase, self).setUp() - - -class MySQLOpportunisticFixture(OpportunisticFixture): - DRIVER = 'mysql' - - -class PostgreSQLOpportunisticFixture(OpportunisticFixture): - DRIVER = 'postgresql' - - -class MySQLOpportunisticTestCase(OpportunisticTestCase): - FIXTURE = MySQLOpportunisticFixture - - -class PostgreSQLOpportunisticTestCase(OpportunisticTestCase): - FIXTURE = PostgreSQLOpportunisticFixture diff --git a/rack/openstack/common/db/sqlalchemy/test_migrations.py b/rack/openstack/common/db/sqlalchemy/test_migrations.py deleted file mode 100644 index cccacad..0000000 --- a/rack/openstack/common/db/sqlalchemy/test_migrations.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright 2012-2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import functools
-import logging
-import os
-import subprocess
-
-import lockfile
-from six import moves
-from six.moves.urllib import parse
-import sqlalchemy
-import sqlalchemy.exc
-
-from rack.openstack.common.db.sqlalchemy import utils
-from rack.openstack.common.gettextutils import _LE
-from rack.openstack.common import test
-
-LOG = logging.getLogger(__name__)
-
-
-def _have_mysql(user, passwd, database):
-    present = os.environ.get('TEST_MYSQL_PRESENT')
-    if present is None:
-        return utils.is_backend_avail(backend='mysql',
-                                      user=user,
-                                      passwd=passwd,
-                                      database=database)
-    return present.lower() in ('', 'true')
-
-
-def _have_postgresql(user, passwd, database):
-    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
-    if present is None:
-        return utils.is_backend_avail(backend='postgres',
-                                      user=user,
-                                      passwd=passwd,
-                                      database=database)
-    return present.lower() in ('', 'true')
-
-
-def _set_db_lock(lock_path=None, lock_prefix=None):
-    def decorator(f):
-        @functools.wraps(f)
-        def wrapper(*args, **kwargs):
-            try:
-                path = lock_path or os.environ.get("NOVA_LOCK_PATH")
-                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
-                with lock:
-                    LOG.debug('Got lock "%s"' % f.__name__)
-                    return f(*args, **kwargs)
-            finally:
-                LOG.debug('Lock released "%s"' % f.__name__)
-        return wrapper
-    return decorator
-
-
-class BaseMigrationTestCase(test.BaseTestCase):
-    """Base class for testing migration utils."""
-
-    def __init__(self, *args, **kwargs):
-        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
-
-        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
-                                                'test_migrations.conf')
-        # Test machines can set the TEST_MIGRATIONS_CONF variable
-        # to override the location of the config file for migration testing
-        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
-                                               self.DEFAULT_CONFIG_FILE)
-        self.test_databases = {}
-        self.migration_api = None
-
-    def setUp(self):
-        super(BaseMigrationTestCase, self).setUp()
-
-        # Load test databases from the config file. Only do this
-        # once. No need to re-run this on each test...
-        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
-        if os.path.exists(self.CONFIG_FILE_PATH):
-            cp = moves.configparser.RawConfigParser()
-            try:
-                cp.read(self.CONFIG_FILE_PATH)
-                defaults = cp.defaults()
-                for key, value in defaults.items():
-                    self.test_databases[key] = value
-            except moves.configparser.ParsingError as e:
-                self.fail("Failed to read test_migrations.conf config "
-                          "file. Got error: %s" % e)
-        else:
-            self.fail("Failed to find test_migrations.conf config "
-                      "file.")
-
-        self.engines = {}
-        for key, value in self.test_databases.items():
-            self.engines[key] = sqlalchemy.create_engine(value)
-
-        # We start each test case with a completely blank slate.
- self._reset_databases() - - def tearDown(self): - # We destroy the test data store between each test case, - # and recreate it, which ensures that we have no side-effects - # from the tests - self._reset_databases() - super(BaseMigrationTestCase, self).tearDown() - - def execute_cmd(self, cmd=None): - process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - output = process.communicate()[0] - LOG.debug(output) - self.assertEqual(0, process.returncode, - "Failed to run: %s\n%s" % (cmd, output)) - - def _reset_pg(self, conn_pieces): - (user, - password, - database, - host) = utils.get_db_connection_info(conn_pieces) - os.environ['PGPASSWORD'] = password - os.environ['PGUSER'] = user - # note(boris-42): We must create and drop database, we can't - # drop database which we have connected to, so for such - # operations there is a special database template1. - sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" - " '%(sql)s' -d template1") - - sql = ("drop database if exists %s;") % database - droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql} - self.execute_cmd(droptable) - - sql = ("create database %s;") % database - createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql} - self.execute_cmd(createtable) - - os.unsetenv('PGPASSWORD') - os.unsetenv('PGUSER') - - @_set_db_lock(lock_prefix='migration_tests-') - def _reset_databases(self): - for key, engine in self.engines.items(): - conn_string = self.test_databases[key] - conn_pieces = parse.urlparse(conn_string) - engine.dispose() - if conn_string.startswith('sqlite'): - # We can just delete the SQLite database, which is - # the easiest and cleanest solution - db_path = conn_pieces.path.strip('/') - if os.path.exists(db_path): - os.unlink(db_path) - # No need to recreate the SQLite DB. SQLite will - # create it for us if it's not there... - elif conn_string.startswith('mysql'): - # We can execute the MySQL client to destroy and re-create - # the MYSQL database, which is easier and less error-prone - # than using SQLAlchemy to do this via MetaData...trust me. - (user, password, database, host) = \ - utils.get_db_connection_info(conn_pieces) - sql = ("drop database if exists %(db)s; " - "create database %(db)s;") % {'db': database} - cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " - "-e \"%(sql)s\"") % {'user': user, 'password': password, - 'host': host, 'sql': sql} - self.execute_cmd(cmd) - elif conn_string.startswith('postgresql'): - self._reset_pg(conn_pieces) - - -class WalkVersionsMixin(object): - def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): - # Determine latest version script from the repo, then - # upgrade from 1 through to the latest, with no data - # in the databases. This just checks that the schema itself - # upgrades successfully. - - # Place the database under version control - self.migration_api.version_control(engine, self.REPOSITORY, - self.INIT_VERSION) - self.assertEqual(self.INIT_VERSION, - self.migration_api.db_version(engine, - self.REPOSITORY)) - - LOG.debug('latest version is %s' % self.REPOSITORY.latest) - versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) - - for version in versions: - # upgrade -> downgrade -> upgrade - self._migrate_up(engine, version, with_data=True) - if snake_walk: - downgraded = self._migrate_down( - engine, version - 1, with_data=True) - if downgraded: - self._migrate_up(engine, version) - - if downgrade: - # Now walk it back down to 0 from the latest, testing - # the downgrade paths. 
- for version in reversed(versions): - # downgrade -> upgrade -> downgrade - downgraded = self._migrate_down(engine, version - 1) - - if snake_walk and downgraded: - self._migrate_up(engine, version) - self._migrate_down(engine, version - 1) - - def _migrate_down(self, engine, version, with_data=False): - try: - self.migration_api.downgrade(engine, self.REPOSITORY, version) - except NotImplementedError: - # NOTE(sirp): some migrations, namely release-level - # migrations, don't support a downgrade. - return False - - self.assertEqual( - version, self.migration_api.db_version(engine, self.REPOSITORY)) - - # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' - # version). So if we have any downgrade checks, they need to be run for - # the previous (higher numbered) migration. - if with_data: - post_downgrade = getattr( - self, "_post_downgrade_%03d" % (version + 1), None) - if post_downgrade: - post_downgrade(engine) - - return True - - def _migrate_up(self, engine, version, with_data=False): - """migrate up to a new version of the db. - - We allow for data insertion and post checks at every - migration version with special _pre_upgrade_### and - _check_### functions in the main test. - """ - # NOTE(sdague): try block is here because it's impossible to debug - # where a failed data migration happens otherwise - try: - if with_data: - data = None - pre_upgrade = getattr( - self, "_pre_upgrade_%03d" % version, None) - if pre_upgrade: - data = pre_upgrade(engine) - - self.migration_api.upgrade(engine, self.REPOSITORY, version) - self.assertEqual(version, - self.migration_api.db_version(engine, - self.REPOSITORY)) - if with_data: - check = getattr(self, "_check_%03d" % version, None) - if check: - check(engine, data) - except Exception: - LOG.error(_LE("Failed to migrate to version %s on engine %s") % - (version, engine)) - raise diff --git a/rack/openstack/common/db/sqlalchemy/utils.py b/rack/openstack/common/db/sqlalchemy/utils.py deleted file mode 100644 index 0561ee3..0000000 --- a/rack/openstack/common/db/sqlalchemy/utils.py +++ /dev/null @@ -1,638 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2010-2011 OpenStack Foundation. -# Copyright 2012 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
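For illustration, a hedged sketch of how a project might wire the mixin above into a concrete test case (the repository path, INIT_VERSION, and class name are invented, and a test_migrations.conf listing the target databases is assumed to exist):

.. code:: python

    from migrate.versioning import api as versioning_api
    from migrate.versioning.repository import Repository

    from rack.openstack.common.db.sqlalchemy import test_migrations


    class TestMigrationsWalk(test_migrations.BaseMigrationTestCase,
                             test_migrations.WalkVersionsMixin):

        # Hypothetical repository location and starting version.
        REPOSITORY = Repository('rack/db/sqlalchemy/migrate_repo')
        INIT_VERSION = 0

        def setUp(self):
            super(TestMigrationsWalk, self).setUp()
            # The mixin calls version_control/db_version/upgrade/downgrade
            # on self.migration_api.
            self.migration_api = versioning_api

        def test_walk_versions(self):
            for engine in self.engines.values():
                self._walk_versions(engine, snake_walk=True, downgrade=True)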
- -import logging -import re - -from migrate.changeset import UniqueConstraint -import sqlalchemy -from sqlalchemy import Boolean -from sqlalchemy import CheckConstraint -from sqlalchemy import Column -from sqlalchemy.engine import reflection -from sqlalchemy.ext.compiler import compiles -from sqlalchemy import func -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import or_ -from sqlalchemy.sql.expression import literal_column -from sqlalchemy.sql.expression import UpdateBase -from sqlalchemy.sql import select -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy.types import NullType - -from rack.openstack.common import context as request_context -from rack.openstack.common.db.sqlalchemy import models -from rack.openstack.common.gettextutils import _, _LI, _LW -from rack.openstack.common import timeutils - - -LOG = logging.getLogger(__name__) - -_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+") - - -def sanitize_db_url(url): - match = _DBURL_REGEX.match(url) - if match: - return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):]) - return url - - -class InvalidSortKey(Exception): - message = _("Sort key supplied was not valid.") - - -# copy from glance/db/sqlalchemy/api.py -def paginate_query(query, model, limit, sort_keys, marker=None, - sort_dir=None, sort_dirs=None): - """Returns a query with sorting / pagination criteria added. - - Pagination works by requiring a unique sort_key, specified by sort_keys. - (If sort_keys is not unique, then we risk looping through values.) - We use the last row in the previous page as the 'marker' for pagination. - So we must return values that follow the passed marker in the order. - With a single-valued sort_key, this would be easy: sort_key > X. - With a compound-values sort_key, (k1, k2, k3) we must do this to repeat - the lexicographical ordering: - (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) - - We also have to cope with different sort_directions. - - Typically, the id of the last row is used as the client-facing pagination - marker, then the actual marker object must be fetched from the db and - passed in to us as marker. - - :param query: the query object to which we should add paging/sorting - :param model: the ORM model class - :param limit: maximum number of items to return - :param sort_keys: array of attributes by which results should be sorted - :param marker: the last item of the previous page; we returns the next - results after this value. - :param sort_dir: direction in which results should be sorted (asc, desc) - :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys - - :rtype: sqlalchemy.orm.query.Query - :return: The query with sorting/pagination added. 
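A hedged usage sketch (the model, context, session, and marker row are placeholders) of the pagination scheme described above, using 'id' as the unique tie-breaker key:

.. code:: python

    # Sketch only: models.Instance, context, session and the marker row
    # are hypothetical.
    query = model_query(context, models.Instance, session=session)
    page = paginate_query(query, models.Instance, limit=50,
                          sort_keys=['created_at', 'id'],
                          marker=last_row_of_previous_page,
                          sort_dirs=['desc', 'asc'])
    results = page.all()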
- """ - - if 'id' not in sort_keys: - # TODO(justinsb): If this ever gives a false-positive, check - # the actual primary key, rather than assuming its id - LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) - - assert(not (sort_dir and sort_dirs)) - - # Default the sort direction to ascending - if sort_dirs is None and sort_dir is None: - sort_dir = 'asc' - - # Ensure a per-column sort direction - if sort_dirs is None: - sort_dirs = [sort_dir for _sort_key in sort_keys] - - assert(len(sort_dirs) == len(sort_keys)) - - # Add sorting - for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): - try: - sort_dir_func = { - 'asc': sqlalchemy.asc, - 'desc': sqlalchemy.desc, - }[current_sort_dir] - except KeyError: - raise ValueError(_("Unknown sort direction, " - "must be 'desc' or 'asc'")) - try: - sort_key_attr = getattr(model, current_sort_key) - except AttributeError: - raise InvalidSortKey() - query = query.order_by(sort_dir_func(sort_key_attr)) - - # Add pagination - if marker is not None: - marker_values = [] - for sort_key in sort_keys: - v = getattr(marker, sort_key) - marker_values.append(v) - - # Build up an array of sort criteria as in the docstring - criteria_list = [] - for i in range(len(sort_keys)): - crit_attrs = [] - for j in range(i): - model_attr = getattr(model, sort_keys[j]) - crit_attrs.append((model_attr == marker_values[j])) - - model_attr = getattr(model, sort_keys[i]) - if sort_dirs[i] == 'desc': - crit_attrs.append((model_attr < marker_values[i])) - else: - crit_attrs.append((model_attr > marker_values[i])) - - criteria = sqlalchemy.sql.and_(*crit_attrs) - criteria_list.append(criteria) - - f = sqlalchemy.sql.or_(*criteria_list) - query = query.filter(f) - - if limit is not None: - query = query.limit(limit) - - return query - - -def _read_deleted_filter(query, db_model, read_deleted): - if 'deleted' not in db_model.__table__.columns: - raise ValueError(_("There is no `deleted` column in `%s` table. " - "Project doesn't use soft-deleted feature.") - % db_model.__name__) - - default_deleted_value = db_model.__table__.c.deleted.default.arg - if read_deleted == 'no': - query = query.filter(db_model.deleted == default_deleted_value) - elif read_deleted == 'yes': - pass # omit the filter to include deleted and active - elif read_deleted == 'only': - query = query.filter(db_model.deleted != default_deleted_value) - else: - raise ValueError(_("Unrecognized read_deleted value '%s'") - % read_deleted) - return query - - -def _project_filter(query, db_model, context, project_only): - if project_only and 'project_id' not in db_model.__table__.columns: - raise ValueError(_("There is no `project_id` column in `%s` table.") - % db_model.__name__) - - if request_context.is_user_context(context) and project_only: - if project_only == 'allow_none': - is_none = None - query = query.filter(or_(db_model.project_id == context.project_id, - db_model.project_id == is_none)) - else: - query = query.filter(db_model.project_id == context.project_id) - - return query - - -def model_query(context, model, session, args=None, project_only=False, - read_deleted=None): - """Query helper that accounts for context's `read_deleted` field. - - :param context: context to query under - - :param model: Model to query. Must be a subclass of ModelBase. - :type model: models.ModelBase - - :param session: The session to use. - :type session: sqlalchemy.orm.session.Session - - :param args: Arguments to query. If None - model is used. 
-    :type args: tuple
-
-    :param project_only: If present and context is user-type, then restrict
-                         query to match the context's project_id. If set to
-                         'allow_none', restriction includes project_id = None.
-    :type project_only: bool
-
-    :param read_deleted: If present, overrides context's read_deleted field.
-    :type read_deleted: str, one of 'no', 'yes' or 'only'
-
-    Usage:
-        result = (utils.model_query(context, models.Instance, session=session)
-                       .filter_by(uuid=instance_uuid)
-                       .all())
-
-        query = utils.model_query(
-            context, Node,
-            session=session,
-            args=(func.count(Node.id), func.sum(Node.ram))
-        ).filter_by(project_id=project_id)
-    """
-
-    if not read_deleted:
-        if hasattr(context, 'read_deleted'):
-            # NOTE(viktors): some projects use `read_deleted` attribute in
-            #                their contexts instead of `show_deleted`.
-            read_deleted = context.read_deleted
-        else:
-            read_deleted = context.show_deleted
-
-    if not issubclass(model, models.ModelBase):
-        raise TypeError(_("model should be a subclass of ModelBase"))
-
-    query = session.query(model) if not args else session.query(*args)
-    query = _read_deleted_filter(query, model, read_deleted)
-    query = _project_filter(query, model, context, project_only)
-
-    return query
-
-
-def get_table(engine, name):
-    """Returns an sqlalchemy table dynamically from the db.
-
-    Needed because the models don't work for us in migrations,
-    as the models will be far out of sync with the current data.
-    """
-    metadata = MetaData()
-    metadata.bind = engine
-    return Table(name, metadata, autoload=True)
-
-
-class InsertFromSelect(UpdateBase):
-    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
-    def __init__(self, table, select):
-        self.table = table
-        self.select = select
-
-
-@compiles(InsertFromSelect)
-def visit_insert_from_select(element, compiler, **kw):
-    """Form the `INSERT INTO table (SELECT ... )` statement."""
-    return "INSERT INTO %s %s" % (
-        compiler.process(element.table, asfrom=True),
-        compiler.process(element.select))
-
-
-class ColumnError(Exception):
-    """Error raised when no column or an invalid column is found."""
-
-
-def _get_not_supported_column(col_name_col_instance, column_name):
-    try:
-        column = col_name_col_instance[column_name]
-    except KeyError:
-        msg = _("Please specify column %s in the col_name_col_instance "
-                "param. It is required because the column has a type "
-                "unsupported by sqlite.")
-        raise ColumnError(msg % column_name)
-
-    if not isinstance(column, Column):
-        msg = _("col_name_col_instance param has the wrong type of "
-                "column instance for column %s. It should be an instance "
-                "of sqlalchemy.Column.")
-        raise ColumnError(msg % column_name)
-    return column
-
-
-def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
-                           **col_name_col_instance):
-    """Drop unique constraint from table.
-
-    This method drops a UC from a table and works for mysql, postgresql and
-    sqlite. In mysql and postgresql we are able to use the "alter table"
-    construction. Sqlalchemy doesn't support some sqlite column types and
-    replaces their type with NullType in metadata. We process these columns
-    and replace NullType with the correct column type.
-
-    :param migrate_engine: sqlalchemy engine
-    :param table_name: name of the table that contains the unique constraint.
-    :param uc_name: name of the unique constraint that will be dropped.
-    :param columns: columns that are in the unique constraint.
-    :param col_name_col_instance: contains pair column_name=column_instance.
-                                  column_instance is an instance of Column.
-                                  These params are required only for columns
-                                  that have types unsupported by sqlite, for
-                                  example BigInteger.
-    """
-
-    meta = MetaData()
-    meta.bind = migrate_engine
-    t = Table(table_name, meta, autoload=True)
-
-    if migrate_engine.name == "sqlite":
-        override_cols = [
-            _get_not_supported_column(col_name_col_instance, col.name)
-            for col in t.columns
-            if isinstance(col.type, NullType)
-        ]
-        for col in override_cols:
-            t.columns.replace(col)
-
-    uc = UniqueConstraint(*columns, table=t, name=uc_name)
-    uc.drop()
-
-
-def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
-                                          use_soft_delete, *uc_column_names):
-    """Drop all old rows having the same values for columns in uc_columns.
-
-    This method drops (or marks as `deleted` if use_soft_delete is True) old
-    duplicate rows from the table with name `table_name`.
-
-    :param migrate_engine: Sqlalchemy engine
-    :param table_name: Table with duplicates
-    :param use_soft_delete: If True - values will be marked as `deleted`,
-                            if False - values will be removed from table
-    :param uc_column_names: Unique constraint columns
-    """
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    table = Table(table_name, meta, autoload=True)
-    columns_for_group_by = [table.c[name] for name in uc_column_names]
-
-    columns_for_select = [func.max(table.c.id)]
-    columns_for_select.extend(columns_for_group_by)
-
-    duplicated_rows_select = select(columns_for_select,
-                                    group_by=columns_for_group_by,
-                                    having=func.count(table.c.id) > 1)
-
-    for row in migrate_engine.execute(duplicated_rows_select):
-        # NOTE(boris-42): Do not remove the row that has the biggest ID.
-        delete_condition = table.c.id != row[0]
-        is_none = None  # workaround for pyflakes
-        delete_condition &= table.c.deleted_at == is_none
-        for name in uc_column_names:
-            delete_condition &= table.c[name] == row[name]
-
-        rows_to_delete_select = select([table.c.id]).where(delete_condition)
-        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
-            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
-                         "%(table)s") % dict(id=row[0], table=table_name))
-
-        if use_soft_delete:
-            delete_statement = table.update().\
-                where(delete_condition).\
-                values({
-                    'deleted': literal_column('id'),
-                    'updated_at': literal_column('updated_at'),
-                    'deleted_at': timeutils.utcnow()
-                })
-        else:
-            delete_statement = table.delete().where(delete_condition)
-        migrate_engine.execute(delete_statement)
-
-
-def _get_default_deleted_value(table):
-    if isinstance(table.c.id.type, Integer):
-        return 0
-    if isinstance(table.c.id.type, String):
-        return ""
-    raise ColumnError(_("Unsupported id column type"))
-
-
-def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
-    table = get_table(migrate_engine, table_name)
-
-    insp = reflection.Inspector.from_engine(migrate_engine)
-    real_indexes = insp.get_indexes(table_name)
-    existing_index_names = dict(
-        [(index['name'], index['column_names']) for index in real_indexes])
-
-    # NOTE(boris-42): Restore indexes on the `deleted` column
-    for index in indexes:
-        if 'deleted' not in index['column_names']:
-            continue
-        name = index['name']
-        if name in existing_index_names:
-            column_names = [table.c[c] for c in existing_index_names[name]]
-            old_index = Index(name, *column_names, unique=index["unique"])
-            old_index.drop(migrate_engine)
-
-        column_names = [table.c[c] for c in index['column_names']]
-        new_index = Index(index["name"], *column_names, unique=index["unique"])
-        new_index.create(migrate_engine)
-
-
-def change_deleted_column_type_to_boolean(migrate_engine, table_name,
-                                          **col_name_col_instance):
-    if migrate_engine.name == "sqlite":
-        return _change_deleted_column_type_to_boolean_sqlite(
-            migrate_engine, table_name, **col_name_col_instance)
-    insp = reflection.Inspector.from_engine(migrate_engine)
-    indexes = insp.get_indexes(table_name)
-
-    table = get_table(migrate_engine, table_name)
-
-    old_deleted = Column('old_deleted', Boolean, default=False)
-    old_deleted.create(table, populate_default=False)
-
-    table.update().\
-        where(table.c.deleted == table.c.id).\
-        values(old_deleted=True).\
-        execute()
-
-    table.c.deleted.drop()
-    table.c.old_deleted.alter(name="deleted")
-
-    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
-
-
-def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
-                                                  **col_name_col_instance):
-    insp = reflection.Inspector.from_engine(migrate_engine)
-    table = get_table(migrate_engine, table_name)
-
-    columns = []
-    for column in table.columns:
-        column_copy = None
-        if column.name != "deleted":
-            if isinstance(column.type, NullType):
-                column_copy = _get_not_supported_column(col_name_col_instance,
-                                                        column.name)
-            else:
-                column_copy = column.copy()
-        else:
-            column_copy = Column('deleted', Boolean, default=0)
-        columns.append(column_copy)
-
-    constraints = [constraint.copy() for constraint in table.constraints]
-
-    meta = table.metadata
-    new_table = Table(table_name + "__tmp__", meta,
-                      *(columns + constraints))
-    new_table.create()
-
-    indexes = []
-    for index in insp.get_indexes(table_name):
-        column_names = [new_table.c[c] for c in index['column_names']]
-        indexes.append(Index(index["name"], *column_names,
-                             unique=index["unique"]))
-
-    c_select = []
-    for c in table.c:
-        if c.name != "deleted":
-            c_select.append(c)
-        else:
-            c_select.append(table.c.deleted == table.c.id)
-
-    ins = InsertFromSelect(new_table, select(c_select))
-    migrate_engine.execute(ins)
-
-    table.drop()
-    [index.create(migrate_engine) for index in indexes]
-
-    new_table.rename(table_name)
-    new_table.update().\
-        where(new_table.c.deleted == new_table.c.id).\
-        values(deleted=True).\
-        execute()
-
-
-def change_deleted_column_type_to_id_type(migrate_engine, table_name,
-                                          **col_name_col_instance):
-    if migrate_engine.name == "sqlite":
-        return _change_deleted_column_type_to_id_type_sqlite(
-            migrate_engine, table_name, **col_name_col_instance)
-    insp = reflection.Inspector.from_engine(migrate_engine)
-    indexes = insp.get_indexes(table_name)
-
-    table = get_table(migrate_engine, table_name)
-
-    new_deleted = Column('new_deleted', table.c.id.type,
-                         default=_get_default_deleted_value(table))
-    new_deleted.create(table, populate_default=True)
-
-    deleted = True  # workaround for pyflakes
-    table.update().\
-        where(table.c.deleted == deleted).\
-        values(new_deleted=table.c.id).\
-        execute()
-    table.c.deleted.drop()
-    table.c.new_deleted.alter(name="deleted")
-
-    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
-
-
-def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
-                                                  **col_name_col_instance):
-    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
-    #                 constraints in a sqlite DB, and our `deleted` column has
-    #                 2 check constraints. So there is only one way to remove
-    #                 these constraints:
-    #                 1) Create a new table with the same columns, constraints
-    #                    and indexes (except the deleted column).
-    #                 2) Copy all data from the old table to the new one.
-    #                 3) Drop the old table.
-    #                 4) Rename the new table to the old table name.
-    insp = reflection.Inspector.from_engine(migrate_engine)
-    meta = MetaData(bind=migrate_engine)
-    table = Table(table_name, meta, autoload=True)
-    default_deleted_value = _get_default_deleted_value(table)
-
-    columns = []
-    for column in table.columns:
-        column_copy = None
-        if column.name != "deleted":
-            if isinstance(column.type, NullType):
-                column_copy = _get_not_supported_column(col_name_col_instance,
-                                                        column.name)
-            else:
-                column_copy = column.copy()
-        else:
-            column_copy = Column('deleted', table.c.id.type,
-                                 default=default_deleted_value)
-        columns.append(column_copy)
-
-    def is_deleted_column_constraint(constraint):
-        # NOTE(boris-42): There is no other way to check whether a
-        #                 CheckConstraint is associated with the deleted
-        #                 column.
-        if not isinstance(constraint, CheckConstraint):
-            return False
-        sqltext = str(constraint.sqltext)
-        return (sqltext.endswith("deleted in (0, 1)") or
-                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
-
-    constraints = []
-    for constraint in table.constraints:
-        if not is_deleted_column_constraint(constraint):
-            constraints.append(constraint.copy())
-
-    new_table = Table(table_name + "__tmp__", meta,
-                      *(columns + constraints))
-    new_table.create()
-
-    indexes = []
-    for index in insp.get_indexes(table_name):
-        column_names = [new_table.c[c] for c in index['column_names']]
-        indexes.append(Index(index["name"], *column_names,
-                             unique=index["unique"]))
-
-    ins = InsertFromSelect(new_table, table.select())
-    migrate_engine.execute(ins)
-
-    table.drop()
-    [index.create(migrate_engine) for index in indexes]
-
-    new_table.rename(table_name)
-    deleted = True  # workaround for pyflakes
-    new_table.update().\
-        where(new_table.c.deleted == deleted).\
-        values(deleted=new_table.c.id).\
-        execute()
-
-    # NOTE(boris-42): Fix the value of the deleted column: False -> "" or 0.
-    deleted = False  # workaround for pyflakes
-    new_table.update().\
-        where(new_table.c.deleted == deleted).\
-        values(deleted=default_deleted_value).\
-        execute()
-
-
-def get_connect_string(backend, database, user=None, passwd=None):
-    """Get a database connection string.
-
-    Try to get a connection with a very specific set of values; if we get
-    these, then we'll run the tests, otherwise they are skipped.
-    """
-    args = {'backend': backend,
-            'user': user,
-            'passwd': passwd,
-            'database': database}
-    if backend == 'sqlite':
-        template = '%(backend)s:///%(database)s'
-    else:
-        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
-    return template % args
-
-
-def is_backend_avail(backend, database, user=None, passwd=None):
-    try:
-        connect_uri = get_connect_string(backend=backend,
-                                         database=database,
-                                         user=user,
-                                         passwd=passwd)
-        engine = sqlalchemy.create_engine(connect_uri)
-        connection = engine.connect()
-    except Exception:
-        # intentionally catch all to handle exceptions even if we don't
-        # have any backend code loaded.
-        return False
-    else:
-        connection.close()
-        engine.dispose()
-        return True
-
-
-def get_db_connection_info(conn_pieces):
-    database = conn_pieces.path.strip('/')
-    loc_pieces = conn_pieces.netloc.split('@')
-    host = loc_pieces[1]
-
-    auth_pieces = loc_pieces[0].split(':')
-    user = auth_pieces[0]
-    password = ""
-    if len(auth_pieces) > 1:
-        password = auth_pieces[1].strip()
-
-    return (user, password, database, host)
diff --git a/rack/openstack/common/eventlet_backdoor.py b/rack/openstack/common/eventlet_backdoor.py
deleted file mode 100644
index e1aad43..0000000
--- a/rack/openstack/common/eventlet_backdoor.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 OpenStack Foundation.
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-
-import errno
-import gc
-import os
-import pprint
-import socket
-import sys
-import traceback
-
-import eventlet
-import eventlet.backdoor
-import greenlet
-from oslo.config import cfg
-
-from rack.openstack.common.gettextutils import _
-from rack.openstack.common import log as logging
-
-help_for_backdoor_port = (
-    'Acceptable values are 0, <port>, and <start>:<end>, where 0 results '
-    'in listening on a random tcp port number; <port> results in '
-    'listening on the specified port number (and not enabling backdoor '
-    'if that port is in use); and <start>:<end> results in listening on '
-    'the smallest unused port number within the specified range of port '
-    'numbers. The chosen port is displayed in the service\'s log file.')
-eventlet_backdoor_opts = [
-    cfg.StrOpt('backdoor_port',
-               default=None,
-               help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]
-
-CONF = cfg.CONF
-CONF.register_opts(eventlet_backdoor_opts)
-LOG = logging.getLogger(__name__)
-
-
-class EventletBackdoorConfigValueError(Exception):
-    # NOTE: the parameter order matches the raise in _parse_port_range().
-    def __init__(self, port_range, ex, help_msg):
-        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' - '%(help)s' % - {'range': port_range, 'ex': ex, 'help': help_msg}) - super(EventletBackdoorConfigValueError, self).__init__(msg) - self.port_range = port_range - - -def _dont_use_this(): - print("Don't use this, just disconnect instead") - - -def _find_objects(t): - return filter(lambda o: isinstance(o, t), gc.get_objects()) - - -def _print_greenthreads(): - for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print(i, gt) - traceback.print_stack(gt.gr_frame) - print() - - -def _print_nativethreads(): - for threadId, stack in sys._current_frames().items(): - print(threadId) - traceback.print_stack(stack) - print() - - -def _parse_port_range(port_range): - if ':' not in port_range: - start, end = port_range, port_range - else: - start, end = port_range.split(':', 1) - try: - start, end = int(start), int(end) - if end < start: - raise ValueError - return start, end - except ValueError as ex: - raise EventletBackdoorConfigValueError(port_range, ex, - help_for_backdoor_port) - - -def _listen(host, start_port, end_port, listen_func): - try_port = start_port - while True: - try: - return listen_func((host, try_port)) - except socket.error as exc: - if (exc.errno != errno.EADDRINUSE or - try_port >= end_port): - raise - try_port += 1 - - -def initialize_if_enabled(): - backdoor_locals = { - 'exit': _dont_use_this, # So we don't exit the entire process - 'quit': _dont_use_this, # So we don't exit the entire process - 'fo': _find_objects, - 'pgt': _print_greenthreads, - 'pnt': _print_nativethreads, - } - - if CONF.backdoor_port is None: - return None - - start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) - - # NOTE(johannes): The standard sys.displayhook will print the value of - # the last expression and set it to __builtin__._, which overwrites - # the __builtin__._ that gettext sets. Let's switch to using pprint - # since it won't interact poorly with gettext, and it's easier to - # read the output too. - def displayhook(val): - if val is not None: - pprint.pprint(val) - sys.displayhook = displayhook - - sock = _listen('localhost', start_port, end_port, eventlet.listen) - - # In the case of backdoor port being zero, a port number is assigned by - # listen(). In any case, pull the port number out here. - port = sock.getsockname()[1] - LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') % - {'port': port, 'pid': os.getpid()}) - eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, - locals=backdoor_locals) - return port diff --git a/rack/openstack/common/excutils.py b/rack/openstack/common/excutils.py deleted file mode 100644 index 3b73dc0..0000000 --- a/rack/openstack/common/excutils.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exception related utilities. 
-""" - -import logging -import sys -import time -import traceback - -import six - -from rack.openstack.common.gettextutils import _ # noqa - - -class save_and_reraise_exception(object): - """Save current exception, run some code and then re-raise. - - In some cases the exception context can be cleared, resulting in None - being attempted to be re-raised after an exception handler is run. This - can happen when eventlet switches greenthreads or when running an - exception handler, code raises and catches an exception. In both - cases the exception context will be cleared. - - To work around this, we save the exception state, run handler code, and - then re-raise the original exception. If another exception occurs, the - saved exception is logged and the new exception is re-raised. - - In some cases the caller may not want to re-raise the exception, and - for those circumstances this context provides a reraise flag that - can be used to suppress the exception. For example: - - except Exception: - with save_and_reraise_exception() as ctxt: - decide_if_need_reraise() - if not should_be_reraised: - ctxt.reraise = False - """ - def __init__(self): - self.reraise = True - - def __enter__(self): - self.type_, self.value, self.tb, = sys.exc_info() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - logging.error(_('Original exception being dropped: %s'), - traceback.format_exception(self.type_, - self.value, - self.tb)) - return False - if self.reraise: - six.reraise(self.type_, self.value, self.tb) - - -def forever_retry_uncaught_exceptions(infunc): - def inner_func(*args, **kwargs): - last_log_time = 0 - last_exc_message = None - exc_count = 0 - while True: - try: - return infunc(*args, **kwargs) - except Exception as exc: - this_exc_message = six.u(str(exc)) - if this_exc_message == last_exc_message: - exc_count += 1 - else: - exc_count = 1 - # Do not log any more frequently than once a minute unless - # the exception message changes - cur_time = int(time.time()) - if (cur_time - last_log_time > 60 or - this_exc_message != last_exc_message): - logging.exception( - _('Unexpected exception occurred %d time(s)... ' - 'retrying.') % exc_count) - last_log_time = cur_time - last_exc_message = this_exc_message - exc_count = 0 - # This should be a very rare event. In case it isn't, do - # a sleep. - time.sleep(1) - return inner_func diff --git a/rack/openstack/common/fileutils.py b/rack/openstack/common/fileutils.py deleted file mode 100644 index 8f539e0..0000000 --- a/rack/openstack/common/fileutils.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import contextlib -import errno -import os -import tempfile - -from rack.openstack.common import excutils -from rack.openstack.common.gettextutils import _ # noqa -from rack.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -_FILE_CACHE = {} - - -def ensure_tree(path): - """Create a directory (and any ancestor directories required) - - :param path: Directory to create - """ - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST: - if not os.path.isdir(path): - raise - else: - raise - - -def read_cached_file(filename, force_reload=False): - """Read from a file if it has been modified. - - :param force_reload: Whether to reload the file. - :returns: A tuple with a boolean specifying if the data is fresh - or not. - """ - global _FILE_CACHE - - if force_reload and filename in _FILE_CACHE: - del _FILE_CACHE[filename] - - reloaded = False - mtime = os.path.getmtime(filename) - cache_info = _FILE_CACHE.setdefault(filename, {}) - - if not cache_info or mtime > cache_info.get('mtime', 0): - LOG.debug(_("Reloading cached file %s") % filename) - with open(filename) as fap: - cache_info['data'] = fap.read() - cache_info['mtime'] = mtime - reloaded = True - return (reloaded, cache_info['data']) - - -def delete_if_exists(path, remove=os.unlink): - """Delete a file, but ignore file not found error. - - :param path: File to delete - :param remove: Optional function to remove passed path - """ - - try: - remove(path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - -@contextlib.contextmanager -def remove_path_on_error(path, remove=delete_if_exists): - """Protect code that wants to operate on PATH atomically. - Any exception will cause PATH to be removed. - - :param path: File to work with - :param remove: Optional function to remove passed path - """ - - try: - yield - except Exception: - with excutils.save_and_reraise_exception(): - remove(path) - - -def file_open(*args, **kwargs): - """Open file - - see built-in file() documentation for more details - - Note: The reason this is kept in a separate module is to easily - be able to provide a stub module that doesn't alter system - state at all (for unit tests) - """ - return file(*args, **kwargs) - - -def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): - """Create temporary file or use existing file. - - This util is needed for creating temporary file with - specified content, suffix and prefix. If path is not None, - it will be used for writing content. If the path doesn't - exist it'll be created. - - :param content: content for temporary file. - :param path: same as parameter 'dir' for mkstemp - :param suffix: same as parameter 'suffix' for mkstemp - :param prefix: same as parameter 'prefix' for mkstemp - - For example: it can be used in database tests for creating - configuration files. - """ - if path: - ensure_tree(path) - - (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) - try: - os.write(fd, content) - finally: - os.close(fd) - return path diff --git a/rack/openstack/common/fixture/__init__.py b/rack/openstack/common/fixture/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/openstack/common/fixture/config.py b/rack/openstack/common/fixture/config.py deleted file mode 100644 index 9489b85..0000000 --- a/rack/openstack/common/fixture/config.py +++ /dev/null @@ -1,85 +0,0 @@ -# -# Copyright 2013 Mirantis, Inc. -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -from oslo.config import cfg -import six - - -class Config(fixtures.Fixture): - """Allows overriding configuration settings for the test. - - `conf` will be reset on cleanup. - - """ - - def __init__(self, conf=cfg.CONF): - self.conf = conf - - def setUp(self): - super(Config, self).setUp() - # NOTE(morganfainberg): unregister must be added to cleanup before - # reset is because cleanup works in reverse order of registered items, - # and a reset must occur before unregistering options can occur. - self.addCleanup(self._unregister_config_opts) - self.addCleanup(self.conf.reset) - self._registered_config_opts = {} - - def config(self, **kw): - """Override configuration values. - - The keyword arguments are the names of configuration options to - override and their values. - - If a `group` argument is supplied, the overrides are applied to - the specified configuration option group, otherwise the overrides - are applied to the ``default`` group. - - """ - - group = kw.pop('group', None) - for k, v in six.iteritems(kw): - self.conf.set_override(k, v, group) - - def _unregister_config_opts(self): - for group in self._registered_config_opts: - self.conf.unregister_opts(self._registered_config_opts[group], - group=group) - - def register_opt(self, opt, group=None): - """Register a single option for the test run. - - Options registered in this manner will automatically be unregistered - during cleanup. - - If a `group` argument is supplied, it will register the new option - to that group, otherwise the option is registered to the ``default`` - group. - """ - self.conf.register_opt(opt, group=group) - self._registered_config_opts.setdefault(group, set()).add(opt) - - def register_opts(self, opts, group=None): - """Register multiple options for the test run. - - This works in the same manner as register_opt() but takes a list of - options as the first argument. All arguments will be registered to the - same group if the ``group`` argument is supplied, otherwise all options - will be registered to the ``default`` group. - """ - for opt in opts: - self.register_opt(opt, group=group) diff --git a/rack/openstack/common/fixture/lockutils.py b/rack/openstack/common/fixture/lockutils.py deleted file mode 100644 index f8e89ea..0000000 --- a/rack/openstack/common/fixture/lockutils.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
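
[Editor's note: a minimal sketch of how the Config fixture above was used in
a test; an editorial addition, not part of the deleted tree. ExampleTest and
'my_opt' are hypothetical names.]

    import testtools
    from oslo.config import cfg

    from rack.openstack.common.fixture import config


    class ExampleTest(testtools.TestCase):
        def test_override(self):
            fixture = self.useFixture(config.Config())
            # Both the registration and the override are undone on cleanup.
            fixture.register_opt(cfg.StrOpt('my_opt', default='a'))
            fixture.config(my_opt='b')
            self.assertEqual('b', fixture.conf.my_opt)
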
-
-import fixtures
-
-from rack.openstack.common import lockutils
-
-
-class LockFixture(fixtures.Fixture):
-    """External locking fixture.
-
-    This fixture is basically an alternative to the synchronized decorator
-    with the external flag so that tearDowns and addCleanups will be included
-    in the lock context for locking between tests. The fixture is recommended
-    to be the first line in a test method, like so::
-
-        def test_method(self):
-            self.useFixture(LockFixture('lock_name'))
-            ...
-
-    or the first line in setUp if all the test methods in the class are
-    required to be serialized. Something like::
-
-        class TestCase(testtools.TestCase):
-            def setUp(self):
-                self.useFixture(LockFixture('lock_name'))
-                super(TestCase, self).setUp()
-                ...
-
-    This is because addCleanups are put on a LIFO queue that gets run after
-    the test method exits (either by completing or raising an exception).
-    """
-    def __init__(self, name, lock_file_prefix=None):
-        self.mgr = lockutils.lock(name, lock_file_prefix, True)
-
-    def setUp(self):
-        super(LockFixture, self).setUp()
-        self.addCleanup(self.mgr.__exit__, None, None, None)
-        self.mgr.__enter__()
diff --git a/rack/openstack/common/fixture/logging.py b/rack/openstack/common/fixture/logging.py
deleted file mode 100644
index 3823a03..0000000
--- a/rack/openstack/common/fixture/logging.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-
-
-def get_logging_handle_error_fixture():
-    """Returns a fixture to make logging raise formatting exceptions.
-
-    Usage:
-        self.useFixture(logging.get_logging_handle_error_fixture())
-    """
-    return fixtures.MonkeyPatch('logging.Handler.handleError',
-                                _handleError)
-
-
-def _handleError(self, record):
-    """Monkey patch for logging.Handler.handleError.
-
-    The default handleError just logs the error to stderr but we want
-    the option of actually raising an exception.
-    """
-    raise
diff --git a/rack/openstack/common/fixture/mockpatch.py b/rack/openstack/common/fixture/mockpatch.py
deleted file mode 100644
index a8ffeb3..0000000
--- a/rack/openstack/common/fixture/mockpatch.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
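
[Editor's note: a minimal sketch of the mockpatch fixtures defined below; an
editorial addition, not part of the deleted tree. Patching time.sleep is an
arbitrary illustration.]

    import time

    import testtools

    from rack.openstack.common.fixture import mockpatch


    class ExampleTest(testtools.TestCase):
        def test_patched_sleep(self):
            # The patch starts when the fixture's setUp() runs and is
            # stopped automatically on test cleanup.
            fake_sleep = self.useFixture(mockpatch.Patch('time.sleep')).mock
            time.sleep(10)  # returns immediately; the call is recorded
            fake_sleep.assert_called_once_with(10)
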
- -import fixtures -import mock - - -class PatchObject(fixtures.Fixture): - """Deal with code around mock.""" - - def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs): - self.obj = obj - self.attr = attr - self.kwargs = kwargs - self.new = new - - def setUp(self): - super(PatchObject, self).setUp() - _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs) - self.mock = _p.start() - self.addCleanup(_p.stop) - - -class Patch(fixtures.Fixture): - - """Deal with code around mock.patch.""" - - def __init__(self, obj, new=mock.DEFAULT, **kwargs): - self.obj = obj - self.kwargs = kwargs - self.new = new - - def setUp(self): - super(Patch, self).setUp() - _p = mock.patch(self.obj, self.new, **self.kwargs) - self.mock = _p.start() - self.addCleanup(_p.stop) diff --git a/rack/openstack/common/fixture/moxstubout.py b/rack/openstack/common/fixture/moxstubout.py deleted file mode 100644 index d7e118e..0000000 --- a/rack/openstack/common/fixture/moxstubout.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -from six.moves import mox # noqa - - -class MoxStubout(fixtures.Fixture): - """Deal with code around mox and stubout as a fixture.""" - - def setUp(self): - super(MoxStubout, self).setUp() - # emulate some of the mox stuff, we can't use the metaclass - # because it screws with our generators - self.mox = mox.Mox() - self.stubs = self.mox.stubs - self.addCleanup(self.mox.UnsetStubs) - self.addCleanup(self.mox.VerifyAll) diff --git a/rack/openstack/common/gettextutils.py b/rack/openstack/common/gettextutils.py deleted file mode 100644 index a5748ab..0000000 --- a/rack/openstack/common/gettextutils.py +++ /dev/null @@ -1,474 +0,0 @@ -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -gettext for openstack-common modules. 
- -Usual usage in an openstack.common module: - - from rack.openstack.common.gettextutils import _ -""" - -import copy -import functools -import gettext -import locale -from logging import handlers -import os -import re - -from babel import localedata -import six - -_localedir = os.environ.get('rack'.upper() + '_LOCALEDIR') -_t = gettext.translation('rack', localedir=_localedir, fallback=True) - -# We use separate translation catalogs for each log level, so set up a -# mapping between the log level name and the translator. The domain -# for the log level is project_name + "-log-" + log_level so messages -# for each level end up in their own catalog. -_t_log_levels = dict( - (level, gettext.translation('rack' + '-log-' + level, - localedir=_localedir, - fallback=True)) - for level in ['info', 'warning', 'error', 'critical'] -) - -_AVAILABLE_LANGUAGES = {} -USE_LAZY = False - - -def enable_lazy(): - """Convenience function for configuring _() to use lazy gettext - - Call this at the start of execution to enable the gettextutils._ - function to use lazy gettext functionality. This is useful if - your project is importing _ directly instead of using the - gettextutils.install() way of importing the _ function. - """ - global USE_LAZY - USE_LAZY = True - - -def _(msg): - if USE_LAZY: - return Message(msg, domain='rack') - else: - if six.PY3: - return _t.gettext(msg) - return _t.ugettext(msg) - - -def _log_translation(msg, level): - """Build a single translation of a log message - """ - if USE_LAZY: - return Message(msg, domain='rack' + '-log-' + level) - else: - translator = _t_log_levels[level] - if six.PY3: - return translator.gettext(msg) - return translator.ugettext(msg) - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = functools.partial(_log_translation, level='info') -_LW = functools.partial(_log_translation, level='warning') -_LE = functools.partial(_log_translation, level='error') -_LC = functools.partial(_log_translation, level='critical') - - -def install(domain, lazy=False): - """Install a _() function using the given translation domain. - - Given a translation domain, install a _() function using gettext's - install() function. - - The main difference from gettext.install() is that we allow - overriding the default localedir (e.g. /usr/share/locale) using - a translation-domain-specific environment variable (e.g. - NOVA_LOCALEDIR). - - :param domain: the translation domain - :param lazy: indicates whether or not to install the lazy _() function. - The lazy _() introduces a way to do deferred translation - of messages by installing a _ that builds Message objects, - instead of strings, which can then be lazily translated into - any available locale. - """ - if lazy: - # NOTE(mrodden): Lazy gettext functionality. - # - # The following introduces a deferred way to do translations on - # messages in OpenStack. We override the standard _() function - # and % (format string) operation to build Message objects that can - # later be translated when we have more information. - def _lazy_gettext(msg): - """Create and return a Message object. - - Lazy gettext function for a given domain, it is a factory method - for a project/module to get a lazy gettext function for its own - translation domain (i.e. rack, glance, cinder, etc.) - - Message encapsulates a string so that we can translate - it later when needed. 
- """ - return Message(msg, domain=domain) - - from six import moves - moves.builtins.__dict__['_'] = _lazy_gettext - else: - localedir = '%s_LOCALEDIR' % domain.upper() - if six.PY3: - gettext.install(domain, - localedir=os.environ.get(localedir)) - else: - gettext.install(domain, - localedir=os.environ.get(localedir), - unicode=True) - - -class Message(six.text_type): - """A Message object is a unicode object that can be translated. - - Translation of Message is done explicitly using the translate() method. - For all non-translation intents and purposes, a Message is simply unicode, - and can be treated as such. - """ - - def __new__(cls, msgid, msgtext=None, params=None, - domain='rack', *args): - """Create a new Message object. - - In order for translation to work gettext requires a message ID, this - msgid will be used as the base unicode text. It is also possible - for the msgid and the base unicode text to be different by passing - the msgtext parameter. - """ - # If the base msgtext is not given, we use the default translation - # of the msgid (which is in English) just in case the system locale is - # not English, so that the base text will be in that locale by default. - if not msgtext: - msgtext = Message._translate_msgid(msgid, domain) - # We want to initialize the parent unicode with the actual object that - # would have been plain unicode if 'Message' was not enabled. - msg = super(Message, cls).__new__(cls, msgtext) - msg.msgid = msgid - msg.domain = domain - msg.params = params - return msg - - def translate(self, desired_locale=None): - """Translate this message to the desired locale. - - :param desired_locale: The desired locale to translate the message to, - if no locale is provided the message will be - translated to the system's default locale. - - :returns: the translated message in unicode - """ - - translated_message = Message._translate_msgid(self.msgid, - self.domain, - desired_locale) - if self.params is None: - # No need for more translation - return translated_message - - # This Message object may have been formatted with one or more - # Message objects as substitution arguments, given either as a single - # argument, part of a tuple, or as one or more values in a dictionary. - # When translating this Message we need to translate those Messages too - translated_params = _translate_args(self.params, desired_locale) - - translated_message = translated_message % translated_params - - return translated_message - - @staticmethod - def _translate_msgid(msgid, domain, desired_locale=None): - if not desired_locale: - system_locale = locale.getdefaultlocale() - # If the system locale is not available to the runtime use English - if not system_locale[0]: - desired_locale = 'en_US' - else: - desired_locale = system_locale[0] - - locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') - lang = gettext.translation(domain, - localedir=locale_dir, - languages=[desired_locale], - fallback=True) - if six.PY3: - translator = lang.gettext - else: - translator = lang.ugettext - - translated_message = translator(msgid) - return translated_message - - def __mod__(self, other): - # When we mod a Message we want the actual operation to be performed - # by the parent class (i.e. 
unicode()), the only thing we do here is - # save the original msgid and the parameters in case of a translation - params = self._sanitize_mod_params(other) - unicode_mod = super(Message, self).__mod__(params) - modded = Message(self.msgid, - msgtext=unicode_mod, - params=params, - domain=self.domain) - return modded - - def _sanitize_mod_params(self, other): - """Sanitize the object being modded with this Message. - - - Add support for modding 'None' so translation supports it - - Trim the modded object, which can be a large dictionary, to only - those keys that would actually be used in a translation - - Snapshot the object being modded, in case the message is - translated, it will be used as it was when the Message was created - """ - if other is None: - params = (other,) - elif isinstance(other, dict): - params = self._trim_dictionary_parameters(other) - else: - params = self._copy_param(other) - return params - - def _trim_dictionary_parameters(self, dict_param): - """Return a dict that only has matching entries in the msgid.""" - # NOTE(luisg): Here we trim down the dictionary passed as parameters - # to avoid carrying a lot of unnecessary weight around in the message - # object, for example if someone passes in Message() % locals() but - # only some params are used, and additionally we prevent errors for - # non-deepcopyable objects by unicoding() them. - - # Look for %(param) keys in msgid; - # Skip %% and deal with the case where % is first character on the line - keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) - - # If we don't find any %(param) keys but have a %s - if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): - # Apparently the full dictionary is the parameter - params = self._copy_param(dict_param) - else: - params = {} - # Save our existing parameters as defaults to protect - # ourselves from losing values if we are called through an - # (erroneous) chain that builds a valid Message with - # arguments, and then does something like "msg % kwds" - # where kwds is an empty dictionary. - src = {} - if isinstance(self.params, dict): - src.update(self.params) - src.update(dict_param) - for key in keys: - params[key] = self._copy_param(src[key]) - - return params - - def _copy_param(self, param): - try: - return copy.deepcopy(param) - except TypeError: - # Fallback to casting to unicode this will handle the - # python code-like objects that can't be deep-copied - return six.text_type(param) - - def __add__(self, other): - msg = _('Message objects do not support addition.') - raise TypeError(msg) - - def __radd__(self, other): - return self.__add__(other) - - def __str__(self): - # NOTE(luisg): Logging in python 2.6 tries to str() log records, - # and it expects specifically a UnicodeError in order to proceed. - msg = _('Message objects do not support str() because they may ' - 'contain non-ascii characters. ' - 'Please use unicode() or translate() instead.') - raise UnicodeError(msg) - - -def get_available_languages(domain): - """Lists the available languages for the given translation domain. 
- - :param domain: the domain to get languages for - """ - if domain in _AVAILABLE_LANGUAGES: - return copy.copy(_AVAILABLE_LANGUAGES[domain]) - - localedir = '%s_LOCALEDIR' % domain.upper() - find = lambda x: gettext.find(domain, - localedir=os.environ.get(localedir), - languages=[x]) - - # NOTE(mrodden): en_US should always be available (and first in case - # order matters) since our in-line message strings are en_US - language_list = ['en_US'] - # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list - # requires >=0.9.6, uncapped, so defensively work with both. We can remove - # this check when the master list updates to >=1.0, and update all projects - list_identifiers = (getattr(localedata, 'list', None) or - getattr(localedata, 'locale_identifiers')) - locale_identifiers = list_identifiers() - - for i in locale_identifiers: - if find(i) is not None: - language_list.append(i) - - # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported - # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they - # are perfectly legitimate locales: - # https://github.com/mitsuhiko/babel/issues/37 - # In Babel 1.3 they fixed the bug and they support these locales, but - # they are still not explicitly "listed" by locale_identifiers(). - # That is why we add the locales here explicitly if necessary so that - # they are listed as supported. - aliases = {'zh': 'zh_CN', - 'zh_Hant_HK': 'zh_HK', - 'zh_Hant': 'zh_TW', - 'fil': 'tl_PH'} - for (locale, alias) in six.iteritems(aliases): - if locale in language_list and alias not in language_list: - language_list.append(alias) - - _AVAILABLE_LANGUAGES[domain] = language_list - return copy.copy(language_list) - - -def translate(obj, desired_locale=None): - """Gets the translated unicode representation of the given object. - - If the object is not translatable it is returned as-is. - If the locale is None the object is translated to the system locale. - - :param obj: the object to translate - :param desired_locale: the locale to translate the message to, if None the - default system locale will be used - :returns: the translated object in unicode, or the original object if - it could not be translated - """ - message = obj - if not isinstance(message, Message): - # If the object to translate is not already translatable, - # let's first get its unicode representation - message = six.text_type(obj) - if isinstance(message, Message): - # Even after unicoding() we still need to check if we are - # running with translatable unicode before translating - return message.translate(desired_locale) - return obj - - -def _translate_args(args, desired_locale=None): - """Translates all the translatable elements of the given arguments object. - - This method is used for translating the translatable values in method - arguments which include values of tuples or dictionaries. - If the object is not a tuple or a dictionary the object itself is - translated if it is translatable. - - If the locale is None the object is translated to the system locale. 
- - :param args: the args to translate - :param desired_locale: the locale to translate the args to, if None the - default system locale will be used - :returns: a new args object with the translated contents of the original - """ - if isinstance(args, tuple): - return tuple(translate(v, desired_locale) for v in args) - if isinstance(args, dict): - translated_dict = {} - for (k, v) in six.iteritems(args): - translated_v = translate(v, desired_locale) - translated_dict[k] = translated_v - return translated_dict - return translate(args, desired_locale) - - -class TranslationHandler(handlers.MemoryHandler): - """Handler that translates records before logging them. - - The TranslationHandler takes a locale and a target logging.Handler object - to forward LogRecord objects to after translating them. This handler - depends on Message objects being logged, instead of regular strings. - - The handler can be configured declaratively in the logging.conf as follows: - - [handlers] - keys = translatedlog, translator - - [handler_translatedlog] - class = handlers.WatchedFileHandler - args = ('/var/log/api-localized.log',) - formatter = context - - [handler_translator] - class = openstack.common.log.TranslationHandler - target = translatedlog - args = ('zh_CN',) - - If the specified locale is not available in the system, the handler will - log in the default locale. - """ - - def __init__(self, locale=None, target=None): - """Initialize a TranslationHandler - - :param locale: locale to use for translating messages - :param target: logging.Handler object to forward - LogRecord objects to after translation - """ - # NOTE(luisg): In order to allow this handler to be a wrapper for - # other handlers, such as a FileHandler, and still be able to - # configure it using logging.conf, this handler has to extend - # MemoryHandler because only the MemoryHandlers' logging.conf - # parsing is implemented such that it accepts a target handler. - handlers.MemoryHandler.__init__(self, capacity=0, target=target) - self.locale = locale - - def setFormatter(self, fmt): - self.target.setFormatter(fmt) - - def emit(self, record): - # We save the message from the original record to restore it - # after translation, so other handlers are not affected by this - original_msg = record.msg - original_args = record.args - - try: - self._translate_and_log_record(record) - finally: - record.msg = original_msg - record.args = original_args - - def _translate_and_log_record(self, record): - record.msg = translate(record.msg, self.locale) - - # In addition to translating the message, we also need to translate - # arguments that were passed to the log method that were not part - # of the main message e.g., log.info(_('Some message %s'), this_one)) - record.args = _translate_args(record.args, self.locale) - - self.target.emit(record) diff --git a/rack/openstack/common/imageutils.py b/rack/openstack/common/imageutils.py deleted file mode 100644 index 7b3f94d..0000000 --- a/rack/openstack/common/imageutils.py +++ /dev/null @@ -1,144 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helper methods to deal with images. -""" - -import re - -from rack.openstack.common.gettextutils import _ # noqa -from rack.openstack.common import strutils - - -class QemuImgInfo(object): - BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:" - r"\s+(.*?)\)\s*$"), re.I) - TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$") - SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I) - - def __init__(self, cmd_output=None): - details = self._parse(cmd_output or '') - self.image = details.get('image') - self.backing_file = details.get('backing_file') - self.file_format = details.get('file_format') - self.virtual_size = details.get('virtual_size') - self.cluster_size = details.get('cluster_size') - self.disk_size = details.get('disk_size') - self.snapshots = details.get('snapshot_list', []) - self.encryption = details.get('encryption') - - def __str__(self): - lines = [ - 'image: %s' % self.image, - 'file_format: %s' % self.file_format, - 'virtual_size: %s' % self.virtual_size, - 'disk_size: %s' % self.disk_size, - 'cluster_size: %s' % self.cluster_size, - 'backing_file: %s' % self.backing_file, - ] - if self.snapshots: - lines.append("snapshots: %s" % self.snapshots) - return "\n".join(lines) - - def _canonicalize(self, field): - # Standardize on underscores/lc/no dash and no spaces - # since qemu seems to have mixed outputs here... and - # this format allows for better integration with python - # - ie for usage in kwargs and such... 
- field = field.lower().strip() - for c in (" ", "-"): - field = field.replace(c, '_') - return field - - def _extract_bytes(self, details): - # Replace it with the byte amount - real_size = self.SIZE_RE.search(details) - if real_size: - details = real_size.group(1) - try: - details = strutils.to_bytes(details) - except TypeError: - pass - return details - - def _extract_details(self, root_cmd, root_details, lines_after): - real_details = root_details - if root_cmd == 'backing_file': - # Replace it with the real backing file - backing_match = self.BACKING_FILE_RE.match(root_details) - if backing_match: - real_details = backing_match.group(2).strip() - elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']: - # Replace it with the byte amount (if we can convert it) - real_details = self._extract_bytes(root_details) - elif root_cmd == 'file_format': - real_details = real_details.strip().lower() - elif root_cmd == 'snapshot_list': - # Next line should be a header, starting with 'ID' - if not lines_after or not lines_after[0].startswith("ID"): - msg = _("Snapshot list encountered but no header found!") - raise ValueError(msg) - del lines_after[0] - real_details = [] - # This is the sprintf pattern we will try to match - # "%-10s%-20s%7s%20s%15s" - # ID TAG VM SIZE DATE VM CLOCK (current header) - while lines_after: - line = lines_after[0] - line_pieces = line.split() - if len(line_pieces) != 6: - break - # Check against this pattern in the final position - # "%02d:%02d:%02d.%03d" - date_pieces = line_pieces[5].split(":") - if len(date_pieces) != 3: - break - real_details.append({ - 'id': line_pieces[0], - 'tag': line_pieces[1], - 'vm_size': line_pieces[2], - 'date': line_pieces[3], - 'vm_clock': line_pieces[4] + " " + line_pieces[5], - }) - del lines_after[0] - return real_details - - def _parse(self, cmd_output): - # Analysis done of qemu-img.c to figure out what is going on here - # Find all points start with some chars and then a ':' then a newline - # and then handle the results of those 'top level' items in a separate - # function. - # - # TODO(harlowja): newer versions might have a json output format - # we should switch to that whenever possible. - # see: http://bit.ly/XLJXDX - contents = {} - lines = [x for x in cmd_output.splitlines() if x.strip()] - while lines: - line = lines.pop(0) - top_level = self.TOP_LEVEL_RE.match(line) - if top_level: - root = self._canonicalize(top_level.group(1)) - if not root: - continue - root_details = top_level.group(2).strip() - details = self._extract_details(root, root_details, lines) - contents[root] = details - return contents diff --git a/rack/openstack/common/importutils.py b/rack/openstack/common/importutils.py deleted file mode 100644 index 4fd9ae2..0000000 --- a/rack/openstack/common/importutils.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Import related utilities and helper functions. 
-""" - -import sys -import traceback - - -def import_class(import_str): - """Returns a class from a string including module and class.""" - mod_str, _sep, class_str = import_str.rpartition('.') - try: - __import__(mod_str) - return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): - raise ImportError('Class %s cannot be found (%s)' % - (class_str, - traceback.format_exception(*sys.exc_info()))) - - -def import_object(import_str, *args, **kwargs): - """Import a class and return an instance of it.""" - return import_class(import_str)(*args, **kwargs) - - -def import_object_ns(name_space, import_str, *args, **kwargs): - """Tries to import object from default namespace. - - Imports a class and return an instance of it, first by trying - to find the class in a default namespace, then failing back to - a full path if not found in the default namespace. - """ - import_value = "%s.%s" % (name_space, import_str) - try: - return import_class(import_value)(*args, **kwargs) - except ImportError: - return import_class(import_str)(*args, **kwargs) - - -def import_module(import_str): - """Import a module.""" - __import__(import_str) - return sys.modules[import_str] - - -def try_import(import_str, default=None): - """Try to import a module and if it fails return default.""" - try: - return import_module(import_str) - except ImportError: - return default diff --git a/rack/openstack/common/jsonutils.py b/rack/openstack/common/jsonutils.py deleted file mode 100644 index 6fdd5ac..0000000 --- a/rack/openstack/common/jsonutils.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -JSON related utilities. - -This module provides a few things: - - 1) A handy function for getting an object down to something that can be - JSON serialized. See to_primitive(). - - 2) Wrappers around loads() and dumps(). The dumps() wrapper will - automatically use to_primitive() for you if needed. - - 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson - is available. 
-''' - - -import datetime -import functools -import inspect -import itertools -import json -try: - import xmlrpclib -except ImportError: - # NOTE(jd): xmlrpclib is not shipped with Python 3 - xmlrpclib = None - -import six - -from rack.openstack.common import gettextutils -from rack.openstack.common import importutils -from rack.openstack.common import timeutils - -netaddr = importutils.try_import("netaddr") - -_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, - inspect.isfunction, inspect.isgeneratorfunction, - inspect.isgenerator, inspect.istraceback, inspect.isframe, - inspect.iscode, inspect.isbuiltin, inspect.isroutine, - inspect.isabstract] - -_simple_types = (six.string_types + six.integer_types - + (type(None), bool, float)) - - -def to_primitive(value, convert_instances=False, convert_datetime=True, - level=0, max_depth=3): - """Convert a complex object into primitives. - - Handy for JSON serialization. We can optionally handle instances, - but since this is a recursive function, we could have cyclical - data structures. - - To handle cyclical data structures we could track the actual objects - visited in a set, but not all objects are hashable. Instead we just - track the depth of the object inspections and don't go too deep. - - Therefore, convert_instances=True is lossy ... be aware. - - """ - # handle obvious types first - order of basic types determined by running - # full tests on rack project, resulting in the following counts: - # 572754 - # 460353 - # 379632 - # 274610 - # 199918 - # 114200 - # 51817 - # 26164 - # 6491 - # 283 - # 19 - if isinstance(value, _simple_types): - return value - - if isinstance(value, datetime.datetime): - if convert_datetime: - return timeutils.strtime(value) - else: - return value - - # value of itertools.count doesn't get caught by nasty_type_tests - # and results in infinite loop when list(value) is called. - if type(value) == itertools.count: - return six.text_type(value) - - # FIXME(vish): Workaround for LP bug 852095. Without this workaround, - # tests that raise an exception in a mocked method that - # has a @wrap_exception with a notifier will fail. If - # we up the dependency to 0.5.4 (when it is released) we - # can remove this workaround. - if getattr(value, '__module__', None) == 'mox': - return 'mock' - - if level > max_depth: - return '?' - - # The try block may not be necessary after the class check above, - # but just in case ... - try: - recursive = functools.partial(to_primitive, - convert_instances=convert_instances, - convert_datetime=convert_datetime, - level=level, - max_depth=max_depth) - if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in six.iteritems(value)) - elif isinstance(value, (list, tuple)): - return [recursive(lv) for lv in value] - - # It's not clear why xmlrpclib created their own DateTime type, but - # for our purposes, make it a datetime type which is explicitly - # handled - if xmlrpclib and isinstance(value, xmlrpclib.DateTime): - value = datetime.datetime(*tuple(value.timetuple())[:6]) - - if convert_datetime and isinstance(value, datetime.datetime): - return timeutils.strtime(value) - elif isinstance(value, gettextutils.Message): - return value.data - elif hasattr(value, 'iteritems'): - return recursive(dict(value.iteritems()), level=level + 1) - elif hasattr(value, '__iter__'): - return recursive(list(value)) - elif convert_instances and hasattr(value, '__dict__'): - # Likely an instance of something. Watch for cycles. - # Ignore class member vars. 
- return recursive(value.__dict__, level=level + 1) - elif netaddr and isinstance(value, netaddr.IPAddress): - return six.text_type(value) - else: - if any(test(value) for test in _nasty_type_tests): - return six.text_type(value) - return value - except TypeError: - # Class objects are tricky since they may define something like - # __iter__ defined but it isn't callable as list(). - return six.text_type(value) - - -def dumps(value, default=to_primitive, **kwargs): - return json.dumps(value, default=default, **kwargs) - - -def loads(s): - return json.loads(s) - - -def load(s): - return json.load(s) - - -try: - import anyjson -except ImportError: - pass -else: - anyjson._modules.append((__name__, 'dumps', TypeError, - 'loads', ValueError, 'load')) - anyjson.force_implementation(__name__) diff --git a/rack/openstack/common/local.py b/rack/openstack/common/local.py deleted file mode 100644 index 0819d5b..0000000 --- a/rack/openstack/common/local.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Local storage of variables using weak references""" - -import threading -import weakref - - -class WeakLocal(threading.local): - def __getattribute__(self, attr): - rval = super(WeakLocal, self).__getattribute__(attr) - if rval: - # NOTE(mikal): this bit is confusing. What is stored is a weak - # reference, not the value itself. We therefore need to lookup - # the weak reference and return the inner value here. - rval = rval() - return rval - - def __setattr__(self, attr, value): - value = weakref.ref(value) - return super(WeakLocal, self).__setattr__(attr, value) - - -# NOTE(mikal): the name "store" should be deprecated in the future -store = WeakLocal() - -# A "weak" store uses weak references and allows an object to fall out of scope -# when it falls out of scope in the code that uses the thread local storage. A -# "strong" store will hold a reference to the object so that it never falls out -# of scope. -weak_store = WeakLocal() -strong_store = threading.local() diff --git a/rack/openstack/common/lockutils.py b/rack/openstack/common/lockutils.py deleted file mode 100644 index 163bec0..0000000 --- a/rack/openstack/common/lockutils.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
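[Usage sketch for rack/openstack/common/local.py, deleted just above; added
 for context. The two stores differ only in whether the thread-local value is
 kept alive. Ctx is a hypothetical weakref-able class:

     from rack.openstack.common import local

     class Ctx(object):
         pass

     local.strong_store.context = Ctx()  # held for the thread's lifetime
     local.weak_store.context = Ctx()    # stored as a weakref.ref, so with
     local.weak_store.context            # nothing else holding the object,
                                         # CPython already returns None here
]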
- - -import contextlib -import errno -import functools -import os -import shutil -import subprocess -import sys -import tempfile -import threading -import time -import weakref - -from oslo.config import cfg - -from rack.openstack.common import fileutils -from rack.openstack.common.gettextutils import _ # noqa -from rack.openstack.common import local -from rack.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -util_opts = [ - cfg.BoolOpt('disable_process_locking', default=False, - help='Whether to disable inter-process locks'), - cfg.StrOpt('lock_path', - default=os.environ.get("NOVA_LOCK_PATH"), - help=('Directory to use for lock files.')) -] - - -CONF = cfg.CONF -CONF.register_opts(util_opts) - - -def set_defaults(lock_path): - cfg.set_defaults(util_opts, lock_path=lock_path) - - -class _InterProcessLock(object): - """Lock implementation which allows multiple locks, working around - issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does - not require any cleanup. Since the lock is always held on a file - descriptor rather than outside of the process, the lock gets dropped - automatically if the process crashes, even if __exit__ is not executed. - - There are no guarantees regarding usage by multiple green threads in a - single process here. This lock works only between processes. Exclusive - access between local threads should be achieved using the semaphores - in the @synchronized decorator. - - Note these locks are released when the descriptor is closed, so it's not - safe to close the file descriptor while another green thread holds the - lock. Just opening and closing the lock file can break synchronisation, - so lock files must be accessed only using this abstraction. - """ - - def __init__(self, name): - self.lockfile = None - self.fname = name - - def __enter__(self): - self.lockfile = open(self.fname, 'w') - - while True: - try: - # Using non-blocking locks since green threads are not - # patched to deal with blocking locking calls. - # Also upon reading the MSDN docs for locking(), it seems - # to have a laughable 10 attempts "blocking" mechanism. 
- self.trylock() - return self - except IOError as e: - if e.errno in (errno.EACCES, errno.EAGAIN): - # external locks synchronise things like iptables - # updates - give it some time to prevent busy spinning - time.sleep(0.01) - else: - raise - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - self.unlock() - self.lockfile.close() - except IOError: - LOG.exception(_("Could not release the acquired lock `%s`"), - self.fname) - - def trylock(self): - raise NotImplementedError() - - def unlock(self): - raise NotImplementedError() - - -class _WindowsLock(_InterProcessLock): - def trylock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) - - def unlock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) - - -class _PosixLock(_InterProcessLock): - def trylock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - - def unlock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_UN) - - -if os.name == 'nt': - import msvcrt - InterProcessLock = _WindowsLock -else: - import fcntl - InterProcessLock = _PosixLock - -_semaphores = weakref.WeakValueDictionary() -_semaphores_lock = threading.Lock() - - -@contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock - - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. - - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. - - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. This means that if two different - workers both run a a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. - - :param lock_path: The lock_path keyword argument is used to specify a - special location for external lock files to live. If nothing is set, then - CONF.lock_path is used as a default. 
- """ - with _semaphores_lock: - try: - sem = _semaphores[name] - except KeyError: - sem = threading.Semaphore() - _semaphores[name] = sem - - with sem: - LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) - - # NOTE(mikal): I know this looks odd - if not hasattr(local.strong_store, 'locks_held'): - local.strong_store.locks_held = [] - local.strong_store.locks_held.append(name) - - try: - if external and not CONF.disable_process_locking: - LOG.debug(_('Attempting to grab file lock "%(lock)s"'), - {'lock': name}) - - # We need a copy of lock_path because it is non-local - local_lock_path = lock_path or CONF.lock_path - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - if not os.path.exists(local_lock_path): - fileutils.ensure_tree(local_lock_path) - LOG.info(_('Created lock path: %s'), local_lock_path) - - def add_prefix(name, prefix): - if not prefix: - return name - sep = '' if prefix.endswith('-') else '-' - return '%s%s%s' % (prefix, sep, name) - - # NOTE(mikal): the lock name cannot contain directory - # separators - lock_file_name = add_prefix(name.replace(os.sep, '_'), - lock_file_prefix) - - lock_file_path = os.path.join(local_lock_path, lock_file_name) - - try: - lock = InterProcessLock(lock_file_path) - with lock as lock: - LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - yield lock - finally: - LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), - {'lock': name, 'path': lock_file_path}) - else: - yield sem - - finally: - local.strong_store.locks_held.remove(name) - - -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): - """Synchronization decorator. - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one thread will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... - - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - """ - - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - try: - with lock(name, lock_file_prefix, external, lock_path): - LOG.debug(_('Got semaphore / lock "%(function)s"'), - {'function': f.__name__}) - return f(*args, **kwargs) - finally: - LOG.debug(_('Semaphore / lock released "%(function)s"'), - {'function': f.__name__}) - return inner - return wrap - - -def synchronized_with_prefix(lock_file_prefix): - """Partial object generator for the synchronization decorator. - - Redefine @synchronized in each project like so:: - - (in rack/utils.py) - from rack.openstack.common import lockutils - - synchronized = lockutils.synchronized_with_prefix('rack-') - - - (in rack/foo.py) - from rack import utils - - @utils.synchronized('mylock') - def bar(self, *args): - ... - - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. - """ - - return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) - - -def main(argv): - """Create a dir for locks and pass it to command from arguments - - If you run this: - python -m openstack.common.lockutils python setup.py testr - - a temporary directory will be created for all your locks and passed to all - your tests in an environment variable. The temporary dir will be deleted - afterwards and the return value will be preserved. 
- """ - - lock_dir = tempfile.mkdtemp() - os.environ["NOVA_LOCK_PATH"] = lock_dir - try: - ret_val = subprocess.call(argv[1:]) - finally: - shutil.rmtree(lock_dir, ignore_errors=True) - return ret_val - - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff --git a/rack/openstack/common/log.py b/rack/openstack/common/log.py deleted file mode 100644 index 7b72ba7..0000000 --- a/rack/openstack/common/log.py +++ /dev/null @@ -1,655 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Openstack logging handler. - -This module adds to logging functionality by adding the option to specify -a context object when calling the various log methods. If the context object -is not specified, default formatting is used. Additionally, an instance uuid -may be passed as part of the log message, which is intended to make it easier -for admins to find messages related to a specific instance. - -It also allows setting of formatting information through conf. - -""" - -import inspect -import itertools -import logging -import logging.config -import logging.handlers -import os -import re -import sys -import traceback - -from oslo.config import cfg -import six -from six import moves - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import importutils -from rack.openstack.common import jsonutils -from rack.openstack.common import local - - -_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - -_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] - -# NOTE(ldbragst): Let's build a list of regex objects using the list of -# _SANITIZE_KEYS we already have. This way, we only have to add the new key -# to the list of _SANITIZE_KEYS and we can generate regular expressions -# for XML and JSON automatically. -_SANITIZE_PATTERNS = [] -_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(<%(key)s>).*?()', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])'] - -for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS.append(reg_ex) - - -common_cli_opts = [ - cfg.BoolOpt('debug', - short='d', - default=False, - help='Print debugging output (set logging level to ' - 'DEBUG instead of default WARNING level).'), - cfg.BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output (set logging level to ' - 'INFO instead of default WARNING level).'), -] - -logging_cli_opts = [ - cfg.StrOpt('log-config-append', - metavar='PATH', - deprecated_name='log-config', - help='The name of logging configuration file. It does not ' - 'disable existing loggers, but just appends specified ' - 'logging configuration to any other existing logging ' - 'options. 
Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - cfg.StrOpt('log-format', - default=None, - metavar='FORMAT', - help='DEPRECATED. ' - 'A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'This option is deprecated. Please use ' - 'logging_context_format_string and ' - 'logging_default_format_string instead.'), - cfg.StrOpt('log-date-format', - default=_DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), - cfg.StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. ' - 'If no default is set, logging will go to stdout.'), - cfg.StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The base directory used for relative ' - '--log-file paths'), - cfg.BoolOpt('use-syslog', - default=False, - help='Use syslog for logging. ' - 'Existing syslog format is DEPRECATED during I, ' - 'and then will be changed in J to honor RFC5424'), - cfg.BoolOpt('use-syslog-rfc-format', - # TODO(bogdando) remove or use True after existing - # syslog format deprecation in J - default=False, - help='(Optional) Use syslog rfc5424 format for logging. ' - 'If enabled, will add APP-NAME (RFC5424) before the ' - 'MSG part of the syslog message. The old format ' - 'without APP-NAME is deprecated in I, ' - 'and will be removed in J.'), - cfg.StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') -] - -generic_log_opts = [ - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error') -] - -log_opts = [ - cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user)s %(tenant)s] ' - '%(instance)s%(message)s', - help='format string to use for log messages with context'), - cfg.StrOpt('logging_default_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [-] %(instance)s%(message)s', - help='format string to use for log messages without context'), - cfg.StrOpt('logging_debug_format_suffix', - default='%(funcName)s %(pathname)s:%(lineno)d', - help='data to append to log format when level is DEBUG'), - cfg.StrOpt('logging_exception_prefix', - default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' - '%(instance)s', - help='prefix each line of exception output with this format'), - cfg.ListOpt('default_log_levels', - default=[ - 'amqp=WARN', - 'amqplib=WARN', - 'boto=WARN', - 'qpid=WARN', - 'sqlalchemy=WARN', - 'suds=INFO', - 'oslo.messaging=INFO', - 'iso8601=WARN', - ], - help='list of logger=LEVEL pairs'), - cfg.BoolOpt('publish_errors', - default=False, - help='publish error events'), - cfg.BoolOpt('fatal_deprecations', - default=False, - help='make deprecations fatal'), - - # NOTE(mikal): there are two options here because sometimes we are handed - # a full instance (and could include more information), and other times we - # are just handed a UUID for the instance. 
- cfg.StrOpt('instance_format', - default='[instance: %(uuid)s] ', - help='If an instance is passed with the log message, format ' - 'it like this'), - cfg.StrOpt('instance_uuid_format', - default='[instance: %(uuid)s] ', - help='If an instance UUID is passed with the log message, ' - 'format it like this'), -] - -CONF = cfg.CONF -CONF.register_cli_opts(common_cli_opts) -CONF.register_cli_opts(logging_cli_opts) -CONF.register_opts(generic_log_opts) -CONF.register_opts(log_opts) - -# our new audit level -# NOTE(jkoelker) Since we synthesized an audit level, make the logging -# module aware of it so it acts like other levels. -logging.AUDIT = logging.INFO + 1 -logging.addLevelName(logging.AUDIT, 'AUDIT') - - -try: - NullHandler = logging.NullHandler -except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 - class NullHandler(logging.Handler): - def handle(self, record): - pass - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - - -def _dictify_context(context): - if context is None: - return None - if not isinstance(context, dict) and getattr(context, 'to_dict', None): - context = context.to_dict() - return context - - -def _get_binary_name(): - return os.path.basename(inspect.stack()[-1][1]) - - -def _get_log_file_path(binary=None): - logfile = CONF.log_file - logdir = CONF.log_dir - - if logfile and not logdir: - return logfile - - if logfile and logdir: - return os.path.join(logdir, logfile) - - if logdir: - binary = binary or _get_binary_name() - return '%s.log' % (os.path.join(logdir, binary),) - - return None - - -def mask_password(message, secret="***"): - """Replace password with 'secret' in message. - - :param message: The string which includes security information. - :param secret: value with which to replace passwords. - :returns: The unicode value of message with the password fields masked. - - For example: - - >>> mask_password("'adminPass' : 'aaaaa'") - "'adminPass' : '***'" - >>> mask_password("'admin_pass' : 'aaaaa'") - "'admin_pass' : '***'" - >>> mask_password('"password" : "aaaaa"') - '"password" : "***"' - >>> mask_password("'original_password' : 'aaaaa'") - "'original_password' : '***'" - >>> mask_password("u'original_password' : u'aaaaa'") - "u'original_password' : u'***'" - """ - message = six.text_type(message) - - # NOTE(ldbragst): Check to see if anything in message contains any key - # specified in _SANITIZE_KEYS, if not then just return the message since - # we don't have to mask any passwords. 
- if not any(key in message for key in _SANITIZE_KEYS): - return message - - secret = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS: - message = re.sub(pattern, secret, message) - return message - - -class BaseLoggerAdapter(logging.LoggerAdapter): - - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) - - -class LazyAdapter(BaseLoggerAdapter): - def __init__(self, name='unknown', version='unknown'): - self._logger = None - self.extra = {} - self.name = name - self.version = version - - @property - def logger(self): - if not self._logger: - self._logger = getLogger(self.name, self.version) - return self._logger - - -class ContextAdapter(BaseLoggerAdapter): - warn = logging.LoggerAdapter.warning - - def __init__(self, logger, project_name, version_string): - self.logger = logger - self.project = project_name - self.version = version_string - - @property - def handlers(self): - return self.logger.handlers - - def deprecated(self, msg, *args, **kwargs): - stdmsg = _("Deprecated: %s") % msg - if CONF.fatal_deprecations: - self.critical(stdmsg, *args, **kwargs) - raise DeprecatedConfig(msg=stdmsg) - else: - self.warn(stdmsg, *args, **kwargs) - - def process(self, msg, kwargs): - # NOTE(mrodden): catch any Message/other object and - # coerce to unicode before they can get - # to the python logging and possibly - # cause string encoding trouble - if not isinstance(msg, six.string_types): - msg = six.text_type(msg) - - if 'extra' not in kwargs: - kwargs['extra'] = {} - extra = kwargs['extra'] - - context = kwargs.pop('context', None) - if not context: - context = getattr(local.store, 'context', None) - if context: - extra.update(_dictify_context(context)) - - instance = kwargs.pop('instance', None) - instance_uuid = (extra.get('instance_uuid', None) or - kwargs.pop('instance_uuid', None)) - instance_extra = '' - if instance: - instance_extra = CONF.instance_format % instance - elif instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) - - extra.update({"project": self.project}) - extra.update({"version": self.version}) - extra['extra'] = extra.copy() - return msg, kwargs - - -class JSONFormatter(logging.Formatter): - def __init__(self, fmt=None, datefmt=None): - # NOTE(jkoelker) we ignore the fmt argument, but its still there - # since logging.config.fileConfig passes it. 
- self.datefmt = datefmt - - def formatException(self, ei, strip_newlines=True): - lines = traceback.format_exception(*ei) - if strip_newlines: - lines = [moves.filter( - lambda x: x, - line.rstrip().splitlines()) for line in lines] - lines = list(itertools.chain(*lines)) - return lines - - def format(self, record): - message = {'message': record.getMessage(), - 'asctime': self.formatTime(record, self.datefmt), - 'name': record.name, - 'msg': record.msg, - 'args': record.args, - 'levelname': record.levelname, - 'levelno': record.levelno, - 'pathname': record.pathname, - 'filename': record.filename, - 'module': record.module, - 'lineno': record.lineno, - 'funcname': record.funcName, - 'created': record.created, - 'msecs': record.msecs, - 'relative_created': record.relativeCreated, - 'thread': record.thread, - 'thread_name': record.threadName, - 'process_name': record.processName, - 'process': record.process, - 'traceback': None} - - if hasattr(record, 'extra'): - message['extra'] = record.extra - - if record.exc_info: - message['traceback'] = self.formatException(record.exc_info) - - return jsonutils.dumps(message) - - -def _create_logging_excepthook(product_name): - def logging_excepthook(exc_type, value, tb): - extra = {} - if CONF.verbose or CONF.debug: - extra['exc_info'] = (exc_type, value, tb) - getLogger(product_name).critical( - "".join(traceback.format_exception_only(exc_type, value)), - **extra) - return logging_excepthook - - -class LogConfigError(Exception): - - message = _('Error loading logging config %(log_config)s: %(err_msg)s') - - def __init__(self, log_config, err_msg): - self.log_config = log_config - self.err_msg = err_msg - - def __str__(self): - return self.message % dict(log_config=self.log_config, - err_msg=self.err_msg) - - -def _load_log_config(log_config_append): - try: - logging.config.fileConfig(log_config_append, - disable_existing_loggers=False) - except moves.configparser.Error as exc: - raise LogConfigError(log_config_append, str(exc)) - - -def setup(product_name): - """Setup logging.""" - if CONF.log_config_append: - _load_log_config(CONF.log_config_append) - else: - _setup_logging_from_conf() - sys.excepthook = _create_logging_excepthook(product_name) - - -def set_defaults(logging_context_format_string): - cfg.set_defaults(log_opts, - logging_context_format_string= - logging_context_format_string) - - -def _find_facility_from_conf(): - facility_names = logging.handlers.SysLogHandler.facility_names - facility = getattr(logging.handlers.SysLogHandler, - CONF.syslog_log_facility, - None) - - if facility is None and CONF.syslog_log_facility in facility_names: - facility = facility_names.get(CONF.syslog_log_facility) - - if facility is None: - valid_facilities = facility_names.keys() - consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', - 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', - 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', - 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', - 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] - valid_facilities.extend(consts) - raise TypeError(_('syslog facility must be one of: %s') % - ', '.join("'%s'" % fac - for fac in valid_facilities)) - - return facility - - -class RFCSysLogHandler(logging.handlers.SysLogHandler): - def __init__(self, *args, **kwargs): - self.binary_name = _get_binary_name() - super(RFCSysLogHandler, self).__init__(*args, **kwargs) - - def format(self, record): - msg = super(RFCSysLogHandler, self).format(record) - msg = self.binary_name + ' ' + msg - return 
msg - - -def _setup_logging_from_conf(): - log_root = getLogger(None).logger - for handler in log_root.handlers: - log_root.removeHandler(handler) - - if CONF.use_syslog: - facility = _find_facility_from_conf() - # TODO(bogdando) use the format provided by RFCSysLogHandler - # after existing syslog format deprecation in J - if CONF.use_syslog_rfc_format: - syslog = RFCSysLogHandler(address='/dev/log', - facility=facility) - else: - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - - logpath = _get_log_file_path() - if logpath: - filelog = logging.handlers.WatchedFileHandler(logpath) - log_root.addHandler(filelog) - - if CONF.use_stderr: - streamlog = ColorHandler() - log_root.addHandler(streamlog) - - elif not logpath: - # pass sys.stdout as a positional argument - # python2.6 calls the argument strm, in 2.7 it's stream - streamlog = logging.StreamHandler(sys.stdout) - log_root.addHandler(streamlog) - - if CONF.publish_errors: - handler = importutils.import_object( - "rack.openstack.common.log_handler.PublishErrorsHandler", - logging.ERROR) - log_root.addHandler(handler) - - datefmt = CONF.log_date_format - for handler in log_root.handlers: - # NOTE(alaski): CONF.log_format overrides everything currently. This - # should be deprecated in favor of context aware formatting. - if CONF.log_format: - handler.setFormatter(logging.Formatter(fmt=CONF.log_format, - datefmt=datefmt)) - log_root.info('Deprecated: log_format is now deprecated and will ' - 'be removed in the next release') - else: - handler.setFormatter(ContextFormatter(datefmt=datefmt)) - - if CONF.debug: - log_root.setLevel(logging.DEBUG) - elif CONF.verbose: - log_root.setLevel(logging.INFO) - else: - log_root.setLevel(logging.WARNING) - - for pair in CONF.default_log_levels: - mod, _sep, level_name = pair.partition('=') - level = logging.getLevelName(level_name) - logger = logging.getLogger(mod) - logger.setLevel(level) - -_loggers = {} - - -def getLogger(name='unknown', version='unknown'): - if name not in _loggers: - _loggers[name] = ContextAdapter(logging.getLogger(name), - name, - version) - return _loggers[name] - - -def getLazyLogger(name='unknown', version='unknown'): - """Returns lazy logger. - - Creates a pass-through logger that does not create the real logger - until it is really needed and delegates all calls to the real logger - once it is created. - """ - return LazyAdapter(name, version) - - -class WritableLogger(object): - """A thin wrapper that responds to `write` and logs.""" - - def __init__(self, logger, level=logging.INFO): - self.logger = logger - self.level = level - - def write(self, msg): - self.logger.log(self.level, msg.rstrip()) - - -class ContextFormatter(logging.Formatter): - """A context.RequestContext aware formatter configured through flags. - - The flags used to set format strings are: logging_context_format_string - and logging_default_format_string. You can also specify - logging_debug_format_suffix to append extra formatting if the log level is - debug. 
- - For information about what variables are available for the formatter see: - http://docs.python.org/library/logging.html#formatter - - """ - - def format(self, record): - """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formatting params - # to an empty string so we don't throw an exception if - # they get used - for key in ('instance', 'color'): - if key not in record.__dict__: - record.__dict__[key] = '' - - if record.__dict__.get('request_id', None): - self._fmt = CONF.logging_context_format_string - else: - self._fmt = CONF.logging_default_format_string - - if (record.levelno == logging.DEBUG and - CONF.logging_debug_format_suffix): - self._fmt += " " + CONF.logging_debug_format_suffix - - # Cache this on the record, Logger will respect our formatted copy - if record.exc_info: - record.exc_text = self.formatException(record.exc_info, record) - return logging.Formatter.format(self, record) - - def formatException(self, exc_info, record=None): - """Format exception output with CONF.logging_exception_prefix.""" - if not record: - return logging.Formatter.formatException(self, exc_info) - - stringbuffer = moves.StringIO() - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, stringbuffer) - lines = stringbuffer.getvalue().split('\n') - stringbuffer.close() - - if CONF.logging_exception_prefix.find('%(asctime)') != -1: - record.asctime = self.formatTime(record, self.datefmt) - - formatted_lines = [] - for line in lines: - pl = CONF.logging_exception_prefix % record.__dict__ - fl = '%s%s' % (pl, line) - formatted_lines.append(fl) - return '\n'.join(formatted_lines) - - -class ColorHandler(logging.StreamHandler): - LEVEL_COLORS = { - logging.DEBUG: '\033[00;32m', # GREEN - logging.INFO: '\033[00;36m', # CYAN - logging.AUDIT: '\033[01;36m', # BOLD CYAN - logging.WARN: '\033[01;33m', # BOLD YELLOW - logging.ERROR: '\033[01;31m', # BOLD RED - logging.CRITICAL: '\033[01;31m', # BOLD RED - } - - def format(self, record): - record.color = self.LEVEL_COLORS[record.levelno] - return logging.StreamHandler.format(self, record) - - -class DeprecatedConfig(Exception): - message = _("Fatal call to deprecated config: %(msg)s") - - def __init__(self, msg): - super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/rack/openstack/common/loopingcall.py b/rack/openstack/common/loopingcall.py deleted file mode 100644 index b80633a..0000000 --- a/rack/openstack/common/loopingcall.py +++ /dev/null @@ -1,147 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
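[Usage sketch for rack/openstack/common/log.py, deleted above; added for
 context. getLogger() returns a cached ContextAdapter, and AUDIT is the
 synthesized INFO + 1 level this module registers:

     from rack.openstack.common import log as logging

     LOG = logging.getLogger(__name__)
     LOG.audit('runs at the synthesized AUDIT level')
     LOG.deprecated('old option')  # warns, or raises DeprecatedConfig
                                   # when CONF.fatal_deprecations is set
]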
- -import sys - -from eventlet import event -from eventlet import greenthread - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import timeutils - -LOG = logging.getLogger(__name__) - - -class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCall. - - The poll-function passed to LoopingCall can raise this exception to - break out of the loop normally. This is somewhat analogous to - StopIteration. - - An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCall.wait() - - """ - - def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return.""" - self.retvalue = retvalue - - -class LoopingCallBase(object): - def __init__(self, f=None, *args, **kw): - self.args = args - self.kw = kw - self.f = f - self._running = False - self.done = None - - def stop(self): - self._running = False - - def wait(self): - return self.done.wait() - - -class FixedIntervalLoopingCall(LoopingCallBase): - """A fixed interval looping call.""" - - def start(self, interval, initial_delay=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - start = timeutils.utcnow() - self.f(*self.args, **self.kw) - end = timeutils.utcnow() - if not self._running: - break - delay = interval - timeutils.delta_seconds(start, end) - if delay <= 0: - LOG.warn(_('task run outlasted interval by %s sec') % - -delay) - greenthread.sleep(delay if delay > 0 else 0) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_('in fixed duration looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn_n(_inner) - return self.done - - -# TODO(mikal): this class name is deprecated in Havana and should be removed -# in the I release -LoopingCall = FixedIntervalLoopingCall - - -class DynamicLoopingCall(LoopingCallBase): - """A looping call which sleeps until the next known event. - - The function called should return how long to sleep for before being - called again. - """ - - def start(self, initial_delay=None, periodic_interval_max=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - idle = self.f(*self.args, **self.kw) - if not self._running: - break - - if periodic_interval_max is not None: - idle = min(idle, periodic_interval_max) - LOG.debug(_('Dynamic looping call sleeping for %.02f ' - 'seconds'), idle) - greenthread.sleep(idle) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_('in dynamic looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn(_inner) - return self.done diff --git a/rack/openstack/common/memorycache.py b/rack/openstack/common/memorycache.py deleted file mode 100644 index 843573b..0000000 --- a/rack/openstack/common/memorycache.py +++ /dev/null @@ -1,97 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Super simple fake memcache client.""" - -from oslo.config import cfg - -from rack.openstack.common import timeutils - -memcache_opts = [ - cfg.ListOpt('memcached_servers', - default=None, - help='Memcached servers or None for in process cache.'), -] - -CONF = cfg.CONF -CONF.register_opts(memcache_opts) - - -def get_client(memcached_servers=None): - client_cls = Client - - if not memcached_servers: - memcached_servers = CONF.memcached_servers - if memcached_servers: - try: - import memcache - client_cls = memcache.Client - except ImportError: - pass - - return client_cls(memcached_servers, debug=0) - - -class Client(object): - """Replicates a tiny subset of memcached client interface.""" - - def __init__(self, *args, **kwargs): - """Ignores the passed in args.""" - self.cache = {} - - def get(self, key): - """Retrieves the value for a key or None. - - This expunges expired keys during each get. - """ - - now = timeutils.utcnow_ts() - for k in self.cache.keys(): - (timeout, _value) = self.cache[k] - if timeout and now >= timeout: - del self.cache[k] - - return self.cache.get(key, (0, None))[1] - - def set(self, key, value, time=0, min_compress_len=0): - """Sets the value for a key.""" - timeout = 0 - if time != 0: - timeout = timeutils.utcnow_ts() + time - self.cache[key] = (timeout, value) - return True - - def add(self, key, value, time=0, min_compress_len=0): - """Sets the value for a key if it doesn't exist.""" - if self.get(key) is not None: - return False - return self.set(key, value, time, min_compress_len) - - def incr(self, key, delta=1): - """Increments the value for a key.""" - value = self.get(key) - if value is None: - return None - new_value = int(value) + delta - self.cache[key] = (self.cache[key][0], str(new_value)) - return new_value - - def delete(self, key, time=0): - """Deletes the value associated with a key.""" - if key in self.cache: - del self.cache[key] diff --git a/rack/openstack/common/middleware/__init__.py b/rack/openstack/common/middleware/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/openstack/common/middleware/base.py b/rack/openstack/common/middleware/base.py deleted file mode 100644 index 2099549..0000000 --- a/rack/openstack/common/middleware/base.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
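[Usage sketch for rack/openstack/common/memorycache.py, deleted above; added
 for context. The fake client mirrors just enough of python-memcached to be
 swapped in transparently:

     from rack.openstack.common import memorycache

     cache = memorycache.get_client()    # in-process dict unless
                                         # CONF.memcached_servers is set
     cache.set('token', 'abc', time=60)  # expires 60 seconds from now
     cache.get('token')                  # 'abc' until expiry, then None
     cache.incr('missing')               # None: incr needs an existing key
]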
-"""Base class(es) for WSGI Middleware.""" - -import webob.dec - - -class Middleware(object): - """Base WSGI middleware wrapper. - - These classes require an application to be initialized that will be called - next. By default the middleware will simply call its wrapped app, or you - can override __call__ to customize its behavior. - """ - - @classmethod - def factory(cls, global_conf, **local_conf): - """Factory method for paste.deploy.""" - return cls - - def __init__(self, application): - self.application = application - - def process_request(self, req): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - return self.process_response(response) diff --git a/rack/openstack/common/middleware/request_id.py b/rack/openstack/common/middleware/request_id.py deleted file mode 100644 index b84324f..0000000 --- a/rack/openstack/common/middleware/request_id.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2013 NEC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Middleware that ensures request ID. - -It ensures to assign request ID for each API request and set it to -request environment. The request ID is also added to API response. -""" - -from rack.openstack.common import context -from rack.openstack.common.middleware import base - - -ENV_REQUEST_ID = 'openstack.request_id' -HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id' - - -class RequestIdMiddleware(base.Middleware): - - def process_request(self, req): - self.req_id = context.generate_request_id() - req.environ[ENV_REQUEST_ID] = self.req_id - - def process_response(self, response): - response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id) - return response diff --git a/rack/openstack/common/network_utils.py b/rack/openstack/common/network_utils.py deleted file mode 100644 index dbed1ce..0000000 --- a/rack/openstack/common/network_utils.py +++ /dev/null @@ -1,81 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Network-related utilities and helper functions. -""" - -import urlparse - - -def parse_host_port(address, default_port=None): - """Interpret a string as a host:port pair. - - An IPv6 address MUST be escaped if accompanied by a port, - because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 - means both [2001:db8:85a3::8a2e:370:7334] and - [2001:db8:85a3::8a2e:370]:7334. - - >>> parse_host_port('server01:80') - ('server01', 80) - >>> parse_host_port('server01') - ('server01', None) - >>> parse_host_port('server01', default_port=1234) - ('server01', 1234) - >>> parse_host_port('[::1]:80') - ('::1', 80) - >>> parse_host_port('[::1]') - ('::1', None) - >>> parse_host_port('[::1]', default_port=1234) - ('::1', 1234) - >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) - ('2001:db8:85a3::8a2e:370:7334', 1234) - - """ - if address[0] == '[': - # Escaped ipv6 - _host, _port = address[1:].split(']') - host = _host - if ':' in _port: - port = _port.split(':')[1] - else: - port = default_port - else: - if address.count(':') == 1: - host, port = address.split(':') - else: - # 0 means ipv4, >1 means ipv6. - # We prohibit unescaped ipv6 addresses with port. - host = address - port = default_port - - return (host, None if port is None else int(port)) - - -def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL using urlparse.urlsplit(), splitting query and fragments. - This function papers over Python issue9374 when needed. - - The parameters are the same as urlparse.urlsplit. - """ - scheme, netloc, path, query, fragment = urlparse.urlsplit( - url, scheme, allow_fragments) - if allow_fragments and '#' in path: - path, fragment = path.split('#', 1) - if '?' in path: - path, query = path.split('?', 1) - return urlparse.SplitResult(scheme, netloc, path, query, fragment) diff --git a/rack/openstack/common/periodic_task.py b/rack/openstack/common/periodic_task.py deleted file mode 100644 index 37c8cd9..0000000 --- a/rack/openstack/common/periodic_task.py +++ /dev/null @@ -1,190 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import time - -from oslo.config import cfg -import six - -from rack.openstack.common.gettextutils import _ # noqa -from rack.openstack.common import log as logging -from rack.openstack.common import timeutils - - -periodic_opts = [ - cfg.BoolOpt('run_external_periodic_tasks', - default=True, - help=('Some periodic tasks can be run in a separate process. ' - 'Should we run them here?')), -] - -CONF = cfg.CONF -CONF.register_opts(periodic_opts) - -LOG = logging.getLogger(__name__) - -DEFAULT_INTERVAL = 60.0 - - -class InvalidPeriodicTaskArg(Exception): - message = _("Unexpected argument for periodic task creation: %(arg)s.") - - -def periodic_task(*args, **kwargs): - """Decorator to indicate that a method is a periodic task. - - This decorator can be used in two ways: - - 1. Without arguments '@periodic_task', this will be run on every cycle - of the periodic scheduler. 
- - 2. With arguments: - @periodic_task(spacing=N [, run_immediately=[True|False]]) - this will be run on approximately every N seconds. If this number is - negative the periodic task will be disabled. If the run_immediately - argument is provided and has a value of 'True', the first run of the - task will be shortly after task scheduler starts. If - run_immediately is omitted or set to 'False', the first time the - task runs will be approximately N seconds after the task scheduler - starts. - """ - def decorator(f): - # Test for old style invocation - if 'ticks_between_runs' in kwargs: - raise InvalidPeriodicTaskArg(arg='ticks_between_runs') - - # Control if run at all - f._periodic_task = True - f._periodic_external_ok = kwargs.pop('external_process_ok', False) - if f._periodic_external_ok and not CONF.run_external_periodic_tasks: - f._periodic_enabled = False - else: - f._periodic_enabled = kwargs.pop('enabled', True) - - # Control frequency - f._periodic_spacing = kwargs.pop('spacing', 0) - f._periodic_immediate = kwargs.pop('run_immediately', False) - if f._periodic_immediate: - f._periodic_last_run = None - else: - f._periodic_last_run = timeutils.utcnow() - return f - - # NOTE(sirp): The `if` is necessary to allow the decorator to be used with - # and without parens. - # - # In the 'with-parens' case (with kwargs present), this function needs to - # return a decorator function since the interpreter will invoke it like: - # - # periodic_task(*args, **kwargs)(f) - # - # In the 'without-parens' case, the original function will be passed - # in as the first argument, like: - # - # periodic_task(f) - if kwargs: - return decorator - else: - return decorator(args[0]) - - -class _PeriodicTasksMeta(type): - def __init__(cls, names, bases, dict_): - """Metaclass that allows us to collect decorated periodic tasks.""" - super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) - - # NOTE(sirp): if the attribute is not present then we must be the base - # class, so, go ahead an initialize it. If the attribute is present, - # then we're a subclass so make a copy of it so we don't step on our - # parent's toes. 
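        # [Usage sketch, added for context; not part of the deleted file.
        #  Typical application of the decorator defined above; Manager and
        #  report are hypothetical names:
        #
        #      class Manager(PeriodicTasks):
        #          @periodic_task(spacing=600, run_immediately=True)
        #          def report(self, context):
        #              pass  # run by run_periodic_tasks() ~every 600s
        # ]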
- try: - cls._periodic_tasks = cls._periodic_tasks[:] - except AttributeError: - cls._periodic_tasks = [] - - try: - cls._periodic_last_run = cls._periodic_last_run.copy() - except AttributeError: - cls._periodic_last_run = {} - - try: - cls._periodic_spacing = cls._periodic_spacing.copy() - except AttributeError: - cls._periodic_spacing = {} - - for value in cls.__dict__.values(): - if getattr(value, '_periodic_task', False): - task = value - name = task.__name__ - - if task._periodic_spacing < 0: - LOG.info(_('Skipping periodic task %(task)s because ' - 'its interval is negative'), - {'task': name}) - continue - if not task._periodic_enabled: - LOG.info(_('Skipping periodic task %(task)s because ' - 'it is disabled'), - {'task': name}) - continue - - # A periodic spacing of zero indicates that this task should - # be run every pass - if task._periodic_spacing == 0: - task._periodic_spacing = None - - cls._periodic_tasks.append((name, task)) - cls._periodic_spacing[name] = task._periodic_spacing - cls._periodic_last_run[name] = task._periodic_last_run - - -@six.add_metaclass(_PeriodicTasksMeta) -class PeriodicTasks(object): - - def run_periodic_tasks(self, context, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - idle_for = DEFAULT_INTERVAL - for task_name, task in self._periodic_tasks: - full_task_name = '.'.join([self.__class__.__name__, task_name]) - - now = timeutils.utcnow() - spacing = self._periodic_spacing[task_name] - last_run = self._periodic_last_run[task_name] - - # If a periodic task is _nearly_ due, then we'll run it early - if spacing is not None and last_run is not None: - due = last_run + datetime.timedelta(seconds=spacing) - if not timeutils.is_soon(due, 0.2): - idle_for = min(idle_for, timeutils.delta_seconds(now, due)) - continue - - if spacing is not None: - idle_for = min(idle_for, spacing) - - LOG.debug(_("Running periodic task %(full_task_name)s"), - {"full_task_name": full_task_name}) - self._periodic_last_run[task_name] = timeutils.utcnow() - - try: - task(self, context) - except Exception as e: - if raise_on_error: - raise - LOG.exception(_("Error during %(full_task_name)s: %(e)s"), - {"full_task_name": full_task_name, "e": e}) - time.sleep(0) - - return idle_for diff --git a/rack/openstack/common/policy.py b/rack/openstack/common/policy.py deleted file mode 100644 index aa016b5..0000000 --- a/rack/openstack/common/policy.py +++ /dev/null @@ -1,779 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Common Policy Engine Implementation - -Policies can be expressed in one of two forms: A list of lists, or a -string written in the new policy language. - -In the list-of-lists representation, each check inside the innermost -list is combined as with an "and" conjunction--for that check to pass, -all the specified checks must pass. These innermost lists are then -combined as with an "or" conjunction. 
This is the original way of -expressing policies, but there now exists a new way: the policy -language. - -In the policy language, each check is specified the same way as in the -list-of-lists representation: a simple "a:b" pair that is matched to -the correct code to perform that check. However, conjunction -operators are available, allowing for more expressiveness in crafting -policies. - -As an example, take the following rule, expressed in the list-of-lists -representation:: - - [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] - -In the policy language, this becomes:: - - role:admin or (project_id:%(project_id)s and role:projectadmin) - -The policy language also has the "not" operator, allowing a richer -policy rule:: - - project_id:%(project_id)s and not role:dunce - -Finally, two special policy checks should be mentioned; the policy -check "@" will always accept an access, and the policy check "!" will -always reject an access. (Note that if a rule is either the empty -list ("[]") or the empty string, this is equivalent to the "@" policy -check.) Of these, the "!" policy check is probably the most useful, -as it allows particular rules to be explicitly disabled. -""" - -import abc -import re -import urllib - -import urllib2 - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import jsonutils -from rack.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -_rules = None -_checks = {} - - -class Rules(dict): - """ - A store for rules. Handles the default_rule setting directly. - """ - - @classmethod - def load_json(cls, data, default_rule=None): - """ - Allow loading of JSON rule data. - """ - - # Suck in the JSON data and parse the rules - rules = dict((k, parse_rule(v)) for k, v in - jsonutils.loads(data).items()) - - return cls(rules, default_rule) - - def __init__(self, rules=None, default_rule=None): - """Initialize the Rules store.""" - - super(Rules, self).__init__(rules or {}) - self.default_rule = default_rule - - def __missing__(self, key): - """Implements the default rule handling.""" - - # If the default rule isn't actually defined, do something - # reasonably intelligent - if not self.default_rule or self.default_rule not in self: - raise KeyError(key) - - return self[self.default_rule] - - def __str__(self): - """Dumps a string representation of the rules.""" - - # Start by building the canonical strings for the rules - out_rules = {} - for key, value in self.items(): - # Use empty string for singleton TrueCheck instances - if isinstance(value, TrueCheck): - out_rules[key] = '' - else: - out_rules[key] = str(value) - - # Dump a pretty-printed JSON representation - return jsonutils.dumps(out_rules, indent=4) - - -# Really have to figure out a way to deprecate this -def set_rules(rules): - """Set the rules in use for policy checks.""" - - global _rules - - _rules = rules - - -# Ditto -def reset(): - """Clear the rules used for policy checks.""" - - global _rules - - _rules = None - - -def check(rule, target, creds, exc=None, *args, **kwargs): - """ - Checks authorization of a rule against the target and credentials. - - :param rule: The rule to evaluate. - :param target: As much information about the object being operated - on as possible, as a dictionary. - :param creds: As much information about the user performing the - action as possible, as a dictionary. - :param exc: Class of the exception to raise if the check fails. 
- Any remaining arguments passed to check() (both - positional and keyword arguments) will be passed to - the exception class. If exc is not provided, returns - False. - - :return: Returns False if the policy does not allow the action and - exc is not provided; otherwise, returns a value that - evaluates to True. Note: for rules using the "case" - expression, this True value will be the specified string - from the expression. - """ - - # Allow the rule to be a Check tree - if isinstance(rule, BaseCheck): - result = rule(target, creds) - elif not _rules: - # No rules to reference means we're going to fail closed - result = False - else: - try: - # Evaluate the rule - result = _rules[rule](target, creds) - except KeyError: - # If the rule doesn't exist, fail closed - result = False - - # If it is False, raise the exception if requested - if exc and result is False: - raise exc(*args, **kwargs) - - return result - - -class BaseCheck(object): - """ - Abstract base class for Check classes. - """ - - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def __str__(self): - """ - Retrieve a string representation of the Check tree rooted at - this node. - """ - - pass - - @abc.abstractmethod - def __call__(self, target, cred): - """ - Perform the check. Returns False to reject the access or a - true value (not necessary True) to accept the access. - """ - - pass - - -class FalseCheck(BaseCheck): - """ - A policy check that always returns False (disallow). - """ - - def __str__(self): - """Return a string representation of this check.""" - - return "!" - - def __call__(self, target, cred): - """Check the policy.""" - - return False - - -class TrueCheck(BaseCheck): - """ - A policy check that always returns True (allow). - """ - - def __str__(self): - """Return a string representation of this check.""" - - return "@" - - def __call__(self, target, cred): - """Check the policy.""" - - return True - - -class Check(BaseCheck): - """ - A base class to allow for user-defined policy checks. - """ - - def __init__(self, kind, match): - """ - :param kind: The kind of the check, i.e., the field before the - ':'. - :param match: The match of the check, i.e., the field after - the ':'. - """ - - self.kind = kind - self.match = match - - def __str__(self): - """Return a string representation of this check.""" - - return "%s:%s" % (self.kind, self.match) - - -class NotCheck(BaseCheck): - """ - A policy check that inverts the result of another policy check. - Implements the "not" operator. - """ - - def __init__(self, rule): - """ - Initialize the 'not' check. - - :param rule: The rule to negate. Must be a Check. - """ - - self.rule = rule - - def __str__(self): - """Return a string representation of this check.""" - - return "not %s" % self.rule - - def __call__(self, target, cred): - """ - Check the policy. Returns the logical inverse of the wrapped - check. - """ - - return not self.rule(target, cred) - - -class AndCheck(BaseCheck): - """ - A policy check that requires that a list of other checks all - return True. Implements the "and" operator. - """ - - def __init__(self, rules): - """ - Initialize the 'and' check. - - :param rules: A list of rules that will be tested. - """ - - self.rules = rules - - def __str__(self): - """Return a string representation of this check.""" - - return "(%s)" % ' and '.join(str(r) for r in self.rules) - - def __call__(self, target, cred): - """ - Check the policy. Requires that all rules accept in order to - return True. 
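-
-        For example (illustrative)::
-
-            check = AndCheck([_parse_check('role:admin'),
-                              _parse_check('project_id:%(project_id)s')])
-            check(target, creds)  # True only when both sub-checks pass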
- """ - - for rule in self.rules: - if not rule(target, cred): - return False - - return True - - def add_check(self, rule): - """ - Allows addition of another rule to the list of rules that will - be tested. Returns the AndCheck object for convenience. - """ - - self.rules.append(rule) - return self - - -class OrCheck(BaseCheck): - """ - A policy check that requires that at least one of a list of other - checks returns True. Implements the "or" operator. - """ - - def __init__(self, rules): - """ - Initialize the 'or' check. - - :param rules: A list of rules that will be tested. - """ - - self.rules = rules - - def __str__(self): - """Return a string representation of this check.""" - - return "(%s)" % ' or '.join(str(r) for r in self.rules) - - def __call__(self, target, cred): - """ - Check the policy. Requires that at least one rule accept in - order to return True. - """ - - for rule in self.rules: - if rule(target, cred): - return True - - return False - - def add_check(self, rule): - """ - Allows addition of another rule to the list of rules that will - be tested. Returns the OrCheck object for convenience. - """ - - self.rules.append(rule) - return self - - -def _parse_check(rule): - """ - Parse a single base check rule into an appropriate Check object. - """ - - # Handle the special checks - if rule == '!': - return FalseCheck() - elif rule == '@': - return TrueCheck() - - try: - kind, match = rule.split(':', 1) - except Exception: - LOG.exception(_("Failed to understand rule %(rule)s") % locals()) - # If the rule is invalid, we'll fail closed - return FalseCheck() - - # Find what implements the check - if kind in _checks: - return _checks[kind](kind, match) - elif None in _checks: - return _checks[None](kind, match) - else: - LOG.error(_("No handler for matches of kind %s") % kind) - return FalseCheck() - - -def _parse_list_rule(rule): - """ - Provided for backwards compatibility. Translates the old - list-of-lists syntax into a tree of Check objects. - """ - - # Empty rule defaults to True - if not rule: - return TrueCheck() - - # Outer list is joined by "or"; inner list by "and" - or_list = [] - for inner_rule in rule: - # Elide empty inner lists - if not inner_rule: - continue - - # Handle bare strings - if isinstance(inner_rule, basestring): - inner_rule = [inner_rule] - - # Parse the inner rules into Check objects - and_list = [_parse_check(r) for r in inner_rule] - - # Append the appropriate check to the or_list - if len(and_list) == 1: - or_list.append(and_list[0]) - else: - or_list.append(AndCheck(and_list)) - - # If we have only one check, omit the "or" - if len(or_list) == 0: - return FalseCheck() - elif len(or_list) == 1: - return or_list[0] - - return OrCheck(or_list) - - -# Used for tokenizing the policy language -_tokenize_re = re.compile(r'\s+') - - -def _parse_tokenize(rule): - """ - Tokenizer for the policy language. - - Most of the single-character tokens are specified in the - _tokenize_re; however, parentheses need to be handled specially, - because they can appear inside a check string. Thankfully, those - parentheses that appear inside a check string can never occur at - the very beginning or end ("%(variable)s" is the correct syntax). 
- """ - - for tok in _tokenize_re.split(rule): - # Skip empty tokens - if not tok or tok.isspace(): - continue - - # Handle leading parens on the token - clean = tok.lstrip('(') - for i in range(len(tok) - len(clean)): - yield '(', '(' - - # If it was only parentheses, continue - if not clean: - continue - else: - tok = clean - - # Handle trailing parens on the token - clean = tok.rstrip(')') - trail = len(tok) - len(clean) - - # Yield the cleaned token - lowered = clean.lower() - if lowered in ('and', 'or', 'not'): - # Special tokens - yield lowered, clean - elif clean: - # Not a special token, but not composed solely of ')' - if len(tok) >= 2 and ((tok[0], tok[-1]) in - [('"', '"'), ("'", "'")]): - # It's a quoted string - yield 'string', tok[1:-1] - else: - yield 'check', _parse_check(clean) - - # Yield the trailing parens - for i in range(trail): - yield ')', ')' - - -class ParseStateMeta(type): - """ - Metaclass for the ParseState class. Facilitates identifying - reduction methods. - """ - - def __new__(mcs, name, bases, cls_dict): - """ - Create the class. Injects the 'reducers' list, a list of - tuples matching token sequences to the names of the - corresponding reduction methods. - """ - - reducers = [] - - for key, value in cls_dict.items(): - if not hasattr(value, 'reducers'): - continue - for reduction in value.reducers: - reducers.append((reduction, key)) - - cls_dict['reducers'] = reducers - - return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) - - -def reducer(*tokens): - """ - Decorator for reduction methods. Arguments are a sequence of - tokens, in order, which should trigger running this reduction - method. - """ - - def decorator(func): - # Make sure we have a list of reducer sequences - if not hasattr(func, 'reducers'): - func.reducers = [] - - # Add the tokens to the list of reducer sequences - func.reducers.append(list(tokens)) - - return func - - return decorator - - -class ParseState(object): - """ - Implement the core of parsing the policy language. Uses a greedy - reduction algorithm to reduce a sequence of tokens into a single - terminal, the value of which will be the root of the Check tree. - - Note: error reporting is rather lacking. The best we can get with - this parser formulation is an overall "parse failed" error. - Fortunately, the policy language is simple enough that this - shouldn't be that big a problem. - """ - - __metaclass__ = ParseStateMeta - - def __init__(self): - """Initialize the ParseState.""" - - self.tokens = [] - self.values = [] - - def reduce(self): - """ - Perform a greedy reduction of the token stream. If a reducer - method matches, it will be executed, then the reduce() method - will be called recursively to search for any more possible - reductions. - """ - - for reduction, methname in self.reducers: - if (len(self.tokens) >= len(reduction) and - self.tokens[-len(reduction):] == reduction): - # Get the reduction method - meth = getattr(self, methname) - - # Reduce the token stream - results = meth(*self.values[-len(reduction):]) - - # Update the tokens and values - self.tokens[-len(reduction):] = [r[0] for r in results] - self.values[-len(reduction):] = [r[1] for r in results] - - # Check for any more reductions - return self.reduce() - - def shift(self, tok, value): - """Adds one more token to the state. Calls reduce().""" - - self.tokens.append(tok) - self.values.append(value) - - # Do a greedy reduce... - self.reduce() - - @property - def result(self): - """ - Obtain the final result of the parse. 
Raises ValueError if - the parse failed to reduce to a single result. - """ - - if len(self.values) != 1: - raise ValueError("Could not parse rule") - return self.values[0] - - @reducer('(', 'check', ')') - @reducer('(', 'and_expr', ')') - @reducer('(', 'or_expr', ')') - def _wrap_check(self, _p1, check, _p2): - """Turn parenthesized expressions into a 'check' token.""" - - return [('check', check)] - - @reducer('check', 'and', 'check') - def _make_and_expr(self, check1, _and, check2): - """ - Create an 'and_expr' from two checks joined by the 'and' - operator. - """ - - return [('and_expr', AndCheck([check1, check2]))] - - @reducer('and_expr', 'and', 'check') - def _extend_and_expr(self, and_expr, _and, check): - """ - Extend an 'and_expr' by adding one more check. - """ - - return [('and_expr', and_expr.add_check(check))] - - @reducer('check', 'or', 'check') - def _make_or_expr(self, check1, _or, check2): - """ - Create an 'or_expr' from two checks joined by the 'or' - operator. - """ - - return [('or_expr', OrCheck([check1, check2]))] - - @reducer('or_expr', 'or', 'check') - def _extend_or_expr(self, or_expr, _or, check): - """ - Extend an 'or_expr' by adding one more check. - """ - - return [('or_expr', or_expr.add_check(check))] - - @reducer('not', 'check') - def _make_not_expr(self, _not, check): - """Invert the result of another check.""" - - return [('check', NotCheck(check))] - - -def _parse_text_rule(rule): - """ - Translates a policy written in the policy language into a tree of - Check objects. - """ - - # Empty rule means always accept - if not rule: - return TrueCheck() - - # Parse the token stream - state = ParseState() - for tok, value in _parse_tokenize(rule): - state.shift(tok, value) - - try: - return state.result - except ValueError: - # Couldn't parse the rule - LOG.exception(_("Failed to understand rule %(rule)r") % locals()) - - # Fail closed - return FalseCheck() - - -def parse_rule(rule): - """ - Parses a policy rule into a tree of Check objects. - """ - - # If the rule is a string, it's in the policy language - if isinstance(rule, basestring): - return _parse_text_rule(rule) - return _parse_list_rule(rule) - - -def register(name, func=None): - """ - Register a function or Check class as a policy check. - - :param name: Gives the name of the check type, e.g., 'rule', - 'role', etc. If name is None, a default check type - will be registered. - :param func: If given, provides the function or class to register. - If not given, returns a function taking one argument - to specify the function or class to register, - allowing use as a decorator. - """ - - # Perform the actual decoration by registering the function or - # class. Returns the function or class for compliance with the - # decorator interface. - def decorator(func): - _checks[name] = func - return func - - # If the function or class is given, do the registration - if func: - return decorator(func) - - return decorator - - -@register("rule") -class RuleCheck(Check): - def __call__(self, target, creds): - """ - Recursively checks credentials based on the defined rules. 
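-
-        A sketch (illustrative; assumes the named rule has been loaded
-        via :func:`set_rules`)::
-
-            set_rules(Rules({'admin_required': parse_rule('role:admin')}))
-            RuleCheck('rule', 'admin_required')(target, creds)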
- """ - - try: - return _rules[self.match](target, creds) - except KeyError: - # We don't have any matching rule; fail closed - return False - - -@register("role") -class RoleCheck(Check): - def __call__(self, target, creds): - """Check that there is a matching role in the cred dict.""" - - return self.match.lower() in [x.lower() for x in creds['roles']] - - -@register('http') -class HttpCheck(Check): - def __call__(self, target, creds): - """ - Check http: rules by calling to a remote server. - - This example implementation simply verifies that the response - is exactly 'True'. - """ - - url = ('http:' + self.match) % target - data = {'target': jsonutils.dumps(target), - 'credentials': jsonutils.dumps(creds)} - post_data = urllib.urlencode(data) - f = urllib2.urlopen(url, post_data) - return f.read() == "True" - - -@register(None) -class GenericCheck(Check): - def __call__(self, target, creds): - """ - Check an individual match. - - Matches look like: - - tenant:%(tenant_id)s - role:compute:admin - """ - - # TODO(termie): do dict inspection via dot syntax - match = self.match % target - if self.kind in creds: - return match == unicode(creds[self.kind]) - return False diff --git a/rack/openstack/common/processutils.py b/rack/openstack/common/processutils.py deleted file mode 100644 index 17475b2..0000000 --- a/rack/openstack/common/processutils.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. 
-""" - -import errno -import logging as stdlib_logging -import os -import random -import shlex -import signal - -from eventlet.green import subprocess -from eventlet import greenthread -import six - -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -class InvalidArgumentError(Exception): - def __init__(self, message=None): - super(InvalidArgumentError, self).__init__(message) - - -class UnknownArgumentError(Exception): - def __init__(self, message=None): - super(UnknownArgumentError, self).__init__(message) - - -class ProcessExecutionError(Exception): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - self.exit_code = exit_code - self.stderr = stderr - self.stdout = stdout - self.cmd = cmd - self.description = description - - if description is None: - description = _("Unexpected error while running command.") - if exit_code is None: - exit_code = '-' - message = _('%(description)s\n' - 'Command: %(cmd)s\n' - 'Exit code: %(exit_code)s\n' - 'Stdout: %(stdout)r\n' - 'Stderr: %(stderr)r') % {'description': description, - 'cmd': cmd, - 'exit_code': exit_code, - 'stdout': stdout, - 'stderr': stderr} - super(ProcessExecutionError, self).__init__(message) - - -class NoRootWrapSpecified(Exception): - def __init__(self, message=None): - super(NoRootWrapSpecified, self).__init__(message) - - -def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - -def execute(*cmd, **kwargs): - """Helper method to shell out and execute a command through subprocess. - - Allows optional retry. - - :param cmd: Passed to subprocess.Popen. - :type cmd: string - :param process_input: Send to opened process. - :type process_input: string - :param check_exit_code: Single bool, int, or list of allowed exit - codes. Defaults to [0]. Raise - :class:`ProcessExecutionError` unless - program exits with one of these code. - :type check_exit_code: boolean, int, or [int] - :param delay_on_retry: True | False. Defaults to True. If set to True, - wait a short amount of time before retrying. - :type delay_on_retry: boolean - :param attempts: How many times to retry cmd. - :type attempts: int - :param run_as_root: True | False. Defaults to False. If set to True, - the command is prefixed by the command specified - in the root_helper kwarg. - :type run_as_root: boolean - :param root_helper: command to prefix to commands called with - run_as_root=True - :type root_helper: string - :param shell: whether or not there should be a shell used to - execute this command. Defaults to false. - :type shell: boolean - :param loglevel: log level for execute commands. - :type loglevel: int. 
(Should be stdlib_logging.DEBUG or - stdlib_logging.INFO) - :returns: (stdout, stderr) from process execution - :raises: :class:`UnknownArgumentError` on - receiving unknown arguments - :raises: :class:`ProcessExecutionError` - """ - - process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', [0]) - ignore_exit_code = False - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) - root_helper = kwargs.pop('root_helper', '') - shell = kwargs.pop('shell', False) - loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) - - if isinstance(check_exit_code, bool): - ignore_exit_code = not check_exit_code - check_exit_code = [0] - elif isinstance(check_exit_code, int): - check_exit_code = [check_exit_code] - - if kwargs: - raise UnknownArgumentError(_('Got unknown keyword args ' - 'to utils.execute: %r') % kwargs) - - if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: - if not root_helper: - raise NoRootWrapSpecified( - message=_('Command requested root, but did not ' - 'specify a root helper.')) - cmd = shlex.split(root_helper) + list(cmd) - - cmd = map(str, cmd) - - while attempts > 0: - attempts -= 1 - try: - LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd)) - _PIPE = subprocess.PIPE # pylint: disable=E1101 - - if os.name == 'nt': - preexec_fn = None - close_fds = False - else: - preexec_fn = _subprocess_setup - close_fds = True - - obj = subprocess.Popen(cmd, - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - close_fds=close_fds, - preexec_fn=preexec_fn, - shell=shell) - result = None - for _i in six.moves.range(20): - # NOTE(russellb) 20 is an arbitrary number of retries to - # prevent any chance of looping forever here. - try: - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - except OSError as e: - if e.errno in (errno.EAGAIN, errno.EINTR): - continue - raise - break - obj.stdin.close() # pylint: disable=E1101 - _returncode = obj.returncode # pylint: disable=E1101 - LOG.log(loglevel, _('Result was %s') % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) - return result - except ProcessExecutionError: - if not attempts: - raise - else: - LOG.log(loglevel, _('%r failed. Retrying.'), cmd) - if delay_on_retry: - greenthread.sleep(random.randint(20, 200) / 100.0) - finally: - # NOTE(termie): this appears to be necessary to let the subprocess - # call clean something up in between calls, without - # it two execute calls in a row hangs the second one - greenthread.sleep(0) - - -def trycmd(*args, **kwargs): - """A wrapper around execute() to more easily handle warnings and errors. - - Returns an (out, err) tuple of strings containing the output of - the command's stdout and stderr. If 'err' is not empty then the - command can be considered to have failed. - - :discard_warnings True | False. Defaults to False. 
If set to True, - then for succeeding commands, stderr is cleared - - """ - discard_warnings = kwargs.pop('discard_warnings', False) - - try: - out, err = execute(*args, **kwargs) - failed = False - except ProcessExecutionError as exn: - out, err = '', str(exn) - failed = True - - if not failed and discard_warnings and err: - # Handle commands that output to stderr but otherwise succeed - err = '' - - return out, err - - -def ssh_execute(ssh, cmd, process_input=None, - addl_env=None, check_exit_code=True): - LOG.debug(_('Running cmd (SSH): %s'), cmd) - if addl_env: - raise InvalidArgumentError(_('Environment not supported over SSH')) - - if process_input: - # This is (probably) fixable if we need it... - raise InvalidArgumentError(_('process_input not supported over SSH')) - - stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) - channel = stdout_stream.channel - - # NOTE(justinsb): This seems suspicious... - # ...other SSH clients have buffering issues with this approach - stdout = stdout_stream.read() - stderr = stderr_stream.read() - stdin_stream.close() - - exit_status = channel.recv_exit_status() - - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug(_('Result was %s') % exit_status) - if check_exit_code and exit_status != 0: - raise ProcessExecutionError(exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=cmd) - - return (stdout, stderr) diff --git a/rack/openstack/common/report/__init__.py b/rack/openstack/common/report/__init__.py deleted file mode 100644 index 35390ec..0000000 --- a/rack/openstack/common/report/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides a way to generate serializable reports - -This package/module provides mechanisms for defining reports -which may then be serialized into various data types. Each -report ( :class:`openstack.common.report.report.BasicReport` ) -is composed of one or more report sections -( :class:`openstack.common.report.report.BasicSection` ), -which contain generators which generate data models -( :class:`openstack.common.report.models.base.ReportModels` ), -which are then serialized by views. -""" diff --git a/rack/openstack/common/report/generators/__init__.py b/rack/openstack/common/report/generators/__init__.py deleted file mode 100644 index 68473f2..0000000 --- a/rack/openstack/common/report/generators/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides Data Model Generators - -This module defines classes for generating data models -( :class:`openstack.common.report.models.base.ReportModel` ). -A generator is any object which is callable with no parameters -and returns a data model. -""" diff --git a/rack/openstack/common/report/generators/conf.py b/rack/openstack/common/report/generators/conf.py deleted file mode 100644 index 11b1f0d..0000000 --- a/rack/openstack/common/report/generators/conf.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides Openstack config generators - -This module defines a class for configuration -generators for generating the model in -:mod:`openstack.common.report.models.conf`. -""" - -from oslo.config import cfg - -import rack.openstack.common.report.models.conf as cm - - -class ConfigReportGenerator(object): - """A Configuration Data Generator - - This generator returns - :class:`openstack.common.report.models.conf.ConfigModel` , - by default using the configuration options stored - in :attr:`oslo.config.cfg.CONF`, which is where - Openstack stores everything. - - :param cnf: the configuration option object - :type cnf: :class:`oslo.config.cfg.ConfigOpts` - """ - - def __init__(self, cnf=cfg.CONF): - self.conf_obj = cnf - - def __call__(self): - return cm.ConfigModel(self.conf_obj) diff --git a/rack/openstack/common/report/generators/threading.py b/rack/openstack/common/report/generators/threading.py deleted file mode 100644 index e0eecf7..0000000 --- a/rack/openstack/common/report/generators/threading.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides thread-related generators - -This module defines classes for threading-related -generators for generating the models in -:mod:`openstack.common.report.models.threading`. 
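-
-For example (illustrative), either generator can be attached
-directly to a report section::
-
-    report.add_section(view, ThreadReportGenerator())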
-""" - -import sys - -import greenlet - -import rack.openstack.common.report.models.threading as tm -from rack.openstack.common.report.models import with_default_views as mwdv -import rack.openstack.common.report.utils as rutils -import rack.openstack.common.report.views.text.generic as text_views - - -class ThreadReportGenerator(object): - """A Thread Data Generator - - This generator returns a collection of - :class:`openstack.common.report.models.threading.ThreadModel` - objects by introspecting the current python state using - :func:`sys._current_frames()` . - """ - - def __call__(self): - threadModels = [ - tm.ThreadModel(thread_id, stack) - for thread_id, stack in sys._current_frames().items() - ] - - thread_pairs = dict(zip(range(len(threadModels)), threadModels)) - return mwdv.ModelWithDefaultViews(thread_pairs, - text_view=text_views.MultiView()) - - -class GreenThreadReportGenerator(object): - """A Green Thread Data Generator - - This generator returns a collection of - :class:`openstack.common.report.models.threading.GreenThreadModel` - objects by introspecting the current python garbage collection - state, and sifting through for :class:`greenlet.greenlet` objects. - - .. seealso:: - - Function :func:`openstack.common.report.utils._find_objects` - """ - - def __call__(self): - threadModels = [ - tm.GreenThreadModel(gr.gr_frame) - for gr in rutils._find_objects(greenlet.greenlet) - ] - - thread_pairs = dict(zip(range(len(threadModels)), threadModels)) - return mwdv.ModelWithDefaultViews(thread_pairs, - text_view=text_views.MultiView()) diff --git a/rack/openstack/common/report/generators/version.py b/rack/openstack/common/report/generators/version.py deleted file mode 100644 index 5aeab34..0000000 --- a/rack/openstack/common/report/generators/version.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides Openstack version generators - -This module defines a class for Openstack -version and package information -generators for generating the model in -:mod:`openstack.common.report.models.version`. -""" - -import rack.openstack.common.report.models.version as vm - - -class PackageReportGenerator(object): - """A Package Information Data Generator - - This generator returns - :class:`openstack.common.report.models.version.PackageModel`, - extracting data from the given version object, which should follow - the general format defined in Nova's version information (i.e. it - should contain the methods vendor_string, product_string, and - version_string_with_package). 
- - :param version_object: the version information object - """ - - def __init__(self, version_obj): - self.version_obj = version_obj - - def __call__(self): - return vm.PackageModel( - self.version_obj.vendor_string(), - self.version_obj.product_string(), - self.version_obj.version_string_with_package()) diff --git a/rack/openstack/common/report/guru_meditation_report.py b/rack/openstack/common/report/guru_meditation_report.py deleted file mode 100644 index a01c520..0000000 --- a/rack/openstack/common/report/guru_meditation_report.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides Guru Meditation Report - -This module defines the actual OpenStack Guru Meditation -Report class. - -This can be used in the OpenStack command definition files. -For example, in a rack command module (under rack/cmd): - -.. code-block:: python - :emphasize-lines: 8,9,10 - - CONF = cfg.CONF - # maybe import some options here... - - def main(): - config.parse_args(sys.argv) - logging.setup('blah') - - TextGuruMeditation.register_section('Some Special Section', - special_section_generator) - TextGuruMeditation.setup_autorun(version_object) - - server = service.Service.create(binary='some-service', - topic=CONF.some_service_topic) - service.serve(server) - service.wait() - -Then, you can do - -.. code-block:: bash - - $ kill -USR1 $SERVICE_PID - -and get a Guru Meditation Report in the file or terminal -where stderr is logged for that given service. -""" - -from __future__ import print_function - -import signal -import sys - -from rack.openstack.common.report.generators import conf as cgen -from rack.openstack.common.report.generators import threading as tgen -from rack.openstack.common.report.generators import version as pgen -from rack.openstack.common.report import report - - -class GuruMeditation(object): - """A Guru Meditation Report Mixin/Base Class - - This class is a base class for Guru Meditation Reports. - It provides facilities for registering sections and - setting up functionality to auto-run the report on - a certain signal. - - This class should always be used in conjunction with - a Report class via multiple inheritance. It should - always come first in the class list to ensure the - MRO is correct. - """ - - def __init__(self, version_obj, *args, **kwargs): - self.version_obj = version_obj - - super(GuruMeditation, self).__init__(*args, **kwargs) - self.start_section_index = len(self.sections) - - @classmethod - def register_section(cls, section_title, generator): - """Register a New Section - - This method registers a persistent section for the current - class. 
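-
-        For example (illustrative; ``my_generator`` is any no-argument
-        callable returning a model)::
-
-            TextGuruMeditation.register_section('Some Section', my_generator)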
- - :param str section_title: the title of the section - :param generator: the generator for the section - """ - - try: - cls.persistent_sections.append([section_title, generator]) - except AttributeError: - cls.persistent_sections = [[section_title, generator]] - - @classmethod - def setup_autorun(cls, version, signum=None): - """Set Up Auto-Run - - This method sets up the Guru Meditation Report to automatically - get dumped to stderr when the given signal is received. - - :param version: the version object for the current product - :param signum: the signal to associate with running the report - """ - - if not signum and hasattr(signal, 'SIGUSR1'): - # SIGUSR1 is not supported on all platforms - signum = signal.SIGUSR1 - - if signum: - signal.signal(signum, - lambda *args: cls.handle_signal(version, *args)) - - @classmethod - def handle_signal(cls, version, *args): - """The Signal Handler - - This method (indirectly) handles receiving a registered signal and - dumping the Guru Meditation Report to stderr. This method is designed - to be curried into a proper signal handler by currying out the version - parameter. - - :param version: the version object for the current product - """ - - try: - res = cls(version).run() - except Exception: - print("Unable to run Guru Meditation Report!", - file=sys.stderr) - else: - print(res, file=sys.stderr) - - def _readd_sections(self): - del self.sections[self.start_section_index:] - - self.add_section('Package', - pgen.PackageReportGenerator(self.version_obj)) - - self.add_section('Threads', - tgen.ThreadReportGenerator()) - - self.add_section('Green Threads', - tgen.GreenThreadReportGenerator()) - - self.add_section('Configuration', - cgen.ConfigReportGenerator()) - - try: - for section_title, generator in self.persistent_sections: - self.add_section(section_title, generator) - except AttributeError: - pass - - def run(self): - self._readd_sections() - return super(GuruMeditation, self).run() - - -# GuruMeditation must come first to get the correct MRO -class TextGuruMeditation(GuruMeditation, report.TextReport): - """A Text Guru Meditation Report - - This report is the basic human-readable Guru Meditation Report - - It contains the following sections by default - (in addition to any registered persistent sections): - - - Package Information - - - Threads List - - - Green Threads List - - - Configuration Options - - :param version_obj: the version object for the current product - """ - - def __init__(self, version_obj): - super(TextGuruMeditation, self).__init__(version_obj, - 'Guru Meditation') diff --git a/rack/openstack/common/report/models/__init__.py b/rack/openstack/common/report/models/__init__.py deleted file mode 100644 index 7bfed3d..0000000 --- a/rack/openstack/common/report/models/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Provides data models - -This module provides both the base data model, -as well as several predefined specific data models -to be used in reports. -""" diff --git a/rack/openstack/common/report/models/base.py b/rack/openstack/common/report/models/base.py deleted file mode 100644 index 90914ff..0000000 --- a/rack/openstack/common/report/models/base.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides the base report model - -This module defines a class representing the basic report -data model from which all data models should inherit (or -at least implement similar functionality). Data models -store unserialized data generated by generators during -the report serialization process. -""" - -import collections as col -import copy - - -class ReportModel(col.MutableMapping): - """A Report Data Model - - A report data model contains data generated by some - generator method or class. Data may be read or written - using dictionary-style access, and may be read (but not - written) using object-member-style access. Additionally, - a data model may have an associated view. This view is - used to serialize the model when str() is called on the - model. An appropriate object for a view is callable with - a single parameter: the model to be serialized. 
-
-    :param data: a dictionary of data to initially associate with the model
-    :param attached_view: a view object to attach to this model
-    """
-
-    def __init__(self, data=None, attached_view=None):
-        self.attached_view = attached_view
-        self.data = data or {}
-
-    def __str__(self):
-        self_cpy = copy.deepcopy(self)
-        for key in self_cpy:
-            if getattr(self_cpy[key], 'attached_view', None) is not None:
-                self_cpy[key] = str(self_cpy[key])
-
-        if self.attached_view is not None:
-            return self.attached_view(self_cpy)
-        else:
-            raise Exception("Cannot stringify model: no attached view")
-
-    def __repr__(self):
-        if self.attached_view is not None:
-            return ("<Model {cl.__module__}.{cl.__name__} {dt}"
-                    " with view {vw.__module__}."
-                    "{vw.__name__}>").format(cl=type(self),
-                                             dt=self.data,
-                                             vw=type(self.attached_view))
        else:
-            return ("<Model {cl.__module__}.{cl.__name__} {dt}>").format(
-                cl=type(self), dt=self.data)
-
-    def __getitem__(self, attrname):
-        return self.data[attrname]
-
-    def __setitem__(self, attrname, attrval):
-        self.data[attrname] = attrval
-
-    def __delitem__(self, attrname):
-        del self.data[attrname]
-
-    def __contains__(self, key):
-        return self.data.__contains__(key)
-
-    def __getattr__(self, attrname):
-        try:
-            return self.data[attrname]
-        except KeyError:
-            raise AttributeError(
-                "'{cl}' object has no attribute '{an}'".format(
-                    cl=type(self).__name__, an=attrname
-                )
-            )
-
-    def __len__(self):
-        return len(self.data)
-
-    def __iter__(self):
-        return self.data.__iter__()
-
-    def set_current_view_type(self, tp):
-        """Set the current view type
-
-        This method attempts to set the current view
-        type for this model and all submodels by calling
-        itself recursively on all values (and ignoring the
-        ones that are not themselves models)
-
-        :param tp: the type of the view ('text', 'json', 'xml', etc)
-        """
-
-        for key in self:
-            try:
-                self[key].set_current_view_type(tp)
-            except AttributeError:
-                pass
diff --git a/rack/openstack/common/report/models/conf.py b/rack/openstack/common/report/models/conf.py
deleted file mode 100644
index 061eb65..0000000
--- a/rack/openstack/common/report/models/conf.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Provides Openstack Configuration Model
-
-This module defines a class representing the data
-model for :mod:`oslo.config` configuration options
-"""
-
-import rack.openstack.common.report.models.with_default_views as mwdv
-import rack.openstack.common.report.views.text.generic as generic_text_views
-
-
-class ConfigModel(mwdv.ModelWithDefaultViews):
-    """A Configuration Options Model
-
-    This model holds data about a set of configuration options
-    from :mod:`oslo.config`. It supports both the default group
-    of options and named option groups.
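-
-    For example (illustrative)::
-
-        model = ConfigModel(cfg.CONF)
-        model['default']  # maps default-group option names to values
-        # named option groups appear as their own top-level keys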
- - :param conf_obj: a configuration object - :type conf_obj: :class:`oslo.config.cfg.ConfigOpts` - """ - - def __init__(self, conf_obj): - kv_view = generic_text_views.KeyValueView(dict_sep=": ", - before_dict='') - super(ConfigModel, self).__init__(text_view=kv_view) - - def opt_title(optname, co): - return co._opts[optname]['opt'].name - - self['default'] = dict( - (opt_title(optname, conf_obj), conf_obj[optname]) - for optname in conf_obj._opts - ) - - groups = {} - for groupname in conf_obj._groups: - group_obj = conf_obj._groups[groupname] - curr_group_opts = dict( - (opt_title(optname, group_obj), conf_obj[groupname][optname]) - for optname in group_obj._opts - ) - groups[group_obj.name] = curr_group_opts - - self.update(groups) diff --git a/rack/openstack/common/report/models/threading.py b/rack/openstack/common/report/models/threading.py deleted file mode 100644 index 6715108..0000000 --- a/rack/openstack/common/report/models/threading.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides threading and stack-trace models - -This module defines classes representing thread, green -thread, and stack trace data models -""" - -import traceback - -import rack.openstack.common.report.models.with_default_views as mwdv -import rack.openstack.common.report.views.text.threading as text_views - - -class StackTraceModel(mwdv.ModelWithDefaultViews): - """A Stack Trace Model - - This model holds data from a python stack trace, - commonly extracted from running thread information - - :param stack_state: the python stack_state object - """ - - def __init__(self, stack_state): - super(StackTraceModel, self).__init__( - text_view=text_views.StackTraceView()) - - if (stack_state is not None): - self['lines'] = [ - {'filename': fn, 'line': ln, 'name': nm, 'code': cd} - for fn, ln, nm, cd in traceback.extract_stack(stack_state) - ] - - if stack_state.f_exc_type is not None: - self['root_exception'] = { - 'type': stack_state.f_exc_type, - 'value': stack_state.f_exc_value - } - else: - self['root_exception'] = None - else: - self['lines'] = [] - self['root_exception'] = None - - -class ThreadModel(mwdv.ModelWithDefaultViews): - """A Thread Model - - This model holds data for information about an - individual thread. It holds both a thread id, - as well as a stack trace for the thread - - .. seealso:: - - Class :class:`StackTraceModel` - - :param int thread_id: the id of the thread - :param stack: the python stack state for the current thread - """ - - # threadId, stack in sys._current_frams().items() - def __init__(self, thread_id, stack): - super(ThreadModel, self).__init__(text_view=text_views.ThreadView()) - - self['thread_id'] = thread_id - self['stack_trace'] = StackTraceModel(stack) - - -class GreenThreadModel(mwdv.ModelWithDefaultViews): - """A Green Thread Model - - This model holds data for information about an - individual thread. 
Unlike the thread model, - it holds just a stack trace, since green threads - do not have thread ids. - - .. seealso:: - - Class :class:`StackTraceModel` - - :param stack: the python stack state for the green thread - """ - - # gr in greenpool.coroutines_running --> gr.gr_frame - def __init__(self, stack): - super(GreenThreadModel, self).__init__( - {'stack_trace': StackTraceModel(stack)}, - text_view=text_views.GreenThreadView()) diff --git a/rack/openstack/common/report/models/version.py b/rack/openstack/common/report/models/version.py deleted file mode 100644 index e353759..0000000 --- a/rack/openstack/common/report/models/version.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides Openstack Version Info Model - -This module defines a class representing the data -model for Openstack package and version information -""" - -import rack.openstack.common.report.models.with_default_views as mwdv -import rack.openstack.common.report.views.text.generic as generic_text_views - - -class PackageModel(mwdv.ModelWithDefaultViews): - """A Package Information Model - - This model holds information about the current - package. It contains vendor, product, and version - information. - - :param str vendor: the product vendor - :param str product: the product name - :param str version: the product version - """ - - def __init__(self, vendor, product, version): - super(PackageModel, self).__init__( - text_view=generic_text_views.KeyValueView() - ) - - self['vendor'] = vendor - self['product'] = product - self['version'] = version diff --git a/rack/openstack/common/report/models/with_default_views.py b/rack/openstack/common/report/models/with_default_views.py deleted file mode 100644 index 0a3bd02..0000000 --- a/rack/openstack/common/report/models/with_default_views.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import rack.openstack.common.report.models.base as base_model -import rack.openstack.common.report.views.json.generic as jsonviews -import rack.openstack.common.report.views.text.generic as textviews -import rack.openstack.common.report.views.xml.generic as xmlviews - - -class ModelWithDefaultViews(base_model.ReportModel): - """A Model With Default Views of Various Types - - A model with default views has several predefined views, - each associated with a given type. 
This is often used
-    when a submodel should have an attached view, but the view
-    differs depending on the serialization format
-
-    Parameters are as in the superclass, with the exception
-    of any parameters ending in '_view': these parameters
-    get stored as default views.
-
-    The default 'default views' are
-
-    text
-        :class:`openstack.common.views.text.generic.KeyValueView`
-    xml
-        :class:`openstack.common.views.xml.generic.KeyValueView`
-    json
-        :class:`openstack.common.views.json.generic.KeyValueView`
-
-    .. function:: to_type()
-
-       ('type' is one of the 'default views' defined for this model)
-       Serializes this model using the default view for 'type'
-
-       :rtype: str
-       :returns: this model serialized as 'type'
-    """
-
-    def __init__(self, *args, **kwargs):
-        self.views = {
-            'text': textviews.KeyValueView(),
-            'json': jsonviews.KeyValueView(),
-            'xml': xmlviews.KeyValueView()
-        }
-
-        newargs = copy.copy(kwargs)
-        for k in kwargs:
-            if k.endswith('_view'):
-                self.views[k[:-5]] = kwargs[k]
-                del newargs[k]
-        super(ModelWithDefaultViews, self).__init__(*args, **newargs)
-
-    def set_current_view_type(self, tp):
-        self.attached_view = self.views[tp]
-        super(ModelWithDefaultViews, self).set_current_view_type(tp)
-
-    def __getattr__(self, attrname):
-        if attrname[:3] == 'to_':
-            if self.views[attrname[3:]] is not None:
-                return lambda: self.views[attrname[3:]](self)
-            else:
-                raise NotImplementedError((
-                    "Model {cn.__module__}.{cn.__name__} does not have" +
-                    " a default view for "
-                    "{tp}").format(cn=type(self), tp=attrname[3:]))
-        else:
-            return super(ModelWithDefaultViews, self).__getattr__(attrname)
diff --git a/rack/openstack/common/report/report.py b/rack/openstack/common/report/report.py
deleted file mode 100644
index ca8479a..0000000
--- a/rack/openstack/common/report/report.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Provides Report classes
-
-This module defines various classes representing
-reports and report sections. All reports take the
-form of a report class containing various report sections.
-"""
-
-import rack.openstack.common.report.views.text.header as header_views
-
-
-class BasicReport(object):
-    """A Basic Report
-
-    A Basic Report consists of a collection of :class:`ReportSection`
-    objects, each of which contains a top-level model and generator.
-    It collects these sections into a cohesive report which may then
-    be serialized by calling :func:`run`
-    """
-
-    def __init__(self):
-        self.sections = []
-        self._state = 0
-
-    def add_section(self, view, generator, index=None):
-        """Add a section to the report
-
-        This method adds a section with the given view and
-        generator to the report. An index may be specified to
-        insert the section at a given location in the list;
-        If no index is specified, the section is appended to the
-        list. The view is called on the model which results from
-        the generator when the report is run.
A generator is simply - a method or callable object which takes no arguments and - returns a :class:`openstack.common.report.models.base.ReportModel` - or similar object. - - :param view: the top-level view for the section - :param generator: the method or class which generates the model - :param index: the index at which to insert the section - (or None to append it) - :type index: int or None - """ - - if index is None: - self.sections.append(ReportSection(view, generator)) - else: - self.sections.insert(index, ReportSection(view, generator)) - - def run(self): - """Run the report - - This method runs the report, having each section generate - its data and serialize itself before joining the sections - together. The BasicReport accomplishes the joining - by joining the serialized sections together with newlines. - - :rtype: str - :returns: the serialized report - """ - - return "\n".join(str(sect) for sect in self.sections) - - -class ReportSection(object): - """A Report Section - - A report section contains a generator and a top-level view. - When something attempts to serialize the section by calling - str() on it, the section runs the generator and calls the view - on the resulting model. - - .. seealso:: - - Class :class:`BasicReport` - :func:`BasicReport.add_section` - - :param view: the top-level view for this section - :param generator: the generator for this section - (any callable object which takes - no parameters and returns a data model) - """ - - def __init__(self, view, generator): - self.view = view - self.generator = generator - - def __str__(self): - return self.view(self.generator()) - - -class ReportOfType(BasicReport): - """A Report of a Certain Type - - A ReportOfType has a predefined type associated with it. - This type is automatically propagated down to the each of - the sections upon serialization by wrapping the generator - for each section. - - .. seealso:: - - Class :class:`openstack.common.report.models.with_default_view.ModelWithDefaultView` # noqa - (the entire class) - - Class :class:`openstack.common.report.models.base.ReportModel` - :func:`openstack.common.report.models.base.ReportModel.set_current_view_type` # noqa - - :param str tp: the type of the report - """ - - def __init__(self, tp): - self.output_type = tp - super(ReportOfType, self).__init__() - - def add_section(self, view, generator, index=None): - def with_type(gen): - def newgen(): - res = gen() - try: - res.set_current_view_type(self.output_type) - except AttributeError: - pass - - return res - return newgen - - super(ReportOfType, self).add_section( - view, - with_type(generator), - index - ) - - -class TextReport(ReportOfType): - """A Human-Readable Text Report - - This class defines a report that is designed to be read by a human - being. It has nice section headers, and a formatted title. - - :param str name: the title of the report - """ - - def __init__(self, name): - super(TextReport, self).__init__('text') - self.name = name - # add a title with a generator that creates an empty result model - self.add_section(name, lambda: ('|' * 72) + "\n\n") - - def add_section(self, heading, generator, index=None): - """Add a section to the report - - This method adds a section with the given title, and - generator to the report. An index may be specified to - insert the section at a given location in the list; - If no index is specified, the section is appended to the - list. The view is called on the model which results from - the generator when the report is run. 
A generator is simply - a method or callable object which takes no arguments and - returns a :class:`openstack.common.report.models.base.ReportModel` - or similar object. - - The model is told to serialize as text (if possible) at serialization - time by wrapping the generator. The model's attached view - (if any) is wrapped in a - :class:`openstack.common.report.views.text.header.TitledView` - - :param str heading: the title for the section - :param generator: the method or class which generates the model - :param index: the index at which to insert the section - (or None to append) - :type index: int or None - """ - - super(TextReport, self).add_section(header_views.TitledView(heading), - generator, - index) diff --git a/rack/openstack/common/report/utils.py b/rack/openstack/common/report/utils.py deleted file mode 100644 index fb71e36..0000000 --- a/rack/openstack/common/report/utils.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Various utilities for report generation - -This module includes various utilities -used in generating reports. -""" - -import gc - - -class StringWithAttrs(str): - """A String that can have arbitrary attributes - """ - - pass - - -def _find_objects(t): - """Find Objects in the GC State - - This horribly hackish method locates objects of a - given class in the current python instance's garbage - collection state. In case you couldn't tell, this is - horribly hackish, but is necessary for locating all - green threads, since they don't keep track of themselves - like normal threads do in python. - - :param class t: the class of object to locate - :rtype: list - :returns: a list of objects of the given type - """ - - return [o for o in gc.get_objects() if isinstance(o, t)] diff --git a/rack/openstack/common/report/views/__init__.py b/rack/openstack/common/report/views/__init__.py deleted file mode 100644 index 612959b..0000000 --- a/rack/openstack/common/report/views/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides predefined views - -This module provides a collection of predefined views -for use in reports. It is separated by type (xml, json, or text). -Each type contains a submodule called 'generic' containing -several basic, universal views for that type. There is also -a predefined view that utilizes Jinja.
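
To illustrate the `_find_objects` helper shown earlier (a sketch; the external greenlet package is assumed, since green threads are its usual target):

    import greenlet

    from rack.openstack.common.report import utils

    # every live green thread still tracked by the garbage collector
    print(len(utils._find_objects(greenlet.greenlet)))
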
-""" diff --git a/rack/openstack/common/report/views/jinja_view.py b/rack/openstack/common/report/views/jinja_view.py deleted file mode 100644 index a6f340e..0000000 --- a/rack/openstack/common/report/views/jinja_view.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides Jinja Views - -This module provides views that utilize the Jinja templating -system for serialization. For more information on Jinja, please -see http://jinja.pocoo.org/ . -""" - -import jinja2 - - -class JinjaView(object): - """A Jinja View - - This view renders the given model using the provided Jinja - template. The template can be given in various ways. - If the `VIEw_TEXT` property is defined, that is used as template. - Othewise, if a `path` parameter is passed to the constructor, that - is used to load a file containing the template. If the `path` - parameter is None, the `text` parameter is used as the template. - - The leading newline character and trailing newline character are stripped - from the template (provided they exist). Baseline indentation is - also stripped from each line. The baseline indentation is determined by - checking the indentation of the first line, after stripping off the leading - newline (if any). - - :param str path: the path to the Jinja template - :param str text: the text of the Jinja template - """ - - def __init__(self, path=None, text=None): - try: - self._text = self.VIEW_TEXT - except AttributeError: - if path is not None: - with open(path, 'r') as f: - self._text = f.read() - elif text is not None: - self._text = text - else: - self._text = "" - - if self._text[0] == "\n": - self._text = self._text[1:] - - newtext = self._text.lstrip() - amt = len(self._text) - len(newtext) - if (amt > 0): - base_indent = self._text[0:amt] - lines = self._text.splitlines() - newlines = [] - for line in lines: - if line.startswith(base_indent): - newlines.append(line[amt:]) - else: - newlines.append(line) - self._text = "\n".join(newlines) - - if self._text[-1] == "\n": - self._text = self._text[:-1] - - self._regentemplate = True - self._templatecache = None - - def __call__(self, model): - return self.template.render(**model) - - @property - def template(self): - """Get the Compiled Template - - Gets the compiled template, using a cached copy if possible - (stored in attr:`_templatecache`) or otherwise recompiling - the template if the compiled template is not present or is - invalid (due to attr:`_regentemplate` being set to True). 
- - :returns: the compiled Jinja template - :rtype: :class:`jinja2.Template` - """ - - if self._templatecache is None or self._regentemplate: - self._templatecache = jinja2.Template(self._text) - self._regentemplate = False - - return self._templatecache - - def _gettext(self): - """Get the Template Text - - Gets the text of the current template - - :returns: the text of the Jinja template - :rtype: str - """ - - return self._text - - def _settext(self, textval): - """Set the Template Text - - Sets the text of the current template, marking it - for recompilation next time the compiled template - is retrieved via :attr:`template`. - - :param str textval: the new text of the Jinja template - """ - - self._text = textval - self._regentemplate = True - - text = property(_gettext, _settext) diff --git a/rack/openstack/common/report/views/json/__init__.py b/rack/openstack/common/report/views/json/__init__.py deleted file mode 100644 index 47bd33b..0000000 --- a/rack/openstack/common/report/views/json/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides basic JSON views - -This module provides several basic views which serialize -models into JSON. -""" diff --git a/rack/openstack/common/report/views/json/generic.py b/rack/openstack/common/report/views/json/generic.py deleted file mode 100644 index 319abd6..0000000 --- a/rack/openstack/common/report/views/json/generic.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides generic JSON views - -This module defines several basic views for serializing -data to JSON.
Submodels that have already been serialized -as JSON may have their string values marked with `__is_json__ -= True` using :class:`openstack.common.report.utils.StringWithAttrs` -(each of the classes within this module does this automatically, -and non-naive serializers check for this attribute and handle -such strings specially) -""" - -import copy - -from rack.openstack.common import jsonutils as json -import rack.openstack.common.report.utils as utils - - -class BasicKeyValueView(object): - """A Basic Key-Value JSON View - - This view performs a naive serialization of a model - into JSON by simply calling :func:`json.dumps` on the model - """ - - def __call__(self, model): - res = utils.StringWithAttrs(json.dumps(model.data)) - res.__is_json__ = True - return res - - -class KeyValueView(object): - """A Key-Value JSON View - - This view performs advanced serialization of a model - into JSON. It does so by first checking all values to - see if they are marked as JSON. If so, they are deserialized - using :func:`json.loads`. Then, the copy of the model with all - JSON deserialized is reserialized into proper nested JSON using - :func:`json.dumps`. - """ - - def __call__(self, model): - # this part deals with subviews that were already serialized - cpy = copy.deepcopy(model) - for key, valstr in model.items(): - if getattr(valstr, '__is_json__', False): - cpy[key] = json.loads(valstr) - - res = utils.StringWithAttrs(json.dumps(cpy.data)) - res.__is_json__ = True - return res diff --git a/rack/openstack/common/report/views/text/__init__.py b/rack/openstack/common/report/views/text/__init__.py deleted file mode 100644 index c097484..0000000 --- a/rack/openstack/common/report/views/text/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides basic text views - -This module provides several basic views which serialize -models into human-readable text. -""" diff --git a/rack/openstack/common/report/views/text/generic.py b/rack/openstack/common/report/views/text/generic.py deleted file mode 100644 index 7363833..0000000 --- a/rack/openstack/common/report/views/text/generic.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides generic text views - -This module provides several generic views for -serializing models into human-readable text.
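
A sketch of the JSON `KeyValueView` defined above, using a minimal ReportModel-like stand-in (the real models live in `report.models.base`; `FakeModel` here is hypothetical):

    from rack.openstack.common.report.views.json.generic import KeyValueView

    class FakeModel(dict):
        # minimal stand-in exposing the .data attribute the views expect
        @property
        def data(self):
            return dict(self)

    inner = KeyValueView()(FakeModel(status='ACTIVE'))  # marked __is_json__
    outer = KeyValueView()(FakeModel(worker=inner))
    print(outer)   # {"worker": {"status": "ACTIVE"}}, nested rather than escaped
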
-""" - -import collections as col - -import six - - -class MultiView(object): - """A Text View Containing Multiple Views - - This view simply serializes each - value in the data model, and then - joins them with newlines (ignoring - the key values altogether). This is - useful for serializing lists of models - (as array-like dicts). - """ - - def __call__(self, model): - res = [str(model[key]) for key in model] - return "\n".join(res) - - -class BasicKeyValueView(object): - """A Basic Key-Value Text View - - This view performs a naive serialization of a model into - text using a basic key-value method, where each - key-value pair is rendered as "key = str(value)" - """ - - def __call__(self, model): - res = "" - for key in model: - res += "{key} = {value}\n".format(key=key, value=model[key]) - - return res - - -class KeyValueView(object): - """A Key-Value Text View - - This view performs an advanced serialization of a model - into text by following the following set of rules: - - key : text - key = text - - rootkey : Mapping - :: - - rootkey = - serialize(key, value) - - key : Sequence - :: - - key = - serialize(item) - - :param str indent_str: the string used to represent one "indent" - :param str key_sep: the separator to use between keys and values - :param str dict_sep: the separator to use after a dictionary root key - :param str list_sep: the separator to use after a list root key - :param str anon_dict: the "key" to use when there is a dict in a list - (does not automatically use the dict separator) - :param before_dict: content to place on the line(s) before the a dict - root key (use None to avoid inserting an extra line) - :type before_dict: str or None - :param before_list: content to place on the line(s) before the a list - root key (use None to avoid inserting an extra line) - :type before_list: str or None - """ - - def __init__(self, - indent_str=' ', - key_sep=' = ', - dict_sep=' = ', - list_sep=' = ', - anon_dict='[dict]', - before_dict=None, - before_list=None): - self.indent_str = indent_str - self.key_sep = key_sep - self.dict_sep = dict_sep - self.list_sep = list_sep - self.anon_dict = anon_dict - self.before_dict = before_dict - self.before_list = before_list - - def __call__(self, model): - def serialize(root, rootkey, indent): - res = [] - if rootkey is not None: - res.append((self.indent_str * indent) + rootkey) - - if isinstance(root, col.Mapping): - if rootkey is None and indent > 0: - res.append((self.indent_str * indent) + self.anon_dict) - elif rootkey is not None: - res[0] += self.dict_sep - if self.before_dict is not None: - res.insert(0, self.before_dict) - - for key in root: - res.extend(serialize(root[key], key, indent + 1)) - elif (isinstance(root, col.Sequence) and - not isinstance(root, six.string_types)): - if rootkey is not None: - res[0] += self.list_sep - if self.before_list is not None: - res.insert(0, self.before_list) - - for val in root: - res.extend(serialize(val, None, indent + 1)) - else: - str_root = str(root) - if '\n' in str_root: - # we are in a submodel - if rootkey is not None: - res[0] += self.dict_sep - - list_root = [(self.indent_str * (indent + 1)) + line - for line in str_root.split('\n')] - res.extend(list_root) - else: - # just a normal key or list entry - try: - res[0] += self.key_sep + str_root - except IndexError: - res = [(self.indent_str * indent) + str_root] - - return res - - return "\n".join(serialize(model, None, -1)) - - -class TableView(object): - """A Basic Table Text View - - This view performs serialization of data 
into a basic table with - predefined column names and mappings. Column width is auto-calculated - evenly, column values are automatically truncated accordingly. Values - are centered in the columns. - - :param [str] column_names: the headers for each of the columns - :param [str] column_values: the item name to match each column to in - each row - :param str table_prop_name: the name of the property within the model - containing the row models - """ - - def __init__(self, column_names, column_values, table_prop_name): - self.table_prop_name = table_prop_name - self.column_names = column_names - self.column_values = column_values - self.column_width = (72 - len(column_names) + 1) / len(column_names) - - column_headers = "|".join( - "{ch[" + str(n) + "]: ^" + str(self.column_width) + "}" - for n in range(len(column_names)) - ) - - # correct for float-to-int roundoff error - test_fmt = column_headers.format(ch=column_names) - if len(test_fmt) < 72: - column_headers += ' ' * (72 - len(test_fmt)) - - vert_divider = '-' * 72 - self.header_fmt_str = column_headers + "\n" + vert_divider + "\n" - - self.row_fmt_str = "|".join( - "{cv[" + str(n) + "]: ^" + str(self.column_width) + "}" - for n in range(len(column_values)) - ) - - def __call__(self, model): - res = self.header_fmt_str.format(ch=self.column_names) - for raw_row in model[self.table_prop_name]: - row = [str(raw_row[prop_name]) for prop_name in self.column_values] - # double format is in case we have roundoff error - res += '{0: <72}\n'.format(self.row_fmt_str.format(cv=row)) - - return res diff --git a/rack/openstack/common/report/views/text/header.py b/rack/openstack/common/report/views/text/header.py deleted file mode 100644 index 58d06c0..0000000 --- a/rack/openstack/common/report/views/text/header.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Text Views With Headers - -This package defines several text views with headers -""" - - -class HeaderView(object): - """A Text View With a Header - - This view simply serializes the model and places the given - header on top. - - :param header: the header (can be anything on which str() can be called) - """ - - def __init__(self, header): - self.header = header - - def __call__(self, model): - return str(self.header) + "\n" + str(model) - - -class TitledView(HeaderView): - """A Text View With a Title - - This view simply serializes the model, and places - a preformatted header containing the given title - text on top. The title text can be up to 64 characters - long. 
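
A sketch of the header views defined above (illustrative; the model text is a hypothetical stand-in):

    from rack.openstack.common.report.views.text.header import TitledView

    view = TitledView('Green Threads')
    print(view('<stack traces would go here>'))
    # a 72-wide '=' banner with the centered title, then the model text
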
- - :param str title: the title of the view - """ - - FORMAT_STR = ('=' * 72) + "\n===={0: ^64}====\n" + ('=' * 72) - - def __init__(self, title): - super(TitledView, self).__init__(self.FORMAT_STR.format(title)) diff --git a/rack/openstack/common/report/views/text/threading.py b/rack/openstack/common/report/views/text/threading.py deleted file mode 100644 index d4574bd..0000000 --- a/rack/openstack/common/report/views/text/threading.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides thread and stack-trace views - -This module provides a collection of views for -visualizing threads, green threads, and stack traces -in human-readable form. -""" - -import rack.openstack.common.report.views.jinja_view as jv - - -class StackTraceView(jv.JinjaView): - """A Stack Trace View - - This view displays stack trace models defined by - :class:`openstack.common.report.models.threading.StackTraceModel` - """ - - VIEW_TEXT = ( - "{% if root_exception is not none %}" - "Exception: {{ root_exception }}\n" - "------------------------------------\n" - "\n" - "{% endif %}" - "{% for line in lines %}\n" - "{{ line.filename }}:{{ line.line }} in {{ line.name }}\n" - " {% if line.code is not none %}" - "`{{ line.code }}`" - "{% else %}" - "(source not found)" - "{% endif %}\n" - "{% else %}\n" - "No Traceback!\n" - "{% endfor %}" - ) - - -class GreenThreadView(object): - """A Green Thread View - - This view displays a green thread provided by the data - model :class:`openstack.common.report.models.threading.GreenThreadModel` # noqa - """ - - FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}" - - def __call__(self, model): - return self.FORMAT_STR.format( - thread_str=" Green Thread ", - stack_trace=model.stack_trace - ) - - -class ThreadView(object): - """A Thread Collection View - - This view displays a python thread provided by the data - model :class:`openstack.common.report.models.threading.ThreadModel` # noqa - """ - - FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}" - - def __call__(self, model): - return self.FORMAT_STR.format( - thread_str=" Thread #{0} ".format(model.thread_id), - stack_trace=model.stack_trace - ) diff --git a/rack/openstack/common/report/views/xml/__init__.py b/rack/openstack/common/report/views/xml/__init__.py deleted file mode 100644 index a40fec9..0000000 --- a/rack/openstack/common/report/views/xml/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - # License for the specific language governing permissions and limitations - # under the License. - -"""Provides basic XML views - -This module provides several basic views which serialize -models into XML. -""" diff --git a/rack/openstack/common/report/views/xml/generic.py b/rack/openstack/common/report/views/xml/generic.py deleted file mode 100644 index 9db3b03..0000000 --- a/rack/openstack/common/report/views/xml/generic.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Provides generic XML views - -This module defines several basic views for serializing -data to XML. Submodels that have already been serialized -as XML may have their string values marked with `__is_xml__ -= True` using :class:`openstack.common.report.utils.StringWithAttrs` -(each of the classes within this module does this automatically, -and non-naive serializers check for this attribute and handle -such strings specially) -""" - -import collections as col -import copy -import xml.etree.ElementTree as ET - -import six - -import rack.openstack.common.report.utils as utils - - -class KeyValueView(object): - """A Key-Value XML View - - This view performs advanced serialization of a data model - into XML. It first deserializes any values marked as XML so - that they can be properly reserialized later.
It then applies - the following rules to perform serialization: - - key : text/xml - The tag name is the key name, and the contents are the text or xml - key : Sequence - A wrapper tag is created with the key name, and each item is placed - in an 'item' tag - key : Mapping - A wrapper tag is created with the key name, and the serialize function is called - on each key-value pair (such that each key gets its own tag) - - :param str wrapper_name: the name of the top-level element - """ - - def __init__(self, wrapper_name="model"): - self.wrapper_name = wrapper_name - - def __call__(self, model): - # this part deals with subviews that were already serialized - cpy = copy.deepcopy(model) - for key, valstr in model.items(): - if getattr(valstr, '__is_xml__', False): - cpy[key] = ET.fromstring(valstr) - - def serialize(rootmodel, rootkeyname): - res = ET.Element(rootkeyname) - - if isinstance(rootmodel, col.Mapping): - for key in rootmodel: - res.append(serialize(rootmodel[key], key)) - elif (isinstance(rootmodel, col.Sequence) - and not isinstance(rootmodel, six.string_types)): - for val in rootmodel: - res.append(serialize(val, 'item')) - elif ET.iselement(rootmodel): - res.append(rootmodel) - else: - res.text = str(rootmodel) - - return res - - res = utils.StringWithAttrs(ET.tostring(serialize(cpy, - self.wrapper_name))) - res.__is_xml__ = True - return res diff --git a/rack/openstack/common/service.py b/rack/openstack/common/service.py deleted file mode 100644 index 991c8b3..0000000 --- a/rack/openstack/common/service.py +++ /dev/null @@ -1,491 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Generic Node base class for all workers that run on hosts.""" - -import errno -import logging as std_logging -import os -import random -import signal -import sys -import time - -try: - # Importing just the symbol here because the io module does not - # exist in Python 2.6. - from io import UnsupportedOperation # noqa -except ImportError: - # Python 2.6 - UnsupportedOperation = None - -import eventlet -from eventlet import event -from oslo.config import cfg - -from rack.openstack.common import eventlet_backdoor -from rack.openstack.common.gettextutils import _ # noqa -from rack.openstack.common import importutils -from rack.openstack.common import log as logging -from rack.openstack.common import threadgroup - - -rpc = importutils.try_import('rack.rpc') -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _sighup_supported(): - return hasattr(signal, 'SIGHUP') - - -def _is_daemon(): - # The process group for a foreground process will match the - # process group of the controlling terminal. If those values do - # not match, or ioctl() fails on the stdout file handle, we assume - # the process is running in the background as a daemon.
- # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics - try: - is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) - except OSError as err: - if err.errno == errno.ENOTTY: - # Assume we are a daemon because there is no terminal. - is_daemon = True - else: - raise - except UnsupportedOperation: - # Could not get the fileno for stdout, so we must be a daemon. - is_daemon = True - return is_daemon - - -def _is_sighup_and_daemon(signo): - if not (_sighup_supported() and signo == signal.SIGHUP): - # Avoid checking if we are a daemon, because the signal isn't - # SIGHUP. - return False - return _is_daemon() - - -def _signo_to_signame(signo): - signals = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'} - if _sighup_supported(): - signals[signal.SIGHUP] = 'SIGHUP' - return signals[signo] - - -def _set_signals_handler(handler): - signal.signal(signal.SIGTERM, handler) - signal.signal(signal.SIGINT, handler) - if _sighup_supported(): - signal.signal(signal.SIGHUP, handler) - - -class Launcher(object): - """Launch one or more services and wait for them to complete.""" - - def __init__(self): - """Initialize the service launcher. - - :returns: None - - """ - self.services = Services() - self.backdoor_port = eventlet_backdoor.initialize_if_enabled() - - def launch_service(self, service): - """Load and start the given service. - - :param service: The service you would like to start. - :returns: None - - """ - service.backdoor_port = self.backdoor_port - self.services.add(service) - - def stop(self): - """Stop all services which are currently running. - - :returns: None - - """ - self.services.stop() - - def wait(self): - """Waits until all services have been stopped, and then returns. - - :returns: None - - """ - self.services.wait() - - def restart(self): - """Reload config files and restart service. - - :returns: None - - """ - cfg.CONF.reload_config_files() - self.services.restart() - - -class SignalExit(SystemExit): - def __init__(self, signo, exccode=1): - super(SignalExit, self).__init__(exccode) - self.signo = signo - - -class ServiceLauncher(Launcher): - def _handle_signal(self, signo, frame): - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - raise SignalExit(signo) - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _wait_for_exit_or_signal(self, ready_callback=None): - status = None - signo = 0 - - LOG.debug(_('Full set of CONF:')) - CONF.log_opt_values(LOG, std_logging.DEBUG) - - try: - if ready_callback: - ready_callback() - super(ServiceLauncher, self).wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - finally: - self.stop() - if rpc: - try: - rpc.cleanup() - except Exception: - # We're shutting down, so it doesn't matter at this point. 
- LOG.exception(_('Exception during rpc cleanup.')) - - return status, signo - - def wait(self, ready_callback=None): - while True: - self.handle_signal() - status, signo = self._wait_for_exit_or_signal(ready_callback) - if not _is_sighup_and_daemon(signo): - return status - self.restart() - - -class ServiceWrapper(object): - def __init__(self, service, workers): - self.service = service - self.workers = workers - self.children = set() - self.forktimes = [] - - -class ProcessLauncher(object): - def __init__(self): - self.children = {} - self.sigcaught = None - self.running = True - rfd, self.writepipe = os.pipe() - self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') - self.handle_signal() - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _handle_signal(self, signo, frame): - self.sigcaught = signo - self.running = False - - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - - def _pipe_watcher(self): - # This will block until the write end is closed when the parent - # dies unexpectedly - self.readpipe.read() - - LOG.info(_('Parent process has died unexpectedly, exiting')) - - sys.exit(1) - - def _child_process_handle_signal(self): - # Setup child signal handlers differently - def _sigterm(*args): - signal.signal(signal.SIGTERM, signal.SIG_DFL) - raise SignalExit(signal.SIGTERM) - - def _sighup(*args): - signal.signal(signal.SIGHUP, signal.SIG_DFL) - raise SignalExit(signal.SIGHUP) - - signal.signal(signal.SIGTERM, _sigterm) - if _sighup_supported(): - signal.signal(signal.SIGHUP, _sighup) - # Block SIGINT and let the parent send us a SIGTERM - signal.signal(signal.SIGINT, signal.SIG_IGN) - - def _child_wait_for_exit_or_signal(self, launcher): - status = 0 - signo = 0 - - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. - try: - launcher.wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_('Unhandled exception')) - status = 2 - finally: - launcher.stop() - - return status, signo - - def _child_process(self, service): - self._child_process_handle_signal() - - # Reopen the eventlet hub to make sure we don't share an epoll - # fd with parent and/or siblings, which would be bad - eventlet.hubs.use_hub() - - # Close write to ensure only parent has it open - os.close(self.writepipe) - # Create greenthread to watch for parent to close pipe - eventlet.spawn_n(self._pipe_watcher) - - # Reseed random number generator - random.seed() - - launcher = Launcher() - launcher.launch_service(service) - return launcher - - def _start_child(self, wrap): - if len(wrap.forktimes) > wrap.workers: - # Limit ourselves to one process a second (over the period of - # number of workers * 1 second). This will allow workers to - # start up quickly but ensure we don't fork off children that - # die instantly too quickly. 
- if time.time() - wrap.forktimes[0] < wrap.workers: - LOG.info(_('Forking too fast, sleeping')) - time.sleep(1) - - wrap.forktimes.pop(0) - - wrap.forktimes.append(time.time()) - - pid = os.fork() - if pid == 0: - launcher = self._child_process(wrap.service) - while True: - self._child_process_handle_signal() - status, signo = self._child_wait_for_exit_or_signal(launcher) - if not _is_sighup_and_daemon(signo): - break - launcher.restart() - - os._exit(status) - - LOG.info(_('Started child %d'), pid) - - wrap.children.add(pid) - self.children[pid] = wrap - - return pid - - def launch_service(self, service, workers=1): - wrap = ServiceWrapper(service, workers) - - LOG.info(_('Starting %d workers'), wrap.workers) - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def _wait_child(self): - try: - # Don't block if no child processes have exited - pid, status = os.waitpid(0, os.WNOHANG) - if not pid: - return None - except OSError as exc: - if exc.errno not in (errno.EINTR, errno.ECHILD): - raise - return None - - if os.WIFSIGNALED(status): - sig = os.WTERMSIG(status) - LOG.info(_('Child %(pid)d killed by signal %(sig)d'), - dict(pid=pid, sig=sig)) - else: - code = os.WEXITSTATUS(status) - LOG.info(_('Child %(pid)s exited with status %(code)d'), - dict(pid=pid, code=code)) - - if pid not in self.children: - LOG.warning(_('pid %d not in child list'), pid) - return None - - wrap = self.children.pop(pid) - wrap.children.remove(pid) - return wrap - - def _respawn_children(self): - while self.running: - wrap = self._wait_child() - if not wrap: - # Yield to other threads if no children have exited - # Sleep for a short time to avoid excessive CPU usage - # (see bug #1095346) - eventlet.greenthread.sleep(.01) - continue - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def wait(self): - """Loop waiting on children to die and respawning as necessary.""" - - LOG.debug(_('Full set of CONF:')) - CONF.log_opt_values(LOG, std_logging.DEBUG) - - while True: - self.handle_signal() - self._respawn_children() - if self.sigcaught: - signame = _signo_to_signame(self.sigcaught) - LOG.info(_('Caught %s, stopping children'), signame) - if not _is_sighup_and_daemon(self.sigcaught): - break - - for pid in self.children: - os.kill(pid, signal.SIGHUP) - self.running = True - self.sigcaught = None - - for pid in self.children: - try: - os.kill(pid, signal.SIGTERM) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - - # Wait for children to die - if self.children: - LOG.info(_('Waiting on %d children to exit'), len(self.children)) - while self.children: - self._wait_child() - - -class Service(object): - """Service object for binaries running on hosts.""" - - def __init__(self, threads=1000): - self.tg = threadgroup.ThreadGroup(threads) - - # signal that the service is done shutting itself down: - self._done = event.Event() - - def reset(self): - # NOTE(Fengqian): docs for Event.reset() recommend against using it - self._done = event.Event() - - def start(self): - pass - - def stop(self): - self.tg.stop() - self.tg.wait() - # Signal that service cleanup is done: - if not self._done.ready(): - self._done.send() - - def wait(self): - self._done.wait() - - -class Services(object): - - def __init__(self): - self.services = [] - self.tg = threadgroup.ThreadGroup() - self.done = event.Event() - - def add(self, service): - self.services.append(service) - self.tg.add_thread(self.run_service, service, self.done) - - def stop(self): - # wait 
for graceful shutdown of services: - for service in self.services: - service.stop() - service.wait() - - # Each service has performed cleanup, now signal that the run_service - # wrapper threads can now die: - if not self.done.ready(): - self.done.send() - - # reap threads: - self.tg.stop() - - def wait(self): - self.tg.wait() - - def restart(self): - self.stop() - self.done = event.Event() - for restart_service in self.services: - restart_service.reset() - self.tg.add_thread(self.run_service, restart_service, self.done) - - @staticmethod - def run_service(service, done): - """Service start wrapper. - - :param service: service to run - :param done: event to wait on until a shutdown is triggered - :returns: None - - """ - service.start() - done.wait() - - -def launch(service, workers=None): - if workers: - launcher = ProcessLauncher() - launcher.launch_service(service, workers=workers) - else: - launcher = ServiceLauncher() - launcher.launch_service(service) - return launcher diff --git a/rack/openstack/common/sslutils.py b/rack/openstack/common/sslutils.py deleted file mode 100644 index 4f68ea5..0000000 --- a/rack/openstack/common/sslutils.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import ssl - -from oslo.config import cfg - -from rack.openstack.common.gettextutils import _ - - -ssl_opts = [ - cfg.StrOpt('ca_file', - default=None, - help="CA certificate file to use to verify " - "connecting clients."), - cfg.StrOpt('cert_file', - default=None, - help="Certificate file to use when starting " - "the server securely."), - cfg.StrOpt('key_file', - default=None, - help="Private key file to use when starting " - "the server securely."), -] - - -CONF = cfg.CONF -CONF.register_opts(ssl_opts, "ssl") - - -def is_enabled(): - cert_file = CONF.ssl.cert_file - key_file = CONF.ssl.key_file - ca_file = CONF.ssl.ca_file - use_ssl = cert_file or key_file - - if cert_file and not os.path.exists(cert_file): - raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) - - if ca_file and not os.path.exists(ca_file): - raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) - - if key_file and not os.path.exists(key_file): - raise RuntimeError(_("Unable to find key_file : %s") % key_file) - - if use_ssl and (not cert_file or not key_file): - raise RuntimeError(_("When running server in SSL mode, you must " - "specify both a cert_file and key_file " - "option value in your configuration file")) - - return use_ssl - - -def wrap(sock): - ssl_kwargs = { - 'server_side': True, - 'certfile': CONF.ssl.cert_file, - 'keyfile': CONF.ssl.key_file, - 'cert_reqs': ssl.CERT_NONE, - } - - if CONF.ssl.ca_file: - ssl_kwargs['ca_certs'] = CONF.ssl.ca_file - ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED - - return ssl.wrap_socket(sock, **ssl_kwargs) - - -_SSL_PROTOCOLS = { - "tlsv1": ssl.PROTOCOL_TLSv1, - "sslv23": ssl.PROTOCOL_SSLv23, - "sslv3": ssl.PROTOCOL_SSLv3 -} - -try: - _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 -except 
AttributeError: - pass - - -def validate_ssl_version(version): - key = version.lower() - try: - return _SSL_PROTOCOLS[key] - except KeyError: - raise RuntimeError(_("Invalid SSL version : %s") % version) diff --git a/rack/openstack/common/strutils.py b/rack/openstack/common/strutils.py deleted file mode 100644 index 5d908bf..0000000 --- a/rack/openstack/common/strutils.py +++ /dev/null @@ -1,216 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. -""" - -import re -import sys -import unicodedata - -from rack.openstack.common.gettextutils import _ - - -# Used for looking up extensions of text -# to their 'multiplied' byte amount -BYTE_MULTIPLIERS = { - '': 1, - 't': 1024 ** 4, - 'g': 1024 ** 3, - 'm': 1024 ** 2, - 'k': 1024, -} -BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') - -TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') -FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') - -SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") -SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") - - -def int_from_bool_as_string(subject): - """Interpret a string as a boolean and return either 1 or 0. - - Any string value in: - - ('True', 'true', 'On', 'on', '1') - - is interpreted as a boolean True. - - Useful for JSON-decoded stuff and config file parsing - """ - return bool_from_string(subject) and 1 or 0 - - -def bool_from_string(subject, strict=False): - """Interpret a string as a boolean. - - A case-insensitive match is performed such that strings matching 't', - 'true', 'on', 'y', 'yes', or '1' are considered True and, when - `strict=False`, anything else is considered False. - - Useful for JSON-decoded stuff and config file parsing. - - If `strict=True`, unrecognized values, including None, will raise a - ValueError which is useful when parsing values passed in from an API call. - Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. - """ - if not isinstance(subject, basestring): - subject = str(subject) - - lowered = subject.strip().lower() - - if lowered in TRUE_STRINGS: - return True - elif lowered in FALSE_STRINGS: - return False - elif strict: - acceptable = ', '.join( - "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) - msg = _("Unrecognized value '%(val)s', acceptable values are:" - " %(acceptable)s") % {'val': subject, - 'acceptable': acceptable} - raise ValueError(msg) - else: - return False - - -def safe_decode(text, incoming=None, errors='strict'): - """Decodes incoming str using `incoming` if they're not already unicode. - - :param incoming: Text's current encoding - :param errors: Errors handling policy. See here for valid - values http://docs.python.org/2/library/codecs.html - :returns: text or a unicode `incoming` encoded - representation of it. 
- :raises TypeError: If text is not an instance of basestring - """ - if not isinstance(text, basestring): - raise TypeError("%s can't be decoded" % type(text)) - - if isinstance(text, unicode): - return text - - if not incoming: - incoming = (sys.stdin.encoding or - sys.getdefaultencoding()) - - try: - return text.decode(incoming, errors) - except UnicodeDecodeError: - # Note(flaper87) If we get here, it means that - # sys.stdin.encoding / sys.getdefaultencoding - # didn't return a suitable encoding to decode - # text. This happens mostly when global LANG - # var is not set correctly and there's no - # default encoding. In this case, most likely - # python will use ASCII or ANSI encoders as - # default encodings but they won't be capable - # of decoding non-ASCII characters. - # - # Also, UTF-8 is being used since it's an ASCII - # extension. - return text.decode('utf-8', errors) - - -def safe_encode(text, incoming=None, - encoding='utf-8', errors='strict'): - """Encodes incoming str/unicode using `encoding`. - - If incoming is not specified, text is expected to be encoded with - current python's default encoding. (`sys.getdefaultencoding`) - - :param incoming: Text's current encoding - :param encoding: Expected encoding for text (Default UTF-8) - :param errors: Errors handling policy. See here for valid - values http://docs.python.org/2/library/codecs.html - :returns: text or a bytestring `encoding` encoded - representation of it. - :raises TypeError: If text is not an instance of basestring - """ - if not isinstance(text, basestring): - raise TypeError("%s can't be encoded" % type(text)) - - if not incoming: - incoming = (sys.stdin.encoding or - sys.getdefaultencoding()) - - if isinstance(text, unicode): - return text.encode(encoding, errors) - elif text and encoding != incoming: - # Decode text before encoding it with `encoding` - text = safe_decode(text, incoming, errors) - return text.encode(encoding, errors) - - return text - - -def to_bytes(text, default=0): - """Converts a string into an integer of bytes. - - Looks at the last characters of the text to determine - what conversion is needed to turn the input text into a byte number. - Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive) - - :param text: String input for bytes size conversion. - :param default: Default return value when text is blank. - - """ - match = BYTE_REGEX.search(text) - if match: - magnitude = int(match.group(1)) - mult_key_org = match.group(2) - if not mult_key_org: - return magnitude - elif text: - msg = _('Invalid string format: %s') % text - raise TypeError(msg) - else: - return default - mult_key = mult_key_org.lower().replace('b', '', 1) - multiplier = BYTE_MULTIPLIERS.get(mult_key) - if multiplier is None: - msg = _('Unknown byte multiplier: %s') % mult_key_org - raise TypeError(msg) - return magnitude * multiplier - - -def to_slug(value, incoming=None, errors="strict"): - """Normalize string. - - Convert to lowercase, remove non-word characters, and convert spaces - to hyphens. - - Inspired by Django's `slugify` filter. - - :param value: Text to slugify - :param incoming: Text's current encoding - :param errors: Errors handling policy.
See here for valid - values http://docs.python.org/2/library/codecs.html - :returns: slugified unicode representation of `value` - :raises TypeError: If text is not an instance of basestring - """ - value = safe_decode(value, incoming, errors) - # NOTE(aababilov): no need to use safe_(encode|decode) here: - # encodings are always "ascii", error handling is always "ignore" - # and types are always known (first: unicode; second: str) - value = unicodedata.normalize("NFKD", value).encode( - "ascii", "ignore").decode("ascii") - value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() - return SLUGIFY_HYPHENATE_RE.sub("-", value) diff --git a/rack/openstack/common/threadgroup.py b/rack/openstack/common/threadgroup.py deleted file mode 100644 index f185f31..0000000 --- a/rack/openstack/common/threadgroup.py +++ /dev/null @@ -1,121 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from eventlet import greenlet -from eventlet import greenpool -from eventlet import greenthread - -from rack.openstack.common import log as logging -from rack.openstack.common import loopingcall - - -LOG = logging.getLogger(__name__) - - -def _thread_done(gt, *args, **kwargs): - """Callback function to be passed to GreenThread.link() when we spawn() - Calls the :class:`ThreadGroup` to notify it. - - """ - kwargs['group'].thread_done(kwargs['thread']) - - -class Thread(object): - """Wrapper around a greenthread, that holds a reference to the - :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when - it is done so that it can be removed from the threads list. - """ - def __init__(self, thread, group): - self.thread = thread - self.thread.link(_thread_done, group=group, thread=self) - - def stop(self): - self.thread.kill() - - def wait(self): - return self.thread.wait() - - -class ThreadGroup(object): - """The point of the ThreadGroup class is to: - - * keep track of timers and greenthreads (making it easier to stop them - when need be). - * provide an easy API to add timers (a usage sketch follows below).
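
A usage sketch (illustrative; the callbacks are stand-ins):

    from rack.openstack.common import threadgroup

    tg = threadgroup.ThreadGroup(thread_pool_size=10)
    tg.add_thread(lambda: None)       # spawn a green thread in the pool
    tg.add_timer(60, lambda: None)    # run a callback every 60 seconds
    tg.stop()                         # kill threads and stop timers
    tg.wait()                         # reap anything still pending
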
- """ - def __init__(self, thread_pool_size=10): - self.pool = greenpool.GreenPool(thread_pool_size) - self.threads = [] - self.timers = [] - - def add_dynamic_timer(self, callback, initial_delay=None, - periodic_interval_max=None, *args, **kwargs): - timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) - timer.start(initial_delay=initial_delay, - periodic_interval_max=periodic_interval_max) - self.timers.append(timer) - - def add_timer(self, interval, callback, initial_delay=None, - *args, **kwargs): - pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) - pulse.start(interval=interval, - initial_delay=initial_delay) - self.timers.append(pulse) - - def add_thread(self, callback, *args, **kwargs): - gt = self.pool.spawn(callback, *args, **kwargs) - th = Thread(gt, self) - self.threads.append(th) - - def thread_done(self, thread): - self.threads.remove(thread) - - def stop(self): - current = greenthread.getcurrent() - for x in self.threads: - if x is current: - # don't kill the current thread. - continue - try: - x.stop() - except Exception as ex: - LOG.exception(ex) - - for x in self.timers: - try: - x.stop() - except Exception as ex: - LOG.exception(ex) - self.timers = [] - - def wait(self): - for x in self.timers: - try: - x.wait() - except greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) - current = greenthread.getcurrent() - for x in self.threads: - if x is current: - continue - try: - x.wait() - except greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) diff --git a/rack/openstack/common/timeutils.py b/rack/openstack/common/timeutils.py deleted file mode 100644 index d8cf539..0000000 --- a/rack/openstack/common/timeutils.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Time related utilities and helper functions. 
-""" - -import calendar -import datetime -import time - -import iso8601 -import six - - -# ISO 8601 extended time format with microseconds -_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' -PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND - - -def isotime(at=None, subsecond=False): - """Stringify time in ISO 8601 format.""" - if not at: - at = utcnow() - st = at.strftime(_ISO8601_TIME_FORMAT - if not subsecond - else _ISO8601_TIME_FORMAT_SUBSECOND) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' else tz) - return st - - -def parse_isotime(timestr): - """Parse time from ISO 8601 format.""" - try: - return iso8601.parse_date(timestr) - except iso8601.ParseError as e: - raise ValueError(six.text_type(e)) - except TypeError as e: - raise ValueError(six.text_type(e)) - - -def strtime(at=None, fmt=PERFECT_TIME_FORMAT): - """Returns formatted utcnow.""" - if not at: - at = utcnow() - return at.strftime(fmt) - - -def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): - """Turn a formatted time back into a datetime.""" - return datetime.datetime.strptime(timestr, fmt) - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC naive object.""" - offset = timestamp.utcoffset() - if offset is None: - return timestamp - return timestamp.replace(tzinfo=None) - offset - - -def is_older_than(before, seconds): - """Return True if before is older than seconds.""" - if isinstance(before, six.string_types): - before = parse_strtime(before).replace(tzinfo=None) - else: - before = before.replace(tzinfo=None) - - return utcnow() - before > datetime.timedelta(seconds=seconds) - - -def is_newer_than(after, seconds): - """Return True if after is newer than seconds.""" - if isinstance(after, six.string_types): - after = parse_strtime(after).replace(tzinfo=None) - else: - after = after.replace(tzinfo=None) - - return after - utcnow() > datetime.timedelta(seconds=seconds) - - -def utcnow_ts(): - """Timestamp version of our utcnow function.""" - if utcnow.override_time is None: - # NOTE(kgriffs): This is several times faster - # than going through calendar.timegm(...) - return int(time.time()) - - return calendar.timegm(utcnow().timetuple()) - - -def utcnow(): - """Overridable version of utils.utcnow.""" - if utcnow.override_time: - try: - return utcnow.override_time.pop(0) - except AttributeError: - return utcnow.override_time - return datetime.datetime.utcnow() - - -def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formated date from timestamp.""" - return isotime(datetime.datetime.utcfromtimestamp(timestamp)) - - -utcnow.override_time = None - - -def set_time_override(override_time=None): - """Overrides utils.utcnow. - - Make it return a constant time or a list thereof, one at a time. - - :param override_time: datetime instance or list thereof. If not - given, defaults to the current UTC time. 
- """ - utcnow.override_time = override_time or datetime.datetime.utcnow() - - -def advance_time_delta(timedelta): - """Advance overridden time using a datetime.timedelta.""" - assert(not utcnow.override_time is None) - try: - for dt in utcnow.override_time: - dt += timedelta - except TypeError: - utcnow.override_time += timedelta - - -def advance_time_seconds(seconds): - """Advance overridden time by seconds.""" - advance_time_delta(datetime.timedelta(0, seconds)) - - -def clear_time_override(): - """Remove the overridden time.""" - utcnow.override_time = None - - -def marshall_now(now=None): - """Make an rpc-safe datetime with microseconds. - - Note: tzinfo is stripped, but not required for relative times. - """ - if not now: - now = utcnow() - return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, - minute=now.minute, second=now.second, - microsecond=now.microsecond) - - -def unmarshall_time(tyme): - """Unmarshall a datetime dict.""" - return datetime.datetime(day=tyme['day'], - month=tyme['month'], - year=tyme['year'], - hour=tyme['hour'], - minute=tyme['minute'], - second=tyme['second'], - microsecond=tyme['microsecond']) - - -def delta_seconds(before, after): - """Return the difference between two timing objects. - - Compute the difference in seconds between two date, time, or - datetime objects (as a float, to microsecond resolution). - """ - delta = after - before - return total_seconds(delta) - - -def total_seconds(delta): - """Return the total seconds of datetime.timedelta object. - - Compute total seconds of datetime.timedelta, datetime.timedelta - doesn't have method total_seconds in Python2.6, calculate it manually. - """ - try: - return delta.total_seconds() - except AttributeError: - return ((delta.days * 24 * 3600) + delta.seconds + - float(delta.microseconds) / (10 ** 6)) - - -def is_soon(dt, window): - """Determines if time is going to happen in the next window seconds. - - :params dt: the time - :params window: minimum seconds to remain to consider the time not soon - - :return: True if expiration is within the given duration - """ - soon = (utcnow() + datetime.timedelta(seconds=window)) - return normalize_time(dt) <= soon diff --git a/rack/openstack/common/units.py b/rack/openstack/common/units.py deleted file mode 100644 index 84b518c..0000000 --- a/rack/openstack/common/units.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2013 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit constants -""" - -#Binary unit constants. -Ki = 1024 -Mi = 1024 ** 2 -Gi = 1024 ** 3 -Ti = 1024 ** 4 -Pi = 1024 ** 5 -Ei = 1024 ** 6 -Zi = 1024 ** 7 -Yi = 1024 ** 8 - -#Decimal unit constants. 
-k = 1000 -M = 1000 ** 2 -G = 1000 ** 3 -T = 1000 ** 4 -P = 1000 ** 5 -E = 1000 ** 6 -Z = 1000 ** 7 -Y = 1000 ** 8 diff --git a/rack/openstack/common/uuidutils.py b/rack/openstack/common/uuidutils.py deleted file mode 100644 index 234b880..0000000 --- a/rack/openstack/common/uuidutils.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2012 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -UUID related utilities and helper functions. -""" - -import uuid - - -def generate_uuid(): - return str(uuid.uuid4()) - - -def is_uuid_like(val): - """Returns validation of a value as a UUID. - - For our purposes, a UUID is a canonical form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False diff --git a/rack/openstack/common/versionutils.py b/rack/openstack/common/versionutils.py deleted file mode 100644 index f7b1f8a..0000000 --- a/rack/openstack/common/versionutils.py +++ /dev/null @@ -1,45 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helpers for comparing version strings. -""" - -import pkg_resources - - -def is_compatible(requested_version, current_version, same_major=True): - """Determine whether `requested_version` is satisfied by - `current_version`; in other words, `current_version` is >= - `requested_version`. - - :param requested_version: version to check for compatibility - :param current_version: version to check against - :param same_major: if True, the major version must be identical between - `requested_version` and `current_version`. This is used when a - major-version difference indicates incompatibility between the two - versions. Since this is the common-case in practice, the default is - True. - :returns: True if compatible, False if not - """ - requested_parts = pkg_resources.parse_version(requested_version) - current_parts = pkg_resources.parse_version(current_version) - - if same_major and (requested_parts[0] != current_parts[0]): - return False - - return current_parts >= requested_parts diff --git a/rack/openstack/common/xmlutils.py b/rack/openstack/common/xmlutils.py deleted file mode 100644 index b131d3e..0000000 --- a/rack/openstack/common/xmlutils.py +++ /dev/null @@ -1,74 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 IBM Corp. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from xml.dom import minidom
-from xml.parsers import expat
-from xml import sax
-from xml.sax import expatreader
-
-
-class ProtectedExpatParser(expatreader.ExpatParser):
-    """An expat parser which disables DTDs and entities by default."""
-
-    def __init__(self, forbid_dtd=True, forbid_entities=True,
-                 *args, **kwargs):
-        # Python 2.x old style class
-        expatreader.ExpatParser.__init__(self, *args, **kwargs)
-        self.forbid_dtd = forbid_dtd
-        self.forbid_entities = forbid_entities
-
-    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
-        raise ValueError("Inline DTD forbidden")
-
-    def entity_decl(self, entityName, is_parameter_entity, value, base,
-                    systemId, publicId, notationName):
-        raise ValueError("Entity declaration forbidden")
-
-    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
-        # expat 1.2
-        raise ValueError("Unparsed entity forbidden")
-
-    def external_entity_ref(self, context, base, systemId, publicId):
-        raise ValueError("External entity forbidden")
-
-    def notation_decl(self, name, base, sysid, pubid):
-        raise ValueError("Notation forbidden")
-
-    def reset(self):
-        expatreader.ExpatParser.reset(self)
-        if self.forbid_dtd:
-            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
-            self._parser.EndDoctypeDeclHandler = None
-        if self.forbid_entities:
-            self._parser.EntityDeclHandler = self.entity_decl
-            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
-            self._parser.ExternalEntityRefHandler = self.external_entity_ref
-            self._parser.NotationDeclHandler = self.notation_decl
-            try:
-                self._parser.SkippedEntityHandler = None
-            except AttributeError:
-                # some pyexpat versions do not support SkippedEntity
-                pass
-
-
-def safe_minidom_parse_string(xml_string):
-    """Parse an XML string using minidom safely."""
-    try:
-        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
-    except sax.SAXParseException:
-        raise expat.ExpatError()
diff --git a/rack/paths.py b/rack/paths.py
deleted file mode 100644
index 794ba7b..0000000
--- a/rack/paths.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
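A quick usage sketch for the ProtectedExpatParser deleted just above
(rack/openstack/common/xmlutils.py), assuming the module is importable at
that path: benign XML parses through minidom as usual, while the handler
overrides reject hostile markup instead of expanding it. The payload below
is invented for illustration.

    from rack.openstack.common import xmlutils

    doc = xmlutils.safe_minidom_parse_string("<greeting>hello</greeting>")
    print(doc.documentElement.tagName)  # greeting

    try:
        xmlutils.safe_minidom_parse_string(
            '<!DOCTYPE d [<!ENTITY x "boom">]><d>&x;</d>')
    except ValueError as e:
        print(e)  # start_doctype_decl() raises "Inline DTD forbidden"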
- -import os -import sys - -from oslo.config import cfg - -path_opts = [ - cfg.StrOpt('pybasedir', - default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), - help='Directory where the rack python module is installed'), - cfg.StrOpt('bindir', - default=os.path.join(sys.prefix, 'local', 'bin'), - help='Directory where rack binaries are installed'), - cfg.StrOpt('state_path', - default='$pybasedir', - help="Top-level directory for maintaining rack's state"), -] - -CONF = cfg.CONF -CONF.register_opts(path_opts) - - -def basedir_def(*args): - """Return an uninterpolated path relative to $pybasedir.""" - return os.path.join('$pybasedir', *args) - - -def bindir_def(*args): - """Return an uninterpolated path relative to $bindir.""" - return os.path.join('$bindir', *args) - - -def state_path_def(*args): - """Return an uninterpolated path relative to $state_path.""" - return os.path.join('$state_path', *args) - - -def basedir_rel(*args): - """Return a path relative to $pybasedir.""" - return os.path.join(CONF.pybasedir, *args) - - -def bindir_rel(*args): - """Return a path relative to $bindir.""" - return os.path.join(CONF.bindir, *args) - - -def state_path_rel(*args): - """Return a path relative to $state_path.""" - return os.path.join(CONF.state_path, *args) diff --git a/rack/policy.py b/rack/policy.py deleted file mode 100644 index 56461a3..0000000 --- a/rack/policy.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Policy Engine For Nova.""" - -import os.path - -from oslo.config import cfg - -from rack import exception -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import policy -from rack import utils - - -policy_opts = [ - cfg.StrOpt('policy_file', - default='policy.json', - help=_('JSON file representing policy')), - cfg.StrOpt('policy_default_rule', - default='default', - help=_('Rule checked when requested rule is not found')), -] - -CONF = cfg.CONF -CONF.register_opts(policy_opts) - -_POLICY_PATH = None -_POLICY_CACHE = {} - - -def reset(): - global _POLICY_PATH - global _POLICY_CACHE - _POLICY_PATH = None - _POLICY_CACHE = {} - policy.reset() - - -def init(): - global _POLICY_PATH - global _POLICY_CACHE - if not _POLICY_PATH: - _POLICY_PATH = CONF.policy_file - if not os.path.exists(_POLICY_PATH): - _POLICY_PATH = CONF.find_file(_POLICY_PATH) - if not _POLICY_PATH: - raise exception.ConfigNotFound(path=CONF.policy_file) - utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, - reload_func=_set_rules) - - -def _set_rules(data): - default_rule = CONF.policy_default_rule - policy.set_rules(policy.Rules.load_json(data, default_rule)) - - -def enforce(context, action, target, do_raise=True): - """Verifies that the action is valid on the target in this context. - - :param context: rack context - :param action: string representing the action to be checked - this should be colon separated for clarity. - i.e. 
``compute:create_instance``, - ``compute:attach_volume``, - ``volume:attach_volume`` - :param target: dictionary representing the object of the action - for object creation this should be a dictionary representing the - location of the object e.g. ``{'project_id': context.project_id}`` - :param do_raise: if True (the default), raises PolicyNotAuthorized; - if False, returns False - - :raises rack.exception.PolicyNotAuthorized: if verification fails - and do_raise is True. - - :return: returns a non-False value (not necessarily "True") if - authorized, and the exact value False if not authorized and - do_raise is False. - """ - init() - - credentials = context.to_dict() - - # Add the exception arguments if asked to do a raise - extra = {} - if do_raise: - extra.update(exc=exception.PolicyNotAuthorized, action=action) - - return policy.check(action, target, credentials, **extra) - - -def check_is_admin(context): - """Whether or not roles contains 'admin' role according to policy setting. - - """ - init() - - # the target is user-self - credentials = context.to_dict() - target = credentials - - return policy.check('context_is_admin', target, credentials) - - -@policy.register('is_admin') -class IsAdminCheck(policy.Check): - - """An explicit check for is_admin.""" - - def __init__(self, kind, match): - """Initialize the check.""" - - self.expected = (match.lower() == 'true') - - super(IsAdminCheck, self).__init__(kind, str(self.expected)) - - def __call__(self, target, creds): - """Determine whether is_admin matches the requested value.""" - - return creds['is_admin'] == self.expected - - -def get_rules(): - return policy._rules diff --git a/rack/resourceoperator/__init__.py b/rack/resourceoperator/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/resourceoperator/manager.py b/rack/resourceoperator/manager.py deleted file mode 100644 index b5baad0..0000000 --- a/rack/resourceoperator/manager.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
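For context on the enforce() helper deleted above (rack/policy.py), a
minimal caller sketch; the action name and target dict here are
hypothetical examples, not rules guaranteed to exist in RACK's policy.json.

    from rack import policy

    def delete_group(context, gid):
        # Raises rack.exception.PolicyNotAuthorized when the credentials
        # in `context` fail the hypothetical "rack:delete_group" rule;
        # with do_raise=False it would return False instead of raising.
        policy.enforce(context, "rack:delete_group",
                       {"project_id": context.project_id})
        # ... proceed with the delete ...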
-""" -ResourceOperator Service -""" - -from rack import exception - -from rack.openstack.common import log as logging - -from rack.resourceoperator.openstack import keypairs -from rack.resourceoperator.openstack import networks -from rack.resourceoperator.openstack import processes -from rack.resourceoperator.openstack import securitygroups - - -LOG = logging.getLogger(__name__) - - -class ResourceOperator(object): - - def __init__(self): - self.keypair_client = keypairs.KeypairAPI() - self.securitygroup_client = securitygroups.SecuritygroupAPI() - self.network_client = networks.NetworkAPI() - self.process_client = processes.ProcessesAPI() - - def keypair_list(self, context, keypairs): - try: - ids = self.keypair_client.keypair_list() - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.code, e.message) - - for keypair in keypairs: - if keypair["nova_keypair_id"] in ids: - keypair["status"] = "Exist" - else: - keypair["status"] = "NotExist" - return keypairs - - def keypair_show(self, context, keypair): - try: - self.keypair_client.keypair_show(keypair["nova_keypair_id"]) - keypair["status"] = "Exist" - except Exception as e: - LOG.exception(e) - if e.code == 404: - keypair["status"] = "NotExist" - return - raise exception.OpenStackException(e.code, e.message) - - def keypair_create(self, context, name): - try: - return self.keypair_client.keypair_create(name) - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.code, e.message) - - def keypair_delete(self, context, nova_keypair_id): - try: - self.keypair_client.keypair_delete(nova_keypair_id) - except Exception as e: - LOG.exception(e) - if e.code == 404: - return - raise exception.OpenStackException(e.code, e.message) - - def securitygroup_list(self, context, securitygroups): - try: - neutron_securitygroup_ids = self.securitygroup_client.\ - securitygroup_list() - except Exception as e: - raise exception.OpenStackException(e.status_code, e.message) - for securitygroup in securitygroups: - if securitygroup["neutron_securitygroup_id"] in\ - neutron_securitygroup_ids: - securitygroup["status"] = "Exist" - else: - securitygroup["status"] = "NotExist" - return securitygroups - - def securitygroup_show(self, context, securitygroup): - try: - self.securitygroup_client.securitygroup_get( - securitygroup['neutron_securitygroup_id']) - securitygroup["status"] = "Exist" - except Exception as e: - if e.status_code == 404: - securitygroup["status"] = "NotExist" - else: - raise exception.OpenStackException(e.status_code, e.message) - return securitygroup - - def securitygroup_create(self, context, name, securitygrouprules): - try: - return self.securitygroup_client.securitygroup_create( - name, securitygrouprules) - except Exception as e: - raise exception.OpenStackException(e.status_code, e.message) - - def securitygroup_delete(self, context, neutron_securitygroup_id): - try: - self.securitygroup_client.securitygroup_delete( - neutron_securitygroup_id) - except Exception as e: - if e.status_code == 404: - pass - else: - LOG.exception(e) - raise exception.OpenStackException(e.status_code, e.message) - - def network_list(self, context, networks): - try: - ids = self.network_client.network_list() - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.status_code, e.message) - - for network in networks: - if network["neutron_network_id"] in ids: - network["status"] = "Exist" - else: - network["status"] = "NotExist" - return networks - - def network_show(self, context, 
network): - try: - self.network_client.network_show(network["neutron_network_id"]) - network["status"] = "Exist" - except Exception as e: - LOG.exception(e) - if e.status_code == 404: - network["status"] = "NotExist" - return - raise exception.OpenStackException(e.status_code, e.message) - - def network_create(self, context, name, cidr, gateway, ext_router, - dns_nameservers): - try: - return self.network_client.network_create( - name, cidr, gateway, ext_router, dns_nameservers) - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.status_code, e.message) - - def network_delete(self, context, neutron_network_id, ext_router): - try: - self.network_client.network_delete(neutron_network_id, ext_router) - except Exception as e: - LOG.exception(e) - if e.status_code == 404: - return - raise exception.OpenStackException(e.status_code, e.message) - - def process_list(self, context, processes): - try: - nova_process_list = self.process_client.process_list() - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.code, e.message) - - for process in processes: - is_exist = False - for nova_process in nova_process_list: - if process["nova_instance_id"] ==\ - nova_process["nova_instance_id"]: - is_exist = True - process["status"] = nova_process["status"] - for nova_network in nova_process["networks"]: - for network in process["networks"]: - if(nova_network["display_name"] == - network["display_name"]): - network.update(nova_network) - break - if not is_exist: - process["status"] = "NotExist" - - return processes - - def process_show(self, context, process): - try: - nova_process = self.process_client.process_show( - process["nova_instance_id"]) - process["status"] = nova_process["status"] - for nova_network in nova_process["networks"]: - for network in process["networks"]: - if(nova_network["display_name"] == - network["display_name"]): - network.update(nova_network) - - except Exception as e: - LOG.exception(e) - if e.code == 404: - process["status"] = "NotExist" - return - raise exception.OpenStackException(e.code, e.message) - - def process_create(self, context, name, key_name, - security_groups, image, flavor, - userdata, meta, nics): - try: - return self.process_client.process_create( - name, key_name, security_groups, image, flavor, - userdata, meta, nics) - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.code, e.message) - - def process_delete(self, context, nova_instance_id): - try: - self.process_client.process_delete(nova_instance_id) - except Exception as e: - LOG.exception(e) - if e.code == 404: - return - raise exception.OpenStackException(e.code, e.message) - - def get_process_address(self, context, nova_instance_id): - try: - return self.process_client.get_process_address(nova_instance_id) - except Exception as e: - LOG.exception(e) - raise exception.OpenStackException(e.code, e.message) diff --git a/rack/resourceoperator/openstack/__init__.py b/rack/resourceoperator/openstack/__init__.py deleted file mode 100644 index 9e0f74b..0000000 --- a/rack/resourceoperator/openstack/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo.config import cfg - -from neutronclient.v2_0 import client as neutron_client -from novaclient.v1_1 import client as nova_client - -from rack import exception -from rack.openstack.common import log as logging - -openstack_client_opts = [ - cfg.StrOpt('os_username', - help='Valid username for OpenStack'), - cfg.StrOpt('os_password', - help='Valid password for OpenStack'), - cfg.StrOpt('os_tenant_name', - help='Valid tenant name for OpenStack'), - cfg.StrOpt('os_auth_url', - help='The keystone endpoint'), - cfg.StrOpt('os_region_name', - help='Valid region name for OpenStack') -] - -CONF = cfg.CONF -CONF.register_opts(openstack_client_opts) - -LOG = logging.getLogger(__name__) - - -def get_nova_client(): - credentials = { - "username": CONF.os_username, - "api_key": CONF.os_password, - "project_id": CONF.os_tenant_name, - "auth_url": CONF.os_auth_url, - "region_name": CONF.os_region_name - } - - for key, value in credentials.items(): - if not value: - raise exception.InvalidOpenStackCredential(credential=key) - - return nova_client.Client(**credentials) - - -def get_neutron_client(): - credentials = { - "username": CONF.os_username, - "password": CONF.os_password, - "tenant_name": CONF.os_tenant_name, - "auth_url": CONF.os_auth_url, - "region_name": CONF.os_region_name - } - - for key, value in credentials.items(): - if not value: - raise exception.InvalidOpenStackCredential(credential=key) - - return neutron_client.Client(**credentials) diff --git a/rack/resourceoperator/openstack/keypairs.py b/rack/resourceoperator/openstack/keypairs.py deleted file mode 100644 index 70fb0e6..0000000 --- a/rack/resourceoperator/openstack/keypairs.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
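The client factories deleted above
(rack/resourceoperator/openstack/__init__.py) refuse to build a client
unless all five os_* options are set. A minimal sketch, assuming the
options live in a config file (the path below is hypothetical):

    from oslo.config import cfg

    from rack.resourceoperator import openstack as os_client

    cfg.CONF(['--config-file', '/etc/rack/rack.conf'])
    # Raises InvalidOpenStackCredential if any os_* option is unset.
    nova = os_client.get_nova_client()
    neutron = os_client.get_neutron_client()
    print([kp.id for kp in nova.keypairs.list()])
    print([n["id"] for n in neutron.list_networks()["networks"]])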
-from rack.openstack.common import log as logging -from rack.resourceoperator import openstack as os_client - -LOG = logging.getLogger(__name__) - - -class KeypairAPI(object): - - def __init__(self): - super(KeypairAPI, self).__init__() - - def keypair_list(self): - nova = os_client.get_nova_client() - keypairs = nova.keypairs.list() - nova_keypair_ids = [] - for keypair in keypairs: - nova_keypair_ids.append(keypair.id) - return nova_keypair_ids - - def keypair_show(self, nova_keypair_id): - nova = os_client.get_nova_client() - return nova.keypairs.get(nova_keypair_id) - - def keypair_create(self, name): - nova = os_client.get_nova_client() - keypair = nova.keypairs.create(name) - values = {} - values["nova_keypair_id"] = keypair.name - values["private_key"] = keypair.private_key - return values - - def keypair_delete(self, nova_keypair_id): - nova = os_client.get_nova_client() - nova.keypairs.delete(nova_keypair_id) diff --git a/rack/resourceoperator/openstack/networks.py b/rack/resourceoperator/openstack/networks.py deleted file mode 100644 index 6dd1fd9..0000000 --- a/rack/resourceoperator/openstack/networks.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from rack.openstack.common import log as logging -from rack.resourceoperator import openstack as os_client - - -LOG = logging.getLogger(__name__) - - -class NetworkAPI(object): - - def network_list(self): - neutron = os_client.get_neutron_client() - networks = neutron.list_networks().get("networks") - neutron_network_ids = [] - for network in networks: - neutron_network_ids.append(network.get("id")) - return neutron_network_ids - - def network_show(self, neutron_network_id): - neutron = os_client.get_neutron_client() - return neutron.show_network(neutron_network_id) - - def network_create(self, name, cidr, gateway=None, ext_router=None, - dns_nameservers=None): - neutron = os_client.get_neutron_client() - network_body = {"network": {"name": name}} - network = neutron.create_network(network_body)["network"] - - try: - subnet_body = { - "subnet": { - "network_id": network["id"], - "ip_version": 4, - "cidr": cidr - } - } - if gateway: - subnet_body["subnet"]["gateway_ip"] = gateway - if dns_nameservers: - subnet_body["subnet"]["dns_nameservers"] = dns_nameservers - subnet = neutron.create_subnet(subnet_body)["subnet"] - - if ext_router: - router_body = {"subnet_id": subnet["id"]} - neutron.add_interface_router(ext_router, router_body) - except Exception as e: - neutron.delete_network(network['id']) - raise e - - return dict(neutron_network_id=network["id"]) - - def network_delete(self, neutron_network_id, ext_router=None): - neutron = os_client.get_neutron_client() - - if ext_router: - network = neutron.show_network(neutron_network_id)["network"] - subnets = network["subnets"] - for subnet in subnets: - neutron.remove_interface_router( - ext_router, {"subnet_id": subnet}) - - neutron.delete_network(neutron_network_id) diff --git 
a/rack/resourceoperator/openstack/processes.py b/rack/resourceoperator/openstack/processes.py deleted file mode 100644 index 7cf97a9..0000000 --- a/rack/resourceoperator/openstack/processes.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from rack.openstack.common import log as logging -from rack.resourceoperator import openstack as os_client - - -LOG = logging.getLogger(__name__) - - -class ProcessesAPI(object): - - def process_list(self): - nova = os_client.get_nova_client() - servers = nova.servers.list() - server_status_list = [] - for server in servers: - networks = [] - for key in server.addresses.keys(): - for address in server.addresses[key]: - networks.append({ - "display_name": key, - address["OS-EXT-IPS:type"]: address["addr"]}) - d = { - "nova_instance_id": server.id, - "status": server.status, - "networks": networks - } - server_status_list.append(d) - return server_status_list - - def process_show(self, nova_instance_id): - nova = os_client.get_nova_client() - server = nova.servers.get(nova_instance_id) - networks = [] - for key in server.addresses.keys(): - for address in server.addresses[key]: - networks.append({ - "display_name": key, - address["OS-EXT-IPS:type"]: address["addr"]}) - return {"status": server.status, "networks": networks} - - def process_create(self, name, key_name, - security_groups, image, flavor, - userdata, meta, nics): - nova = os_client.get_nova_client() - server = nova.servers.create(name=name, key_name=key_name, - security_groups=security_groups, - image=image, flavor=flavor, - userdata=userdata, meta=meta, - nics=nics) - return (server.id, server.status) - - def process_delete(self, nova_instance_id): - nova = os_client.get_nova_client() - nova.servers.delete(nova_instance_id) - - def get_process_address(self, nova_instance_id): - nova = os_client.get_nova_client() - server = nova.servers.get(nova_instance_id) - addresses = server.addresses - addrs = [] - for k in addresses.keys(): - ips = addresses.get(k) - for ip in ips: - if ip["OS-EXT-IPS:type"] == "fixed": - addrs.append(ip["addr"]) - return ",".join(addrs) diff --git a/rack/resourceoperator/openstack/securitygroups.py b/rack/resourceoperator/openstack/securitygroups.py deleted file mode 100644 index d07b0db..0000000 --- a/rack/resourceoperator/openstack/securitygroups.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
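To make the data returned by the ProcessesAPI methods deleted above
concrete: for a server with one fixed and one floating address on a
network named my_net, process_show() builds a dict shaped like the
following (all values invented for illustration):

    {
        "status": "ACTIVE",
        "networks": [
            {"display_name": "my_net", "fixed": "10.0.0.3"},
            {"display_name": "my_net", "floating": "203.0.113.7"},
        ],
    }

For the same server, get_process_address() would return "10.0.0.3", since
it joins only the fixed addresses with commas.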
-from rack.openstack.common import log as logging -from rack.resourceoperator import openstack as os_client - -LOG = logging.getLogger(__name__) - - -class SecuritygroupAPI(object): - - def securitygroup_list(self): - neutron = os_client.get_neutron_client() - securitygroup_list = neutron.list_security_groups() - neutron_securitygroup_ids = [] - for securitygroup in securitygroup_list['security_groups']: - neutron_securitygroup_ids.append(securitygroup['id']) - return neutron_securitygroup_ids - - def securitygroup_get(self, securitygroup_id): - neutron = os_client.get_neutron_client() - securitygroup = neutron.show_security_group(securitygroup_id) - return securitygroup['security_group']['id'] - - def securitygroup_create(self, name, rules): - neutron = os_client.get_neutron_client() - body = {"security_group": {"name": name}} - securitygroup = neutron.create_security_group(body)['security_group'] - neutron_securitygroup_id = securitygroup['id'] - - def _securitygroup_rule_create(neutron_securitygroup_id, - protocol, port_range_min=None, - port_range_max=None, - remote_neutron_securitygroup_id=None, - remote_ip_prefix=None): - body = { - "security_group_rule": { - "direction": "ingress", - "ethertype": "IPv4", - "security_group_id": neutron_securitygroup_id, - "protocol": protocol, - "port_range_min": port_range_min or port_range_max, - "port_range_max": port_range_max, - } - } - if remote_neutron_securitygroup_id: - body['security_group_rule']['remote_group_id'] =\ - remote_neutron_securitygroup_id - elif remote_ip_prefix: - body['security_group_rule']['remote_ip_prefix'] =\ - remote_ip_prefix - neutron.create_security_group_rule(body) - - if rules: - try: - for rule in rules: - _securitygroup_rule_create( - neutron_securitygroup_id, **rule) - except Exception as e: - neutron.delete_security_group(neutron_securitygroup_id) - raise e - - return dict(neutron_securitygroup_id=neutron_securitygroup_id) - - def securitygroup_delete(self, neutron_securitygroup_id): - neutron = os_client.get_neutron_client() - neutron.delete_security_group(neutron_securitygroup_id) diff --git a/rack/safe_utils.py b/rack/safe_utils.py deleted file mode 100644 index 42320d6..0000000 --- a/rack/safe_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities and helper functions that won't produce circular imports.""" - -import inspect - - -def getcallargs(function, *args, **kwargs): - """This is a simplified inspect.getcallargs (2.7+). - - It should be replaced when python >= 2.7 is standard. - """ - keyed_args = {} - argnames, varargs, keywords, defaults = inspect.getargspec(function) - - keyed_args.update(kwargs) - - #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in - # argnames but not in args or kwargs. Uses 'in' rather than '==' because - # some tests use 'self2'. - if 'self' in argnames[0] or 'cls' == argnames[0]: - # The function may not actually be a method or have im_self. 
-            # Typically seen when it's stubbed with mox.
-        if inspect.ismethod(function) and hasattr(function, 'im_self'):
-            keyed_args[argnames[0]] = function.im_self
-        else:
-            keyed_args[argnames[0]] = None
-
-    remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
-    keyed_args.update(dict(zip(remaining_argnames, args)))
-
-    if defaults:
-        num_defaults = len(defaults)
-        for argname, value in zip(argnames[-num_defaults:], defaults):
-            if argname not in keyed_args:
-                keyed_args[argname] = value
-
-    return keyed_args
diff --git a/rack/service.py b/rack/service.py
deleted file mode 100644
index ea861b0..0000000
--- a/rack/service.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Generic Node base class for all workers that run on hosts."""
-
-from oslo.config import cfg
-
-from rack import exception
-from rack.openstack.common.gettextutils import _
-from rack.openstack.common import importutils
-from rack.openstack.common import log as logging
-from rack.openstack.common import service
-from rack import utils
-from rack import wsgi
-
-LOG = logging.getLogger(__name__)
-
-service_opts = [
-    cfg.StrOpt('rackapi_listen',
-               default="0.0.0.0",
-               help='The IP address on which the RACK API will listen.'),
-    cfg.IntOpt('rackapi_listen_port',
-               default=8088,
-               help='The port on which the RACK API will listen.'),
-    cfg.IntOpt('rackapi_workers',
-               help='Number of workers for the RACK API service. The '
-                    'default will be the number of CPUs available.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(service_opts)
-CONF.import_opt('host', 'rack.netconf')
-
-
-class WSGIService(object):
-
-    """Provides the ability to launch an API from a 'paste' configuration."""
-
-    def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
-        """Initialize, but do not start the WSGI server.
-
-        :param name: The name of the WSGI server given to the loader.
-        :param loader: Loads the WSGI application using the given name.
-        :returns: None
-
-        """
-        self.name = name
-        self.manager = self._get_manager()
-        self.loader = loader or wsgi.Loader()
-        self.app = self.loader.load_app(name)
-        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
-        self.port = getattr(CONF, '%s_listen_port' % name, 0)
-        self.workers = (getattr(CONF, '%s_workers' % name, None) or
                        utils.cpu_count())
-        if self.workers and self.workers < 1:
-            worker_name = '%s_workers' % name
-            msg = (_("%(worker_name)s value of %(workers)s is invalid, "
-                     "must be greater than 0") %
-                   {'worker_name': worker_name,
-                    'workers': str(self.workers)})
-            raise exception.InvalidInput(msg)
-        self.use_ssl = use_ssl
-        self.server = wsgi.Server(name,
-                                  self.app,
-                                  host=self.host,
-                                  port=self.port,
-                                  use_ssl=self.use_ssl,
-                                  max_url_len=max_url_len)
-        # Pull back actual port used
-        self.port = self.server.port
-        self.backdoor_port = None
-
-    def _get_manager(self):
-        """Initialize a Manager object appropriate for this service.
- - Use the service name to look up a Manager subclass from the - configuration and initialize an instance. If no class name - is configured, just return None. - - :returns: a Manager instance, or None. - - """ - fl = '%s_manager' % self.name - if fl not in CONF: - return None - - manager_class_name = CONF.get(fl, None) - if not manager_class_name: - return None - - manager_class = importutils.import_class(manager_class_name) - return manager_class() - - def start(self): - """Start serving this service using loaded configuration. - - Also, retrieve updated port number in case '0' was passed in, which - indicates a random port should be used. - - :returns: None - - """ - if self.manager: - self.manager.init_host() - self.manager.pre_start_hook() - if self.backdoor_port is not None: - self.manager.backdoor_port = self.backdoor_port - self.server.start() - if self.manager: - self.manager.post_start_hook() - - def stop(self): - """Stop serving this API. - - :returns: None - - """ - self.server.stop() - - def wait(self): - """Wait for the service to stop serving this API. - - :returns: None - - """ - self.server.wait() - - -def process_launcher(): - return service.ProcessLauncher() - - -_launcher = None - - -def serve(server, workers=None): - global _launcher - if _launcher: - raise RuntimeError(_('serve() can only be called once')) - - _launcher = service.launch(server, workers=workers) - - -def wait(): - _launcher.wait() diff --git a/rack/test.py b/rack/test.py deleted file mode 100644 index a7e0d64..0000000 --- a/rack/test.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Base classes for our unit tests. - -Allows overriding of flags for use of fakes, and some black magic for -inline callbacks. 
- -""" - -import eventlet -eventlet.monkey_patch(os=False) - -import gettext -import logging -import os -import shutil -import sys -import uuid - -import fixtures -from oslo.config import cfg -from oslo.messaging import conffixture as messaging_conffixture -import testtools - -from rack.db import migration -from rack.db.sqlalchemy import api as session -from rack.openstack.common.fixture import logging as log_fixture -from rack.openstack.common.fixture import moxstubout -from rack.openstack.common import log as oslo_logging -from rack.openstack.common import timeutils -from rack import paths -from rack import service -from rack.tests import conf_fixture -from rack.tests import policy_fixture - - -test_opts = [ - cfg.StrOpt('sqlite_clean_db', - default='clean.sqlite', - help='File name of clean sqlite db'), -] - -CONF = cfg.CONF -CONF.register_opts(test_opts) -CONF.import_opt('connection', - 'rack.openstack.common.db.options', - group='database') -CONF.import_opt('sqlite_db', 'rack.openstack.common.db.options', - group='database') -CONF.set_override('use_stderr', False) - -oslo_logging.setup('rack') - -_DB_CACHE = None -_TRUE_VALUES = ('True', 'true', '1', 'yes') - - -class Database(fixtures.Fixture): - - def __init__(self, db_session, db_migrate, sql_connection, - sqlite_db, sqlite_clean_db): - self.sql_connection = sql_connection - self.sqlite_db = sqlite_db - self.sqlite_clean_db = sqlite_clean_db - - self.engine = db_session.get_engine() - self.engine.dispose() - conn = self.engine.connect() - if sql_connection == "sqlite://": - if db_migrate.db_version() > db_migrate.db_initial_version(): - return - else: - testdb = paths.state_path_rel(sqlite_db) - if os.path.exists(testdb): - return - db_migrate.db_sync() - if sql_connection == "sqlite://": - conn = self.engine.connect() - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - else: - cleandb = paths.state_path_rel(sqlite_clean_db) - shutil.copyfile(testdb, cleandb) - - def setUp(self): - super(Database, self).setUp() - - if self.sql_connection == "sqlite://": - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - else: - shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db), - paths.state_path_rel(self.sqlite_db)) - - -class ReplaceModule(fixtures.Fixture): - - """Replace a module with a fake module.""" - - def __init__(self, name, new_value): - self.name = name - self.new_value = new_value - - def _restore(self, old_value): - sys.modules[self.name] = old_value - - def setUp(self): - super(ReplaceModule, self).setUp() - old_value = sys.modules.get(self.name) - sys.modules[self.name] = self.new_value - self.addCleanup(self._restore, old_value) - - -class ServiceFixture(fixtures.Fixture): - - """Run a service as a test fixture.""" - - def __init__(self, name, host=None, **kwargs): - name = name - host = host and host or uuid.uuid4().hex - kwargs.setdefault('host', host) - kwargs.setdefault('binary', 'rack-%s' % name) - self.kwargs = kwargs - - def setUp(self): - super(ServiceFixture, self).setUp() - self.service = service.Service.create(**self.kwargs) - self.service.start() - self.addCleanup(self.service.kill) - - -class TranslationFixture(fixtures.Fixture): - - """Use gettext NullTranslation objects in tests.""" - - def setUp(self): - super(TranslationFixture, self).setUp() - nulltrans = gettext.NullTranslations() - gettext_fixture = fixtures.MonkeyPatch('gettext.translation', - lambda *x, **y: nulltrans) - self.gettext_patcher = 
self.useFixture(gettext_fixture) - - -class TestingException(Exception): - pass - - -class TestCase(testtools.TestCase): - - """Test case base class for all unit tests. - - Due to the slowness of DB access, please consider deriving from - `NoDBTestCase` first. - """ - USES_DB = True - - # NOTE(rpodolyaka): this attribute can be overridden in subclasses in order - # to scale the global test timeout value set for each - # test case separately. Use 0 value to disable timeout. - TIMEOUT_SCALING_FACTOR = 1 - - def setUp(self): - """Run before each test method to initialize test environment.""" - super(TestCase, self).setUp() - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. - test_timeout = 0 - - if self.TIMEOUT_SCALING_FACTOR >= 0: - test_timeout *= self.TIMEOUT_SCALING_FACTOR - else: - raise ValueError('TIMEOUT_SCALING_FACTOR value must be >= 0') - - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - self.useFixture(TranslationFixture()) - self.useFixture(log_fixture.get_logging_handle_error_fixture()) - - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - fs = '%(levelname)s [%(name)s] %(message)s' - self.log_fixture = self.useFixture(fixtures.FakeLogger( - level=logging.DEBUG, - format=fs)) - self.useFixture(conf_fixture.ConfFixture(CONF)) - - self.messaging_conf = messaging_conffixture.ConfFixture(CONF) - self.messaging_conf.transport_driver = 'fake' - self.messaging_conf.response_timeout = 15 - self.useFixture(self.messaging_conf) - - if self.USES_DB: - global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(session, migration, - sql_connection=CONF.database.connection, - sqlite_db=CONF.database.sqlite_db, - sqlite_clean_db=CONF.sqlite_clean_db) - - self.useFixture(_DB_CACHE) - - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = mox_fixture.mox - self.stubs = mox_fixture.stubs - self.addCleanup(self._clear_attrs) - self.useFixture(fixtures.EnvironmentVariable('http_proxy')) - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - CONF.set_override('fatal_exception_format_errors', True) - - def _clear_attrs(self): - # Delete attributes that don't start with _ so they don't pin - # memory around unnecessarily for the duration of the test - # suite - for key in [k for k in self.__dict__.keys() if k[0] != '_']: - del self.__dict__[key] - - def flags(self, **kw): - """Override flag variables for a test.""" - group = kw.pop('group', None) - for k, v in kw.iteritems(): - CONF.set_override(k, v, group) - - def start_service(self, name, host=None, **kwargs): - svc = self.useFixture(ServiceFixture(name, host, **kwargs)) - return svc.service - - -class APICoverage(object): - - cover_api = None - - def test_api_methods(self): - self.assertTrue(self.cover_api is not None) - api_methods = [x for x in dir(self.cover_api) - if not x.startswith('_')] - test_methods = [x[5:] for x in dir(self) - if x.startswith('test_')] - self.assertThat( - test_methods, - testtools.matchers.ContainsAll(api_methods)) - - -class 
TimeOverride(fixtures.Fixture): - - """Fixture to start and remove time override.""" - - def setUp(self): - super(TimeOverride, self).setUp() - timeutils.set_time_override() - self.addCleanup(timeutils.clear_time_override) - - -class NoDBTestCase(TestCase): - - """`NoDBTestCase` differs from TestCase in that DB access is not supported. - This makes tests run significantly faster. If possible, all new tests - should derive from this class. - """ - USES_DB = False diff --git a/rack/tests/__init__.py b/rack/tests/__init__.py deleted file mode 100644 index 884ee32..0000000 --- a/rack/tests/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import sys - -if ('eventlet' in sys.modules and - os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): - raise ImportError('eventlet imported before rack/cmd/__init__ ' - '(env var set to %s)' - % os.environ.get('EVENTLET_NO_GREENDNS')) - -os.environ['EVENTLET_NO_GREENDNS'] = 'yes' - -import eventlet - -eventlet.monkey_patch(os=False) diff --git a/rack/tests/api/__init__.py b/rack/tests/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/tests/api/fakes.py b/rack/tests/api/fakes.py deleted file mode 100644 index cfae435..0000000 --- a/rack/tests/api/fakes.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import paste.urlmap - -import rack.api -from rack.api import auth -from rack.api import v1 -from rack.api import versions - - -def wsgi_app(inner_app_v1=None): - if not inner_app_v1: - inner_app_v1 = v1.APIRouter() - - api_v1 = rack.api.FaultWrapper(auth.NoAuthMiddleware(inner_app_v1)) - - mapper = paste.urlmap.URLMap() - mapper['/v1'] = api_v1 - mapper['/'] = rack.api.FaultWrapper(versions.Versions()) - return mapper diff --git a/rack/tests/api/v1/__init__.py b/rack/tests/api/v1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/tests/api/v1/test_groups.py b/rack/tests/api/v1/test_groups.py deleted file mode 100644 index 8deb23e..0000000 --- a/rack/tests/api/v1/test_groups.py +++ /dev/null @@ -1,610 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from rack.api.v1 import groups -from rack import db -from rack import exception -from rack.openstack.common import jsonutils -from rack import test -from rack.tests.api import fakes - -import copy -import uuid -import webob - -GID = str(uuid.uuid4()) - -FAKE_GROUPS = { - "groups": [ - { - "gid": "gid1", - "user_id": "user_id1", - "project_id": "fake", - "display_name": "fake", - "display_description": "fake", - "status": "ACTIVE" - }, - { - "gid": "gid2", - "user_id": "user_id1", - "project_id": "fake", - "display_name": "fake", - "display_description": "fake", - "status": "ACTIVE" - }, - { - "gid": "gid3", - "user_id": "user_id2", - "project_id": "fake", - "display_name": "fake", - "display_description": "fake", - "status": "ACTIVE" - } - ] -} - - -def fake_create(context, kwargs): - return {"gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": kwargs["display_name"], - "display_description": kwargs["display_description"], - "status": "ACTIVE"} - - -def fake_update(context, kwargs): - return { - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "test", - "display_description": "test", - "status": "ACTIVE" - } - - -def fake_delete(context, kwargs): - return { - } - - -def fake_not_group_data_exists(context, kwargs): - return {"dummy-key": "dummy-data"} - - -def fake_not_group_data_not_exists(context, kwargs): - return {} - - -def fake_raise_exception(context, kwargs): - raise Exception() - - -def raise_group_not_found(context, kwargs): - raise exception.GroupNotFound(gid=GID) - - -def fake_group_get_all(context, filters): - if not filters: - return copy.deepcopy(FAKE_GROUPS["groups"]) - else: - return [ - {"gid": "fake", - "user_id": "fake", - "project_id": filters["project_id"], - "display_name": filters["display_name"], - "display_description": "fake", - "status": filters["status"]} - ] - - -def fake_group_get_by_gid(context, gid): - return { - "gid": gid, - "user_id": "a4362182a2ac425c9b0b0826ad187d31", - "project_id": "a43621849823764c9b0b0826ad187d31t", - "display_name": "my_group", - "display_description": "This is my group.", - "status": "ACTIVE" - } - - -def get_request(url, method, body=None): - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = method - if body is not None: - req.body = jsonutils.dumps(body) - return req - - -class GroupsTest(test.NoDBTestCase): - - def setUp(self): - super(GroupsTest, self).setUp() - self.stubs.Set(db, "group_create", fake_create) - self.stubs.Set(db, "group_get_all", fake_group_get_all) - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - self.stubs.Set(db, "group_update", fake_update) - self.stubs.Set(db, "group_delete", fake_delete) - self.controller = groups.Controller() - self.app = fakes.wsgi_app() - - def test_index(self): - url = '/v1/groups' - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - expected = copy.deepcopy(FAKE_GROUPS) - for group in expected["groups"]: - group["name"] = group.pop("display_name") - group["description"] 
= group.pop("display_description") - self.assertEqual(res.status_code, 200) - self.assertEqual(body, expected) - - def test_index_filters(self): - url = '/v1/groups?project_id=PID&name=NAME&status=STATUS' - - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - expected = {"groups": [ - {"gid": "fake", - "user_id": "fake", - "project_id": "PID", - "name": "NAME", - "description": "fake", - "status": "STATUS"} - ]} - self.assertEqual(res.status_code, 200) - self.assertEqual(body, expected) - - def test_show(self): - url = '/v1/groups/00000000-0000-0000-0000-000000000010' - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - expected = {"group": { - "gid": "00000000-0000-0000-0000-000000000010", - "user_id": "a4362182a2ac425c9b0b0826ad187d31", - "project_id": "a43621849823764c9b0b0826ad187d31t", - "name": "my_group", - "description": "This is my group.", - "status": "ACTIVE" - }} - - self.assertEqual(res.status_code, 200) - self.assertEqual(body, expected) - - def test_show_not_found_exception(self): - self.stubs.Set(db, "group_get_by_gid", - raise_group_not_found) - url = '/v1/groups/' + GID - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - self.assertRaises( - webob.exc.HTTPNotFound, self.controller.show, req, GID) - - def test_show_gid_is_not_uuid_format(self): - gid = "abcdefgid" - url = '/v1/groups/' + gid - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - self.assertRaises( - webob.exc.HTTPNotFound, self.controller.show, req, gid) - - def test_create(self): - name = "test_group" - description = "This is test group." - request_body = { - "group": { - "name": name, - "description": description, - } - } - expected = { - "group": { - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": name, - "description": description, - "status": "ACTIVE" - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - for key in expected["group"]: - self.assertEqual(body["group"][key], expected["group"][key]) - - def test_create_group_name_is_whitespace(self): - request_body = { - "group": { - "name": " ", - "description": "This is test group", - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_group_name_with_leading_trailing_whitespace(self): - request_body = { - "group": { - "name": " test_group ", - "description": "This is test group" - } - } - expected = { - "group": { - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": "test_group", - "description": "This is test group", - "status": "ACTIVE" - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - for key in expected["group"]: - self.assertEqual(body["group"][key], expected["group"][key]) - - def test_create_without_group_name(self): - request_body = { - "group": { - "description": "This is test group", - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_without_group_description(self): - 
request_body = { - "group": { - "name": "test_group", - } - } - expected = { - "group": { - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": "test_group", - "status": "ACTIVE" - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - for key in expected["group"]: - self.assertEqual(body["group"][key], expected["group"][key]) - - def test_create_empty_body(self): - request_body = {"group": {}} - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_no_body(self): - request_body = {} - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_format_body(self): - request_body = [] - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_check_group_name_length(self): - MAX_LENGTH = 255 - request_body = { - "group": { - "name": "a" * (MAX_LENGTH + 1), - "description": "This is test group" - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_group_description_length_zero(self): - request_body = { - "group": { - "name": "test_group", - "description": "" - } - } - expected = { - "group": { - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": "test_group", - "description": "", - "status": "ACTIVE" - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - for key in expected["group"]: - self.assertEqual(body["group"][key], expected["group"][key]) - - def test_create_check_group_description_length(self): - MAX_LENGTH = 255 - request_body = { - "group": { - "name": "test_group", - "description": "a" * (MAX_LENGTH + 1) - } - } - - url = '/v1/groups' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update(self): - request_body = { - "group": { - "name": "My_Group_updated", - "description": "This is my group updated.", - } - } - expected = { - "group": { - "gid": GID, - "user_id": "fake", - "project_id": "fake", - "name": "test", - "description": "test", - "status": "ACTIVE" - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in request_body["group"]: - self.assertEqual(body["group"][key], expected["group"][key]) - - def test_update_allow_group_name_none(self): - request_body = { - "group": { - "description": "This is test group" - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 200) - - def test_update_allow_group_description_none(self): - request_body = { - "group": { - "name": "my_group", - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 200) - - def test_update_allow_group_description_blank(self): - 
request_body = { - "group": { - "name": "my_group", - "description": "", - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 200) - - def test_update_invalid_gid(self): - request_body = { - "group": { - "description": "This is test group" - } - } - - url = '/v1/groups/' + GID + "err" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_update_empty_body(self): - request_body = {"group": {}} - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_no_body(self): - request_body = {} - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_invalid_format_body(self): - request_body = [] - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_group_name_blank(self): - request_body = { - "group": { - "name": "", - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_check_group_name_length(self): - MAX_LENGTH = 255 - request_body = { - "group": { - "name": "a" * (MAX_LENGTH + 1), - "description": "This is test group" - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_check_group_description_length(self): - MAX_LENGTH = 255 - request_body = { - "group": { - "name": "my_group", - "description": "a" * (MAX_LENGTH + 1) - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_group_not_found_on_db(self): - self.stubs.Set(db, "group_update", raise_group_not_found) - request_body = { - "group": { - "description": "This is test group" - } - } - - url = '/v1/groups/' + GID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - self.assertRaises( - webob.exc.HTTPNotFound, - self.controller.update, - req, - request_body, - GID) - - def test_delete_invalid_format_gid(self): - url = '/v1/groups/' + GID + "err" - req = get_request(url, 'DELETE') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete(self): - url = '/v1/groups/' + GID - req = get_request(url, 'DELETE') - self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) - self.stubs.Set( - db, "securitygroup_get_all", fake_not_group_data_not_exists) - self.stubs.Set(db, "network_get_all", fake_not_group_data_not_exists) - self.stubs.Set(db, "process_get_all", fake_not_group_data_not_exists) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 204) - - def test_delete_group_inuse_keypair(self): - url = '/v1/groups/' + GID - req = get_request(url, 'DELETE') - self.stubs.Set(db, "keypair_get_all", fake_not_group_data_exists) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 409) - - def test_delete_group_inuse_securitygroup(self): - url = '/v1/groups/' + GID 
- req = get_request(url, 'DELETE') - self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) - self.stubs.Set(db, "securitygroup_get_all", fake_not_group_data_exists) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 409) - - def test_delete_group_inuse_network(self): - url = '/v1/groups/' + GID - req = get_request(url, 'DELETE') - self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) - self.stubs.Set( - db, "securitygroup_get_all", fake_not_group_data_not_exists) - self.stubs.Set(db, "network_get_all", fake_not_group_data_exists) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 409) - - def test_delete_group_inuse_process(self): - url = '/v1/groups/' + GID - req = get_request(url, 'DELETE') - self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) - self.stubs.Set( - db, "securitygroup_get_all", fake_not_group_data_not_exists) - self.stubs.Set(db, "network_get_all", fake_not_group_data_not_exists) - self.stubs.Set(db, "process_get_all", fake_not_group_data_exists) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 409) - - def test_delete_exception(self): - url = '/v1/groups/' + GID - req = get_request(url, 'DELETE') - self.stubs.Set(db, "keypair_get_all", fake_raise_exception) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 500) diff --git a/rack/tests/api/v1/test_keypairs.py b/rack/tests/api/v1/test_keypairs.py deleted file mode 100644 index d44f889..0000000 --- a/rack/tests/api/v1/test_keypairs.py +++ /dev/null @@ -1,660 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
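Every suite deleted in this change drives the API the same way the group tests above do: build a webob request by hand, run it through the fakes.wsgi_app() application, and assert on the status code and JSON body rather than on controller return values, so routing, deserialization, and fault wrapping are exercised on every call. A minimal, self-contained sketch of that round trip, assuming only webob and the standard library (echo_app is an illustrative stand-in for the real WSGI app, not part of the deleted suite):

    import json

    import webob
    import webob.dec


    @webob.dec.wsgify
    def echo_app(req):
        # Stand-in WSGI app: parse the JSON request body and echo it back.
        payload = json.loads(req.body) if req.body else {}
        return webob.Response(json_body=payload, status=200)


    req = webob.Request.blank('/v1/groups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps({"group": {"name": "test_group"}}).encode('utf-8')
    res = req.get_response(echo_app)
    assert res.status_code == 200
    assert res.json["group"]["name"] == "test_group"

Going through req.get_response(app) instead of calling controllers directly is what lets most of these tests assert on HTTP status codes alone.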
- -from mox import IsA - -from rack import context -from rack import db -from rack import exception -from rack.openstack.common import jsonutils -from rack.resourceoperator import manager -from rack import test -from rack.tests.api import fakes - -import uuid -import webob - -GID = unicode(uuid.uuid4()) -KEYPAIR_ID = unicode(uuid.uuid4()) -PRIVATE_KEY = ( - "-----BEGIN RSA PRIVATE KEY-----\n" - "MIIEoAIBAAKCAQEA6W34Ak32uxp7Oh0rh1mCQclkw+NeqchAOhyO/rcphFt280D9\n" - "YXxdUa43i51IDS9VpyFFd10Cv4ccynTPnky82CpGcuXCzaACzI/FHhmBeXTrFoXm\n" - "682b/8kXVQfCVfSjnvChxeeATjPu9GQkNrgyYyoubHxrrW7fTaRLEz/Np9CvCq/F\n" - "PJcsx7FwD0adFfmnulbZpplunqMGKX2nYXbDlLi7Ykjd3KbH1PRJuu+sPYDz3GmZ\n" - "4Z0naojOUDcajuMckN8RzNblBrksH8g6NDauoX5hQa9dyd1q36403NW9tcE6ZwNp\n" - "1GYCnN7/YgI/ugHo30ptpBvGw1zuY5/+FkU7SQIBIwKCAQA8BlW3cyIwHMCZ6j5k\n" - "ofzsWFu9V7lBmeShOosrji8/Srgv7CPl3iaf+ZlBKHGc/YsNuBktUm5rw6hRUTyz\n" - "rVUhpHiD8fBDgOrG4yQPDd93AM68phbO67pmWEfUCU86rJ8aPeB0t98qDVqz3zyD\n" - "GWwK3vX+o6ao8J/SIu67zpP381d/ZigDsq+yqhtPpz04YJ2W0w67NV6XSPOV1AX0\n" - "YLniHMwfbSTdwJ/wVWoooIgbTo7ldPuBsKUwNIVW8H9tmapVdyQxAS9JAkr1Y2si\n" - "xKURN4Iez2oyCFv5+P1emhoptgECr49kpOBAvhRfWWkumgR1azqynzTjSnpQVO62\n" - "vQr7AoGBAPkYWJX0tFNlqIWw4tcHtcPHJkRwvLdPUfM6Q0b6+YctKBmLoNJWBiXr\n" - "39wiYnftSdJO+L96HAG38RrmeCfafz19EDPVXepAUYZDwnY1HGx7ZqbiPwxYMN4C\n" - "+Wg3LzuSh7d5fe409+TCtX4YqSVFQd9gl8Ml3sKVOTxeaDROw6hFAoGBAO/mdJOr\n" - "SGcAj9V99df6IX8abZTPm2PmirT95WWwIYX4PRY//5iaCN6XyEKIx5TJk9lmcQhS\n" - "tb++PTsXpea01WUcxqaOO3vG7PQhvAbpq8A4eMBZZiY9UyctCPNSMscPPNRU2r/C\n" - "tAsXRk6BNkiGofgn2MY5YBoPkEgiJmJWMKE1AoGAeP0yV3bbPnM0mLUAdxJfmZs+\n" - "eQOO3LF/k2VxInnm6eK7tKLntp7PyUauj35qV4HiBxBqMR4Nmm9JOPOZcnFxAJvU\n" - "q3ZDjwlMK0V7tcIGfdWJoYPVewZDnwjCSI/VHO9mfbAJ91uOWStfd8LV0EY18Cea\n" - "K5YNHK7hSTUrTJtJFzcCgYB7YJO5qIuir9Txc/rG2Gj/ie82lqevuGSXmISaslpi\n" - "J+Tm3xW8MfXu0bdyrL5pxsEQuFdjXbyOfxgtBNj6Tl8eDsyQK+QTxWPrRIyV10Ji\n" - "2zbJUoxOLirDsMLGR4fUFncOHQLJBQwi9gbmi5hCjmHtVlI6DuD3dbfqlThP1I4J\n" - "wwKBgHfbOPVCgcJA3733J+dBC8gLt5QT2fCZ2N7PtaHcsSrW/B9VlGP+tviEC59U\n" - "bmpOLADzAto1MZdRDr8uXByZ8/eI37Txn6YchMVp43uL2+WaTdn9GBtOBpWJ0Pqi\n" - "x3HBmILbvIEzB2BX11/PDNGRMNcCy7edvnFMCxeAiW7DJqCb\n" - "-----END RSA PRIVATE KEY-----\n") - -KEYPAIR_ID1 = unicode(uuid.uuid4()) -KEYPAIR_ID2 = unicode(uuid.uuid4()) - - -def _base_keypair_get_response(context): - return [ - { - "keypair_id": KEYPAIR_ID1, - "nova_keypair_id": "fake_key1", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "fake_key1", - "private_key": PRIVATE_KEY, - "is_default": False, - "status": "ACTIVE" - }, - { - "keypair_id": KEYPAIR_ID2, - "nova_keypair_id": "fake_key2", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "fake_key2", - "private_key": PRIVATE_KEY, - "is_default": False, - "status": "ACTIVE" - }, - ] - - -def fake_group_get_by_id(context, gid): - pass - - -def fake_keypair_get_all(context, gid, filters=None): - return _base_keypair_get_response(context) - - -def fake_keypair_get_by_keypair_id(context, gid, keypair_id): - keypair_list = _base_keypair_get_response(context) - for keypair in keypair_list: - if keypair["keypair_id"] == keypair_id: - return keypair - raise exception.KeypairNotFound() - - -def fake_create(context, kwargs): - return { - "keypair_id": "1234-5678", - "nova_keypair_id": kwargs.get("nova_keypair_id"), - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": kwargs.get("display_name"), - "is_default": kwargs.get("is_default"), - 
"private_key": "private-key-1234" - } - - -def fake_update(context, gid, keypair_id, kwargs): - return { - "keypair_id": keypair_id, - "nova_keypair_id": "test_keypair", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "test_keypair", - "private_key": PRIVATE_KEY, - "is_default": kwargs.get("is_default"), - "status": "ACTIVE" - } - - -def fake_delete(context, gid, keypair_id): - return { - "keypair_id": keypair_id, - "nova_keypair_id": "test_keypair", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "test_keypair", - "private_key": PRIVATE_KEY, - "is_default": False, - "status": "DELETING" - } - - -def get_request(url, method, body=None): - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = method - if body is not None: - req.body = jsonutils.dumps(body) - return req - - -class KeypairsTest(test.NoDBTestCase): - - def setUp(self): - super(KeypairsTest, self).setUp() - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - self.stubs.Set(db, "keypair_get_all", fake_keypair_get_all) - self.stubs.Set(db, "keypair_create", fake_create) - self.stubs.Set(db, "keypair_update", fake_update) - self.stubs.Set(db, "keypair_delete", fake_delete) - self.app = fakes.wsgi_app() - - def test_index(self): - - def _mock_data(): - return [ - { - "keypair_id": KEYPAIR_ID1, - "nova_keypair_id": "fake_key1", - "gid": GID, - "user_id": "fake", - "project_id": "fake", - "display_name": "fake_key1", - "private_key": PRIVATE_KEY, - "is_default": False, - "status": "ACTIVE" - }, - { - "keypair_id": KEYPAIR_ID2, - "nova_keypair_id": "fake_key2", - "gid": GID, - "user_id": "fake", - "project_id": "fake", - "display_name": "fake_key2", - "private_key": PRIVATE_KEY, - "is_default": False, - "status": "ACTIVE" - }] - - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_list") - manager.ResourceOperator.keypair_list( - IsA(context.RequestContext), - IsA(list)).AndReturn(_mock_data()) - self.mox.ReplayAll() - - expect = _mock_data() - expect[0].update(name="fake_key1") - expect[1].update(name="fake_key2") - - url = "/v1/groups/" + GID + "/keypairs" - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(200, res.status_code) - for i in range(len(body["keypairs"])): - for key in body["keypairs"][i]: - self.assertEqual(expect[i][key], body["keypairs"][i][key]) - - def test_index_invalid_format_gid(self): - url = "/v1/groups/" + "aaaaa" + "/keypairs" - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show(self): - - def _mock_data(): - return { - "keypair_id": KEYPAIR_ID1, - "nova_keypair_id": "fake_key1", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "display_name": "fake_key1", - "private_key": PRIVATE_KEY, - "is_default": False, - "status": "ACTIVE" - } - - self.mox.StubOutWithMock(db, "keypair_get_by_keypair_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_show") - db.keypair_get_by_keypair_id(IsA(context.RequestContext), - GID, KEYPAIR_ID1).AndReturn(_mock_data()) - manager.ResourceOperator.keypair_show( - IsA(context.RequestContext), - IsA(dict)).AndReturn(_mock_data()) - - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - expect = _mock_data() - 
expect.update(name="fake_key1") - - self.assertEqual(res.status_code, 200) - for key in body["keypair"]: - self.assertEqual(expect[key], body["keypair"][key]) - - def test_show_not_found(self): - self.mox.StubOutWithMock(db, "keypair_get_by_keypair_id") - db.keypair_get_by_keypair_id( - IsA(context.RequestContext), - GID, KEYPAIR_ID1).AndRaise( - exception.KeypairNotFound(keypair_id=KEYPAIR_ID1)) - self.mox.ReplayAll() - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_invalid_format_gid(self): - url = "/v1/groups/" + "aaaaa" + "/keypairs/" + KEYPAIR_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create(self): - request_body = { - "keypair": { - "name": "test_keypair", - "is_default": "true", - } - } - - self.mox.StubOutWithMock(db, "keypair_get_all") - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_create") - self.mox.StubOutWithMock(uuid, 'uuid4') - mock_id = "1234-5678" - uuid.uuid4().AndReturn(mock_id) - uuid.uuid4().AndReturn(mock_id) - db.keypair_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict)).AndReturn([]) - manager.ResourceOperator.keypair_create( - IsA(context.RequestContext), IsA(unicode)).AndReturn( - {"nova_keypair_id": "keypair-" + mock_id, - "private_key": "private-key-1234"}) - self.mox.ReplayAll() - - expect = { - "keypair": { - "keypair_id": mock_id, - "nova_keypair_id": "keypair-" + mock_id, - "user_id": "noauth", - "project_id": "noauth", - "gid": GID, - "name": "test_keypair", - "private_key": "private-key-1234", - "is_default": True} - } - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in body["keypair"]: - self.assertEqual(expect["keypair"][key], body["keypair"][key]) - self.assertEqual(res.status_code, 201) - - def test_create_without_name(self): - request_body = { - "keypair": { - } - } - - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_create") - self.mox.StubOutWithMock(uuid, 'uuid4') - mock_id = "1234-5678" - uuid.uuid4().AndReturn(mock_id) - uuid.uuid4().AndReturn(mock_id) - manager.ResourceOperator.keypair_create( - IsA(context.RequestContext), IsA(unicode)).AndReturn( - {"nova_keypair_id": "keypair-" + mock_id, - "private_key": "private-key-1234"}) - self.mox.ReplayAll() - - expect = { - "keypair": { - "keypair_id": mock_id, - "nova_keypair_id": "keypair-" + mock_id, - "user_id": "noauth", - "project_id": "noauth", - "gid": GID, - "name": "keypair-" + mock_id, - "private_key": "private-key-1234", - "is_default": False} - } - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in body["keypair"]: - self.assertEqual(expect["keypair"][key], body["keypair"][key]) - self.assertEqual(res.status_code, 201) - - def test_create_default_keypair_already_exists(self): - request_body = { - "keypair": { - "is_default": "true" - } - } - - self.mox.StubOutWithMock(db, "keypair_get_all") - db.keypair_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{}]) - self.mox.ReplayAll() - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def 
test_create_raise_exception_by_db_keypair_create(self): - self.mox.StubOutWithMock(db, "group_get_by_gid") - db.group_get_by_gid(IsA(context.RequestContext), GID)\ - .AndRaise(exception.GroupNotFound(gid=GID)) - self.mox.ReplayAll() - - request_body = { - "keypair": { - "name": "test_key", - } - } - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_invalid_format_gid(self): - request_body = { - "keypair": { - "name": "test_keypair", - } - } - - url = '/v1/groups/' + 'aaaaaaa' + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_without_is_default(self): - request_body = { - "keypair": { - "name": "test_keypair" - } - } - - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_create") - self.mox.StubOutWithMock(uuid, 'uuid4') - mock_id = "1234-5678" - uuid.uuid4().AndReturn(mock_id) - uuid.uuid4().AndReturn(mock_id) - manager.ResourceOperator.keypair_create( - IsA(context.RequestContext), IsA(unicode)).AndReturn( - {"nova_keypair_id": "keypair-" + mock_id, - "private_key": "private-key-1234"}) - self.mox.ReplayAll() - - expect = { - "keypair": { - "keypair_id": mock_id, - "nova_keypair_id": "keypair-" + mock_id, - "user_id": "noauth", - "project_id": "noauth", - "gid": GID, - "name": "test_keypair", - "private_key": "private-key-1234", - "is_default": False} - } - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in body["keypair"]: - self.assertEqual(expect["keypair"][key], body["keypair"][key]) - self.assertEqual(res.status_code, 201) - - def test_create_empty_request_body(self): - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_create") - self.mox.StubOutWithMock(uuid, 'uuid4') - mock_id = "1234-5678" - uuid.uuid4().AndReturn(mock_id) - uuid.uuid4().AndReturn(mock_id) - manager.ResourceOperator.keypair_create( - IsA(context.RequestContext), IsA(unicode)).AndReturn( - {"nova_keypair_id": "keypair-" + mock_id, - "private_key": "private-key-1234"}) - self.mox.ReplayAll() - - expect = { - "keypair": { - "keypair_id": mock_id, - "nova_keypair_id": "keypair-" + mock_id, - "user_id": "noauth", - "project_id": "noauth", - "gid": GID, - "name": "keypair-" + mock_id, - "private_key": "private-key-1234", - "is_default": False} - } - - url = '/v1/groups/' + GID + '/keypairs' - request_body = {"keypair": {}} - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in body["keypair"]: - self.assertEqual(expect["keypair"][key], body["keypair"][key]) - self.assertEqual(res.status_code, 201) - - def test_create_no_body(self): - request_body = {} - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_format_body(self): - request_body = [] - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_is_default(self): - request_body = { - "keypair": { - "name": "test_keypair", - "is_default": "aaa" - } - } - - url = '/v1/groups/' + GID + '/keypairs' - req = get_request(url, 'POST', request_body) - res 
= req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update(self): - request_body = { - "keypair": { - "is_default": "true" - } - } - expected = { - "keypair": { - "keypair_id": KEYPAIR_ID, - "gid": GID, - "user_id": "fake", - "project_id": "fake", - "nova_keypair_id": "test_keypair", - "name": "test_keypair", - "private_key": PRIVATE_KEY, - "is_default": True, - "status": "ACTIVE" - } - } - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in request_body["keypair"]: - self.assertEqual(body["keypair"][key], expected["keypair"][key]) - - def test_update_invalid_format_gid(self): - request_body = { - "keypair": { - "is_default": "true", - } - } - - url = "/v1/groups/" + "aaaaaaa" + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_update_invalid_format_keypair_id(self): - request_body = { - "keypair": { - "is_default": "true", - } - } - - url = "/v1/groups/" + GID + "/keypairs/" + "aaaaa" - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_update_invalid_format_is_default(self): - request_body = { - "keypair": { - "is_default": "aaa", - } - } - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_without_is_default(self): - request_body = { - "keypair": { - "name": "aaa", - } - } - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_empty_body(self): - request_body = {"keypair": {}} - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_no_body(self): - request_body = {} - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_invalid_body(self): - request_body = [] - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "PUT", request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_delete(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "keypair_get_by_keypair_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_delete") - - db.process_get_all(IsA(context.RequestContext), GID, - filters={"keypair_id": KEYPAIR_ID}).AndReturn([]) - - db.keypair_get_by_keypair_id( - IsA(context.RequestContext), - GID, KEYPAIR_ID).AndReturn( - {"nova_keypair_id": KEYPAIR_ID}) - manager.ResourceOperator.keypair_delete( - IsA(context.RequestContext), KEYPAIR_ID) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 204) - - def test_delete_invalid_format_gid(self): - url = "/v1/groups/" + "aaaaaaa" + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - 
self.assertEqual(res.status_code, 404) - - def test_delete_invalid_format_keypair_id(self): - url = "/v1/groups/" + GID + "/keypairs/" + "aaaaa" - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_keypair_not_found(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "keypair_get_by_keypair_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "keypair_delete") - - db.process_get_all(IsA(context.RequestContext), GID, - filters={"keypair_id": KEYPAIR_ID}).AndReturn([]) - - db.keypair_get_by_keypair_id( - IsA(context.RequestContext), - GID, KEYPAIR_ID).AndReturn( - {"nova_keypair_id": KEYPAIR_ID}) - manager.ResourceOperator.keypair_delete( - IsA(context.RequestContext), KEYPAIR_ID).AndRaise( - exception.NotFound()) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_raise_exception_keypair_inuse(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all(IsA(context.RequestContext), GID, - filters={"keypair_id": KEYPAIR_ID}).AndReturn([{}]) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 409) diff --git a/rack/tests/api/v1/test_networks.py b/rack/tests/api/v1/test_networks.py deleted file mode 100644 index d50be01..0000000 --- a/rack/tests/api/v1/test_networks.py +++ /dev/null @@ -1,490 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
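The keypair tests above mix two stubbing styles: wholesale function replacement through self.stubs.Set, and recorded expectations through self.mox (StubOutWithMock / AndReturn / ReplayAll), with verification apparently handled by the base test case during teardown, since no test calls VerifyAll itself. A compact sketch of that record/replay/verify cycle, assuming only the mox (or API-compatible mox3) package; Db and network_get_all are illustrative stand-ins for the rack.db facade being stubbed:

    import mox


    class Db(object):
        # Illustrative stand-in for a DB facade that tests must not hit.
        def network_get_all(self, ctxt, gid):
            raise RuntimeError("real DB access; tests stub this out")


    m = mox.Mox()
    db = Db()
    m.StubOutWithMock(db, "network_get_all")
    # Record phase: declare the expected call and its canned return value.
    db.network_get_all(mox.IsA(str), "gid-1").AndReturn([{"network_id": "n1"}])
    m.ReplayAll()
    # Replay phase: the code under test must make exactly the recorded call.
    assert db.network_get_all("ctxt", "gid-1") == [{"network_id": "n1"}]
    # Verify phase: fails if any recorded expectation was never met.
    m.VerifyAll()
    m.UnsetStubs()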
-from exceptions import Exception -from mox import IsA - -from rack import context -from rack import db -from rack import exception -from rack.openstack.common import jsonutils -from rack.resourceoperator import manager -from rack import test -from rack.tests.api import fakes - -import uuid -import webob - -GID = unicode(uuid.uuid4()) -NETWORK_ID1 = unicode(uuid.uuid4()) -NETWORK_ID2 = unicode(uuid.uuid4()) -RO_HOST_NAME = "host_resource_operator" -NEUTRON_NW_ID = "neutron_network_id" - - -def fake_create_db(context, values): - values["network_id"] = NETWORK_ID1 - return values - - -def fake_group_get_by_gid(context, gid): - return {"gid": gid, - "status": "ACTIVE" - } - - -def fake_select_destinations(context, request_spec, filter_properties): - return {"host": RO_HOST_NAME} - - -def fake_network_get_all(context, gid): - return _return_base_network_list(context, gid) - - -def fake_network_get_all_empty_list(context, gid): - return [] - - -def fake_raise_exception(): - raise Exception() - - -def _return_base_network_list(context, gid): - return [ - {"network_id": NETWORK_ID1, - "neutron_network_id": "net-" + NETWORK_ID1, - "gid": gid, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "net-45212048-abc3-43cc-89b3-377341426ac", - "is_admin": "True", - "cidr": "10.0.0.0/24", - "ext_router": "11212048-abc3-43cc-89b3-377341426aca", - "status": "Exist"}, - {"network_id": NETWORK_ID2, - "neutron_network_id": None, - "gid": gid, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "net-13092048-abc3-43cc-89b3-377341426ac", - "is_admin": "True", - "cidr": "10.0.1.0/24", - "ext_router": "21212048-abc3-43cc-89b3-377341426aca", - "status": "Exist"} - ] - - -def fake_network_get_by_network_id(context, gid, network_id): - network_dict = _return_base_network_list(context, gid)[0] - network_dict["processes"] = [] - return network_dict - - -def fake_network_delete(context, gid, network_id): - return { - "neutron_network_id": NEUTRON_NW_ID, - "ext_router": "fake_ext_router"} - - -def get_request(url, method, body=None): - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = method - if body is not None: - req.body = jsonutils.dumps(body) - return req - - -class FakeContext(context.RequestContext): - - def elevated(self): - """Return a consistent elevated context so we can detect it.""" - if not hasattr(self, '_elevated'): - self._elevated = super(FakeContext, self).elevated() - return self._elevated - - -class NetworksTest(test.NoDBTestCase): - - def setUp(self): - super(NetworksTest, self).setUp() - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - self.user_id = 'fake' - self.project_id = 'fake' - self.context = FakeContext(self.user_id, self.project_id) - self.app = fakes.wsgi_app() - - def test_index(self): - self.stubs.Set(db, "network_get_all", fake_network_get_all) - self.mox.StubOutWithMock(manager.ResourceOperator, "network_list") - manager.ResourceOperator.network_list( - IsA(context.RequestContext), - IsA(list)).AndReturn(_return_base_network_list(self.context, GID)) - self.mox.ReplayAll() - - expect = _return_base_network_list(self.context, GID) - expect[0].update(ext_router_id="11212048-abc3-43cc-89b3-377341426aca") - expect[1].update(ext_router_id="21212048-abc3-43cc-89b3-377341426aca") - expect[0].update(name="net-45212048-abc3-43cc-89b3-377341426ac") - expect[1].update(name="net-13092048-abc3-43cc-89b3-377341426ac") - expect[0].update(cidr="10.0.0.0/24") - 
expect[1].update(cidr="10.0.1.0/24") - - url = "/v1/groups/" + GID + "/networks" - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(200, res.status_code) - for i in range(len(body["networks"])): - for key in body["networks"][i]: - self.assertEqual(expect[i][key], body["networks"][i][key]) - - def test_index_return_empty_list(self): - self.stubs.Set(db, "network_get_all", fake_network_get_all_empty_list) - self.mox.StubOutWithMock(manager.ResourceOperator, "network_list") - manager.ResourceOperator.network_list( - IsA(context.RequestContext), []).AndReturn([]) - self.mox.ReplayAll() - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - expected_body = {"networks": []} - - self.assertEqual(expected_body, body) - self.assertEqual(res.status_code, 200) - - def test_index_validate_exception_by_gid_format(self): - not_uuid_gid = "aaaaa" - url = '/v1/groups/' + not_uuid_gid + '/networks' - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show(self): - - def _mock_data(): - return { - "network_id": NETWORK_ID1, - "neutron_network_id": None, - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "display_name": "net-45212048-abc3-43cc-89b3-377341426ac", - "is_admin": "True", - "cidr": "10.0.0.0/24", - "ext_router": "11212048-abc3-43cc-89b3-377341426aca", - "status": "Exist"} - - mock_data = _mock_data() - self.mox.StubOutWithMock(db, "network_get_by_network_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "network_show") - db.network_get_by_network_id(IsA(context.RequestContext), - GID, NETWORK_ID1).AndReturn(_mock_data()) - manager.ResourceOperator.network_show(IsA(context.RequestContext), - IsA(dict)) - - self.mox.ReplayAll() - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url + "/" + NETWORK_ID1, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - expect = mock_data - expect.update(ext_router_id="11212048-abc3-43cc-89b3-377341426aca") - expect.update(name="net-45212048-abc3-43cc-89b3-377341426ac") - expect.update(cidr="10.0.0.0/24") - - self.assertEqual(res.status_code, 200) - for key in body["network"]: - self.assertEqual(expect[key], body["network"][key]) - - def test_show_validate_exception_by_gid_format(self): - not_uuid_gid = "aaaaa" - url = '/v1/groups/' + not_uuid_gid + '/networks/' + NETWORK_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_validate_exception_by_network_id_format(self): - url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 + "aaa" - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_exception_networknotfound(self): - self.mox.StubOutWithMock(db, "network_get_by_network_id") - db.network_get_by_network_id(IsA(context.RequestContext), GID, - NETWORK_ID1)\ - .AndRaise(exception.NetworkNotFound(network_id=NETWORK_ID1)) - self.mox.ReplayAll() - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url + "/" + NETWORK_ID1, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - self.stubs.Set(db, "network_create", fake_create_db) - self.mox.StubOutWithMock(manager.ResourceOperator, "network_create") - 
manager.ResourceOperator.network_create( - IsA(context.RequestContext), - name=IsA(unicode), - cidr=IsA(unicode), - gateway=IsA(unicode), - dns_nameservers=IsA(list), - ext_router=IsA(unicode)).AndReturn( - {"neutron_network_id": "neutron-id-data"}) - self.mox.ReplayAll() - - request_body = { - "network": { - "is_admin": "True", - "name": "network-test", - "cidr": "10.0.0.0/24", - "gateway": "10.0.0.254", - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - - expect = { - "network": { - "network_id": NETWORK_ID1, - "neutron_network_id": "neutron-id-data", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": "network-test", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in body["network"]: - self.assertEqual( - expect["network"][key], body["network"][key]) - self.assertEqual(res.status_code, 201) - - def test_create_validate_exception_by_gid_notfound_format(self): - request_body = { - "network": { - "name": "test_network", - "is_admin": "True", - "cidr": "10.0.0.0/24", - "gateway": "10.0.0.254", - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - url = '/v1/groups/' + GID + "a" + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_validate_exception_by_gid_notfound(self): - self.mox.StubOutWithMock(db, "group_get_by_gid") - db.group_get_by_gid(IsA(context.RequestContext), - GID).AndRaise(exception.GroupNotFound(gid=GID)) - self.mox.ReplayAll() - - request_body = { - "network": { - "name": "test_network", - "is_admin": "True", - "cidr": "10.0.0.0/24", - "gateway": "10.0.0.254", - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_validate_exception_no_body(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_validate_exception_body_format(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - - request_body = { - "name": "test_network", - "is_admin": "True", - "cidr": "10.0.0.0/24", - "gateway": "10.0.0.254", - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_by_name_blank(self): - self.stubs.Set(db, "network_create", fake_create_db) - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - - self.mox.StubOutWithMock(uuid, "uuid4") - self.mox.StubOutWithMock(manager.ResourceOperator, "network_create") - - mock_uuid = NETWORK_ID1 - uuid.uuid4().AndReturn(mock_uuid) - uuid.uuid4().AndReturn(mock_uuid) - - network_value = { - "name": "network-" + mock_uuid, - "cidr": "10.0.0.0/24", - "gateway": "10.0.0.254", - "dns_nameservers": 
["8.8.8.8", "8.8.4.4"], - "ext_router": "91212048-abc3-43cc-89b3-377341426aca"} - manager.ResourceOperator.network_create( - IsA(context.RequestContext), **network_value).AndReturn( - {"neutron_network_id": "neutron-id-data"}) - self.mox.ReplayAll() - - request_body = { - "network": { - "name": None, - "is_admin": "True", - "cidr": "10.0.0.0/24", - "gateway": "10.0.0.254", - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - - expect = { - "network": { - "network_id": mock_uuid, - "neutron_network_id": "neutron-id-data", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": "network-" + mock_uuid, - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in body["network"]: - self.assertEqual( - expect["network"][key], body["network"][key]) - self.assertEqual(res.status_code, 201) - - def test_create_validate_exception_by_is_admin_not_boolean(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - - request_body = { - "network": { - "name": "test_network", - "cidr": "10.0.0.0/24", - "is_admin": "admin", - "gateway": "10.0.0.254", - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_validate_exception_by_dns_nameservers_is_not_list(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) - - request_body = { - "network": { - "name": "test_network", - "cidr": "10.0.0.0/24", - "is_admin": False, - "gateway": "10.0.0.254", - "dns_nameservers": "8.8.8.8, 8.8.4.4", - "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" - } - } - - url = '/v1/groups/' + GID + '/networks' - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_delete(self): - self.stubs.Set(db, - "network_get_by_network_id", - fake_network_get_by_network_id) - self.stubs.Set(db, - "network_delete", fake_network_delete) - self.mox.StubOutWithMock(manager.ResourceOperator, "network_delete") - manager.ResourceOperator.network_delete( - IsA(context.RequestContext), IsA(unicode), IsA(str)) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/networks/" + NETWORK_ID1 - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 204) - - def test_delete_validate_exception_by_gid_format(self): - not_uuid_gid = "aaaaa" - url = '/v1/groups/' + not_uuid_gid + '/networks/' + NETWORK_ID1 - req = get_request(url, 'DELETE') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_exception_not_found(self): - self.mox.StubOutWithMock(db, "network_get_by_network_id") - db.network_get_by_network_id( - IsA(context.RequestContext), GID, NETWORK_ID1)\ - .AndRaise(exception.NetworkNotFound(network_id=NETWORK_ID1)) - self.mox.ReplayAll() - url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 - req = get_request(url, 'DELETE') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_exception_inuse(self): - self.mox.StubOutWithMock(db, "network_get_by_network_id") - 
network_process_inuse = {"processes": [{"pid": "pid"}]} - db.network_get_by_network_id(IsA(context.RequestContext), - GID, - NETWORK_ID1)\ - .AndReturn(network_process_inuse) - self.mox.ReplayAll() - - url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 - req = get_request(url, 'DELETE') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 409) diff --git a/rack/tests/api/v1/test_processes.py b/rack/tests/api/v1/test_processes.py deleted file mode 100644 index 9c77951..0000000 --- a/rack/tests/api/v1/test_processes.py +++ /dev/null @@ -1,1993 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from mox import IsA - -from oslo.config import cfg - -from rack.api.v1.views.processes import ViewBuilder -from rack import context -from rack import db -from rack import exception -from rack.openstack.common import jsonutils -from rack.openstack.common import log as logging -from rack.resourceoperator import manager -from rack import test -from rack.tests.api import fakes - -import json -import uuid -import webob - -LOG = logging.getLogger(__name__) - - -GID = unicode(uuid.uuid4()) - -PPID1 = unicode(uuid.uuid4()) -PPID2 = unicode(uuid.uuid4()) - -PID1 = unicode(uuid.uuid4()) -PID2 = unicode(uuid.uuid4()) - -PIDX = unicode(uuid.uuid4()) - -KEYPAIR_ID1 = unicode(uuid.uuid4()) -KEYPAIR_ID2 = unicode(uuid.uuid4()) - -NOVA_KEYPAIR_ID1 = unicode(uuid.uuid4()) - -SECURITYGROUP_ID1 = unicode(uuid.uuid4()) -SECURITYGROUP_ID2 = unicode(uuid.uuid4()) -SECURITYGROUP_ID3 = unicode(uuid.uuid4()) - -NEUTRON_SECURITYGROUP_ID1 = unicode(uuid.uuid4()) -NEUTRON_SECURITYGROUP_ID2 = unicode(uuid.uuid4()) -NEUTRON_SECURITYGROUP_ID3 = unicode(uuid.uuid4()) - -NETWORK_ID1 = unicode(uuid.uuid4()) -NETWORK_ID2 = unicode(uuid.uuid4()) - -NEUTRON_NETWORK_ID1 = unicode(uuid.uuid4()) -NEUTRON_NETWORK_ID2 = unicode(uuid.uuid4()) - -GLANCE_IMAGE_ID1 = unicode(uuid.uuid4()) -GLANCE_IMAGE_ID2 = unicode(uuid.uuid4()) - -METADATA1 = {"type1": "test1", "type2": "test2"} - -USER_DATA_B64_ENC = "IyEvYmluL3NoCmVjaG8gXCd0ZXN0Llwn" -USER_DATA_B64_DEC = "#!/bin/sh\necho \\'test.\\'" - -NOVA_INSTANCE_ID1 = unicode(uuid.uuid4()) -NOVA_INSTANCE_ID2 = unicode(uuid.uuid4()) -NOVA_INSTANCE_ID3 = unicode(uuid.uuid4()) - - -def _base(context): - return { - "user_id": context.user_id, - "project_id": context.project_id - } - - -def _base_keypair(keypair_id, nova_keypair_id): - return { - "keypair_id": keypair_id, - "nova_keypair_id": nova_keypair_id - } - - -def _base_securitygroup(securitygroup_id, neutron_securitygroup_id): - return { - "securitygroup_id": securitygroup_id, - "neutron_securitygroup_id": neutron_securitygroup_id - } - - -def _base_securitygroups1(): - return [ - _base_securitygroup(SECURITYGROUP_ID1, NEUTRON_SECURITYGROUP_ID1), - _base_securitygroup(SECURITYGROUP_ID2, NEUTRON_SECURITYGROUP_ID2), - ] - - -def _base_securitygroups2(): - return [ - _base_securitygroup(SECURITYGROUP_ID3, NEUTRON_SECURITYGROUP_ID3), - ] - - -def _base_network(network_id, 
neutron_network_id): - return { - "network_id": network_id, - "neutron_network_id": neutron_network_id - } - - -def _base_networks(): - return [ - _base_network(NETWORK_ID1, NEUTRON_NETWORK_ID1), - _base_network(NETWORK_ID2, NEUTRON_NETWORK_ID2), - ] - - -def _base_process1(gid, pid): - return { - "pid": pid, - "ppid": PPID1, - "nova_instance_id": NOVA_INSTANCE_ID1, - "gid": gid, - "project_id": "noauth", - "user_id": "noauth", - "display_name": "test1", - "nova_flavor_id": 1, - "glance_image_id": GLANCE_IMAGE_ID1, - "keypair_id": KEYPAIR_ID1, - "securitygroups": _base_securitygroups1(), - "networks": _base_networks(), - "is_proxy": False, - "status": "BUILDING", - "app_status": None, - "userdata": USER_DATA_B64_ENC, - "shm_endpoint": "shm_endpoint_data", - "ipc_endpoint": "ipc_endpoint_data", - "fs_endpoint": "fs_endpoint_data", - "args": '{"gid": "' + gid + '","pid": "' + pid + '"}'} - - -def _base_process2(gid, pid): - return { - "pid": pid, - "ppid": PPID2, - "nova_instance_id": NOVA_INSTANCE_ID2, - "gid": gid, - "project_id": "noauth", - "user_id": "noauth", - "display_name": "test2", - "nova_flavor_id": 2, - "glance_image_id": GLANCE_IMAGE_ID2, - "keypair_id": KEYPAIR_ID2, - "securitygroups": _base_securitygroups2(), - "networks": _base_networks(), - "is_proxy": False, - "status": "BUILDING", - "app_status": "BUILDING", - "userdata": USER_DATA_B64_ENC, - "shm_endpoint": "shm_endpoint_data", - "ipc_endpoint": "ipc_endpoint_data", - "fs_endpoint": "fs_endpoint_data", - "args": '{"key": "value"}'} - - -def _base_process3(gid, pid): - return { - "pid": pid, - "ppid": PPID1, - "nova_instance_id": NOVA_INSTANCE_ID3, - "gid": gid, - "project_id": "noauth", - "user_id": "noauth", - "display_name": "test1", - "nova_flavor_id": 1, - "glance_image_id": GLANCE_IMAGE_ID1, - "keypair_id": KEYPAIR_ID1, - "securitygroups": _base_securitygroups1(), - "networks": _base_networks(), - "is_proxy": True, - "status": "BUILDING", - "app_status": "BUILDING", - "userdata": USER_DATA_B64_ENC, - "shm_endpoint": "shm_endpoint_data", - "ipc_endpoint": "ipc_endpoint_data", - "fs_endpoint": "fs_endpoint_data", - "args": '{"key": "value"}'} - - -def _base_processes(gid): - return [ - _base_process1(gid, PPID1), - _base_process2(gid, PPID2), - _base_process1(gid, PID1), - _base_process2(gid, PID2), - ] - - -def fake_keypair_get_by_keypair_id(context, gid, keypair_id): - return _base_keypair(keypair_id, NOVA_KEYPAIR_ID1) - - -def fake_keypair_get_by_keypair_id_raise_not_found(context, gid, keypair_id): - raise exception.KeypairNotFound(keypair_id=keypair_id) - - -def fake_network_get_all(context, gid, filters=None): - return _base_networks() - - -def fake_network_get_all_not_found(context, gid, filters=None): - return [] - - -def fake_process_get_all(context, gid, filters=None): - processes = _base_processes(gid) - for process in processes: - process.update(_base(context)) - return processes - - -def fake_process_get_all_for_proxy(context, gid, filters=None): - process = _base_process3(gid, PID1) - return [process] - - -def fake_process_get_by_pid(context, gid, pid): - processes = _base_processes(gid) - for process in processes: - if process["pid"] == pid: - process.update(_base(context)) - return process - raise exception.ProcessNotFound(pid=pid) - - -def fake_pid1(): - return PID1 - - -def fake_create(context, kwargs, network_ids, securitygroup_ids): - process = _base(context) - process.update(kwargs) - process["networks"] = fake_network_get_all(context, GID) - process["securitygroups"] = _base_securitygroups1() - 
return process - - -def fake_delete(context, gid, pid): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(nova_instance_id=NOVA_INSTANCE_ID1) - return process - - -def get_request(url, method, body=None): - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = method - if body is not None: - req.body = jsonutils.dumps(body) - return req - - -def get_base_url(gid): - return "/v1/groups/" + gid + "/processes" - - -def get_base_body(process): - return { - "project_id": process["project_id"], - "user_id": process["user_id"], - "ppid": process["ppid"], - "name": process["display_name"], - "nova_instance_id": process["nova_instance_id"], - "nova_flavor_id": process["nova_flavor_id"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "securitygroup_ids": [securitygroup["securitygroup_id"] - for securitygroup in process["securitygroups"]], - "metadata": METADATA1, - "userdata": process["userdata"] - } - - -def get_base_request_body1(process): - return {"process": get_base_body(process)} - - -def get_proxy_request_body1(process): - body = get_base_body(process) - body.update(ipc_endpoint="ipc_endpoint_data") - body.update(shm_endpoint="shm_endpoint_data") - body.update(fs_endpoint="fs_endpoint_data") - return {"proxy": body} - - -def get_base_process_body(process): - process_body = get_base_body(process) - process_body.update(gid=GID) - process_body.update(pid=process["pid"]) - process_body.update(status=process["status"]) - process_body.update(networks=[ - {"fixed": None, - "floating": None, - "network_id": NETWORK_ID1}, - {"fixed": None, - "floating": None, - "network_id": NETWORK_ID2}]) - process_body.update(app_status=process["app_status"]) - process_body.update(userdata=process["userdata"]) - process_body.update(args=json.loads(process["args"])) - process_body.pop("metadata") - return process_body - - -def get_base_proxy_body(process): - process_body = get_base_body(process) - process_body.update(gid=GID) - process_body.update(pid=process["pid"]) - process_body.update(status=process["status"]) - process_body.update(networks=[ - {"fixed": None, - "floating": None, - "network_id": NETWORK_ID1}, - {"fixed": None, - "floating": None, - "network_id": NETWORK_ID2}]) - process_body.update(app_status=process["app_status"]) - process_body.update(userdata=process["userdata"]) - process_body.update(args=json.loads(process["args"])) - process_body.update(ipc_endpoint=process["ipc_endpoint"]) - process_body.update(shm_endpoint=process["shm_endpoint"]) - process_body.update(fs_endpoint=process["fs_endpoint"]) - process_body.pop("metadata") - return process_body - - -def get_base_process_response_body(process): - process_body = get_base_process_body(process) - return {"process": process_body} - - -def get_base_processes_response_body(processes): - processes_body = [] - for process in processes: - process_body = get_base_process_body(process) - processes_body.append(process_body) - return {"processes": processes_body} - - -def get_proxy_response_body(process): - process_body = get_base_proxy_body(process) - return {"proxy": process_body} - - -class ProcessesTest(test.NoDBTestCase): - - def _set_mox_db_process_update_on_error(self): - self.mox.StubOutWithMock(db, "process_update") - db.process_update(IsA(context.RequestContext), IsA( - unicode), IsA(unicode), {"status": "ERROR"}) - - def setUp(self): - super(ProcessesTest, self).setUp() - self.stubs.Set(uuid, "uuid4", fake_pid1) - 
self.stubs.Set( - db, "keypair_get_by_keypair_id", fake_keypair_get_by_keypair_id) - self.stubs.Set(db, "network_get_all", fake_network_get_all) - self.stubs.Set(db, "process_get_all", fake_process_get_all) - self.stubs.Set(db, "process_get_by_pid", fake_process_get_by_pid) - self.stubs.Set(db, "process_create", fake_create) - self.stubs.Set(db, "process_delete", fake_delete) - self.app = fakes.wsgi_app() - self.view = ViewBuilder() - - def test_index(self): - processes = _base_processes(GID) - expect = get_base_processes_response_body(processes) - self.mox.StubOutWithMock(manager.ResourceOperator, "process_list") - manager.ResourceOperator.process_list(IsA(context.RequestContext), - IsA(list)).AndReturn(processes) - self.mox.ReplayAll() - - url = get_base_url(GID) - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for i in range(len(body["processes"])): - for key in body["processes"][i]: - self.assertEqual( - expect["processes"][i][key], body["processes"][i][key]) - self.assertEqual(200, res.status_code) - - def test_index_invalid_format_gid(self): - url = get_base_url("aaaaa") - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show(self): - process = _base_process1(GID, PID1) - expect = get_base_process_response_body(process) - self.mox.StubOutWithMock(manager.ResourceOperator, "process_show") - manager.ResourceOperator.process_show(IsA(context.RequestContext), - IsA(dict)) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - self.assertEqual(body, expect) - - def test_show_invalid_format_gid(self): - url = get_base_url("aaaaa") + "/" + PID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_invalid_format_pid(self): - url = get_base_url(GID) + "/" + "aaaaa" - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_process_not_found(self): - self.stubs.Set(db, "keypair_get_by_keypair_id", - fake_keypair_get_by_keypair_id_raise_not_found) - url = get_base_url(GID) + "/" + PIDX - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_proxy(self): - process = _base_process3(GID, PID1) - expect = get_proxy_response_body(process) - self.stubs.Set(db, "process_get_all", fake_process_get_all_for_proxy) - self.mox.StubOutWithMock(manager.ResourceOperator, "process_show") - manager.ResourceOperator.process_show(IsA(context.RequestContext), - IsA(dict)) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - self.assertEqual(body, expect) - - def test_show_proxy_not_found_exception(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndRaise(exception.NotFound()) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_show_proxy_bad_request(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, 
filters=IsA(dict)).AndReturn([]) - self.mox.ReplayAll() - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'GET') - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_proxy(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock(cfg.CONF, "my_ip") - self.mox.StubOutWithMock(cfg.CONF, "os_username") - self.mox.StubOutWithMock(cfg.CONF, "os_password") - self.mox.StubOutWithMock(cfg.CONF, "os_tenant_name") - self.mox.StubOutWithMock(cfg.CONF, "os_auth_url") - self.mox.StubOutWithMock(cfg.CONF, "os_region_name") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - cfg.CONF.my_ip = "my_ip_data" - cfg.CONF.os_username = "os_username_data" - cfg.CONF.os_password = "os_password_data" - cfg.CONF.os_tenant_name = "os_tenant_name_data" - cfg.CONF.os_auth_url = "os_auth_url_data" - cfg.CONF.os_region_name = "os_region_name" - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_proxy_request_body1(process) - expect = get_proxy_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["proxy"]["userdata"] = USER_DATA_B64_ENC - expect["proxy"]["args"].update(ppid=PPID1) - expect["proxy"]["args"].update(rackapi_ip="my_ip_data") - expect["proxy"]["args"].update(os_username="os_username_data") - expect["proxy"]["args"].update(os_password="os_password_data") - expect["proxy"]["args"].update(os_tenant_name="os_tenant_name_data") - expect["proxy"]["args"].update(os_auth_url="os_auth_url_data") - expect["proxy"]["args"].update(os_region_name="os_region_name") - for key in body["proxy"]: - self.assertEqual(body["proxy"][key], expect["proxy"][key]) - - def test_create_proxy_without_proxy_name(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock(cfg.CONF, "my_ip") - self.mox.StubOutWithMock(cfg.CONF, "os_username") - self.mox.StubOutWithMock(cfg.CONF, "os_password") - self.mox.StubOutWithMock(cfg.CONF, "os_tenant_name") - self.mox.StubOutWithMock(cfg.CONF, "os_auth_url") - self.mox.StubOutWithMock(cfg.CONF, "os_region_name") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, 
IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - cfg.CONF.my_ip = "my_ip_data" - cfg.CONF.os_username = "os_username_data" - cfg.CONF.os_password = "os_password_data" - cfg.CONF.os_tenant_name = "os_tenant_name_data" - cfg.CONF.os_auth_url = "os_auth_url_data" - cfg.CONF.os_region_name = "os_region_name" - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_proxy_request_body1(process) - request_body["proxy"].pop("name") - expect = get_proxy_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["proxy"]["userdata"] = USER_DATA_B64_ENC - expect["proxy"]["args"].update(ppid=PPID1) - expect["proxy"]["args"].update(rackapi_ip="my_ip_data") - expect["proxy"]["args"].update(os_username="os_username_data") - expect["proxy"]["args"].update(os_password="os_password_data") - expect["proxy"]["args"].update(os_tenant_name="os_tenant_name_data") - expect["proxy"]["args"].update(os_auth_url="os_auth_url_data") - expect["proxy"]["args"].update(os_region_name="os_region_name") - expect["proxy"].update(name="proxy-" + PID1) - for key in body["proxy"]: - self.assertEqual(body["proxy"][key], expect["proxy"][key]) - - def test_create_proxy_ipc_endpoint_invalid_max_length(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(cfg.CONF, "my_ip") - self.mox.StubOutWithMock(cfg.CONF, "os_username") - self.mox.StubOutWithMock(cfg.CONF, "os_password") - self.mox.StubOutWithMock(cfg.CONF, "os_tenant_name") - self.mox.StubOutWithMock(cfg.CONF, "os_auth_url") - self.mox.StubOutWithMock(cfg.CONF, "os_region_name") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - cfg.CONF.my_ip = "my_ip_data" - cfg.CONF.os_username = "os_username_data" - cfg.CONF.os_password = "os_password_data" - cfg.CONF.os_tenant_name = "os_tenant_name_data" - cfg.CONF.os_auth_url = "os_auth_url_data" - cfg.CONF.os_region_name = "os_region_name" - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_proxy_request_body1(process) - request_body["proxy"].update(ipc_endpoint="a" * (256)) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_proxy_shm_endpoint_invalid_max_length(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(cfg.CONF, "my_ip") - self.mox.StubOutWithMock(cfg.CONF, "os_username") - self.mox.StubOutWithMock(cfg.CONF, "os_password") - self.mox.StubOutWithMock(cfg.CONF, 
"os_tenant_name") - self.mox.StubOutWithMock(cfg.CONF, "os_auth_url") - self.mox.StubOutWithMock(cfg.CONF, "os_region_name") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - cfg.CONF.my_ip = "my_ip_data" - cfg.CONF.os_username = "os_username_data" - cfg.CONF.os_password = "os_password_data" - cfg.CONF.os_tenant_name = "os_tenant_name_data" - cfg.CONF.os_auth_url = "os_auth_url_data" - cfg.CONF.os_region_name = "os_region_name" - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_proxy_request_body1(process) - request_body["proxy"].update(shm_endpoint="a" * (256)) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_proxy_fs_endpoint_invalid_max_length(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(cfg.CONF, "my_ip") - self.mox.StubOutWithMock(cfg.CONF, "os_username") - self.mox.StubOutWithMock(cfg.CONF, "os_password") - self.mox.StubOutWithMock(cfg.CONF, "os_tenant_name") - self.mox.StubOutWithMock(cfg.CONF, "os_auth_url") - self.mox.StubOutWithMock(cfg.CONF, "os_region_name") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - cfg.CONF.my_ip = "my_ip_data" - cfg.CONF.os_username = "os_username_data" - cfg.CONF.os_password = "os_password_data" - cfg.CONF.os_tenant_name = "os_tenant_name_data" - cfg.CONF.os_auth_url = "os_auth_url_data" - cfg.CONF.os_region_name = "os_region_name" - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_proxy_request_body1(process) - request_body["proxy"].update(fs_endpoint="a" * (256)) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_proxy_already_exist(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_proxy_request_body1(process) - request_body["proxy"].update(fs_endpoint="a" * (256)) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_proxy_invalid_dict_key(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 
'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - expect = get_base_process_response_body(process) - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(ppid=PPID1) - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_args_value_integer(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(args={ - "test_key1": 123, "test_key2": 456}) - expect = get_base_process_response_body(process) - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - 
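# Integer arg values come back from the API as strings, hence the "123"/"456" expectations below. - 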
expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(ppid=PPID1) - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - expect["process"]["args"].update(test_key1="123") - expect["process"]["args"].update(test_key2="456") - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_default_securitygroup(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_all") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - db.securitygroup_get_all( - IsA(context.RequestContext), GID, - filters=IsA(dict)).AndReturn([ - {"securitygroup_id": "securitygroup_id_data", - "neutron_securitygroup_id": "neutron_securitygroup_id_data"}]) - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].pop("securitygroup_ids") - request_body["process"].pop("ppid") - expect = get_base_process_response_body(process) - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - expect["process"].update(ppid=None) - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_parent_securitygroup_and_image_and_flavor(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].pop("securitygroup_ids") - request_body["process"].pop("glance_image_id") - request_body["process"].pop("nova_flavor_id") - request_body["process"].update(args={"key": "value"}) - - expect = get_base_process_response_body(process) - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - 
expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - expect["process"]["args"].update(ppid=PPID1) - expect["process"]["args"].update(key="value") - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_without_keypair_id(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - expect = get_base_process_response_body(process) - request_body["process"].pop("keypair_id") - expect["process"]["keypair_id"] = KEYPAIR_ID1 - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(ppid=PPID1) - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_without_keypair_id_and_ppid(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(db, "keypair_get_all") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.keypair_get_all(IsA(context.RequestContext), GID, - filters=IsA(dict))\ - .AndReturn([{"keypair_id": "keypair_id_data", - "nova_keypair_id": "nova_keypair_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(str), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), 
- userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - expect = get_base_process_response_body(process) - request_body["process"].pop("keypair_id") - request_body["process"].pop("ppid") - expect["process"]["keypair_id"] = KEYPAIR_ID1 - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - expect["process"].update(keypair_id="keypair_id_data") - expect["process"].update(ppid=None) - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_without_process_name(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - self.mox.StubOutWithMock(manager.ResourceOperator, "process_create") - self.mox.StubOutWithMock( - manager.ResourceOperator, "get_process_address") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - manager.ResourceOperator.get_process_address( - IsA(context.RequestContext), - IsA(str)).AndReturn("proxy_instance_id_data") - manager.ResourceOperator.process_create( - IsA(context.RequestContext), - name=IsA(unicode), - key_name=IsA(unicode), - security_groups=IsA(list), - image=IsA(unicode), - flavor=IsA(int), - userdata=IsA(unicode), - meta=IsA(dict), - nics=IsA(list)).AndReturn((NOVA_INSTANCE_ID1, "BUILDING")) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].pop("name") - expect = get_base_process_response_body(process) - expect["process"]["name"] = "process-" + PID1 - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 202) - expect["process"]["userdata"] = USER_DATA_B64_ENC - expect["process"]["args"].update(ppid=PPID1) - expect["process"]["args"].update(proxy_ip="proxy_instance_id_data") - for key in body["process"]: - self.assertEqual(body["process"][key], expect["process"][key]) - - def test_create_process_proxy_not_exist(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_process_invalid_dict_key(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = 
get_proxy_request_body1(process) - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_format_gid(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), IsA(unicode), filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - self.mox.ReplayAll() - - gid = "aaaaaaaaa" - process = _base_process1(gid, PID1) - request_body = get_base_request_body1(process) - - url = get_base_url(gid) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_invalid_securitygroup_ids(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), IsA(unicode), filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(securitygroup_ids={"key": "value"}) - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_format_securitygroup_ids(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), IsA(unicode), filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(securitygroup_ids=["invalid_id"]) - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_invalid_args(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(args=[{"key": "value"}]) - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_format_keypair_id(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), IsA(unicode), filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(keypair_id="aaaaaaaaa") - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_invalid_format_ppid(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), 
IsA(unicode), filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(ppid="aaaaaaaaa") - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_create_process_name_is_whitespace(self): - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(name=" ") - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_default_security_group_not_found(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_all(IsA(context.RequestContext), GID, - filters=IsA(dict)).AndReturn([]) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].pop("securitygroup_ids") - request_body["process"].pop("ppid") - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_userdata(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - request_body["process"].update(userdata="/") - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_empty_body(self): - request_body = {"process": {}} - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_no_body(self): - request_body = {} - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_invalid_format_body(self): - request_body = [] - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_create_notfound_networks(self): - self.stubs.Set(db, "network_get_all", fake_network_get_all_not_found) - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"nova_instance_id": "nova_instance_id_data"}]) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, 
IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data1"}) - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), GID, IsA(unicode)).AndReturn( - {"neutron_securitygroup_id": "securitygroup_id_data2"}) - self.mox.ReplayAll() - - process = _base_process1(GID, PID1) - request_body = get_base_request_body1(process) - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_delete_invalid_format_gid(self): - url = get_base_url("aaaaaaa") + "/" + PID1 - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_invalid_format_pid(self): - url = get_base_url(GID) + "/" + "aaaaa" - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_update(self): - - def _fake_update(context, gid, pid, kwargs): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(kwargs) - process.update(args='{"key": "value"}') - process.update(securitygroups=[{"securitygroup_id": None}]) - process.update(networks=[{"network_id": None}]) - return process - - def _update_process_mockdata(gid, pid): - return { - "pid": pid, - "ppid": None, - "gid": gid, - "nova_instance_id": None, - "nova_flavor_id": None, - "display_name": None, - "glance_image_id": None, - "keypair_id": None, - "userdata": None, - "status": None, - "user_id": "noauth", - "project_id": "noauth", - "app_status": "ACTIVE", - "args": '{"key": "value"}', - "securitygroups": [{"securitygroup_id": None}], - "networks": [{"network_id": None}]} - - def _get_update_response_body(process): - return {"process": { - "pid": process["pid"], - "ppid": process["ppid"], - "gid": process["gid"], - "nova_instance_id": process["nova_instance_id"], - "nova_flavor_id": process["nova_flavor_id"], - "name": process["display_name"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "userdata": process["userdata"], - "status": process["status"], - "user_id": "noauth", - "project_id": "noauth", - "app_status": process["app_status"], - "args": json.loads(process["args"]), - "securitygroup_ids": [None], - "networks": [{ - "fixed": None, "floating": None, "network_id": None}]}} - - self.stubs.Set(db, "process_update", _fake_update) - self.mox.ReplayAll() - - request_body = {"process": {"app_status": "ACTIVE"}} - process = _update_process_mockdata(GID, PID1) - expect = _get_update_response_body(process) - - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in body["process"]: - self.assertEqual(expect["process"][key], body["process"][key]) - - def test_update_invalid_request_body(self): - request_body = {"invalid": {"app_status": "ACTIVE"}} - - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_app_status_required(self): - request_body = {"process": {}} - - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_not_found(self): - self.mox.StubOutWithMock(db, "process_update") - 
db.process_update( - IsA(context.RequestContext), GID, PID1, IsA(dict))\ - .AndRaise(exception.ProcessNotFound(pid=PID1)) - self.mox.ReplayAll() - request_body = {"process": {"app_status": "ACTIVE"}} - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_update_proxy_all(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"pid": PID1}]) - - def _fake_update(context, gid, pid, kwargs): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(kwargs) - process.update(args='{"key": "value"}') - process.update(is_proxy=True) - process.update(securitygroups=[{"securitygroup_id": None}]) - process.update(networks=[{"network_id": None}]) - return process - - def _update_process_mockdata(gid, pid): - return { - "pid": pid, - "ppid": None, - "gid": gid, - "nova_instance_id": None, - "nova_flavor_id": None, - "display_name": None, - "glance_image_id": None, - "keypair_id": None, - "userdata": None, - "status": None, - "user_id": "noauth", - "project_id": "noauth", - "app_status": "app_status_data", - "ipc_endpoint": "ipc_endpoint_data", - "shm_endpoint": "shm_endpoint_data", - "fs_endpoint": "fs_endpoint_data", - "is_proxy": True, - "args": '{"key": "value"}', - "securitygroups": [{"securitygroup_id": None}], - "networks": [{"network_id": None}]} - - def _get_update_response_body(process): - return {"proxy": { - "pid": process["pid"], - "ppid": process["ppid"], - "gid": process["gid"], - "nova_flavor_id": process["nova_flavor_id"], - "nova_instance_id": process["nova_instance_id"], - "name": process["display_name"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "userdata": process["userdata"], - "status": process["status"], - "user_id": "noauth", - "project_id": "noauth", - "app_status": process["app_status"], - "ipc_endpoint": process.get("ipc_endpoint"), - "shm_endpoint": process.get("shm_endpoint"), - "fs_endpoint": process.get("fs_endpoint"), - "args": json.loads(process["args"]), - "securitygroup_ids": [None], - "networks": [{ - "fixed": None, "floating": None, "network_id": None}]}} - - self.stubs.Set(db, "process_update", _fake_update) - self.mox.ReplayAll() - - request_body = {"proxy": { - "app_status": "app_status_data", - "ipc_endpoint": "ipc_endpoint_data", - "shm_endpoint": "shm_endpoint_data", - "fs_endpoint": "fs_endpoint_data"}} - process = _update_process_mockdata(GID, PID1) - expect = _get_update_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in body["proxy"]: - self.assertEqual(expect["proxy"][key], body["proxy"][key]) - - def test_update_proxy_ipc_endpoint(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"pid": PID1}]) - - def _fake_update(context, gid, pid, kwargs): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(kwargs) - process.update(is_proxy=True) - process.update(args='{"key": "value"}') - process.update(securitygroups=[{"securitygroup_id": None}]) - process.update(networks=[{"network_id": 
None}]) - return process - - def _update_process_mockdata(gid, pid): - return { - "pid": pid, - "ppid": None, - "gid": gid, - "nova_instance_id": None, - "nova_flavor_id": None, - "display_name": None, - "glance_image_id": None, - "keypair_id": None, - "userdata": None, - "status": None, - "user_id": "noauth", - "project_id": "noauth", - "app_status": None, - "ipc_endpoint": "ipc_endpoint_data", - "shm_endpoint": None, - "fs_endpoint": None, - "args": '{"key": "value"}', - "securitygroups": [{"securitygroup_id": None}], - "networks": [{"network_id": None}]} - - def _get_update_response_body(process): - return {"proxy": { - "pid": process["pid"], - "ppid": process["ppid"], - "gid": process["gid"], - "nova_instance_id": process["nova_instance_id"], - "nova_flavor_id": process["nova_flavor_id"], - "name": process["display_name"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "userdata": process["userdata"], - "status": process["status"], - "user_id": "noauth", - "project_id": "noauth", - "app_status": None, - "ipc_endpoint": process.get("ipc_endpoint"), - "shm_endpoint": None, - "fs_endpoint": None, - "args": json.loads(process["args"]), - "securitygroup_ids": [None], - "networks": [{ - "fixed": None, "floating": None, "network_id": None}]}} - - self.stubs.Set(db, "process_update", _fake_update) - self.mox.ReplayAll() - - request_body = {"proxy": { - "ipc_endpoint": "ipc_endpoint_data"}} - process = _update_process_mockdata(GID, PID1) - expect = _get_update_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in body["proxy"]: - self.assertEqual(expect["proxy"][key], body["proxy"][key]) - - def test_update_proxy_shm_endpoint(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"pid": PID1}]) - - def _fake_update(context, gid, pid, kwargs): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(kwargs) - process.update(is_proxy=True) - process.update(args='{"key": "value"}') - process.update(securitygroups=[{"securitygroup_id": None}]) - process.update(networks=[{"network_id": None}]) - return process - - def _update_process_mockdata(gid, pid): - return { - "pid": pid, - "ppid": None, - "gid": gid, - "nova_instance_id": None, - "nova_flavor_id": None, - "display_name": None, - "glance_image_id": None, - "keypair_id": None, - "userdata": None, - "status": None, - "user_id": "noauth", - "project_id": "noauth", - "app_status": None, - "ipc_endpoint": None, - "shm_endpoint": "shm_endpoint_data", - "fs_endpoint": None, - "args": '{"key": "value"}', - "securitygroups": [{"securitygroup_id": None}], - "networks": [{"network_id": None}]} - - def _get_update_response_body(process): - return {"proxy": { - "pid": process["pid"], - "ppid": process["ppid"], - "gid": process["gid"], - "nova_instance_id": process["nova_instance_id"], - "nova_flavor_id": process["nova_flavor_id"], - "name": process["display_name"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "userdata": process["userdata"], - "status": process["status"], - "user_id": "noauth", - "project_id": "noauth", - "app_status": None, - "ipc_endpoint": None, - "shm_endpoint": process.get("shm_endpoint"), - "fs_endpoint": None, - "args": json.loads(process["args"]), - "securitygroup_ids": [None], - "networks": [{ - "fixed": None, "floating": None, "network_id": None}]}} - - self.stubs.Set(db, "process_update", _fake_update) - self.mox.ReplayAll() - - request_body = {"proxy": { - "shm_endpoint": "shm_endpoint_data"}} - process = _update_process_mockdata(GID, PID1) - expect = _get_update_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in body["proxy"]: - self.assertEqual(expect["proxy"][key], body["proxy"][key]) - - def test_update_proxy_fs_endpoint(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"pid": PID1}]) - - def _fake_update(context, gid, pid, kwargs): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(kwargs) - process.update(is_proxy=True) - process.update(args='{"key": "value"}') - process.update(securitygroups=[{"securitygroup_id": None}]) - process.update(networks=[{"network_id": None}]) - return process - - def _update_process_mockdata(gid, pid): - return { - "pid": pid, - "ppid": None, - "gid": gid, - "nova_instance_id": None, - "nova_flavor_id": None, - "display_name": None, - "glance_image_id": None, - "keypair_id": None, - "userdata": None, - "status": None, - "user_id": "noauth", - "project_id": "noauth", - "app_status": None, - "ipc_endpoint": None, - "shm_endpoint": None, - "fs_endpoint": "fs_endpoint_data", - "args": '{"key": "value"}', - "securitygroups": [{"securitygroup_id": None}], - "networks": [{"network_id": None}]} - - def _get_update_response_body(process): - return {"proxy": { - "pid": process["pid"], - "ppid": process["ppid"], - "gid": process["gid"], - "nova_instance_id": process["nova_instance_id"], - "nova_flavor_id": process["nova_flavor_id"], - "name": process["display_name"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "userdata": process["userdata"], - "status": process["status"], - "user_id": "noauth", - "project_id": "noauth", - "app_status": None, - "ipc_endpoint": None, - "shm_endpoint": None, - "fs_endpoint": process.get("fs_endpoint"), - "args": json.loads(process["args"]), - "securitygroup_ids": [None], - "networks": [{ - "fixed": None, "floating": None, "network_id": None}]}} - - self.stubs.Set(db, "process_update", _fake_update) - self.mox.ReplayAll() - - request_body = {"proxy": { - "fs_endpoint": "fs_endpoint_data"}} - process = _update_process_mockdata(GID, PID1) - expect = _get_update_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in body["proxy"]: - self.assertEqual(expect["proxy"][key], body["proxy"][key]) - - def test_update_proxy_app_status(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{"pid": PID1}]) - - def _fake_update(context, gid, pid, kwargs): - process = _base(context) - process.update(gid=gid) - process.update(pid=pid) - process.update(kwargs) - 
process.update(is_proxy=True) - process.update(args='{"key": "value"}') - process.update(securitygroups=[{"securitygroup_id": None}]) - process.update(networks=[{"network_id": None}]) - return process - - def _update_process_mockdata(gid, pid): - return { - "pid": pid, - "ppid": None, - "gid": gid, - "nova_instance_id": None, - "nova_flavor_id": None, - "display_name": None, - "glance_image_id": None, - "keypair_id": None, - "userdata": None, - "status": None, - "user_id": "noauth", - "project_id": "noauth", - "app_status": "app_status_data", - "ipc_endpoint": None, - "shm_endpoint": None, - "fs_endpoint": None, - "args": '{"key": "value"}', - "securitygroups": [{"securitygroup_id": None}], - "networks": [{"network_id": None}]} - - def _get_update_response_body(process): - return {"proxy": { - "pid": process["pid"], - "ppid": process["ppid"], - "gid": process["gid"], - "nova_instance_id": process["nova_instance_id"], - "nova_flavor_id": process["nova_flavor_id"], - "name": process["display_name"], - "glance_image_id": process["glance_image_id"], - "keypair_id": process["keypair_id"], - "userdata": process["userdata"], - "status": process["status"], - "user_id": "noauth", - "project_id": "noauth", - "app_status": process.get("app_status"), - "ipc_endpoint": None, - "shm_endpoint": None, - "fs_endpoint": None, - "args": json.loads(process["args"]), - "securitygroup_ids": [None], - "networks": [{ - "fixed": None, "floating": None, "network_id": None}]}} - - self.stubs.Set(db, "process_update", _fake_update) - self.mox.ReplayAll() - - request_body = {"proxy": { - "app_status": "app_status_data"}} - process = _update_process_mockdata(GID, PID1) - expect = _get_update_response_body(process) - - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 200) - for key in body["proxy"]: - self.assertEqual(expect["proxy"][key], body["proxy"][key]) - - def test_update_proxy_does_not_exist(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([]) - self.mox.ReplayAll() - - request_body = {"proxy": {"app_status": "app_status_data"}} - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_proxy_invalid_request_body(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{}]) - self.mox.ReplayAll() - - request_body = {"invalid": {"app_status": "app_status_data"}} - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_proxy_no_keyword(self): - self.mox.StubOutWithMock(db, "process_get_all") - db.process_get_all( - IsA(context.RequestContext), GID, filters=IsA(dict))\ - .AndReturn([{}]) - self.mox.ReplayAll() - - request_body = {"proxy": {}} - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) - - def test_update_proxy_not_found(self): - self.mox.StubOutWithMock(db, "process_get_all") - self.mox.StubOutWithMock(db, "process_update") - db.process_get_all( - IsA(context.RequestContext), 
GID, filters=IsA(dict))\ - .AndReturn([{"pid": PID1}]) - db.process_update( - IsA(context.RequestContext), GID, PID1, IsA(dict))\ - .AndRaise(exception.ProcessNotFound(pid=PID1)) - self.mox.ReplayAll() - - request_body = {"proxy": {"app_status": "app_status_data"}} - url = "/v1/groups/" + GID + "/proxy" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_non_parent(self): - self.stubs.Set(db, "process_delete", fake_delete) - self.mox.StubOutWithMock(db, "process_get_by_pid") - self.mox.StubOutWithMock(db, 'process_get_all') - self.mox.StubOutWithMock(manager.ResourceOperator, "process_delete") - db.process_get_by_pid( - IsA(context.RequestContext), GID, PID1)\ - .AndReturn( - {"pid": PID1, "nova_instance_id": "nova_instance_id_data"}) - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": PID1}).AndReturn([]) - manager.ResourceOperator.process_delete( - IsA(context.RequestContext), IsA(str)) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 204) - - def test_delete_parent_child_relation(self): - self.stubs.Set(db, "process_delete", fake_delete) - self.mox.StubOutWithMock(db, "process_get_by_pid") - self.mox.StubOutWithMock(db, 'process_get_all') - self.mox.StubOutWithMock(manager.ResourceOperator, "process_delete") - - # ppid - # |-- pid_1 - # | |---pid_1_1 - # |---pid_2 - # | |---pid_2_1 - # | |---pid_2_1_1 - # | |---pid_2_1_2 - # |---pid_3 - ppid = unicode(uuid.uuid4()) - pid_1 = unicode(uuid.uuid4()) - pid_1_1 = unicode(uuid.uuid4()) - pid_2 = unicode(uuid.uuid4()) - pid_2_1 = unicode(uuid.uuid4()) - pid_2_1_1 = unicode(uuid.uuid4()) - pid_2_1_2 = unicode(uuid.uuid4()) - pid_3 = unicode(uuid.uuid4()) - - db.process_get_by_pid(IsA(context.RequestContext), GID, ppid)\ - .AndReturn( - {"pid": ppid, "nova_instance_id": "nova_id_ppid"}) - - # ppid -> [pid_1, pid_2, pid_3] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": ppid})\ - .AndReturn( - [{"pid": pid_1, "nova_instance_id": "nova_id_pid_1"}, - {"pid": pid_2, "nova_instance_id": "nova_id_pid_2"}, - {"pid": pid_3, "nova_instance_id": "nova_id_pid_3"}]) - - # pid_1 -> [pid_1_1] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_1})\ - .AndReturn( - [{"pid": pid_1_1, "nova_instance_id": "nova_id_pid_1_1"}]) - - # pid_1_1 -> [] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_1_1})\ - .AndReturn([]) - - # pid_2 -> [pid_2_1] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_2})\ - .AndReturn( - [{"pid": pid_2_1, "nova_instance_id": "nova_id_pid_2_1"}]) - - # pid_2_1 -> [pid_2_1_1, pid_2_1_2] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_2_1})\ - .AndReturn( - [{"pid": pid_2_1_1, "nova_instance_id": "nova_id_pid_2_1_1"}, - {"pid": pid_2_1_2, "nova_instance_id": "nova_id_pid_2_1_2"}]) - - # pid_2_1_1 -> [] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_2_1_1})\ - .AndReturn([]) - - # pid_2_1_2 -> [] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_2_1_2})\ - .AndReturn([]) - - # pid_3 -> [] - db.process_get_all( - IsA(context.RequestContext), GID, {"ppid": pid_3})\ - .AndReturn([]) - - for i in range(8): - manager.ResourceOperator.process_delete( - IsA(context.RequestContext), IsA(str)) - - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + ppid - req = get_request(url, 
"DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 204) - - def test_delete_not_found_exception(self): - self.mox.StubOutWithMock(db, "process_get_by_pid") - db.process_get_by_pid(IsA(context.RequestContext), GID, PID1)\ - .AndRaise(exception.NotFound()) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + PID1 - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) diff --git a/rack/tests/api/v1/test_securitygroups.py b/rack/tests/api/v1/test_securitygroups.py deleted file mode 100644 index 8862e22..0000000 --- a/rack/tests/api/v1/test_securitygroups.py +++ /dev/null @@ -1,831 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from mox import IsA - -from rack import context -from rack import db -from rack import exception -from rack.openstack.common import jsonutils -from rack.resourceoperator import manager -from rack import test -from rack.tests.api import fakes - -import uuid -import webob - -GID = unicode(uuid.uuid4()) -SECURITYGROUP_ID = unicode(uuid.uuid4()) - -SECURITYGROUP_ID1 = unicode(uuid.uuid4()) -SECURITYGROUP_ID2 = unicode(uuid.uuid4()) - - -def _base_securitygroup_get_response(context): - return [ - { - "securitygroup_id": SECURITYGROUP_ID1, - "neutron_securitygroup_id": "fake_key1", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "fake_key1", - "is_default": False, - }, - { - "securitygroup_id": SECURITYGROUP_ID2, - "neutron_securitygroup_id": "fake_key2", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "fake_key2", - "is_default": False, - }, - ] - - -def fake_group_get_by_id(context, gid): - pass - - -def fake_securitygroup_get_all(context, gid, filters=None): - return _base_securitygroup_get_response(context) - - -def fake_securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id): - securitygroup_list = _base_securitygroup_get_response(context) - for securitygroup in securitygroup_list: - if securitygroup["securitygroup_id"] == securitygroup_id: - return securitygroup - raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) - - -def fake_create(context, kwargs): - return { - "securitygroup_id": kwargs.get("securitygroup_id"), - "neutron_securitygroup_id": kwargs.get("neutron_securitygroup_id"), - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": kwargs.get("display_name"), - "is_default": kwargs.get("is_default") - } - - -def fake_update(context, gid, securitygroup_id, kwargs): - return { - "securitygroup_id": securitygroup_id, - "neutron_securitygroup_id": "test_securitygroup", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "test_securitygroup", - "is_default": kwargs.get("is_default") - } - - -def fake_delete(context, gid, securitygroup_id): - return { - "securitygroup_id": securitygroup_id, - 
"neutron_securitygroup_id": "test_securitygroup", - "gid": GID, - "user_id": context.user_id, - "project_id": context.project_id, - "display_name": "test_securitygrouppair", - "is_default": False, - "status": "DELETING" - } - - -def fake_neutron_securitygroup_id(context, gid, securitygroup_id): - return {"neutron_securitygroup_id": "fake_id"} - - -def get_request(url, method, body=None): - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = method - if body is not None: - req.body = jsonutils.dumps(body) - return req - - -def get_base_url(gid): - return "/v1/groups/" + gid + "/securitygroups" - - -class SecuritygroupsTest(test.NoDBTestCase): - - def setUp(self): - super(SecuritygroupsTest, self).setUp() - self.app = fakes.wsgi_app() - - def test_index(self): - self.stubs.Set(db, "securitygroup_get_all", fake_securitygroup_get_all) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_list") - securitygroup_list = [ - { - "securitygroup_id": SECURITYGROUP_ID1, - "neutron_securitygroup_id": "fake_key1", - "user_id": "fake", - "project_id": "fake", - "gid": GID, - "display_name": "fake_key1", - "is_default": False, - "status": "Exist" - }, - { - "securitygroup_id": SECURITYGROUP_ID2, - "neutron_securitygroup_id": "fake_key2", - "user_id": "fake", - "project_id": "fake", - "gid": GID, - "display_name": "fake_key2", - "is_default": False, - "status": "Exist" - }, - ] - manager.ResourceOperator.securitygroup_list( - IsA(context.RequestContext), - IsA(list) - ).AndReturn(securitygroup_list) - self.mox.ReplayAll() - expected = [ - { - "securitygroup_id": SECURITYGROUP_ID1, - "neutron_securitygroup_id": "fake_key1", - "user_id": "fake", - "project_id": "fake", - "gid": GID, - "name": "fake_key1", - "is_default": False, - "status": "Exist" - }, - { - "securitygroup_id": SECURITYGROUP_ID2, - "neutron_securitygroup_id": "fake_key2", - "user_id": "fake", - "project_id": "fake", - "gid": GID, - "name": "fake_key2", - "is_default": False, - "status": "Exist" - }, - ] - url = get_base_url(GID) - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 200) - self.assertEqual(body["securitygroups"], expected) - - def test_index_securitygroup_not_found_exception(self): - url = get_base_url(GID + "a") - req = get_request(url, 'GET') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_index_raise_exception_by_manager(self): - self.stubs.Set(db, "securitygroup_get_all", fake_securitygroup_get_all) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_list") - - manager.ResourceOperator.securitygroup_list( - IsA(context.RequestContext), - IsA(list) - ).AndRaise(exception.RackException()) - self.mox.ReplayAll() - url = get_base_url(GID) - req = get_request(url, 'GET') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 500) - - def test_show(self): - self.stubs.Set(db, "securitygroup_get_by_securitygroup_id", - fake_securitygroup_get_by_securitygroup_id) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_show") - securitygroup = { - "securitygroup_id": SECURITYGROUP_ID1, - "neutron_securitygroup_id": "fake_key1", - "gid": GID, - "user_id": "fake", - "project_id": "fake", - "display_name": "fake_key1", - "is_default": False, - "status": "Exist" - } - manager.ResourceOperator.securitygroup_show( - IsA(context.RequestContext), - IsA(object) - ).AndReturn(securitygroup) - 
self.mox.ReplayAll() - url = get_base_url(GID) + "/" + SECURITYGROUP_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - expected = { - "securitygroup_id": SECURITYGROUP_ID1, - "neutron_securitygroup_id": "fake_key1", - "gid": GID, - "user_id": "fake", - "project_id": "fake", - "name": "fake_key1", - "is_default": False, - "status": "Exist" - } - self.assertEqual(res.status_code, 200) - self.assertEqual(body["securitygroup"], expected) - - def test_show_uuidcheck_gid_not_found_exception(self): - url = get_base_url(GID + "aaa") + "/" + SECURITYGROUP_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_show_uuidcheck_securitygroup_not_found_exception(self): - url = get_base_url(GID) + "/" + SECURITYGROUP_ID1 + "aaaa" - req = get_request(url, 'GET') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_show_db_securitygroup_not_found_exception(self): - self.mox.StubOutWithMock( - db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID1 - ).AndRaise(exception.SecuritygroupNotFound( - securitygroup_id=SECURITYGROUP_ID1)) - self.mox.ReplayAll() - url = get_base_url(GID) + "/" + SECURITYGROUP_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_show_manager_exception(self): - self.mox.StubOutWithMock( - db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID1 - ).AndRaise(exception.SecuritygroupNotFound( - securitygroup_id=SECURITYGROUP_ID1)) - self.mox.ReplayAll() - url = get_base_url(GID) + "/" + SECURITYGROUP_ID1 - req = get_request(url, 'GET') - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_create_with_no_rules(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_create") - name = "test_securitygroup" - security_group = {"neutron_securitygroup_id": "fake_id"} - manager.ResourceOperator.securitygroup_create( - IsA(context.RequestContext), - name, - IsA(list) - ).AndReturn(security_group) - self.mox.StubOutWithMock(db, "securitygroup_create") - db.securitygroup_create(IsA(context.RequestContext), - IsA(dict))\ - .AndReturn({"securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "display_name": name, - "is_default": True}) - self.mox.ReplayAll() - - request_body = {"securitygroup": {"name": name, - "is_default": "true"}} - - expected = {"securitygroup": {"securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": name, - "is_default": True}} - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - - for key in body["securitygroup"]: - self.assertEqual( - body["securitygroup"][key], expected["securitygroup"][key]) - - def test_create_with_no_name(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_create") - result_value = {"neutron_securitygroup_id": "fake_id"} - 
manager.ResourceOperator.securitygroup_create( - IsA(context.RequestContext), - IsA(unicode), - IsA(list) - ).AndReturn(result_value) - self.mox.StubOutWithMock(db, "securitygroup_create") - name = "securitygroup-" + SECURITYGROUP_ID - db.securitygroup_create(IsA(context.RequestContext), - IsA(dict))\ - .AndReturn({"securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "display_name": name, - "is_default": False}) - self.mox.ReplayAll() - - request_body = { - "securitygroup": { - "is_default": "false", - } - } - - expected = { - "securitygroup": { - "securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": name, - "is_default": False, - } - } - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - for key in body["securitygroup"]: - self.assertEqual( - body["securitygroup"][key], expected["securitygroup"][key]) - - def test_create_with_rules(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - self.stubs.Set(db, "securitygroup_get_by_securitygroup_id", - fake_securitygroup_get_by_securitygroup_id) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_create") - name = "test_securitygroup" - security_group = {"neutron_securitygroup_id": "fake_id"} - manager.ResourceOperator.securitygroup_create( - IsA(context.RequestContext), - name, - [{"protocol": "icmp", - "port_range_max": None, - "port_range_min": None, - "remote_neutron_securitygroup_id": "fake_key1", - "remote_ip_prefix": None}, - {"protocol": "tcp", - "port_range_max": "80", - "port_range_min": "80", - "remote_neutron_securitygroup_id": "fake_key1", - "remote_ip_prefix": None}] - ).AndReturn(security_group) - self.mox.StubOutWithMock(db, "securitygroup_create") - db.securitygroup_create(IsA(context.RequestContext), - IsA(dict))\ - .AndReturn({"securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "display_name": name, - "is_default": True}) - self.mox.ReplayAll() - - request_body = { - "securitygroup": { - "name": name, - "is_default": "true", - "securitygrouprules": [ - { - "protocol": "icmp", - "remote_securitygroup_id": SECURITYGROUP_ID1 - }, - { - "port_range_max": "80", - "port_range_min": "80", - "protocol": "tcp", - "remote_securitygroup_id": SECURITYGROUP_ID1} - ] - } - } - - expected = {"securitygroup": {"securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "name": name, - "is_default": True}} - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - self.assertEqual(res.status_code, 201) - for key in body["securitygroup"]: - self.assertEqual( - body["securitygroup"][key], expected["securitygroup"][key]) - - def test_create_exception_InvalidInput_invalid_request_body(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - request_body = {} - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 400) - - def test_create_exception_InvalidInput_rule_is_not_list(self): - self.stubs.Set(db, "group_get_by_gid", 
fake_group_get_by_id) - name = "test_securitygroup" - request_body = { - "securitygroup": { - "name": name, - "is_default": "true", - "securitygrouprules": "fake_rules" - } - } - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 400) - - def test_create_exception_InvalidInput_is_default_is_not_boolean(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - name = "test_securitygroup" - request_body = { - "securitygroup": { - "name": name, - "is_default": "fake" - } - } - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 400) - - def test_create_exception_HTTPNotFound_gid_not_found(self): - self.mox.StubOutWithMock(db, "group_get_by_gid") - db.group_get_by_gid(IsA(context.RequestContext), - GID)\ - .AndRaise(exception.GroupNotFound(gid=GID)) - self.mox.ReplayAll() - name = "test_securitygroup" - request_body = { - "securitygroup": { - "name": name, - "is_default": "true" - } - } - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_create_exception_HTTPNotFound_gid_is_not_uuid(self): - name = "test_securitygroup" - request_body = { - "securitygroup": { - "name": name, - "is_default": "true" - } - } - - url = get_base_url(GID + "aaa") - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_create_exception_manager_securitygroup_create(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_create") - name = "test_securitygroup" - manager.ResourceOperator.securitygroup_create( - IsA(context.RequestContext), - name, - IsA(list) - ).AndRaise(exception.RackException()) - self.mox.ReplayAll() - - request_body = { - "securitygroup": { - "name": name, - "is_default": "true", - } - } - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 500) - - def test_create_exception_db_securitygroup_create(self): - self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_create") - name = "test_securitygroup" - security_group = { - "securitygroup": { - "securitygroup_id": SECURITYGROUP_ID, - "neutron_securitygroup_id": "fake_id", - "gid": GID, - "display_name": name, - "is_default": True, - } - } - manager.ResourceOperator.securitygroup_create( - IsA(context.RequestContext), - name, - IsA(list) - ).AndReturn(security_group) - self.mox.StubOutWithMock(db, "securitygroup_create") - db.securitygroup_create(IsA(context.RequestContext), - IsA(dict))\ - .AndRaise(exception.RackException()) - self.mox.ReplayAll() - - request_body = { - "securitygroup": { - "name": name, - "is_default": "true", - } - } - - url = get_base_url(GID) - req = get_request(url, 'POST', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 500) - - def test_update(self): - self.stubs.Set(db, "securitygroup_update", fake_update) - request_body = { - "securitygroup": { - "is_default": "true" - } - } - expected = { - "securitygroup": { - "securitygroup_id": SECURITYGROUP_ID, - "gid": GID, - "user_id": "noauth", - "project_id": "noauth", - "neutron_securitygroup_id": 
"test_securitygroup", - "name": "test_securitygroup", - "is_default": True, - } - } - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - - self.assertEqual(res.status_code, 200) - for key in body["securitygroup"]: - self.assertEqual( - body["securitygroup"][key], expected["securitygroup"][key]) - - def test_update_exception_InvalidInput_invalid_request_body(self): - request_body = {} - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 400) - - def test_update_exception_InvalidInput_is_default_is_not_boolean(self): - request_body = { - "securitygroup": { - "is_default": "fake" - } - } - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 400) - - def test_update_exception_InvalidInput_is_default_is_required(self): - request_body = { - "securitygroup": {} - } - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 400) - - def test_update_exception_NotFound_gid_is_not_uuid(self): - request_body = { - "securitygroup": { - "is_default": "true" - } - } - - url = get_base_url(GID + "aaa") + "/" + SECURITYGROUP_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_update_exception_NotFound_securitygroup_id_is_not_uuid(self): - request_body = { - "securitygroup": { - "is_default": "true" - } - } - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID + "aaa" - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_update_NotFound_db_securitygroup_update(self): - self.mox.StubOutWithMock(db, "securitygroup_update") - db.securitygroup_update( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID, - IsA(dict))\ - .AndRaise(exception.SecuritygroupNotFound( - securitygroup_id=SECURITYGROUP_ID)) - self.mox.ReplayAll() - request_body = { - "securitygroup": { - "is_default": "true" - } - } - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, 'PUT', request_body) - res = req.get_response(self.app) - - self.assertEqual(res.status_code, 404) - - def test_delete(self): - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID)\ - .AndReturn({"processes": [], - "neutron_securitygroup_id": "fake_id"}) - - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_delete") - manager.ResourceOperator.securitygroup_delete( - IsA(context.RequestContext), - "fake_id") - - self.mox.StubOutWithMock(db, "securitygroup_delete") - db.securitygroup_delete(IsA(context.RequestContext), - GID, - SECURITYGROUP_ID) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 204) - - def test_delete_exception_HTTPNotFound_gid_is_not_uuid(self): - url = get_base_url(GID + "aaa") + "/" + SECURITYGROUP_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def
test_delete_exception_HTTPNotFound_securitygroup_id_is_not_uuid(self): - url = get_base_url(GID) + "/" + SECURITYGROUP_ID + "aaa" - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_exception_HTTPNotFound_securitygroup_not_found(self): - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID)\ - .AndRaise(exception.SecuritygroupNotFound( - securitygroup_id=SECURITYGROUP_ID)) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_exception_manager_securitygroup_delete(self): - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID)\ - .AndReturn({"processes": [], - "neutron_securitygroup_id": "fake_id"}) - - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_delete") - manager.ResourceOperator.securitygroup_delete( - IsA(context.RequestContext), - "fake_id")\ - .AndRaise(exception.RackException()) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 500) - - def test_delete_exception_HTTPNotFound_db_securitygroup_id_not_found(self): - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID)\ - .AndReturn({"processes": [], - "neutron_securitygroup_id": "fake_id"}) - - self.mox.StubOutWithMock( - manager.ResourceOperator, "securitygroup_delete") - manager.ResourceOperator.securitygroup_delete( - IsA(context.RequestContext), - "fake_id") - - self.mox.StubOutWithMock(db, "securitygroup_delete") - db.securitygroup_delete(IsA(context.RequestContext), - GID, - SECURITYGROUP_ID)\ - .AndRaise(exception.SecuritygroupNotFound( - securitygroup_id=SECURITYGROUP_ID)) - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 404) - - def test_delete_exception_SecuritygroupInUse(self): - self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") - db.securitygroup_get_by_securitygroup_id( - IsA(context.RequestContext), - GID, - SECURITYGROUP_ID)\ - .AndReturn({"processes": [{"gid": "gid"}], - "neutron_securitygroup_id": "fake_id"}) - - self.mox.ReplayAll() - - url = get_base_url(GID) + "/" + SECURITYGROUP_ID - req = get_request(url, "DELETE") - res = req.get_response(self.app) - self.assertEqual(res.status_code, 409) diff --git a/rack/tests/conf_fixture.py b/rack/tests/conf_fixture.py deleted file mode 100644 index b0dcea7..0000000 --- a/rack/tests/conf_fixture.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo.config import cfg - - -from rack import config -from rack.openstack.common.fixture import config as config_fixture -from rack import paths -from rack.tests import utils - -CONF = cfg.CONF -CONF.import_opt('use_ipv6', 'rack.netconf') -CONF.import_opt('host', 'rack.netconf') -CONF.import_opt('policy_file', 'rack.policy') -CONF.import_opt('api_paste_config', 'rack.wsgi') - - -class ConfFixture(config_fixture.Config): - """Fixture to manage global conf settings.""" - def setUp(self): - super(ConfFixture, self).setUp() - self.conf.set_default('api_paste_config', - paths.state_path_def('etc/api-paste.ini')) - self.conf.set_default('host', 'fake-mini') - self.conf.set_default('connection', "sqlite://", group='database') - self.conf.set_default('sqlite_synchronous', False, group='database') - self.conf.set_default('use_ipv6', True) - config.parse_args([], default_config_files=[]) - self.addCleanup(utils.cleanup_dns_managers) diff --git a/rack/tests/db/__init__.py b/rack/tests/db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/tests/db/test_db_api.py b/rack/tests/db/test_db_api.py deleted file mode 100644 index 618c108..0000000 --- a/rack/tests/db/test_db_api.py +++ /dev/null @@ -1,1093 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
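The ConfFixture removed above overrides config defaults for the duration of a test and relies on the fixtures library's cleanup mechanism to restore them afterwards. As a rough sketch of how such a fixture is consumed (ExampleTestCase is hypothetical, not part of this tree; ConfFixture and CONF are the objects defined in conf_fixture.py above):

import testtools

from rack.tests import conf_fixture


class ExampleTestCase(testtools.TestCase):

    def setUp(self):
        super(ExampleTestCase, self).setUp()
        # useFixture() runs the fixture's setUp() immediately and
        # registers its cleanups, so every default overridden by
        # ConfFixture is restored when this test finishes.
        self.useFixture(conf_fixture.ConfFixture(conf_fixture.CONF))

    def test_defaults_are_pinned(self):
        # 'fake-mini' is the host default the fixture sets above.
        self.assertEqual('fake-mini', conf_fixture.CONF.host)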
-from rack import context -from rack import db -from rack import exception -from rack import test -import uuid - - -class ModelsObjectComparatorMixin(object): - - def _dict_from_object(self, obj, ignored_keys): - if ignored_keys is None: - ignored_keys = [] - return dict([(k, v) for k, v in obj.iteritems() - if k not in ignored_keys]) - - def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): - obj1 = self._dict_from_object(obj1, ignored_keys) - obj2 = self._dict_from_object(obj2, ignored_keys) - - self.assertEqual(len(obj1), - len(obj2), - "Keys mismatch: %s" % - str(set(obj1.keys()) ^ set(obj2.keys()))) - for key, value in obj1.iteritems(): - self.assertEqual(value, obj2[key]) - - def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): - obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) - sort_key = lambda d: [d[k] for k in sorted(d)] - conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), - key=sort_key) - - self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2)) - - def _assertEqualOrderedListOfObjects(self, objs1, objs2, - ignored_keys=None): - obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) - conv = lambda obj: map(obj_to_dict, obj) - - self.assertEqual(conv(objs1), conv(objs2)) - - def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): - self.assertEqual(len(primitives1), len(primitives2)) - for primitive in primitives1: - self.assertIn(primitive, primitives2) - - for primitive in primitives2: - self.assertIn(primitive, primitives1) - - -class GroupTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(GroupTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.user_ctxt = context.RequestContext('user', 'user') - self.gid = unicode(uuid.uuid4()) - - def test_group_get_all(self): - # set test data - groups = [ - { - "display_name": "display_name_01", - "display_description": "display_description_01", - }, - { - "display_name": "display_name_02", - "display_description": "display_description_02", - }, - { - "display_name": "display_name_03", - "display_description": "display_description_03", - }, - { - "display_name": "display_name_04", - "display_description": "display_description_04", - } - ] - - # create test data to group table - user_ids = ["user_id_01", "user_id_02"] - created_groups_list = [] - for user_id in user_ids: - created_groups = [self._create_group(group, user_id=user_id, - project_id=user_id) - for group in groups] - created_groups_list.append(created_groups) - - # test - ctext = context.RequestContext( - user_id=user_ids[0], project_id=user_ids[0]) - res_groups = db.group_get_all(ctext) - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at'] - self.assertEqual(len(res_groups), len(created_groups_list[0])) - for group in range(0, len(res_groups)): - self._assertEqualObjects( - res_groups[group], created_groups_list[0][group], - ignored_keys) - - def test_group_get_all_empty(self): - ctext = context.RequestContext( - user_id="user01", project_id="user01") - res_groups = db.group_get_all(ctext) - expected = [] - self.assertEqual(res_groups, expected) - - def test_group_get_by_gid(self): - # set test data - groups = [ - { - "display_name": "display_name_01", - "display_description": "display_description_01", - }, - { - "display_name": "display_name_02", - "display_description": "display_description_02", - }, - { - "display_name": "display_name_03", - "display_description": "display_description_03", - }, - { - "display_name": 
"display_name_04", - "display_description": "display_description_04", - } - ] - - # create test data to group table - user_id = "user_id_01" - created_groups = [self._create_group( - group, user_id=user_id, project_id=user_id)for group in groups] - gid = created_groups[1]["gid"] - - # test - ctext = context.RequestContext( - user_id=user_id, project_id=user_id) - res_group = db.group_get_by_gid(ctext, gid) - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at'] - self._assertEqualObjects(res_group, created_groups[1], ignored_keys) - - def test_group_get_by_gid_not_found(self): - # test - user_id = "user_id_01" - ctext = context.RequestContext( - user_id=user_id, project_id=user_id) - gid = "00000000-0000-0000-0000-000000000010" - status_code = 200 - try: - db.group_get_by_gid(ctext, gid) - except Exception as e: - status_code = e.code - self.assertEqual(status_code, 404) - - def _get_base_values(self): - return { - 'gid': 'fake_name', - 'user_id': 'fake_user_id', - 'project_id': 'fake_project_id', - 'display_name': 'fake_dispalay_name', - 'display_description': 'fake_display_description', - 'status': 'fake_status' - } - - def _create_group(self, values, user_id=None, project_id=None): - user_ctxt = context.RequestContext(user_id, project_id) - values['gid'] = unicode(uuid.uuid4()) - values['user_id'] = user_id - values['project_id'] = project_id - v = self._get_base_values() - v.update(values) - return db.group_create(user_ctxt, v) - - def test_group_create(self): - values = { - "gid": "12345678-1234-5678-9123-123456789012", - "user_id": "user", - "project_id": "user", - "display_name": "test_group", - "display_description": "This is test group", - "status": "active" - } - group = db.group_create(self.user_ctxt, values) - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at'] - values.update({"user_id": "user", - "project_id": "user", - "status": "active"}) - self.assertIsNotNone(group['gid']) - self._assertEqualObjects(group, values, ignored_keys) - - def test_group_update(self): - values_before = { - "gid": "12345678-1234-5678-9123-123456789012", - "user_id": "user", - "project_id": "user", - "display_name": "My_group", - "display_description": "This is my group.", - "status": "active" - } - group_before = db.group_create(self.user_ctxt, values_before) - values = { - "gid": group_before["gid"], - "display_name": "My_group_updated", - "display_description": "This is my group updated." 
- } - group = db.group_update(self.user_ctxt, values) - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at', "user_id", "project_id", "status"] - self._assertEqualObjects(group, values, ignored_keys) - - def test_group_delete(self): - values_before = { - "gid": self.gid, - "user_id": "user_id", - "project_id": "project_id", - "display_name": "My_group", - "display_description": "This is my group.", - "status": "active" - } - db.group_create(self.user_ctxt, values_before) - deleted_group = db.group_delete(self.ctxt, self.gid) - self.assertEqual(deleted_group["deleted"], 1) - self.assertEqual(deleted_group["status"], "DELETING") - self.assertIsNotNone(deleted_group.get("deleted_at")) - - def test_group_update_gid_not_found(self): - # test - values_before = { - "gid": "12345678-1234-5678-9123-123456789012", - "user_id": "user", - "project_id": "user", - "display_name": "My_group", - "display_description": "This is my group.", - "status": "active" - } - group_before = db.group_create(self.user_ctxt, values_before) - values = { - "gid": group_before["gid"] + "not-found", - "display_name": "My_group_updated", - "display_description": "This is my group updated." - } - try: - db.group_update(self.user_ctxt, values) - except Exception as e: - status_code = e.code - self.assertEqual(status_code, 404) - - def test_group_delete_not_found(self): - self.assertRaises(exception.GroupNotFound, - db.group_delete, - context=self.user_ctxt, - gid=self.gid) - - -class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(ServiceTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 'host': 'fake_host', - 'binary': 'fake_binary', - 'topic': 'fake_topic', - 'report_count': 3, - 'disabled': False - } - - def _create_service(self, values): - v = self._get_base_values() - v.update(values) - return db.service_create(self.ctxt, v) - - def test_service_create(self): - service = self._create_service({}) - self.assertIsNotNone(service['id']) - for key, value in self._get_base_values().iteritems(): - self.assertEqual(value, service[key]) - - def test_service_destroy(self): - service1 = self._create_service({}) - service2 = self._create_service({'host': 'fake_host2'}) - - db.service_destroy(self.ctxt, service1['id']) - self.assertRaises(exception.ServiceNotFound, - db.service_get, self.ctxt, service1['id']) - self._assertEqualObjects(db.service_get(self.ctxt, service2['id']), - service2) - - def test_service_update(self): - service = self._create_service({}) - new_values = { - 'host': 'fake_host1', - 'binary': 'fake_binary1', - 'topic': 'fake_topic1', - 'report_count': 4, - 'disabled': True - } - db.service_update(self.ctxt, service['id'], new_values) - updated_service = db.service_get(self.ctxt, service['id']) - for key, value in new_values.iteritems(): - self.assertEqual(value, updated_service[key]) - - def test_service_update_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_update, self.ctxt, 100500, {}) - - def test_service_get(self): - service1 = self._create_service({}) - self._create_service({'host': 'some_other_fake_host'}) - real_service1 = db.service_get(self.ctxt, service1['id']) - self._assertEqualObjects(service1, real_service1, - ignored_keys=['compute_node']) - - def test_service_get_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_get, self.ctxt, 100500) - - def test_service_get_by_host_and_topic(self): - service1 = 
self._create_service({'host': 'host1', 'topic': 'topic1'}) - self._create_service({'host': 'host2', 'topic': 'topic2'}) - - real_service1 = db.service_get_by_host_and_topic(self.ctxt, - host='host1', - topic='topic1') - self._assertEqualObjects(service1, real_service1) - - def test_service_get_all(self): - values = [ - {'host': 'host1', 'topic': 'topic1'}, - {'host': 'host2', 'topic': 'topic2'}, - {'disabled': True} - ] - services = [self._create_service(vals) for vals in values] - disabled_services = [services[-1]] - non_disabled_services = services[:-1] - - compares = [ - (services, db.service_get_all(self.ctxt)), - (disabled_services, db.service_get_all(self.ctxt, True)), - (non_disabled_services, db.service_get_all(self.ctxt, False)) - ] - for comp in compares: - self._assertEqualListsOfObjects(*comp) - - def test_service_get_all_by_topic(self): - values = [ - {'host': 'host1', 'topic': 't1'}, - {'host': 'host2', 'topic': 't1'}, - {'disabled': True, 'topic': 't1'}, - {'host': 'host3', 'topic': 't2'} - ] - services = [self._create_service(vals) for vals in values] - expected = services[:2] - real = db.service_get_all_by_topic(self.ctxt, 't1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_all_by_host(self): - values = [ - {'host': 'host1', 'topic': 't11', 'binary': 'b11'}, - {'host': 'host1', 'topic': 't12', 'binary': 'b12'}, - {'host': 'host2', 'topic': 't1'}, - {'host': 'host3', 'topic': 't1'} - ] - services = [self._create_service(vals) for vals in values] - - expected = services[:2] - real = db.service_get_all_by_host(self.ctxt, 'host1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_by_args(self): - values = [ - {'host': 'host1', 'binary': 'a'}, - {'host': 'host2', 'binary': 'b'} - ] - services = [self._create_service(vals) for vals in values] - - service1 = db.service_get_by_args(self.ctxt, 'host1', 'a') - self._assertEqualObjects(services[0], service1) - - service2 = db.service_get_by_args(self.ctxt, 'host2', 'b') - self._assertEqualObjects(services[1], service2) - - def test_service_get_by_args_not_found_exception(self): - self.assertRaises(exception.HostBinaryNotFound, - db.service_get_by_args, - self.ctxt, 'non-exists-host', 'a') - - def test_service_binary_exists_exception(self): - db.service_create(self.ctxt, self._get_base_values()) - values = self._get_base_values() - values.update({'topic': 'top1'}) - self.assertRaises(exception.ServiceBinaryExists, db.service_create, - self.ctxt, values) - - def test_service_topic_exists_exceptions(self): - db.service_create(self.ctxt, self._get_base_values()) - values = self._get_base_values() - values.update({'binary': 'bin1'}) - self.assertRaises(exception.ServiceTopicExists, db.service_create, - self.ctxt, values) - - -class NetworksTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(NetworksTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - self.gid = unicode(uuid.uuid4()) - self.network_id = unicode(uuid.uuid4()) - self.neutron_network_id = unicode(uuid.uuid4()) - self.ext_router_id = unicode(uuid.uuid4()) - - def test_networks_create(self): - values = { - "network_id": self.network_id, - "gid": self.gid, - "neutron_network_id": "", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router": "", - "user_id": "user", - "project_id": "user", - "display_name": "net-" + self.network_id, - "deleted": 0 - } - network = db.network_create(self.ctxt, values) - - ignored_keys = ['deleted', - 'deleted_at', - 'updated_at', - 'created_at'] 
- self._assertEqualObjects(network, values, ignored_keys) - - def test_network_get_all(self): - values = { - "network_id": "", - "gid": self.gid, - "neutron_network_id": "", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router": "", - "user_id": "user", - "project_id": "user", - "display_name": "net-" + self.network_id, - "deleted": 0 - } - for i in range(1, 5): - values["network_id"] = "network_id" + str(i) - db.network_create(self.ctxt, values) - - network_list = db.network_get_all(self.ctxt, self.gid) - for network in network_list: - self.assertEqual(network["gid"], self.gid) - - def test_network_get_all_return_empty_list(self): - network_list = db.network_get_all(self.ctxt, self.gid) - self.assertEqual(network_list, []) - - def test_network_get_by_network_id(self): - values = { - "network_id": "", - "gid": self.gid, - "neutron_network_id": "", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router": "", - "user_id": "user", - "project_id": "user", - "display_name": "net-" + self.network_id, - "deleted": 0 - } - for i in range(1, 5): - values["network_id"] = "network_id" + str(i) - db.network_create(self.ctxt, values) - values["network_id"] = self.network_id - db.network_create(self.ctxt, values) - - network = db.network_get_by_network_id( - self.ctxt, self.gid, self.network_id) - self.assertEqual(network["network_id"], self.network_id) - - def test_network_get_by_network_id_exception_notfound(self): - values = { - "network_id": "", - "gid": self.gid, - "neutron_network_id": "", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router": "", - "user_id": "user", - "project_id": "user", - "display_name": "net-" + self.network_id, - "deleted": 0 - } - for i in range(1, 5): - values["network_id"] = "network_id" + str(i) - db.network_create(self.ctxt, values) - - self.assertRaises(exception.NetworkNotFound, - db.network_get_by_network_id, - context=self.ctxt, - gid=self.gid, - network_id=self.network_id) - - def test_networks_update(self): - create_values = { - "network_id": self.network_id, - "gid": self.gid, - "neutron_network_id": "", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router": "", - "user_id": "user", - "project_id": "user", - "display_name": "net-" + self.network_id, - "deleted": 0 - } - create_network = db.network_create(self.ctxt, create_values) - create_network["cidr"] = "10.0.1.0/24" - - update_values = { - "cidr": "10.0.1.0/24" - } - db.network_update(self.ctxt, self.network_id, update_values) - - network = db.network_get_by_network_id( - self.ctxt, self.gid, self.network_id) - ignored_keys = ['deleted', - 'deleted_at', - 'updated_at', - 'processes'] - self.assertIsNotNone(network["updated_at"]) - self._assertEqualObjects(network, create_network, ignored_keys) - - def test_network_delete(self): - create_values = { - "network_id": self.network_id, - "gid": self.gid, - "neutron_network_id": "", - "is_admin": True, - "cidr": "10.0.0.0/24", - "ext_router": "", - "user_id": "user", - "project_id": "user", - "display_name": "net-" + self.network_id, - "deleted": 0 - } - db.network_create(self.ctxt, create_values) - deleted_network = db.network_delete( - self.ctxt, self.gid, self.network_id) - self.assertEqual(deleted_network["deleted"], 1) - network_list = db.network_get_all(self.ctxt, self.gid) - self.assertEqual(network_list, []) - - -PRIVATE_KEY = ("-----BEGIN RSA PRIVATE KEY-----\nMIIEoAIBA" - "AKCAQEA6W34Ak32uxp7Oh0rh1mCQclkw+NeqchAOhy" - "O/rcphFt280D9\nYXxdUa43i51IDS9VpyFFd10Cv4c" - "cynTPnky82CpGcuXCzaACzI/FHhmBeXTrFoXm\n682" - 
"b/8kXVQfCVfSjnvChxeeATjPu9GQkNrgyYyoubHxrr" - "W7fTaRLEz/Np9CvCq/F\nPJcsx7FwD0adFfmnulbZp" - "plunqMGKX2nYXbDlLi7Ykjd3KbH1PRJuu+sPYDz3Gm" - "Z\n4Z0naojOUDcajuMckN8RzNblBrksH8g6NDauoX5" - "hQa9dyd1q36403NW9tcE6ZwNp\n1GYCnN7/YgI/ugH" - "o30ptpBvGw1zuY5/+FkU7SQIBIwKCAQA8BlW3cyIwH" - "MCZ6j5k\nofzsWFu9V7lBmeShOosrji8/Srgv7CPl3" - "iaf+ZlBKHGc/YsNuBktUm5rw6hRUTyz\nrVUhpHiD8" - "fBDgOrG4yQPDd93AM68phbO67pmWEfUCU86rJ8aPeB" - "0t98qDVqz3zyD\nGWwK3vX+o6ao8J/SIu67zpP381d" - "/ZigDsq+yqhtPpz04YJ2W0w67NV6XSPOV1AX0\nYLn" - "iHMwfbSTdwJ/wVWoooIgbTo7ldPuBsKUwNIVW8H9tm" - "apVdyQxAS9JAkr1Y2si\nxKURN4Iez2oyCFv5+P1em" - "hoptgECr49kpOBAvhRfWWkumgR1azqynzTjSnpQVO6" - "2\nvQr7AoGBAPkYWJX0tFNlqIWw4tcHtcPHJkRwvLd" - "PUfM6Q0b6+YctKBmLoNJWBiXr\n39wiYnftSdJO+L9" - "6HAG38RrmeCfafz19EDPVXepAUYZDwnY1HGx7ZqbiP" - "wxYMN4C\n+Wg3LzuSh7d5fe409+TCtX4YqSVFQd9gl" - "8Ml3sKVOTxeaDROw6hFAoGBAO/mdJOr\nSGcAj9V99" - "df6IX8abZTPm2PmirT95WWwIYX4PRY//5iaCN6XyEK" - "Ix5TJk9lmcQhS\ntb++PTsXpea01WUcxqaOO3vG7PQ" - "hvAbpq8A4eMBZZiY9UyctCPNSMscPPNRU2r/C\ntAs" - "XRk6BNkiGofgn2MY5YBoPkEgiJmJWMKE1AoGAeP0yV" - "3bbPnM0mLUAdxJfmZs+\neQOO3LF/k2VxInnm6eK7t" - "KLntp7PyUauj35qV4HiBxBqMR4Nmm9JOPOZcnFxAJv" - "U\nq3ZDjwlMK0V7tcIGfdWJoYPVewZDnwjCSI/VHO9" - "mfbAJ91uOWStfd8LV0EY18Cea\nK5YNHK7hSTUrTJt" - "JFzcCgYB7YJO5qIuir9Txc/rG2Gj/ie82lqevuGSXm" - "ISaslpi\nJ+Tm3xW8MfXu0bdyrL5pxsEQuFdjXbyOf" - "xgtBNj6Tl8eDsyQK+QTxWPrRIyV10Ji\n2zbJUoxOL" - "irDsMLGR4fUFncOHQLJBQwi9gbmi5hCjmHtVlI6DuD" - "3dbfqlThP1I4J\nwwKBgHfbOPVCgcJA3733J+dBC8g" - "Lt5QT2fCZ2N7PtaHcsSrW/B9VlGP+tviEC59U\nbmp" - "OLADzAto1MZdRDr8uXByZ8/eI37Txn6YchMVp43uL2" - "+WaTdn9GBtOBpWJ0Pqi\nx3HBmILbvIEzB2BX11/PD" - "NGRMNcCy7edvnFMCxeAiW7DJqCb\n-----END RSA " - "PRIVATE KEY-----\n") - - -class KeypairTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(KeypairTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.user_ctxt = context.RequestContext('user', 'user') - - def _get_base_values(self, gid): - return { - "keypair_id": "abcdefgh-ijkl-mnop-qrst-uvwxyzabcdef", - "gid": gid, - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "nova_keypair_id": "nova-test-keypair", - "private_key": PRIVATE_KEY, - "display_name": "test_keypair", - "is_default": True - } - - def _create_group(self, gid): - values = { - "gid": gid, - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_group", - "display_description": "This is test group.", - "is_default": False - } - return db.group_create(self.user_ctxt, values) - - def _create_keypair(self, gid, values): - v = self._get_base_values(gid) - v.update(values) - return db.keypair_create(self.user_ctxt, v) - - def test_keypair_get_all(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values = [ - {"keypair_id": unicode(uuid.uuid4()), - "display_name": "test_keypair1"}, - {"keypair_id": unicode(uuid.uuid4()), - "display_name": "test_keypair2"}, - {"keypair_id": unicode(uuid.uuid4()), - "display_name": "test_keypair3"}, - ] - keypairs = [self._create_keypair(gid, value) for value in values] - expected_keypairs = db.keypair_get_all(self.user_ctxt, gid) - self._assertEqualListsOfObjects(keypairs, expected_keypairs) - - def test_keypair_get_by_keypair_id(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values = [ - {"keypair_id": unicode(uuid.uuid4()), - "display_name": "test_keypair1"}, - {"keypair_id": unicode(uuid.uuid4()), - "display_name": "test_keypair2"}, - ] - 
keypairs = [self._create_keypair(gid, value) for value in values] - expected = db.keypair_get_by_keypair_id( - self.user_ctxt, gid, values[0]["keypair_id"]) - self._assertEqualObjects(keypairs[0], expected) - - def test_keypair_get_keypair_not_found(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values = self._get_base_values(gid) - db.keypair_create(self.user_ctxt, values) - self.assertRaises(exception.KeypairNotFound, - db.keypair_get_by_keypair_id, - self.user_ctxt, gid, "aaaaa") - - def test_keypair_create(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - - values = self._get_base_values(gid) - keypair = db.keypair_create(self.user_ctxt, values) - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at'] - self._assertEqualObjects(keypair, values, ignored_keys) - - def test_keypair_update(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values_before = self._get_base_values(gid) - keypair = db.keypair_create(self.user_ctxt, values_before) - values = { - "is_default": False - } - keypair_after = db.keypair_update( - self.user_ctxt, gid, keypair["keypair_id"], values) - self.assertEqual(keypair_after["is_default"], False) - - def test_keypair_update_keypair_not_found(self): - gid = "12345678-1234-5678-9123-123456789012" - keypair_id = "12345678-1234-5678-9123-123456789012" - self.assertRaises(exception.KeypairNotFound, - db.keypair_update, - context=self.user_ctxt, - gid=gid, - keypair_id=keypair_id, - values={}) - - def test_keypair_delete(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values_before = self._get_base_values(gid) - keypair = db.keypair_create(self.user_ctxt, values_before) - keypair_after = db.keypair_delete( - self.user_ctxt, gid, keypair["keypair_id"]) - self.assertEqual(1, keypair_after["deleted"]) - self.assertIsNotNone(keypair_after.get("deleted_at")) - - def test_keypair_delete_not_found(self): - gid = "12345678-1234-5678-9123-123456789012" - keypair_id = "12345678-1234-5678-9123-123456789012" - self.assertRaises(exception.KeypairNotFound, - db.keypair_delete, - context=self.user_ctxt, - gid=gid, keypair_id=keypair_id) - - -class SecuritygroupTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(SecuritygroupTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.user_ctxt = context.RequestContext('user', 'user') - - def _get_base_values(self, gid, securitygroup_id=None): - return { - "securitygroup_id": securitygroup_id or "abcdefgh-ijkl-mnop-qrst-" - "uvwxyzabcdef", - "gid": gid, - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "neutron_securitygroup_id": securitygroup_id or "neutron-test-" - "securitygroup", - "display_name": "test_securitygroup", - "is_default": True, - "deleted": 0 - } - - def _create_group(self, gid): - values = { - "gid": gid, - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_group", - "display_description": "This is test group.", - "is_default": False, - "deleted": 0 - } - return db.group_create(self.user_ctxt, values) - - def test_securitygroup_get_all(self): - group = self._create_group("gid1") - securitygroup_ids = ["sc1", "sc2", "sc3"] - securitygroups = [] - for securitygroup_id in securitygroup_ids: - securitygroup = db.securitygroup_create( - self.user_ctxt, self._get_base_values(group["gid"], - securitygroup_id)) - securitygroups.append(securitygroup) - - 
res_securitygroups = db.securitygroup_get_all(self.user_ctxt, group["gid"]) - ignored_keys = ['deleted_at', 'updated_at', 'created_at'] - self.assertEqual(len(res_securitygroups), len(securitygroups)) - for i in range(0, len(res_securitygroups)): - self._assertEqualObjects( - res_securitygroups[i], securitygroups[i], ignored_keys) - - def test_securitygroup_get_all_empty(self): - res_securitygroups = db.securitygroup_get_all(self.user_ctxt, "gid") - expected = [] - self.assertEqual(res_securitygroups, expected) - - def test_securitygroup_get_by_securitygroup_id(self): - group = self._create_group("gid1") - securitygroup_ids = ["sc1", "sc2", "sc3"] - securitygroups = [] - for securitygroup_id in securitygroup_ids: - securitygroup = db.securitygroup_create( - self.user_ctxt, self._get_base_values(group["gid"], - securitygroup_id)) - securitygroups.append(securitygroup) - - res_securitygroup = db.securitygroup_get_by_securitygroup_id( - self.user_ctxt, group["gid"], securitygroup_ids[0]) - ignored_keys = ['deleted_at', 'updated_at', 'created_at', 'processes'] - self._assertEqualObjects( - res_securitygroup, securitygroups[0], ignored_keys) - - def test_securitygroup_get_by_securitygroup_id_not_found(self): - try: - db.securitygroup_get_by_securitygroup_id( - self.user_ctxt, "gid", "sec") - except Exception as e: - status_code = e.code - self.assertEqual(status_code, 404) - - def test_securitygroup_create(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - - values = self._get_base_values(gid) - securitygroup = db.securitygroup_create(self.user_ctxt, values) - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at'] - self._assertEqualObjects(securitygroup, values, ignored_keys) - - def test_securitygroup_update(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values_before = self._get_base_values(gid) - securitygroup = db.securitygroup_create(self.user_ctxt, values_before) - values = { - "is_default": False - } - securitygroup_after = db.securitygroup_update( - self.user_ctxt, gid, securitygroup["securitygroup_id"], values) - self.assertEqual(securitygroup_after["is_default"], False) - - def test_securitygroup_update_securitygroup_not_found(self): - gid = "12345678-1234-5678-9123-123456789012" - securitygroup_id = "12345678-1234-5678-9123-123456789012" - self.assertRaises(exception.SecuritygroupNotFound, - db.securitygroup_update, - context=self.user_ctxt, - gid=gid, - securitygroup_id=securitygroup_id, - values={}) - - def test_securitygroup_delete(self): - gid = "12345678-1234-5678-9123-123456789012" - self._create_group(gid) - values_before = self._get_base_values(gid) - securitygroup = db.securitygroup_create(self.user_ctxt, values_before) - securitygroup_after = db.securitygroup_delete( - self.user_ctxt, gid, securitygroup["securitygroup_id"]) - self.assertEqual(1, securitygroup_after["deleted"]) - self.assertIsNotNone(securitygroup_after.get("deleted_at")) - - def test_securitygroup_delete_not_found(self): - gid = "12345678-1234-5678-9123-123456789012" - securitygroup_id = "12345678-1234-5678-9123-123456789012" - self.assertRaises(exception.SecuritygroupNotFound, - db.securitygroup_delete, - context=self.user_ctxt, - gid=gid, securitygroup_id=securitygroup_id) - - -class ProcessTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(ProcessTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.user_ctxt = context.RequestContext('user', 'user') - self.gid = unicode(uuid.uuid4()) - 
self.group = self._create_group(self.gid) - self.network = self._create_network(self.gid) - self.keypair = self._create_keypair(self.gid) - self.securitygroup = self._create_securitygroup(self.gid) - - def _get_base_values(self): - return { - "pid": unicode(uuid.uuid4()), - "ppid": unicode(uuid.uuid4()), - "nova_instance_id": unicode(uuid.uuid4()), - "glance_image_id": unicode(uuid.uuid4()), - "nova_flavor_id": 1, - "keypair_id": self.keypair["keypair_id"], - "gid": self.gid, - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_process", - "deleted": 0, - "is_proxy": False, - "app_status": "BUILDING", - "shm_endpoint": "shm_data_original", - "ipc_endpoint": "ipc_data_original", - "fs_endpoint": "fs_data_original", - "args": None, - "userdata": None - } - - def _create_group(self, gid): - values = { - "gid": gid, - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_group", - "display_description": "This is test group.", - "is_default": False, - "deleted": 0 - } - return db.group_create(self.user_ctxt, values) - - def _create_network(self, gid): - values = { - "gid": gid, - "network_id": unicode(uuid.uuid4()), - "ext_router": unicode(uuid.uuid4()), - "cidr": "10.0.0.1/24", - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_network", - "is_admin": False, - "deleted": 0 - } - return db.network_create(self.user_ctxt, values) - - def _create_keypair(self, gid): - values = { - "gid": gid, - "keypair_id": unicode(uuid.uuid4()), - "private_key": "test", - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_keypair", - "is_default": False, - "deleted": 0 - } - return db.keypair_create(self.user_ctxt, values) - - def _create_securitygroup(self, gid): - values = { - "gid": gid, - "securitygroup_id": unicode(uuid.uuid4()), - "user_id": self.user_ctxt.user_id, - "project_id": self.user_ctxt.project_id, - "display_name": "test_securitygroup", - "is_default": False, - "deleted": 0 - } - return db.securitygroup_create(self.user_ctxt, values) - - def _create_process(self, gid, create_count): - processes = [] - for i in range(0, create_count): - process = db.process_create( - self.user_ctxt, - self._get_base_values(), - [self.network["network_id"]], - [self.securitygroup["securitygroup_id"]]) - processes.append(process) - return processes - - def _create_process_deleted(self, gid, create_count): - processes = [] - process_base_value = self._get_base_values() - process_base_value["deleted"] = 1 - for i in range(0, create_count): - process = db.process_create( - self.user_ctxt, - process_base_value, - [self.network["network_id"]], - [self.securitygroup["securitygroup_id"]]) - processes.append(process) - return processes - - def test_process_get_all(self): - processes = self._create_process(self.gid, 3) - res_processes = db.process_get_all(self.user_ctxt, self.gid) - ignored_keys = ['deleted_at', 'updated_at', 'created_at'] - self.assertEqual(len(res_processes), len(processes)) - for i in range(0, len(res_processes)): - self._assertEqualObjects( - res_processes[i], processes[i], ignored_keys) - - def test_process_get_all_empty(self): - res_processes = db.process_get_all(self.user_ctxt, self.gid) - expected = [] - self.assertEqual(res_processes, expected) - - def test_process_get_by_pid(self): - processes = self._create_process(self.gid, 3) - res_process = db.process_get_by_pid( - self.user_ctxt, self.gid, 
processes[0]["pid"]) - ignored_keys = ['deleted_at', 'updated_at', 'created_at'] - self._assertEqualObjects(res_process, processes[0], ignored_keys) - - def test_process_get_by_pid_get_deleted(self): - processes = self._create_process_deleted(self.gid, 1) - res_process = db.process_get_by_pid( - self.user_ctxt, self.gid, processes[0]["pid"]) - ignored_keys = ['deleted_at', 'updated_at', 'created_at'] - self._assertEqualObjects(res_process, processes[0], ignored_keys) - - def test_process_get_by_pid_not_found(self): - try: - db.process_get_by_pid(self.user_ctxt, self.gid, "notfound-pid") - except Exception as e: - status_code = e.code - self.assertEqual(status_code, 404) - - def test_process_create(self): - values = self._get_base_values() - process = db.process_create(self.user_ctxt, - values, - [self.network["network_id"]], - [self.securitygroup["securitygroup_id"]]) - - values["networks"] = [self.network] - values["securitygroups"] = [self.securitygroup] - ignored_keys = ['deleted', 'deleted_at', 'updated_at', - 'created_at'] - - self._assertEqualObjects(process, values, ignored_keys) - - def test_process_create_duplicated_network_id(self): - values = self._get_base_values() - try: - db.process_create(self.user_ctxt, - values, - [self.network["network_id"], - self.network["network_id"]], - [self.securitygroup["securitygroup_id"]]) - except exception.InvalidInput as e: - status_code = e.code - self.assertEqual(status_code, 400) - - def test_process_create_duplicated_securitygroup_id(self): - values = self._get_base_values() - try: - db.process_create(self.user_ctxt, - values, - [self.network["network_id"]], - [self.securitygroup["securitygroup_id"], - self.securitygroup["securitygroup_id"]]) - except exception.InvalidInput as e: - status_code = e.code - self.assertEqual(status_code, 400) - - def test_process_update(self): - values_before = self._get_base_values() - process = db.process_create(self.user_ctxt, - values_before, - [self.network["network_id"]], - [self.securitygroup["securitygroup_id"]]) - values = { - "display_name": "test", - "fs_endpoint": "fs_data_update" - } - process_after = db.process_update( - self.user_ctxt, self.gid, process["pid"], values) - self.assertEqual(process_after["display_name"], "test") - self.assertEqual(process_after["fs_endpoint"], "fs_data_update") - self.assertEqual(process_after["shm_endpoint"], "shm_data_original") - self.assertEqual(process_after["ipc_endpoint"], "ipc_data_original") - - def test_process_update_process_not_found(self): - self.assertRaises(exception.ProcessNotFound, - db.process_update, - context=self.user_ctxt, - gid=self.gid, - pid=unicode(uuid.uuid4()), - values={}) - - def test_process_delete(self): - values_before = self._get_base_values() - process = db.process_create(self.user_ctxt, - values_before, - [self.network["network_id"]], - [self.securitygroup["securitygroup_id"]]) - process_after = db.process_delete( - self.user_ctxt, self.gid, process["pid"]) - self.assertEqual(1, process_after["deleted"]) - self.assertIsNotNone(process_after.get("deleted_at")) - - def test_process_delete_not_found(self): - self.assertRaises(exception.ProcessNotFound, - db.process_delete, - context=self.user_ctxt, - gid=self.gid, pid=unicode(uuid.uuid4())) diff --git a/rack/tests/db/test_migrations.conf b/rack/tests/db/test_migrations.conf deleted file mode 100644 index 310b705..0000000 --- a/rack/tests/db/test_migrations.conf +++ /dev/null @@ -1,26 +0,0 @@ -[unit_tests] -# Set up any number of databases to test concurrently. 
-# The "name" used in the test is the config variable key. - -# A few tests rely on one sqlite database with 'sqlite' as the key. - -sqlite=sqlite:// -#sqlitefile=sqlite:///test_migrations_utils.db -#mysql=mysql+mysqldb://user:pass@localhost/test_migrations_utils -#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations_utils - -[migration_dbs] -# Migration DB details are listed separately as they can't be connected to -# concurrently. These databases can't be the same as above - -# Note, sqlite:// is in-memory and unique each time it is spawned. -# However file sqlite's are not unique. - -sqlite=sqlite:// -#sqlitefile=sqlite:///test_migrations.db -#mysql=mysql+mysqldb://user:pass@localhost/test_migrations -#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations - -[walk_style] -snake_walk=yes -downgrade=yes diff --git a/rack/tests/db/test_migrations.py b/rack/tests/db/test_migrations.py deleted file mode 100644 index c333ca4..0000000 --- a/rack/tests/db/test_migrations.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Tests for database migrations. This test case reads the configuration -file test_migrations.conf for database connection settings -to use in the tests. For each connection found in the config file, -the test case runs a series of test cases to ensure that migrations work -properly both upgrading and downgrading, and that no data loss occurs -if possible. - -There are also "opportunistic" tests for both mysql and postgresql in here, -which allows testing against all 3 databases (sqlite in memory, mysql, pg) in -a properly configured unit test environment. - -For the opportunistic testing you need to set up db's named 'openstack_citest' -and 'openstack_baremetal_citest' with user 'openstack_citest' and password -'openstack_citest' on localhost. The test will then use that db and u/p combo -to run the tests. 
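The opportunistic tests described in the docstring above only run when an 'openstack_citest' database is actually reachable with those credentials. The real check is delegated to oslodbutils.is_backend_avail() further down, but the idea reduces to a simple connection probe; a minimal sketch with plain SQLAlchemy (illustrative only, not the module's API):

import sqlalchemy


def backend_available(url):
    # Try to open and immediately close a connection. Any failure
    # (missing driver, refused connection, bad credentials) simply
    # means the opportunistic tests are skipped for this backend.
    try:
        engine = sqlalchemy.create_engine(url)
        conn = engine.connect()
        conn.close()
        return True
    except Exception:
        return False

# The user/password/database combo the docstring asks for:
backend_available('mysql+mysqldb://openstack_citest:openstack_citest'
                  '@localhost/openstack_citest')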
- -For postgres on Ubuntu this can be done with the following commands: - -sudo -u postgres psql -postgres=# create user openstack_citest with createdb login password - 'openstack_citest'; -postgres=# create database openstack_citest with owner openstack_citest; -postgres=# create database openstack_baremetal_citest with owner - openstack_citest; - -""" - -import ConfigParser -import glob -import os - -from migrate.versioning import repository -import six.moves.urllib.parse as urlparse -import sqlalchemy.exc - -import rack.db.sqlalchemy.migrate_repo -from rack.openstack.common.db.sqlalchemy import utils as oslodbutils -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging -from rack.openstack.common import processutils -from rack import test -from rack import utils - - -LOG = logging.getLogger(__name__) - - -def _have_mysql(user, passwd, database): - present = os.environ.get('RACK_TEST_MYSQL_PRESENT') - if present is None: - return oslodbutils.is_backend_avail('mysql+mysqldb', database, - user, passwd) - return present.lower() in ('', 'true') - - -def _have_postgresql(user, passwd, database): - present = os.environ.get('RACK_TEST_POSTGRESQL_PRESENT') - if present is None: - return oslodbutils.is_backend_avail('postgresql+psycopg2', database, - user, passwd) - return present.lower() in ('', 'true') - - -def get_mysql_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - if auth_pieces[1].strip(): - password = "-p\"%s\"" % auth_pieces[1] - - return (user, password, database, host) - - -def get_pgsql_connection_info(conn_pieces): - database = conn_pieces.path.strip('/') - loc_pieces = conn_pieces.netloc.split('@') - host = loc_pieces[1] - - auth_pieces = loc_pieces[0].split(':') - user = auth_pieces[0] - password = "" - if len(auth_pieces) > 1: - password = auth_pieces[1].strip() - - return (user, password, database, host) - - -class CommonTestsMixIn(object): - - """These tests are shared between TestRackMigrations and - TestBaremetalMigrations. - - BaseMigrationTestCase is effectively an abstract class, meant to be derived - from and not directly tested against; that's why these `test_` methods need - to be on a Mixin, so that they won't be picked up as valid tests for - BaseMigrationTestCase. - """ - - def test_walk_versions(self): - for key, engine in self.engines.items(): - # We start each walk with a completely blank slate. 
- self._reset_database(key) - self._walk_versions(engine, self.snake_walk, self.downgrade) - -# def test_mysql_opportunistically(self): -# self._test_mysql_opportunistically() - - def test_mysql_connect_fail(self): - """Test that we can trigger a mysql connection failure and we fail - gracefully to ensure we don't break people without mysql - """ - if oslodbutils.is_backend_avail('mysql+mysqldb', self.DATABASE, - "openstack_cifail", self.PASSWD): - self.fail("Shouldn't have connected") - -# def test_postgresql_opportunistically(self): -# self._test_postgresql_opportunistically() - - def test_postgresql_connect_fail(self): - """Test that we can trigger a postgres connection failure and we fail - gracefully to ensure we don't break people without postgres - """ - if oslodbutils.is_backend_avail('postgresql+psycopg2', self.DATABASE, - "openstack_cifail", self.PASSWD): - self.fail("Shouldn't have connected") - - -class BaseMigrationTestCase(test.NoDBTestCase): - - """Base class for testing migrations and migration utils. This sets up - and configures the databases to run tests against. - """ - - # NOTE(jhesketh): It is expected that tests clean up after themselves. - # This is necessary for concurrency to allow multiple tests to work on - # one database. - # The full migration walk tests however do call the old _reset_databases() - # to throw away whatever was there so they need to operate on their own - # database that we know isn't accessed concurrently. - # Hence, BaseWalkMigrationTestCase overwrites the engine list. - - USER = None - PASSWD = None - DATABASE = None - - TIMEOUT_SCALING_FACTOR = 2 - - def __init__(self, *args, **kwargs): - super(BaseMigrationTestCase, self).__init__(*args, **kwargs) - - self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), - 'test_migrations.conf') - # Test machines can set the RACK_TEST_MIGRATIONS_CONF variable - # to override the location of the config file for migration testing - self.CONFIG_FILE_PATH = os.environ.get('RACK_TEST_MIGRATIONS_CONF', - self.DEFAULT_CONFIG_FILE) - self.MIGRATE_FILE = rack.db.sqlalchemy.migrate_repo.__file__ - self.REPOSITORY = repository.Repository( - os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) - self.INIT_VERSION = 0 - - self.snake_walk = False - self.downgrade = False - self.test_databases = {} - self.migration = None - self.migration_api = None - - def setUp(self): - super(BaseMigrationTestCase, self).setUp() - self._load_config() - - def _load_config(self): - # Load test databases from the config file. Only do this - # once. No need to re-run this on each test... - LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) - if os.path.exists(self.CONFIG_FILE_PATH): - cp = ConfigParser.RawConfigParser() - try: - cp.read(self.CONFIG_FILE_PATH) - config = cp.options('unit_tests') - for key in config: - self.test_databases[key] = cp.get('unit_tests', key) - self.snake_walk = cp.getboolean('walk_style', 'snake_walk') - self.downgrade = cp.getboolean('walk_style', 'downgrade') - - except ConfigParser.ParsingError as e: - self.fail("Failed to read test_migrations.conf config " - "file. Got error: %s" % e) - else: - self.fail("Failed to find test_migrations.conf config " - "file.") - - self.engines = {} - for key, value in self.test_databases.items(): - self.engines[key] = sqlalchemy.create_engine(value) - - # NOTE(jhesketh): We only need to make sure the databases are created - # not necessarily clean of tables. 
- self._create_databases() - - def execute_cmd(self, cmd=None): - out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True) - output = out or err - LOG.debug(output) - self.assertEqual('', err, - "Failed to run: %s\n%s" % (cmd, output)) - - @utils.synchronized('pgadmin', external=True) - def _reset_pg(self, conn_pieces): - (user, password, database, host) = \ - get_pgsql_connection_info(conn_pieces) - os.environ['PGPASSWORD'] = password - os.environ['PGUSER'] = user - # note(boris-42): We must be able to create and drop databases, but we - # can't drop the database we are connected to, so for such - # operations we connect to the special database template1. - sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" - " '%(sql)s' -d template1") - sqldict = {'user': user, 'host': host} - - sqldict['sql'] = ("drop database if exists %s;") % database - droptable = sqlcmd % sqldict - self.execute_cmd(droptable) - - sqldict['sql'] = ("create database %s;") % database - createtable = sqlcmd % sqldict - self.execute_cmd(createtable) - - os.unsetenv('PGPASSWORD') - os.unsetenv('PGUSER') - - @utils.synchronized('mysql', external=True) - def _reset_mysql(self, conn_pieces): - # We can execute the MySQL client to destroy and re-create - # the MySQL database, which is easier and less error-prone - # than using SQLAlchemy to do this via MetaData...trust me. - (user, password, database, host) = \ - get_mysql_connection_info(conn_pieces) - sql = ("drop database if exists %(database)s; " - "create database %(database)s;" % {'database': database}) - cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " - "-e \"%(sql)s\"" % {'user': user, 'password': password, - 'host': host, 'sql': sql}) - self.execute_cmd(cmd) - - @utils.synchronized('sqlite', external=True) - def _reset_sqlite(self, conn_pieces): - # We can just delete the SQLite database, which is - # the easiest and cleanest solution - db_path = conn_pieces.path.strip('/') - if os.path.exists(db_path): - os.unlink(db_path) - # No need to recreate the SQLite DB. SQLite will - # create it for us if it's not there...
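# Worked example (values illustrative): for the connection string
# "mysql://openstack_citest:secret@localhost/openstack_citest",
# get_mysql_connection_info(urlparse.urlparse(conn_string)) returns
# ('openstack_citest', '-p"secret"', 'openstack_citest', 'localhost'),
# so _reset_mysql above ends up shelling out to:
#
#     mysql -u "openstack_citest" -p"secret" -h localhost \
#         -e "drop database if exists openstack_citest; create database openstack_citest;"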
- - def _create_databases(self): - """Create all configured databases as needed.""" - for key, engine in self.engines.items(): - self._create_database(key) - - def _create_database(self, key): - """Create database if it doesn't exist.""" - conn_string = self.test_databases[key] - conn_pieces = urlparse.urlparse(conn_string) - - if conn_string.startswith('mysql'): - (user, password, database, host) = \ - get_mysql_connection_info(conn_pieces) - sql = "create database if not exists %s;" % database - cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " - "-e \"%(sql)s\"" % {'user': user, 'password': password, - 'host': host, 'sql': sql}) - self.execute_cmd(cmd) - elif conn_string.startswith('postgresql'): - (user, password, database, host) = \ - get_pgsql_connection_info(conn_pieces) - os.environ['PGPASSWORD'] = password - os.environ['PGUSER'] = user - - sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" - " '%(sql)s' -d template1") - - sql = ("create database if not exists %s;") % database - createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql} - # 0 means the database was created - # 256 means it already exists (which is fine) - # otherwise raise an error - out, err = processutils.trycmd(createtable, shell=True, - check_exit_code=[0, 256], - discard_warnings=True) - output = out or err - if err != '': - self.fail("Failed to run: %s\n%s" % (createtable, output)) - - os.unsetenv('PGPASSWORD') - os.unsetenv('PGUSER') - - def _reset_databases(self): - """Reset all configured databases.""" - for key, engine in self.engines.items(): - self._reset_database(key) - - def _reset_database(self, key): - """Reset a specific database.""" - engine = self.engines[key] - conn_string = self.test_databases[key] - conn_pieces = urlparse.urlparse(conn_string) - engine.dispose() - if conn_string.startswith('sqlite'): - self._reset_sqlite(conn_pieces) - elif conn_string.startswith('mysql'): - self._reset_mysql(conn_pieces) - elif conn_string.startswith('postgresql'): - self._reset_pg(conn_pieces) - - -class BaseWalkMigrationTestCase(BaseMigrationTestCase): - - """BaseWalkMigrationTestCase loads in an alternative set of databases for - testing against. This is necessary because the default databases are - shared by concurrently running tests, which must not interfere with one - another. It is expected that databases listed under [migration_dbs] in - the configuration are only being accessed by one test at a time. - Currently only test_walk_versions accesses the databases (and is the only - method that calls _reset_database(), which is clearly problematic for - concurrency). - """ - - def _load_config(self): - # Load test databases from the config file. Only do this - # once. No need to re-run this on each test... - LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) - if os.path.exists(self.CONFIG_FILE_PATH): - cp = ConfigParser.RawConfigParser() - try: - cp.read(self.CONFIG_FILE_PATH) - config = cp.options('migration_dbs') - for key in config: - self.test_databases[key] = cp.get('migration_dbs', key) - self.snake_walk = cp.getboolean('walk_style', 'snake_walk') - self.downgrade = cp.getboolean('walk_style', 'downgrade') - except ConfigParser.ParsingError as e: - self.fail("Failed to read test_migrations.conf config " - "file. 
Got error: %s" % e) - else: - self.fail("Failed to find test_migrations.conf config " - "file.") - - self.engines = {} - for key, value in self.test_databases.items(): - self.engines[key] = sqlalchemy.create_engine(value) - - self._create_databases() - - def _test_mysql_opportunistically(self): - # Test that table creation on mysql only builds InnoDB tables - if not _have_mysql(self.USER, self.PASSWD, self.DATABASE): - self.skipTest("mysql not available") - # add this to the global lists to make reset work with it; it's removed - # automatically in tearDown, so there is no need to clean it up here. - connect_string = oslodbutils.get_connect_string( - "mysql+mysqldb", self.DATABASE, self.USER, self.PASSWD) - (user, password, database, host) = \ - get_mysql_connection_info(urlparse.urlparse(connect_string)) - engine = sqlalchemy.create_engine(connect_string) - self.engines[database] = engine - self.test_databases[database] = connect_string - - # build a fully populated mysql database with all the tables - self._reset_database(database) - self._walk_versions(engine, self.snake_walk, self.downgrade) - - connection = engine.connect() - # sanity check - total = connection.execute("SELECT count(*) " - "from information_schema.TABLES " - "where TABLE_SCHEMA='%(database)s'" % - {'database': database}) - self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") - - noninnodb = connection.execute("SELECT count(*) " - "from information_schema.TABLES " - "where TABLE_SCHEMA='%(database)s' " - "and ENGINE!='InnoDB' " - "and TABLE_NAME!='migrate_version'" % - {'database': database}) - count = noninnodb.scalar() - self.assertEqual(count, 0, "%d non-InnoDB tables created" % count) - connection.close() - - del self.engines[database] - del self.test_databases[database] - - def _test_postgresql_opportunistically(self): - # Test postgresql database migration walk - if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE): - self.skipTest("postgresql not available") - # add this to the global lists to make reset work with it; it's removed - # automatically in tearDown, so there is no need to clean it up here. - connect_string = oslodbutils.get_connect_string( - "postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD) - engine = sqlalchemy.create_engine(connect_string) - (user, password, database, host) = \ - get_pgsql_connection_info(urlparse.urlparse(connect_string)) - self.engines[database] = engine - self.test_databases[database] = connect_string - - # build a fully populated postgresql database with all the tables - self._reset_database(database) - self._walk_versions(engine, self.snake_walk, self.downgrade) - del self.engines[database] - del self.test_databases[database] - - def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): - # Determine latest version script from the repo, then - # upgrade from 1 through to the latest, with no data - # in the databases. This just checks that the schema itself - # upgrades successfully.
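# For illustration (assuming INIT_VERSION == 0 and three migration
# scripts), snake_walk=True and downgrade=True make this walk visit:
#     forward:  up 1, down 0, up 1, up 2, down 1, up 2, up 3, down 2, up 3
#     backward: down 2, up 3, down 2, down 1, up 2, down 1, down 0, up 1,
#               down 0
# so every upgrade and downgrade script is exercised from both directions.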
- - # Place the database under version control - self.migration_api.version_control(engine, - self.REPOSITORY, - self.INIT_VERSION) - self.assertEqual(self.INIT_VERSION, - self.migration_api.db_version(engine, - self.REPOSITORY)) - - LOG.debug('latest version is %s' % self.REPOSITORY.latest) - versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) - - for version in versions: - # upgrade -> downgrade -> upgrade - self._migrate_up(engine, version, with_data=True) - if snake_walk: - downgraded = self._migrate_down( - engine, version - 1, with_data=True) - if downgraded: - self._migrate_up(engine, version) - - if downgrade: - # Now walk it back down to 0 from the latest, testing - # the downgrade paths. - for version in reversed(versions): - # downgrade -> upgrade -> downgrade - downgraded = self._migrate_down(engine, version - 1) - - if snake_walk and downgraded: - self._migrate_up(engine, version) - self._migrate_down(engine, version - 1) - - def _migrate_down(self, engine, version, with_data=False): - try: - self.migration_api.downgrade(engine, self.REPOSITORY, version) - except NotImplementedError: - # NOTE(sirp): some migrations, namely release-level - # migrations, don't support a downgrade. - return False - - self.assertEqual(version, - self.migration_api.db_version(engine, - self.REPOSITORY)) - - # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' - # version). So if we have any downgrade checks, they need to be run for - # the previous (higher numbered) migration. - if with_data: - post_downgrade = getattr( - self, "_post_downgrade_%03d" % (version + 1), None) - if post_downgrade: - post_downgrade(engine) - - return True - - def _migrate_up(self, engine, version, with_data=False): - """migrate up to a new version of the db. - - We allow for data insertion and post checks at every - migration version with special _pre_upgrade_### and - _check_### functions in the main test. 
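For example (hypothetical migration number, table, and column, shown
    here for illustration only):

        def _pre_upgrade_042(self, engine):
            groups = oslodbutils.get_table(engine, 'groups')
            engine.execute(groups.insert(), [{'gid': 'fake_gid'}])
            return {'gid': 'fake_gid'}

        def _check_042(self, engine, data):
            groups = oslodbutils.get_table(engine, 'groups')
            rows = engine.execute(
                groups.select(groups.c.gid == data['gid'])).fetchall()
            self.assertEqual(1, len(rows))

    The value returned by _pre_upgrade_042 is handed to _check_042 once the
    upgrade to version 042 has succeeded.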
- """ - # NOTE(sdague): try block is here because it's impossible to debug - # where a failed data migration happens otherwise - try: - if with_data: - data = None - pre_upgrade = getattr( - self, "_pre_upgrade_%03d" % version, None) - if pre_upgrade: - data = pre_upgrade(engine) - - self.migration_api.upgrade(engine, self.REPOSITORY, version) - self.assertEqual(version, - self.migration_api.db_version(engine, - self.REPOSITORY)) - if with_data: - check = getattr(self, "_check_%03d" % version, None) - if check: - check(engine, data) - except Exception: - LOG.error("Failed to migrate to version %s on engine %s" % - (version, engine)) - raise - - -class TestRackMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): - - """Test sqlalchemy-migrate migrations.""" - USER = "openstack_citest" - PASSWD = "openstack_citest" - DATABASE = "openstack_citest" - - def __init__(self, *args, **kwargs): - super(TestRackMigrations, self).__init__(*args, **kwargs) - - self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), - 'test_migrations.conf') - # Test machines can set the RACK_TEST_MIGRATIONS_CONF variable - # to override the location of the config file for migration testing - self.CONFIG_FILE_PATH = os.environ.get('RACK_TEST_MIGRATIONS_CONF', - self.DEFAULT_CONFIG_FILE) - self.MIGRATE_FILE = rack.db.sqlalchemy.migrate_repo.__file__ - self.REPOSITORY = repository.Repository( - os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) - - def setUp(self): - super(TestRackMigrations, self).setUp() - - if self.migration is None: - self.migration = __import__('rack.db.migration', - globals(), locals(), - ['db_initial_version'], -1) - self.INIT_VERSION = self.migration.db_initial_version() - if self.migration_api is None: - temp = __import__('rack.db.sqlalchemy.migration', - globals(), locals(), ['versioning_api'], -1) - self.migration_api = temp.versioning_api - - def assertColumnExists(self, engine, table, column): - t = oslodbutils.get_table(engine, table) - self.assertIn(column, t.c) - - def assertColumnNotExists(self, engine, table, column): - t = oslodbutils.get_table(engine, table) - self.assertNotIn(column, t.c) - - def assertTableNotExists(self, engine, table): - self.assertRaises(sqlalchemy.exc.NoSuchTableError, - oslodbutils.get_table, engine, table) - - def assertIndexExists(self, engine, table, index): - t = oslodbutils.get_table(engine, table) - index_names = [idx.name for idx in t.indexes] - self.assertIn(index, index_names) - - def assertIndexMembers(self, engine, table, index, members): - self.assertIndexExists(engine, table, index) - - t = oslodbutils.get_table(engine, table) - index_columns = None - for idx in t.indexes: - if idx.name == index: - index_columns = idx.columns.keys() - break - - self.assertEqual(sorted(members), sorted(index_columns)) - - -class ProjectTestCase(test.NoDBTestCase): - - def test_all_migrations_have_downgrade(self): - topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../') - py_glob = os.path.join(topdir, "rack", "db", "sqlalchemy", - "migrate_repo", "versions", "*.py") - - missing_downgrade = [] - for path in glob.iglob(py_glob): - has_upgrade = False - has_downgrade = False - with open(path, "r") as f: - for line in f: - if 'def upgrade(' in line: - has_upgrade = True - if 'def downgrade(' in line: - has_downgrade = True - - if has_upgrade and not has_downgrade: - fname = os.path.basename(path) - missing_downgrade.append(fname) - - helpful_msg = (_("The following migrations are missing a downgrade:" - "\n\t%s") % 
'\n\t'.join(sorted(missing_downgrade))) - self.assertTrue(not missing_downgrade, helpful_msg) diff --git a/rack/tests/fake_policy.py b/rack/tests/fake_policy.py deleted file mode 100644 index 117870a..0000000 --- a/rack/tests/fake_policy.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -policy_data = """ -{ - "admin_api": "is_admin:True", - - "cells_scheduler_filter:TargetCellFilter": "is_admin:True", - - "context_is_admin": "role:admin or role:administrator", - "compute:create": "", - "compute:create:attach_network": "", - "compute:create:attach_volume": "", - - "compute:get": "", - "compute:get_all": "", - "compute:get_all_tenants": "", - - "compute:update": "", - - "compute:get_instance_metadata": "", - "compute:get_all_instance_metadata": "", - "compute:get_all_instance_system_metadata": "", - "compute:update_instance_metadata": "", - "compute:delete_instance_metadata": "", - - "compute:get_instance_faults": "", - "compute:get_diagnostics": "", - - "compute:get_lock": "", - "compute:lock": "", - "compute:unlock": "", - "compute:unlock_override": "is_admin:True", - - "compute:get_vnc_console": "", - "compute:get_spice_console": "", - "compute:get_rdp_console": "", - "compute:get_console_output": "", - - "compute:associate_floating_ip": "", - "compute:reset_network": "", - "compute:inject_network_info": "", - "compute:add_fixed_ip": "", - "compute:remove_fixed_ip": "", - - "compute:attach_volume": "", - "compute:detach_volume": "", - - "compute:set_admin_password": "", - - "compute:rescue": "", - "compute:unrescue": "", - - "compute:suspend": "", - "compute:resume": "", - - "compute:pause": "", - "compute:unpause": "", - - "compute:start": "", - "compute:stop": "", - - "compute:resize": "", - "compute:confirm_resize": "", - "compute:revert_resize": "", - - "compute:rebuild": "", - - "compute:reboot": "", - - "compute:snapshot": "", - "compute:backup": "", - - "compute:shelve": "", - "compute:shelve_offload": "", - "compute:unshelve": "", - - "compute:security_groups:add_to_instance": "", - "compute:security_groups:remove_from_instance": "", - - "compute:delete": "", - "compute:soft_delete": "", - "compute:force_delete": "", - "compute:restore": "", - "compute:swap_volume": "", - - "compute:volume_snapshot_create": "", - "compute:volume_snapshot_delete": "", - - "compute:v3:servers:start": "", - "compute:v3:servers:stop": "", - "compute_extension:v3:os-access-ips": "", - "compute_extension:accounts": "", - "compute_extension:admin_actions:pause": "", - "compute_extension:admin_actions:unpause": "", - "compute_extension:admin_actions:suspend": "", - "compute_extension:admin_actions:resume": "", - "compute_extension:admin_actions:lock": "", - "compute_extension:admin_actions:unlock": "", - "compute_extension:admin_actions:resetNetwork": "", - "compute_extension:admin_actions:injectNetworkInfo": "", - "compute_extension:admin_actions:createBackup": "", - "compute_extension:admin_actions:migrateLive": 
"", - "compute_extension:admin_actions:resetState": "", - "compute_extension:admin_actions:migrate": "", - "compute_extension:v3:os-admin-actions:reset_network": "", - "compute_extension:v3:os-admin-actions:inject_network_info": "", - "compute_extension:v3:os-admin-actions:reset_state": "", - "compute_extension:v3:os-admin-password": "", - "compute_extension:aggregates": "rule:admin_api", - "compute_extension:v3:os-aggregates:index": "rule:admin_api", - "compute_extension:v3:os-aggregates:create": "rule:admin_api", - "compute_extension:v3:os-aggregates:show": "rule:admin_api", - "compute_extension:v3:os-aggregates:update": "rule:admin_api", - "compute_extension:v3:os-aggregates:delete": "rule:admin_api", - "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", - "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", - "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", - "compute_extension:agents": "", - "compute_extension:v3:os-agents": "", - "compute_extension:attach_interfaces": "", - "compute_extension:v3:os-attach-interfaces": "", - "compute_extension:baremetal_nodes": "", - "compute_extension:cells": "", - "compute_extension:v3:os-cells": "", - "compute_extension:certificates": "", - "compute_extension:v3:os-certificates:create": "", - "compute_extension:v3:os-certificates:show": "", - "compute_extension:cloudpipe": "", - "compute_extension:cloudpipe_update": "", - "compute_extension:config_drive": "", - "compute_extension:v3:os-config-drive": "", - "compute_extension:console_output": "", - "compute_extension:v3:os-console-output": "", - "compute_extension:consoles": "", - "compute_extension:v3:os-remote-consoles": "", - "compute_extension:createserverext": "", - "compute_extension:v3:os-create-backup": "", - "compute_extension:deferred_delete": "", - "compute_extension:v3:os-deferred-delete": "", - "compute_extension:disk_config": "", - "compute_extension:evacuate": "is_admin:True", - "compute_extension:v3:os-evacuate": "is_admin:True", - "compute_extension:extended_server_attributes": "", - "compute_extension:v3:os-extended-server-attributes": "", - "compute_extension:extended_status": "", - "compute_extension:v3:os-extended-status": "", - "compute_extension:extended_availability_zone": "", - "compute_extension:v3:os-extended-availability-zone": "", - "compute_extension:extended_ips": "", - "compute_extension:extended_ips_mac": "", - "compute_extension:extended_vif_net": "", - "compute_extension:extended_volumes": "", - "compute_extension:v3:os-extended-volumes": "", - "compute_extension:v3:os-extended-volumes:swap": "", - "compute_extension:v3:os-extended-volumes:attach": "", - "compute_extension:v3:os-extended-volumes:detach": "", - "compute_extension:v3:extensions:discoverable": "", - "compute_extension:fixed_ips": "", - "compute_extension:flavor_access": "", - "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", - "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", - "compute_extension:v3:flavor-access": "", - "compute_extension:v3:flavor-access:remove_tenant_access": - "rule:admin_api", - "compute_extension:v3:flavor-access:add_tenant_access": - "rule:admin_api", - "compute_extension:flavor_disabled": "", - "compute_extension:v3:os-flavor-disabled": "", - "compute_extension:flavor_rxtx": "", - "compute_extension:v3:os-flavor-rxtx": "", - "compute_extension:flavor_swap": "", - "compute_extension:flavorextradata": "", - "compute_extension:flavorextraspecs:index": "", - 
"compute_extension:flavorextraspecs:show": "", - "compute_extension:flavorextraspecs:create": "is_admin:True", - "compute_extension:flavorextraspecs:update": "is_admin:True", - "compute_extension:flavorextraspecs:delete": "is_admin:True", - "compute_extension:v3:flavor-extra-specs:index": "", - "compute_extension:v3:flavor-extra-specs:show": "", - "compute_extension:v3:flavor-extra-specs:create": "is_admin:True", - "compute_extension:v3:flavor-extra-specs:update": "is_admin:True", - "compute_extension:v3:flavor-extra-specs:delete": "is_admin:True", - "compute_extension:flavormanage": "", - "compute_extension:v3:flavor-manage": "", - "compute_extension:v3:flavors:discoverable": "", - "compute_extension:floating_ip_dns": "", - "compute_extension:floating_ip_pools": "", - "compute_extension:floating_ips": "", - "compute_extension:floating_ips_bulk": "", - "compute_extension:fping": "", - "compute_extension:fping:all_tenants": "is_admin:True", - "compute_extension:hide_server_addresses": "", - "compute_extension:v3:os-hide-server-addresses": "", - "compute_extension:hosts": "", - "compute_extension:v3:os-hosts": "rule:admin_api", - "compute_extension:hypervisors": "", - "compute_extension:v3:os-hypervisors": "rule:admin_api", - "compute_extension:image_size": "", - "compute_extension:instance_actions": "", - "compute_extension:v3:os-instance-actions": "", - "compute_extension:instance_actions:events": "is_admin:True", - "compute_extension:v3:os-instance-actions:events": "is_admin:True", - "compute_extension:instance_usage_audit_log": "", - "compute_extension:keypairs": "", - "compute_extension:keypairs:index": "", - "compute_extension:keypairs:show": "", - "compute_extension:keypairs:create": "", - "compute_extension:keypairs:delete": "", - - "compute_extension:v3:keypairs": "", - "compute_extension:v3:keypairs:index": "", - "compute_extension:v3:keypairs:show": "", - "compute_extension:v3:keypairs:create": "", - "compute_extension:v3:keypairs:delete": "", - "compute_extension:v3:os-lock-server:lock": "", - "compute_extension:v3:os-lock-server:unlock": "", - "compute_extension:v3:os-migrate-server:migrate": "", - "compute_extension:v3:os-migrate-server:migrate_live": "", - "compute_extension:multinic": "", - "compute_extension:v3:os-multinic": "", - "compute_extension:networks": "", - "compute_extension:networks:view": "", - "compute_extension:networks_associate": "", - "compute_extension:os-tenant-networks": "", - "compute_extension:v3:os-pause-server:pause": "", - "compute_extension:v3:os-pause-server:unpause": "", - "compute_extension:v3:os-pci:pci_servers": "", - "compute_extension:v3:os-pci:index": "", - "compute_extension:v3:os-pci:detail": "", - "compute_extension:v3:os-pci:show": "", - "compute_extension:quotas:show": "", - "compute_extension:quotas:update": "", - "compute_extension:quotas:delete": "", - "compute_extension:v3:os-quota-sets:show": "", - "compute_extension:v3:os-quota-sets:update": "", - "compute_extension:v3:os-quota-sets:delete": "", - "compute_extension:v3:os-quota-sets:detail": "", - "compute_extension:rescue": "", - "compute_extension:v3:os-rescue": "", - "compute_extension:security_group_default_rules": "", - "compute_extension:security_groups": "", - "compute_extension:v3:os-security-groups": "", - "compute_extension:server_diagnostics": "", - "compute_extension:v3:os-server-diagnostics": "", - "compute_extension:server_groups": "", - "compute_extension:server_password": "", - "compute_extension:v3:os-server-password": "", - "compute_extension:server_usage": "", - 
"compute_extension:v3:os-server-usage": "", - "compute_extension:services": "", - "compute_extension:v3:os-services": "", - "compute_extension:shelve": "", - "compute_extension:shelveOffload": "", - "compute_extension:v3:os-shelve:shelve": "", - "compute_extension:v3:os-shelve:shelve_offload": "", - "compute_extension:simple_tenant_usage:show": "", - "compute_extension:simple_tenant_usage:list": "", - "compute_extension:unshelve": "", - "compute_extension:v3:os-shelve:unshelve": "", - "compute_extension:v3:os-suspend-server:suspend": "", - "compute_extension:v3:os-suspend-server:resume": "", - "compute_extension:users": "", - "compute_extension:virtual_interfaces": "", - "compute_extension:virtual_storage_arrays": "", - "compute_extension:volumes": "", - "compute_extension:volume_attachments:index": "", - "compute_extension:volume_attachments:show": "", - "compute_extension:volume_attachments:create": "", - "compute_extension:volume_attachments:update": "", - "compute_extension:volume_attachments:delete": "", - "compute_extension:volumetypes": "", - "compute_extension:zones": "", - "compute_extension:availability_zone:list": "", - "compute_extension:v3:os-availability-zone:list": "", - "compute_extension:availability_zone:detail": "is_admin:True", - "compute_extension:v3:os-availability-zone:detail": "is_admin:True", - "compute_extension:used_limits_for_admin": "is_admin:True", - "compute_extension:migrations:index": "is_admin:True", - "compute_extension:v3:os-migrations:index": "is_admin:True", - "compute_extension:os-assisted-volume-snapshots:create": "", - "compute_extension:os-assisted-volume-snapshots:delete": "", - "compute_extension:console_auth_tokens": "is_admin:True", - "compute_extension:v3:os-console-auth-tokens": "is_admin:True", - "compute_extension:os-server-external-events:create": "rule:admin_api", - "compute_extension:v3:os-server-external-events:create": "rule:admin_api", - - "volume:create": "", - "volume:get": "", - "volume:get_all": "", - "volume:get_volume_metadata": "", - "volume:delete": "", - "volume:update": "", - "volume:delete_volume_metadata": "", - "volume:update_volume_metadata": "", - "volume:attach": "", - "volume:detach": "", - "volume:reserve_volume": "", - "volume:unreserve_volume": "", - "volume:begin_detaching": "", - "volume:roll_detaching": "", - "volume:check_attach": "", - "volume:check_detach": "", - "volume:initialize_connection": "", - "volume:terminate_connection": "", - "volume:create_snapshot": "", - "volume:delete_snapshot": "", - "volume:get_snapshot": "", - "volume:get_all_snapshots": "", - - - "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", - "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", - "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_actions:upload_image": "", - "volume_extension:types_manage": "", - "volume_extension:types_extra_specs": "", - - - "network:get_all": "", - "network:get": "", - "network:create": "", - "network:delete": "", - "network:associate": "", - "network:disassociate": "", - "network:get_vifs_by_instance": "", - "network:get_vif_by_mac_address": "", - "network:allocate_for_instance": "", - "network:deallocate_for_instance": "", - "network:validate_networks": "", - "network:get_instance_uuids_by_ip_filter": "", - "network:get_instance_id_by_floating_address": "", - "network:setup_networks_on_host": "", - - "network:get_floating_ip": "", - "network:get_floating_ip_pools": "", - "network:get_floating_ip_by_address": 
"", - "network:get_floating_ips_by_project": "", - "network:get_floating_ips_by_fixed_address": "", - "network:allocate_floating_ip": "", - "network:deallocate_floating_ip": "", - "network:associate_floating_ip": "", - "network:disassociate_floating_ip": "", - "network:release_floating_ip": "", - "network:migrate_instance_start": "", - "network:migrate_instance_finish": "", - - "network:get_fixed_ip": "", - "network:get_fixed_ip_by_address": "", - "network:add_fixed_ip_to_instance": "", - "network:remove_fixed_ip_from_instance": "", - "network:add_network_to_project": "", - "network:get_instance_nw_info": "", - - "network:get_dns_domains": "", - "network:add_dns_entry": "", - "network:modify_dns_entry": "", - "network:delete_dns_entry": "", - "network:get_dns_entries_by_address": "", - "network:get_dns_entries_by_name": "", - "network:create_private_dns_domain": "", - "network:create_public_dns_domain": "", - "network:delete_dns_domain": "" -} -""" diff --git a/rack/tests/policy_fixture.py b/rack/tests/policy_fixture.py deleted file mode 100644 index f79fb6d..0000000 --- a/rack/tests/policy_fixture.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os - -import fixtures -from oslo.config import cfg - -from rack.openstack.common import policy as common_policy -import rack.policy -from rack.tests import fake_policy - -CONF = cfg.CONF - - -class PolicyFixture(fixtures.Fixture): - - def setUp(self): - super(PolicyFixture, self).setUp() - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - policy_file.write(fake_policy.policy_data) - CONF.set_override('policy_file', self.policy_file_name) - rack.policy.reset() - rack.policy.init() - self.addCleanup(rack.policy.reset) - - def set_rules(self, rules): - common_policy.set_rules(common_policy.Rules( - dict((k, common_policy.parse_rule(v)) - for k, v in rules.items()))) - - -class RoleBasedPolicyFixture(fixtures.Fixture): - - def __init__(self, role="admin", *args, **kwargs): - super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs) - self.role = role - - def setUp(self): - """Copy live policy.json file and convert all actions to - allow users of the specified role only - """ - super(RoleBasedPolicyFixture, self).setUp() - policy = json.load(open(CONF.policy_file)) - - # Convert all actions to require specified role - for action, rule in policy.iteritems(): - policy[action] = 'role:%s' % self.role - - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - json.dump(policy, policy_file) - CONF.set_override('policy_file', self.policy_file_name) - rack.policy.reset() - rack.policy.init() - self.addCleanup(rack.policy.reset) diff --git 
a/rack/tests/resourceoperator/__init__.py b/rack/tests/resourceoperator/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/tests/resourceoperator/openstack/__init__.py b/rack/tests/resourceoperator/openstack/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/rack/tests/resourceoperator/openstack/test_keypairs.py b/rack/tests/resourceoperator/openstack/test_keypairs.py deleted file mode 100644 index 89042c0..0000000 --- a/rack/tests/resourceoperator/openstack/test_keypairs.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Unit Tests for rack.resourceoperator.openstack.keypairs -""" - -from oslo.config import cfg - -from rack.resourceoperator import openstack as os_client -from rack.resourceoperator.openstack import keypairs -from rack import test - -import uuid - - -CONF = cfg.CONF - -CREDENTIALS = { - "os_username": "fake", - "os_password": "fake", - "os_tenant_name": "fake", - "os_auth_url": "fake", - "os_region_name": "fake" -} -cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) - - -class FakeKeypairModel(object): - id = uuid.uuid4() - name = "fake_keypair" - private_key = "fake_private_key" - - -class KeypairTestCase(test.NoDBTestCase): - - def setUp(self): - super(KeypairTestCase, self).setUp() - self.keypair_client = keypairs.KeypairAPI() - self.nova = os_client.get_nova_client() - self.mox.StubOutWithMock(self.nova, "keypairs") - self.mox.StubOutWithMock(os_client, "get_nova_client") - os_client.get_nova_client().AndReturn(self.nova) - - def test_keypair_list(self): - fake_keypar1 = FakeKeypairModel() - fake_keypar2 = FakeKeypairModel() - self.nova.keypairs.list().AndReturn([fake_keypar1, - fake_keypar2]) - self.mox.ReplayAll() - - keypair_ids = self.keypair_client.keypair_list() - self.assertEqual(keypair_ids[0], fake_keypar1.id) - self.assertEqual(keypair_ids[1], fake_keypar2.id) - - def test_keypair_show(self): - fake_keypar1 = FakeKeypairModel() - self.nova.keypairs.get(fake_keypar1.id)\ - .AndReturn(fake_keypar1) - self.mox.ReplayAll() - - keypair = self.keypair_client.keypair_show(fake_keypar1.id) - self.assertEqual(keypair, fake_keypar1) - - def test_keypair_create(self): - fake_keypar1 = FakeKeypairModel() - self.nova.keypairs.create(fake_keypar1.name)\ - .AndReturn(fake_keypar1) - self.mox.ReplayAll() - - keypair = self.keypair_client.keypair_create(fake_keypar1.name) - self.assertEqual(keypair["nova_keypair_id"], fake_keypar1.name) - self.assertEqual(keypair["private_key"], fake_keypar1.private_key) - - def test_keypair_delete(self): - fake_keypar1 = FakeKeypairModel() - self.nova.keypairs.delete(fake_keypar1.id) - self.mox.ReplayAll() - - self.keypair_client.keypair_delete(fake_keypar1.id) diff --git a/rack/tests/resourceoperator/openstack/test_networks.py b/rack/tests/resourceoperator/openstack/test_networks.py deleted file mode 100644 index 599128d..0000000 --- a/rack/tests/resourceoperator/openstack/test_networks.py 
+++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from neutronclient.v2_0 import client as neutron_client -from oslo.config import cfg - -from rack import exception -from rack.resourceoperator import openstack as os_client -from rack.resourceoperator.openstack import networks -from rack import test - -CONF = cfg.CONF - -CREDENTIALS = { - "os_username": "fake", - "os_password": "fake", - "os_tenant_name": "fake", - "os_auth_url": "fake", - "os_region_name": "fake" -} -cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) - - -CIDR = "10.0.0.0/24" - - -class NetworkTestCase(test.NoDBTestCase): - - def setUp(self): - super(NetworkTestCase, self).setUp() - self.network_client = networks.NetworkAPI() - self.neutron_mock = self.mox.CreateMock(neutron_client.Client) - self.mox.StubOutWithMock(os_client, "get_neutron_client") - os_client.get_neutron_client().AndReturn(self.neutron_mock) - - def test_network_list(self): - network_list = [{"id": "fake_id1"}, - {"id": "fake_id2"}] - self.neutron_mock.list_networks()\ - .AndReturn({"networks": network_list}) - self.mox.ReplayAll() - - network_ids = self.network_client.network_list() - self.assertEqual(network_ids[0], network_list[0].get("id")) - self.assertEqual(network_ids[1], network_list[1].get("id")) - - def test_network_show(self): - fake_neutron_network_id = "neutron_network_id" - fake_network = {"id": "fake_id"} - self.neutron_mock.show_network(fake_neutron_network_id)\ - .AndReturn(fake_network) - self.mox.ReplayAll() - - network = self.network_client.network_show(fake_neutron_network_id) - self.assertEqual(network, fake_network) - - def test_network_create_only_essential_items(self): - fake_neutron_network_id = "fake_neutron_network_id" - fake_subunet_id = "fake_subnet_id" - fake_name = "fake_name" - fake_cidr = "fake_cidr" - fake_network = {"network": {"id": fake_neutron_network_id}} - self.neutron_mock.create_network({"network": {"name": fake_name}})\ - .AndReturn(fake_network) - fake_subnet_body = {"subnet": {"network_id": fake_neutron_network_id, - "ip_version": 4, - "cidr": fake_cidr}} - fake_subnet = {"subnet": {"id": fake_subunet_id}} - self.neutron_mock.create_subnet(fake_subnet_body)\ - .AndReturn(fake_subnet) - self.mox.ReplayAll() - - network = self.network_client.network_create(fake_name, - fake_cidr) - self.assertEqual( - network["neutron_network_id"], fake_neutron_network_id) - - def test_network_create_all_arguments(self): - fake_neutron_network_id = "fake_neutron_network_id" - fake_subunet_id = "fake_subnet_id" - fake_name = "fake_name" - fake_cidr = "fake_cidr" - fake_gateway = "fake_gateway" - fake_ext_router = "fake_ext_router" - fake_dns_nameservers = "fake_dns_nameservers" - fake_network = {"network": {"id": fake_neutron_network_id}} - - self.neutron_mock.create_network({"network": {"name": fake_name}})\ - .AndReturn(fake_network) - - fake_subnet_body = {"subnet": { - "network_id": fake_neutron_network_id, - "ip_version": 4, - 
"cidr": fake_cidr, - "gateway_ip": fake_gateway, - "dns_nameservers": fake_dns_nameservers}} - fake_subnet = {"subnet": {"id": fake_subunet_id}} - self.neutron_mock.create_subnet(fake_subnet_body)\ - .AndReturn(fake_subnet) - - fake_router_body = {"subnet_id": fake_subunet_id} - self.neutron_mock.add_interface_router(fake_ext_router, - fake_router_body) - self.mox.ReplayAll() - - network = self.network_client.network_create(fake_name, - fake_cidr, - fake_gateway, - fake_ext_router, - fake_dns_nameservers) - self.assertEqual( - network["neutron_network_id"], fake_neutron_network_id) - - def test_network_create_exception_create_subnet_faild(self): - fake_neutron_network_id = "fake_neutron_network_id" - fake_subunet_id = "fake_subnet_id" - fake_name = "fake_name" - fake_cidr = "fake_cidr" - fake_gateway = "fake_gateway" - fake_ext_router = "fake_ext_router" - fake_dns_nameservers = "fake_dns_nameservers" - fake_network = {"network": {"id": fake_neutron_network_id}} - - self.neutron_mock.create_network({"network": {"name": fake_name}})\ - .AndReturn(fake_network) - - fake_subnet_body = {"subnet": { - "network_id": fake_neutron_network_id, - "ip_version": 4, - "cidr": fake_cidr, - "gateway_ip": fake_gateway, - "dns_nameservers": fake_dns_nameservers}} - fake_subnet = {"subnet": {"id": fake_subunet_id}} - self.neutron_mock.create_subnet(fake_subnet_body)\ - .AndReturn(fake_subnet) - - fake_router_body = {"subnet_id": fake_subunet_id} - self.neutron_mock.add_interface_router(fake_ext_router, - fake_router_body)\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - - self.neutron_mock.delete_network(fake_neutron_network_id) - self.mox.ReplayAll() - - try: - self.network_client.network_create(fake_name, - fake_cidr, - fake_gateway, - fake_ext_router, - fake_dns_nameservers) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_network_create_exception_add_interface_router_faild(self): - fake_neutron_network_id = "fake_neutron_network_id" - fake_name = "fake_name" - fake_cidr = "fake_cidr" - fake_gateway = "fake_gateway" - fake_ext_router = "fake_ext_router" - fake_dns_nameservers = "fake_dns_nameservers" - fake_network = {"network": {"id": fake_neutron_network_id}} - - self.neutron_mock.create_network({"network": {"name": fake_name}})\ - .AndReturn(fake_network) - - fake_subnet_body = {"subnet": { - "network_id": fake_neutron_network_id, - "ip_version": 4, - "cidr": fake_cidr, - "gateway_ip": fake_gateway, - "dns_nameservers": fake_dns_nameservers}} - self.neutron_mock.create_subnet(fake_subnet_body)\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - - self.neutron_mock.delete_network(fake_neutron_network_id) - self.mox.ReplayAll() - - try: - self.network_client.network_create(fake_name, - fake_cidr, - fake_gateway, - fake_ext_router, - fake_dns_nameservers) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_network_delete_ext_router_none(self): - fake_neutron_network_id = "neutron_network_id" - fake_subnets = ["subnet1", "subnet2"] - fake_network = {"network": {"subnets": fake_subnets}} - fake_ext_router = "fake_ext_router" - self.neutron_mock.show_network(fake_neutron_network_id)\ - .AndReturn(fake_network) - self.neutron_mock.remove_interface_router( - fake_ext_router, {"subnet_id": fake_subnets[0]}) - self.neutron_mock.remove_interface_router( - fake_ext_router, {"subnet_id": fake_subnets[1]}) - self.neutron_mock.delete_network(fake_neutron_network_id) - 
self.mox.ReplayAll() - self.network_client.network_delete(fake_neutron_network_id, - fake_ext_router) diff --git a/rack/tests/resourceoperator/openstack/test_processes.py b/rack/tests/resourceoperator/openstack/test_processes.py deleted file mode 100644 index d9507d3..0000000 --- a/rack/tests/resourceoperator/openstack/test_processes.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from mox import IsA -from oslo.config import cfg -from rack.resourceoperator import openstack as os_client -from rack import test - -import uuid - -from rack.resourceoperator.openstack import processes - - -class fake_process(object): - pass - - -class ProcessesTest(test.NoDBTestCase): - - def setUp(self): - super(ProcessesTest, self).setUp() - cfg.CONF.os_username = "os_username" - cfg.CONF.os_password = "os_password" - cfg.CONF.os_tenant_name = "os_tenant_name" - cfg.CONF.os_auth_url = "os_auth_url" - cfg.CONF.os_region_name = "os_region_name" - - self.process_client = processes.ProcessesAPI() - self.nova = os_client.get_nova_client() - self.mox.StubOutWithMock(self.nova, "servers") - self.mox.StubOutWithMock(os_client, "get_nova_client") - os_client.get_nova_client().AndReturn(self.nova) - self.process_id = unicode(uuid.uuid4()) - - def test_process_list(self): - - class _dummy(object): - - def __init__(self, id, status, addresses): - self.id = id - self.status = status - self.addresses = addresses - - mock_list = [] - address1 = [ - {"OS-EXT-IPS:type": "fixed", "addr": "0.0.0.0"}, - {"OS-EXT-IPS:type": "floating", "addr": "0.0.0.1"}] - address2 = [ - {"OS-EXT-IPS:type": "fixed", "addr": "0.0.0.3"}] - addresses = {} - addresses.update(network_id_1=address1) - addresses.update(network_id_2=address2) - mock_list.append(_dummy("1", "ACTIVE1", addresses)) - mock_list.append(_dummy("2", "ACTIVE2", addresses)) - self.nova.servers.list().AndReturn(mock_list) - self.mox.ReplayAll() - - expect_net = [ - {"display_name": "network_id_2", - "fixed": "0.0.0.3"}, - {"display_name": "network_id_1", - "fixed": "0.0.0.0"}, - {"display_name": "network_id_1", - "floating": "0.0.0.1"}] - - expect = [ - {"nova_instance_id": "1", "status": "ACTIVE1", - "networks": expect_net}, - {"nova_instance_id": "2", "status": "ACTIVE2", - "networks": expect_net}] - actual = self.process_client.process_list() - - expect.sort() - [x["networks"].sort() for x in expect] - actual.sort() - [x["networks"].sort() for x in actual] - self.assertEqual(expect, actual) - - def test_process_show(self): - - class _dummy(object): - - def __init__(self, id, status, addresses): - self.id = id - self.status = status - self.addresses = addresses - - address1 = [ - {"OS-EXT-IPS:type": "fixed", "addr": "0.0.0.0"}, - {"OS-EXT-IPS:type": "floating", "addr": "0.0.0.1"}] - address2 = [ - {"OS-EXT-IPS:type": "fixed", "addr": "0.0.0.3"}] - addresses = {} - addresses.update(network_id_1=address1) - addresses.update(network_id_2=address2) - self.nova.servers.get(IsA(str)).AndReturn( 
- _dummy("1", "ACTIVE1", addresses)) - self.mox.ReplayAll() - - expect_net = [ - {"display_name": "network_id_2", - "fixed": "0.0.0.3"}, - {"display_name": "network_id_1", - "fixed": "0.0.0.0"}, - {"display_name": "network_id_1", - "floating": "0.0.0.1"}] - - expect = { - "status": "ACTIVE1", - "networks": expect_net} - actual = self.process_client.process_show("id") - expect["networks"].sort() - actual["networks"].sort() - self.assertEqual(expect, actual) - - def test_process_create(self): - display_name = "display_name" - glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689" - nova_flavor_id = "3" - nova_keypair_id = "test" - neutron_securitygroup_ids = [ - "947dc616-e737-4cb9-b816-52ad80cb9e37", - "1892987f-3874-46ef-a487-fb8e925210ce"] - neutron_network_ids = [ - "a3c6488a-a236-46f7-aab6-8f1fe91ad9ef", - "43015163-babe-4bee-8fe8-38470d28b2a2"] - metadata = {"metadata": "metadata"} - userdata = "userdata" - process_build = fake_process() - process_build.status = "BUILD" - process_build.id = self.process_id - nics = [] - for network_id in neutron_network_ids: - nics.append(network_id) - - self.nova.servers.create(name=display_name, - image=glance_image_id, - flavor=nova_flavor_id, - meta=metadata, - userdata=userdata, - nics=nics, - key_name=nova_keypair_id, - security_groups=neutron_securitygroup_ids)\ - .AndReturn(process_build) - process_active = fake_process() - process_active.status = "ACTIVE" - process_active.id = self.process_id - self.mox.ReplayAll() - - process_id = self.process_client.process_create( - display_name, - nova_keypair_id, - neutron_securitygroup_ids, - glance_image_id, - nova_flavor_id, - userdata, - metadata, - neutron_network_ids) - self.assertEqual(process_id, (self.process_id, "BUILD")) - - def test_process_delete(self): - self.nova.servers.delete(self.process_id) - self.mox.ReplayAll() - self.process_client.process_delete(self.process_id) - - def test_get_process_address(self): - - class _dummy(object): - - def __init__(self): - self.addresses = {"key": [ - {"OS-EXT-IPS:type": "fixed", "addr": "ip_data1"}, - {"OS-EXT-IPS:type": "fixed", "addr": "ip_data2"}]} - - self.nova.servers.get(self.process_id).AndReturn(_dummy()) - self.mox.ReplayAll() - expect = "ip_data1,ip_data2" - actual = self.process_client.get_process_address(self.process_id) - self.assertEqual(actual, expect) diff --git a/rack/tests/resourceoperator/openstack/test_securitygroups.py b/rack/tests/resourceoperator/openstack/test_securitygroups.py deleted file mode 100644 index e3e42d6..0000000 --- a/rack/tests/resourceoperator/openstack/test_securitygroups.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Unit Tests for rack.resourceoperator.openstack.securitygroups -""" -from oslo.config import cfg - -from rack import exception -from rack.resourceoperator import openstack as os_client -from rack.resourceoperator.openstack import securitygroups -from rack import test - -CONF = cfg.CONF - -CREDENTIALS = { - "os_username": "fake", - "os_password": "fake", - "os_tenant_name": "fake", - "os_auth_url": "fake", - "os_region_name": "fake" -} -cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) - - -def fake_securitygroup(): - return {"security_group": fake_securitygroup_list()[0]} - - -def fake_securitygroup_list(): - return [{"id": "neutron_securitygroup_id"}, - {"id": "neutron_securitygroup_id"}] - - -class SecuritygroupTestCase(test.NoDBTestCase): - - def setUp(self): - super(SecuritygroupTestCase, self).setUp() - self.securitygroup_client = securitygroups.SecuritygroupAPI() - self.neutron = os_client.get_neutron_client() - self.mox.StubOutWithMock(self.neutron, "list_security_groups") - self.mox.StubOutWithMock(self.neutron, "show_security_group") - self.mox.StubOutWithMock(self.neutron, "create_security_group") - self.mox.StubOutWithMock(self.neutron, "create_security_group_rule") - self.mox.StubOutWithMock(self.neutron, "delete_security_group") - self.mox.StubOutWithMock(os_client, "get_neutron_client") - os_client.get_neutron_client().AndReturn(self.neutron) - - def test_securitygroup_list(self): - fake_securitygroups = {"security_groups": fake_securitygroup_list()} - self.neutron.list_security_groups()\ - .AndReturn(fake_securitygroups) - self.mox.ReplayAll() - - neutron_securitygroup_ids = self.securitygroup_client.\ - securitygroup_list() - for neutron_securitygroup_id in neutron_securitygroup_ids: - self.assertEqual(neutron_securitygroup_id, - "neutron_securitygroup_id") - - def test_securitygroup_get(self): - fake_security_group = fake_securitygroup() - fake_security_group_id = fake_security_group["security_group"]["id"] - self.neutron.show_security_group(fake_security_group_id)\ - .AndReturn(fake_security_group) - self.mox.ReplayAll() - - security_group_id = self.securitygroup_client.securitygroup_get( - fake_security_group_id) - self.assertEqual(security_group_id, fake_security_group_id) - - def test_securitygroup_create(self): - fake_name = "fake_name" - fake_securitygroup_id = "neutron_securitygroup_id" - fake_sec_body = {'security_group': {'name': fake_name}} - fake_sec_rule_body = {"security_group_rule": { - "direction": "ingress", - "ethertype": "IPv4", - "security_group_id": fake_securitygroup_id, - "protocol": "tcp", - "port_range_min": "80", - "port_range_max": "80"}} - self.neutron.create_security_group( - fake_sec_body).AndReturn(fake_securitygroup()) - self.neutron.create_security_group_rule(fake_sec_rule_body) - self.mox.ReplayAll() - - fake_rules = [{"protocol": "tcp", - "port_range_max": "80"}] - sec_group_dict = self.securitygroup_client.securitygroup_create( - fake_name, fake_rules) - self.assertEqual( - sec_group_dict, {"neutron_securitygroup_id": - fake_securitygroup_id}) - - def test_securitygroup_create_all_arguments(self): - fake_name = "fake_name" - fake_securitygroup_id = "neutron_securitygroup_id" - fake_sec_body = {'security_group': {'name': fake_name}} - fake_sec_rule_body1 = { - "security_group_rule": { - "direction": "ingress", - "ethertype": "IPv4", - "security_group_id": fake_securitygroup_id, - "protocol": "tcp", - "port_range_min": "80", - "port_range_max": "80", - "remote_group_id": "remote_neutron_securitygroup_id"}} - 
fake_sec_rule_body2 = {"security_group_rule": { - "direction": "ingress", - "ethertype": "IPv4", - "security_group_id": fake_securitygroup_id, - "protocol": "tcp", - "port_range_min": "80", - "port_range_max": "80", - "remote_ip_prefix": "remote_ip_prefix"}} - self.neutron.create_security_group( - fake_sec_body).AndReturn(fake_securitygroup()) - self.neutron.create_security_group_rule(fake_sec_rule_body1) - self.neutron.create_security_group_rule(fake_sec_rule_body2) - self.mox.ReplayAll() - - fake_rules = [ - {"protocol": "tcp", - "port_range_max": "80", - "remote_neutron_securitygroup_id": - "remote_neutron_securitygroup_id"}, - {"protocol": "tcp", - "port_range_max": "80", - "remote_ip_prefix": "remote_ip_prefix"}] - sec_group_dict = self.securitygroup_client.securitygroup_create( - fake_name, fake_rules) - self.assertEqual( - sec_group_dict, {"neutron_securitygroup_id": - fake_securitygroup_id}) - - def test_securitygroup_create_exception_securitygroup_rule_create_failed( - self): - fake_name = "fake_name" - fake_securitygroup_id = "neutron_securitygroup_id" - fake_sec_body = {'security_group': {'name': fake_name}} - fake_sec_rule_body = { - "security_group_rule": {"direction": "ingress", - "ethertype": "IPv4", - "security_group_id": fake_securitygroup_id, - "protocol": "tcp", - "port_range_min": "80", - "port_range_max": "80"}} - self.neutron.create_security_group( - fake_sec_body).AndReturn(fake_securitygroup()) - self.neutron.create_security_group_rule(fake_sec_rule_body)\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - self.neutron.delete_security_group(fake_securitygroup_id) - self.mox.ReplayAll() - - fake_rules = [{"protocol": "tcp", - "port_range_max": "80"}] - try: - self.securitygroup_client.securitygroup_create( - fake_name, fake_rules) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_securitygroup_delete(self): - fake_security_group = fake_securitygroup() - fake_security_group_id = fake_security_group["security_group"]["id"] - self.neutron.delete_security_group(fake_security_group_id) - self.mox.ReplayAll() - - self.securitygroup_client.securitygroup_delete(fake_security_group_id) diff --git a/rack/tests/resourceoperator/test_manager.py b/rack/tests/resourceoperator/test_manager.py deleted file mode 100644 index 0a50a7f..0000000 --- a/rack/tests/resourceoperator/test_manager.py +++ /dev/null @@ -1,833 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-from oslo.config import cfg -import uuid - -from neutronclient.common import exceptions as neutron_exceptions - -from rack import context -from rack import exception -from rack.resourceoperator import manager as operator_manager -from rack import test - -CONF = cfg.CONF - -GID = unicode(uuid.uuid4()) -KEYPAIR_ID = unicode(uuid.uuid4()) -NETWORK_ID = unicode(uuid.uuid4()) -NEUTRON_NETWORK_ID = unicode(uuid.uuid4()) - -NOVA_INSTANCE_ID = unicode(uuid.uuid4()) - - -def fake_keypair_create(name): - return { - "nova_keypair_id": name, - "name": name - } - - -def fake_keypair_list(): - return ["nova_keypair_ids1", "nova_keypair_ids2"] - - -def fake_securitygrouprule_create(neutron_securitygroup_id, protocol, - port_range_min=None, port_range_max=None, - remote_neutron_securitygroup_id=None, - remote_ip_prefix=None, - direction="ingress", ethertype="IPv4"): - pass - - -def fake_network_create(): - return NEUTRON_NETWORK_ID - - -class ResourceOperatorManagerKeypairTestCase(test.NoDBTestCase): - - def setUp(self): - super(ResourceOperatorManagerKeypairTestCase, self).setUp() - self.manager = operator_manager.ResourceOperator() - self.context = context.RequestContext('fake_user', 'fake_project') - - def test_keypair_list(self): - self.stubs.Set( - self.manager.keypair_client, "keypair_list", fake_keypair_list) - self.mox.ReplayAll() - - test_keypairs = [{"nova_keypair_id": "nova_keypair_ids1"}, - {"nova_keypair_id": "nova_keypair_ids2"}] - keypairs = self.manager.keypair_list(self.context, test_keypairs) - for keypair in keypairs: - self.assertEqual(keypair["status"], "Exist") - - def test_keypair_list_not_exist(self): - self.stubs.Set( - self.manager.keypair_client, "keypair_list", fake_keypair_list) - self.mox.ReplayAll() - - test_keypairs = [{"nova_keypair_id": "fake"}, - {"nova_keypair_id": "fake"}] - keypairs = self.manager.keypair_list(self.context, test_keypairs) - for keypair in keypairs: - self.assertEqual(keypair["status"], "NotExist") - - def test_keypair_list_exception_OpenStackException(self): - self.mox.StubOutWithMock( - self.manager.keypair_client, "keypair_list") - self.manager.keypair_client.keypair_list()\ - .AndRaise(exception.OpenStackException(400, "fake")) - self.mox.ReplayAll() - - test_keypairs = [{"nova_keypair_id": "nova_keypair_ids1"}, - {"nova_keypair_id": "nova_keypair_ids2"}] - try: - self.manager.keypair_list(self.context, test_keypairs) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake") - - def test_keypair_show(self): - self.mox.StubOutWithMock( - self.manager.keypair_client, "keypair_show") - self.manager.keypair_client.keypair_show("fake") - self.mox.ReplayAll() - - test_keypair = {"nova_keypair_id": "fake"} - self.manager.keypair_show(self.context, test_keypair) - self.assertEqual(test_keypair["status"], "Exist") - - def test_keypair_show_status_not_exist(self): - self.mox.StubOutWithMock( - self.manager.keypair_client, "keypair_show") - self.manager.keypair_client.keypair_show("fake")\ - .AndRaise(exception.OpenStackException(404, "")) - self.mox.ReplayAll() - - test_keypair = {"nova_keypair_id": "fake"} - self.manager.keypair_show(self.context, test_keypair) - self.assertEqual(test_keypair["status"], "NotExist") - - def test_keypair_show_exception(self): - self.mox.StubOutWithMock( - self.manager.keypair_client, "keypair_show") - self.manager.keypair_client.keypair_show("fake")\ - .AndRaise(exception.OpenStackException(405, "fake_msg")) - self.mox.ReplayAll() - - test_keypair = 
{"nova_keypair_id": "fake"} - try: - self.manager.keypair_show(self.context, test_keypair) - except Exception as e: - self.assertEqual(e.code, 405) - self.assertEqual(e.message, "fake_msg") - - def test_keypair_create(self): - self.mox.StubOutWithMock( - self.manager.keypair_client, "keypair_create") - self.manager.keypair_client.keypair_create("fake_keypair")\ - .AndReturn({}) - self.mox.ReplayAll() - - self.manager.keypair_create( - self.context, "fake_keypair") - - def test_keypair_create_exception(self): - self.mox.StubOutWithMock( - self.manager.keypair_client, "keypair_create") - self.manager.keypair_client.keypair_create("fake_keypair")\ - .AndRaise(exception.OpenStackException(405, "fake_msg")) - self.mox.ReplayAll() - - try: - self.manager.keypair_create(self.context, - "fake_keypair") - except Exception as e: - self.assertEqual(e.code, 405) - self.assertEqual(e.message, "fake_msg") - - def test_keypair_delete(self): - self.mox.StubOutWithMock(self.manager.keypair_client, "keypair_delete") - self.manager.keypair_client.keypair_delete("fake_keypair") - self.mox.ReplayAll() - - self.manager.keypair_delete(self.context, "fake_keypair") - - def test_keypair_delete_exception_404(self): - self.mox.StubOutWithMock(self.manager.keypair_client, "keypair_delete") - self.manager.keypair_client.keypair_delete("fake_keypair")\ - .AndRaise(exception.OpenStackException(404, "fake_msg")) - self.mox.ReplayAll() - - self.manager.keypair_delete(self.context, "fake_keypair") - - def test_keypair_delete_exception_not_404(self): - self.mox.StubOutWithMock(self.manager.keypair_client, "keypair_delete") - self.manager.keypair_client.keypair_delete("fake_keypair")\ - .AndRaise(exception.OpenStackException(405, "fake_msg")) - self.mox.ReplayAll() - - try: - self.manager.keypair_delete(self.context, "fake_keypair") - except Exception as e: - self.assertEqual(e.code, 405) - self.assertEqual(e.message, "fake_msg") - - -class ResourceOperatorManagerNetworkTestCase(test.NoDBTestCase): - - def setUp(self): - super(ResourceOperatorManagerNetworkTestCase, self).setUp() - self.exception = neutron_exceptions.\ - NeutronClientException(status_code=400, message="fake_msg") - self.manager = operator_manager.ResourceOperator() - self.context = context.RequestContext('fake_user', 'fake_project') - - def test_network_list_exist(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_list") - self.manager.network_client.network_list()\ - .AndReturn(["fake_neutron_network_id1", - "fake_neutron_network_id2"]) - self.mox.ReplayAll() - - fake_networks = [{"neutron_network_id": "fake_neutron_network_id1"}, - {"neutron_network_id": "fake_neutron_network_id2"}] - networks = self.manager.network_list(self.context, - fake_networks) - for network in networks: - self.assertEqual(network["status"], "Exist") - - def test_network_list_not_exist(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_list") - self.manager.network_client.network_list()\ - .AndReturn(["fake_neutron_network_id3"]) - self.mox.ReplayAll() - fake_networks = [ - {"neutron_network_id": "fake_neutron_network_id1"}, - {"neutron_network_id": "fake_neutron_network_id2"}] - networks = self.manager.network_list(self.context, - fake_networks) - for network in networks: - self.assertEqual(network["status"], "NotExist") - - def test_network_list_exception_network_list_faild(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_list") - self.manager.network_client.network_list()\ - .AndRaise(self.exception) - 
self.mox.ReplayAll() - - fake_networks = [{"neutron_network_id": "fake_neutron_network_id1"}, - {"neutron_network_id": "fake_neutron_network_id2"}] - try: - self.manager.network_list(self.context, fake_networks) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_network_show_exist(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_show") - self.manager.network_client.network_show(NEUTRON_NETWORK_ID) - self.mox.ReplayAll() - - fake_network = {"neutron_network_id": NEUTRON_NETWORK_ID} - self.manager.network_show(self.context, - fake_network) - self.assertEqual(fake_network["status"], "Exist") - - def test_network_show_not_exist(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_show") - self.exception.status_code = 404 - self.manager.network_client.network_show(NEUTRON_NETWORK_ID)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - fake_network = {"neutron_network_id": NEUTRON_NETWORK_ID} - self.manager.network_show(self.context, - fake_network) - self.assertEqual(fake_network["status"], "NotExist") - - def test_network_show_exception_network_show_faild(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_show") - self.manager.network_client.network_show(NEUTRON_NETWORK_ID)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - fake_network = {"neutron_network_id": NEUTRON_NETWORK_ID} - try: - self.manager.network_show(self.context, - fake_network) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_network_create(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_create") - name = "fake_nat" - cidr = "10.0.0.0/24" - gateway = "10.0.0.1" - ext_router = uuid.uuid4() - dns_nameservers = ["8.8.8.8"] - expected_values = {"neutron_network_id": NEUTRON_NETWORK_ID} - self.manager.network_client.network_create(name, - cidr, - gateway, - ext_router, - dns_nameservers)\ - .AndReturn(expected_values) - self.mox.ReplayAll() - - network = self.manager.network_create(self.context, - name, - cidr, - gateway, - ext_router, - dns_nameservers) - self.assertEqual(network, expected_values) - - def test_network_create_exception_create_faild(self): - self.mox.StubOutWithMock(self.manager.network_client, "network_create") - name = "fake_nat" - cidr = "10.0.0.0/24" - gateway = "10.0.0.1" - ext_router = "fake_ext_router" - dns_nameservers = ["8.8.8.8"] - self.manager.network_client.network_create(name, - cidr, - gateway, - ext_router, - dns_nameservers)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - try: - self.manager.network_create(self.context, - name, - cidr, - gateway, - ext_router, - dns_nameservers) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_network_delete(self): - ext_router = "fake_ext_router" - self.mox.StubOutWithMock(self.manager.network_client, - "network_delete") - self.manager.network_client.network_delete(NEUTRON_NETWORK_ID, - ext_router) - self.mox.ReplayAll() - - self.manager.network_delete(self.context, - NEUTRON_NETWORK_ID, - ext_router) - - def test_delete_network_exception_is_not404(self): - ext_router = "fake_ext_router" - self.mox.StubOutWithMock(self.manager.network_client, - "network_delete") - self.manager.network_client.\ - network_delete(NEUTRON_NETWORK_ID, - ext_router)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - try: - self.manager.network_delete(self.context, - NEUTRON_NETWORK_ID, - 
ext_router) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_delete_network_exception_404(self): - ext_router = "fake_ext_router" - self.mox.StubOutWithMock(self.manager.network_client, - "network_delete") - self.exception.status_code = 404 - self.manager.network_client.\ - network_delete(NEUTRON_NETWORK_ID, - ext_router)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - self.manager.network_delete(self.context, - NEUTRON_NETWORK_ID, - ext_router) - - -class ResourceOperatorManagerSecuritygroupTestCase(test.NoDBTestCase): - - def setUp(self): - super(ResourceOperatorManagerSecuritygroupTestCase, self).setUp() - self.manager = operator_manager.ResourceOperator() - self.exception = neutron_exceptions.\ - NeutronClientException(status_code=400, message="fake_msg") - self.context = context.RequestContext('fake_user', 'fake_project') - self.name = "securitygroup_name" - - def _securitygroups(self): - return [ - { - "protocol": "tcp", - "port_range_min": None, - "port_range_max": "80", - "remote_neutron_securitygroup_id": None, - "remote_ip_prefix": "192.168.1.1/32", - }, - { - "protocol": "tcp", - "port_range_min": "1", - "port_range_max": "80", - "remote_neutron_securitygroup_id": "fake", - "remote_ip_prefix": None, - }, - ] - - def test_securitygroup_create(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_create") - self.manager.securitygroup_client.\ - securitygroup_create(self.name, - self._securitygroups())\ - .AndReturn("neutron_securitygroup_id") - self.mox.ReplayAll() - - securitygroup = self.manager.securitygroup_create( - self.context, - self.name, - self._securitygroups()) - self.assertEqual(securitygroup, "neutron_securitygroup_id") - - def test_securitygroup_create_exception_securitygroup_create_faild(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_create") - self.manager.securitygroup_client.\ - securitygroup_create(self.name, - self._securitygroups())\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - try: - self.manager.securitygroup_create( - self.context, - self.name, - self ._securitygroups()) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_securitygroup_list_exist(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_list") - self.manager.securitygroup_client.\ - securitygroup_list().AndReturn(["neutron_securitygroup_id1", - "neutron_securitygroup_id2"]) - self.mox.ReplayAll() - - fake_securitygroups = [ - {"neutron_securitygroup_id": "neutron_securitygroup_id1"}, - {"neutron_securitygroup_id": "neutron_securitygroup_id1"}] - securitygroups = self.manager.securitygroup_list(self.context, - fake_securitygroups) - for securitygroup in securitygroups: - self.assertEqual(securitygroup["status"], "Exist") - - def test_securitygroup_list_not_exist(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_list") - self.manager.securitygroup_client.\ - securitygroup_list().AndReturn([]) - self.mox.ReplayAll() - - fake_securitygroups = [ - {"neutron_securitygroup_id": "neutron_securitygroup_id1"}, - {"neutron_securitygroup_id": "neutron_securitygroup_id1"}] - securitygroups = self.manager.securitygroup_list(self.context, - fake_securitygroups) - for securitygroup in securitygroups: - self.assertEqual(securitygroup["status"], "NotExist") - - def test_securitygroup_list_not_exception(self): - 
self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_list") - self.manager.securitygroup_client.\ - securitygroup_list().AndRaise(self.exception) - self.mox.ReplayAll() - - fake_securitygroups = [ - {"neutron_securitygroup_id": "neutron_securitygroup_id1"}, - {"neutron_securitygroup_id": "neutron_securitygroup_id1"}] - try: - self.manager.securitygroup_list( - self.context, - fake_securitygroups) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_securitygroup_show_exist(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_get") - fake_neutron_securitygroup_id = "neutron_securitygroup_id1" - self.manager.securitygroup_client.\ - securitygroup_get(fake_neutron_securitygroup_id) - self.mox.ReplayAll() - - fake_securitygroup = { - "neutron_securitygroup_id": fake_neutron_securitygroup_id} - self.manager.securitygroup_show(self.context, - fake_securitygroup) - self.assertEqual(fake_securitygroup["status"], "Exist") - - def test_securitygroup_show_not_exist(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_get") - fake_neutron_securitygroup_id = "neutron_securitygroup_id1" - self.exception.status_code = 404 - self.manager.securitygroup_client.\ - securitygroup_get("neutron_securitygroup_id1")\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - fake_securitygroup = { - "neutron_securitygroup_id": fake_neutron_securitygroup_id} - self.manager.securitygroup_show(self.context, - fake_securitygroup) - self.assertEqual(fake_securitygroup["status"], "NotExist") - - def test_securitygroup_show_exception_securitygroup_show_failed(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_get") - fake_neutron_securitygroup_id = "neutron_securitygroup_id1" - self.manager.securitygroup_client.\ - securitygroup_get("neutron_securitygroup_id1")\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - fake_securitygroup = { - "neutron_securitygroup_id": fake_neutron_securitygroup_id} - try: - self.manager.securitygroup_show(self.context, - fake_securitygroup) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_securitygroup_delete(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_delete") - fake_neutron_securitygroup_id = "neutron_securitygroup_id1" - self.manager.securitygroup_client.\ - securitygroup_delete(fake_neutron_securitygroup_id) - self.mox.ReplayAll() - - self.manager.securitygroup_delete( - self.context, - fake_neutron_securitygroup_id) - - def test_securitygroup_delete_exception_404(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_delete") - fake_neutron_securitygroup_id = "neutron_securitygroup_id1" - self.exception.status_code = 404 - self.manager.securitygroup_client.\ - securitygroup_delete(fake_neutron_securitygroup_id)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - self.manager.securitygroup_delete( - self.context, - fake_neutron_securitygroup_id) - - def test_securitygroup_delete_exception_securitygroup_delete_faild(self): - self.mox.StubOutWithMock(self.manager.securitygroup_client, - "securitygroup_delete") - fake_neutron_securitygroup_id = "neutron_securitygroup_id1" - self.manager.securitygroup_client.\ - securitygroup_delete(fake_neutron_securitygroup_id)\ - .AndRaise(self.exception) - self.mox.ReplayAll() - - try: - self.manager.securitygroup_delete( - 
self.context, - fake_neutron_securitygroup_id) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - -class ResourceOperatorManagerProcessesTestCase(test.NoDBTestCase): - - def setUp(self): - super(ResourceOperatorManagerProcessesTestCase, self).setUp() - self.manager = operator_manager.ResourceOperator() - self.context = context.RequestContext('fake_user', 'fake_project') - - def test_processes_list(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_list") - fake_processes = [{"nova_instance_id": "nova_instance_id1", - "status": "Active", - "networks": [{ - "display_name": "00", - "network_id": "1", - "fixed": "0.0.0.1"}]}, - {"nova_instance_id": "nova_instance_id2", - "status": "Active", - "networks": [{ - "display_name": "01", - "network_id": "2", - "fixed": "0.0.0.2"}]}] - self.manager.process_client.process_list().AndReturn(fake_processes) - self.mox.ReplayAll() - - processes = self.manager.process_list(self.context, - fake_processes) - for i, process in enumerate(processes): - self.assertEqual(process, fake_processes[i]) - - def test_processes_list_not_exist(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_list") - fake_processes = [{"nova_instance_id": "nova_instance_id1", - "status": "Active", - "networks": [{ - "display_name": "00", - "network_id": "1", - "fixed": "0.0.0.1"}]}, - {"nova_instance_id": "nova_instance_id2", - "status": "Active", - "networks": [{ - "display_name": "01", - "network_id": "2", - "fixed": "0.0.0.2"}]}] - - expect = [{"nova_instance_id": "nova_instance_id", - "status": "NotExist", - "networks": [{ - "display_name": "00", - "network_id": "1", - "fixed": "0.0.0.1"}]}, - {"nova_instance_id": "nova_instance_id2", - "status": "Active", - "networks": [{ - "display_name": "01", - "network_id": "2", - "fixed": "0.0.0.2"}]}] - - self.manager.process_client.process_list().AndReturn(fake_processes) - self.mox.ReplayAll() - actual = self.manager.process_list(self.context, expect) - self.assertEqual(expect, actual) - - def test_processes_list_exception(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_list") - fake_processes = [{"nova_instance_id": "nova_instance_id1", - "status": "Active"}, - {"nova_instance_id": "nova_instance_id2", - "status": "Active"}] - self.manager.process_client.process_list()\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - self.mox.ReplayAll() - - try: - self.manager.process_list(self.context, fake_processes) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_process_show(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_show") - fake_processes = {"nova_instance_id": "nova_instance_id1", - "status": "Active", - "networks": [{ - "display_name": "00", - "network_id": "1", - "fixed": "0.0.0.1"}]} - - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.process_show( - fake_nova_instance_id).AndReturn(fake_processes) - self.mox.ReplayAll() - - input = {"nova_instance_id": fake_nova_instance_id, - "networks": [{ - "display_name": "00", - "network_id": "1", - "fixed": "0.0.0.1"}]} - self.manager.process_show(self.context, input) - self.assertEqual(input, fake_processes) - - def test_process_show_exception_404(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_show") - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.process_show(fake_nova_instance_id)\ - 
.AndRaise(exception.OpenStackException(404, "fake_msg")) - self.mox.ReplayAll() - - process = {"nova_instance_id": fake_nova_instance_id} - self.manager.process_show(self.context, process) - self.assertEqual(process["status"], "NotExist") - - def test_process_show_exception_not_404(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_show") - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.process_show(fake_nova_instance_id)\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - self.mox.ReplayAll() - - process = {"nova_instance_id": fake_nova_instance_id} - try: - self.manager.process_show(self.context, process) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_process_create(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_create") - fake_name = "fake_name" - fake_key_name = "fake_key_name" - face_security_groups = ["security_group_id1"] - fake_image = "fake_image" - fake_flavor = "fake_flavor" - fake_userdata = "fake_userdata" - fake_meta = "fake_meta" - fake_nics = "fake_nics" - fake_process = {"pid": "fake_pid"} - self.manager.process_client.process_create(fake_name, - fake_key_name, - face_security_groups, - fake_image, - fake_flavor, - fake_userdata, - fake_meta, - fake_nics)\ - .AndReturn(fake_process) - self.mox.ReplayAll() - - process = self.manager.process_create(self.context, - fake_name, - fake_key_name, - face_security_groups, - fake_image, - fake_flavor, - fake_userdata, - fake_meta, - fake_nics) - self.assertEqual(process["pid"], "fake_pid") - - def test_process_create_exception_process_create_faild(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_create") - fake_name = "fake_name" - fake_key_name = "fake_key_name" - face_security_groups = ["security_group_id1"] - fake_image = "fake_image" - fake_flavor = "fake_flavor" - fake_userdata = "fake_userdata" - fake_meta = "fake_meta" - fake_nics = "fake_nics" - self.manager.process_client.process_create(fake_name, - fake_key_name, - face_security_groups, - fake_image, - fake_flavor, - fake_userdata, - fake_meta, - fake_nics)\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - self.mox.ReplayAll() - - try: - self.manager.process_create(self.context, - fake_name, - fake_key_name, - face_security_groups, - fake_image, - fake_flavor, - fake_userdata, - fake_meta, - fake_nics) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_process_delete(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_delete") - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.process_delete(fake_nova_instance_id) - self.mox.ReplayAll() - - self.manager.process_delete(self.context, fake_nova_instance_id) - - def test_process_delete_exception_404(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_delete") - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.process_delete(fake_nova_instance_id)\ - .AndRaise(exception.OpenStackException(404, "fake_msg")) - self.mox.ReplayAll() - - self.manager.process_delete(self.context, fake_nova_instance_id) - - def test_process_delete_exception_not_404(self): - self.mox.StubOutWithMock(self.manager.process_client, - "process_delete") - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.process_delete(fake_nova_instance_id)\ - .AndRaise(exception.OpenStackException(400, 
"fake_msg")) - self.mox.ReplayAll() - - try: - self.manager.process_delete(self.context, fake_nova_instance_id) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") - - def test_get_process_address(self): - self.mox.StubOutWithMock(self.manager.process_client, - "get_process_address") - fake_nova_instance_id = "nova_instance_id1" - fake_process_address = "address1,adress2" - self.manager.process_client.get_process_address(fake_nova_instance_id)\ - .AndReturn(fake_process_address) - self.mox.ReplayAll() - - process_address = self.manager\ - .get_process_address(self.context, fake_nova_instance_id) - self.assertEqual(process_address, fake_process_address) - - def test_get_process_address_exception_get_process_address_faild(self): - self.mox.StubOutWithMock(self.manager.process_client, - "get_process_address") - fake_nova_instance_id = "nova_instance_id1" - self.manager.process_client.get_process_address(fake_nova_instance_id)\ - .AndRaise(exception.OpenStackException(400, "fake_msg")) - self.mox.ReplayAll() - - try: - self.manager.get_process_address(self.context, - fake_nova_instance_id) - except Exception as e: - self.assertEqual(e.code, 400) - self.assertEqual(e.message, "fake_msg") diff --git a/rack/tests/test_service.py b/rack/tests/test_service.py deleted file mode 100644 index 81e7a43..0000000 --- a/rack/tests/test_service.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mox -import testtools - -from oslo.config import cfg - -from rack import exception -from rack import service -from rack import test -from rack.tests import utils -from rack import wsgi - -test_service_opts = [ - cfg.StrOpt("fake_manager", - default="rack.tests.test_service.FakeManager", - help="Manager for testing"), - cfg.StrOpt("test_service_listen", - default='127.0.0.1', - help="Host to bind test service to"), - cfg.IntOpt("test_service_listen_port", - default=0, - help="Port number to bind test service to"), -] - -CONF = cfg.CONF -CONF.register_opts(test_service_opts) - - -class TestWSGIService(test.TestCase): - - def setUp(self): - super(TestWSGIService, self).setUp() - self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) - - def test_service_random_port(self): - test_service = service.WSGIService("test_service") - test_service.start() - self.assertNotEqual(0, test_service.port) - test_service.stop() - - def test_service_start_with_illegal_workers(self): - CONF.set_override("rackapi_workers", -1) - self.assertRaises(exception.InvalidInput, - service.WSGIService, "rackapi") - - @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support") - def test_service_random_port_with_ipv6(self): - CONF.set_default("test_service_listen", "::1") - test_service = service.WSGIService("test_service") - test_service.start() - self.assertEqual("::1", test_service.host) - self.assertNotEqual(0, test_service.port) - test_service.stop() - - -class TestLauncher(test.TestCase): - - def setUp(self): - super(TestLauncher, self).setUp() - self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) - self.service = service.WSGIService("test_service") - - def test_launch_app(self): - service.serve(self.service) - self.assertNotEqual(0, self.service.port) - service._launcher.stop() diff --git a/rack/tests/utils.py b/rack/tests/utils.py deleted file mode 100644 index ec603a7..0000000 --- a/rack/tests/utils.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
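The random-port tests above work because test_service_listen_port defaults to 0: binding a socket to port 0 asks the kernel for any free ephemeral port, which getsockname() then reveals. A standard-library-only sketch of that trick:

    import socket

    # Bind to port 0 and let the kernel pick a free port, as the
    # WSGIService tests above expect (test_service.port != 0 afterwards).
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("127.0.0.1", 0))
    print(sock.getsockname()[1])  # the actual, non-zero port that was assigned
    sock.close()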
- -import errno -import platform -import socket -import sys - -from oslo.config import cfg - -import rack.context - -CONF = cfg.CONF -CONF.import_opt('use_ipv6', 'rack.netconf') - - -def get_test_admin_context(): - return rack.context.get_admin_context() - - -def is_osx(): - return platform.mac_ver()[0] != '' - - -test_dns_managers = [] - - -def cleanup_dns_managers(): - global test_dns_managers - for manager in test_dns_managers: - manager.delete_dns_file() - test_dns_managers = [] - - -def killer_xml_body(): - return (("""<?xml version="1.0" ?> - <!DOCTYPE x [ - <!ENTITY a "%(a)s"> - <!ENTITY b "%(b)s"> - <!ENTITY c "%(c)s"> - ]> - <foo> - <bar> - <v1>%(d)s</v1> - </bar> - </foo>""") % { - 'a': 'A' * 10, - 'b': '&a;' * 10, - 'c': '&b;' * 10, - 'd': '&c;' * 9999, - }).strip() - - -def is_ipv6_supported(): - has_ipv6_support = socket.has_ipv6 - try: - s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) - s.close() - except socket.error as e: - if e.errno == errno.EAFNOSUPPORT: - has_ipv6_support = False - else: - raise - - # check if there is at least one interface with ipv6 - if has_ipv6_support and sys.platform.startswith('linux'): - try: - with open('/proc/net/if_inet6') as f: - if not f.read(): - has_ipv6_support = False - except IOError: - has_ipv6_support = False - - return has_ipv6_support diff --git a/rack/utils.py b/rack/utils.py deleted file mode 100644 index 4feb6f9..0000000 --- a/rack/utils.py +++ /dev/null @@ -1,1174 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Utilities and helper functions.""" - -import contextlib -import datetime -import functools -import hashlib -import inspect -import multiprocessing -import os -import pyclbr -import random -import re -import shutil -import socket -import struct -import sys -import tempfile -from xml.sax import saxutils - -import eventlet -import netaddr -from oslo.config import cfg -from oslo import messaging -import six - -from rack import exception -from rack.openstack.common import excutils -from rack.openstack.common import gettextutils -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import importutils -from rack.openstack.common import lockutils -from rack.openstack.common import log as logging -from rack.openstack.common import processutils -from rack.openstack.common import timeutils - -notify_decorator = 'rack.notifications.notify_decorator' - -monkey_patch_opts = [ - cfg.BoolOpt('monkey_patch', - default=False, - help='Whether to log monkey patching'), - cfg.ListOpt('monkey_patch_modules', - default=[ - 'rack.api.ec2.cloud:%s' % (notify_decorator), - 'rack.compute.api:%s' % (notify_decorator) - ], - help='List of modules/decorators to monkey patch'), -] -utils_opts = [ - cfg.IntOpt('password_length', - default=12, - help='Length of generated instance admin passwords'), - cfg.StrOpt('instance_usage_audit_period', - default='month', - help='Time period to generate instance usages for. 
' - 'Time period must be hour, day, month or year'), - cfg.StrOpt('rootwrap_config', - default="/etc/rack/rootwrap.conf", - help='Path to the rootwrap configuration file to use for ' - 'running commands as root'), - cfg.StrOpt('tempdir', - help='Explicitly specify the temporary working directory'), -] -CONF = cfg.CONF -CONF.register_opts(monkey_patch_opts) -CONF.register_opts(utils_opts) - -LOG = logging.getLogger(__name__) - -TIME_UNITS = { - 'SECOND': 1, - 'MINUTE': 60, - 'HOUR': 3600, - 'DAY': 84400 -} - - -_IS_NEUTRON = None - -synchronized = lockutils.synchronized_with_prefix('rack-') - -SM_IMAGE_PROP_PREFIX = "image_" -SM_INHERITABLE_KEYS = ( - 'min_ram', 'min_disk', 'disk_format', 'container_format', -) - - -def vpn_ping(address, port, timeout=0.05, session_id=None): - """Sends a vpn negotiation packet and returns the server session. - - Returns False on a failure. Basic packet structure is below. - - Client packet (14 bytes):: - - 0 1 8 9 13 - +-+--------+-----+ - |x| cli_id |?????| - +-+--------+-----+ - x = packet identifier 0x38 - cli_id = 64 bit identifier - ? = unknown, probably flags/padding - - Server packet (26 bytes):: - - 0 1 8 9 13 14 21 2225 - +-+--------+-----+--------+----+ - |x| srv_id |?????| cli_id |????| - +-+--------+-----+--------+----+ - x = packet identifier 0x40 - cli_id = 64 bit identifier - ? = unknown, probably flags/padding - bit 9 was 1 and the rest were 0 in testing - - """ - if session_id is None: - session_id = random.randint(0, 0xffffffffffffffff) - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - data = struct.pack('!BQxxxxx', 0x38, session_id) - sock.sendto(data, (address, port)) - sock.settimeout(timeout) - try: - received = sock.recv(2048) - except socket.timeout: - return False - finally: - sock.close() - fmt = '!BQxxxxxQxxxx' - if len(received) != struct.calcsize(fmt): - LOG.warn(_('Expected to receive %(exp)s bytes, but actually %(act)s') % - dict(exp=struct.calcsize(fmt), act=len(received))) - return False - (identifier, server_sess, client_sess) = struct.unpack(fmt, received) - if identifier == 0x40 and client_sess == session_id: - return server_sess - - -def _get_root_helper(): - return 'sudo rack-rootwrap %s' % CONF.rootwrap_config - - -def execute(*cmd, **kwargs): - """Convenience wrapper around oslo's execute() method.""" - if 'run_as_root' in kwargs and 'root_helper' not in kwargs: - kwargs['root_helper'] = _get_root_helper() - return processutils.execute(*cmd, **kwargs) - - -def trycmd(*args, **kwargs): - """Convenience wrapper around oslo's trycmd() method.""" - if 'run_as_root' in kwargs and 'root_helper' not in kwargs: - kwargs['root_helper'] = _get_root_helper() - return processutils.trycmd(*args, **kwargs) - - -def rackdir(): - import rack - return os.path.abspath(rack.__file__).split('rack/__init__.py')[0] - - -def generate_uid(topic, size=8): - characters = '01234567890abcdefghijklmnopqrstuvwxyz' - choices = [random.choice(characters) for _x in xrange(size)] - return '%s-%s' % (topic, ''.join(choices)) - - -DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 - 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O - 'abcdefghijkmnopqrstuvwxyz') # Removed: l - - -EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 - 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O - - -def last_completed_audit_period(unit=None, before=None): - """This method gives you the most recently *completed* audit period. 
- - arguments: - units: string, one of 'hour', 'day', 'month', 'year' - Periods normally begin at the beginning (UTC) of the - period unit (So a 'day' period begins at midnight UTC, - a 'month' unit on the 1st, a 'year' on Jan, 1) - unit string may be appended with an optional offset - like so: 'day@18' This will begin the period at 18:00 - UTC. 'month@15' starts a monthly period on the 15th, - and year@3 begins a yearly one on March 1st. - before: Give the audit period most recently completed before - . Defaults to now. - - - returns: 2 tuple of datetimes (begin, end) - The begin timestamp of this audit period is the same as the - end of the previous. - """ - if not unit: - unit = CONF.instance_usage_audit_period - - offset = 0 - if '@' in unit: - unit, offset = unit.split("@", 1) - offset = int(offset) - - if before is not None: - rightnow = before - else: - rightnow = timeutils.utcnow() - if unit not in ('month', 'day', 'year', 'hour'): - raise ValueError('Time period must be hour, day, month or year') - if unit == 'month': - if offset == 0: - offset = 1 - end = datetime.datetime(day=offset, - month=rightnow.month, - year=rightnow.year) - if end >= rightnow: - year = rightnow.year - if 1 >= rightnow.month: - year -= 1 - month = 12 + (rightnow.month - 1) - else: - month = rightnow.month - 1 - end = datetime.datetime(day=offset, - month=month, - year=year) - year = end.year - if 1 >= end.month: - year -= 1 - month = 12 + (end.month - 1) - else: - month = end.month - 1 - begin = datetime.datetime(day=offset, month=month, year=year) - - elif unit == 'year': - if offset == 0: - offset = 1 - end = datetime.datetime(day=1, month=offset, year=rightnow.year) - if end >= rightnow: - end = datetime.datetime(day=1, - month=offset, - year=rightnow.year - 1) - begin = datetime.datetime(day=1, - month=offset, - year=rightnow.year - 2) - else: - begin = datetime.datetime(day=1, - month=offset, - year=rightnow.year - 1) - - elif unit == 'day': - end = datetime.datetime(hour=offset, - day=rightnow.day, - month=rightnow.month, - year=rightnow.year) - if end >= rightnow: - end = end - datetime.timedelta(days=1) - begin = end - datetime.timedelta(days=1) - - elif unit == 'hour': - end = rightnow.replace(minute=offset, second=0, microsecond=0) - if end >= rightnow: - end = end - datetime.timedelta(hours=1) - begin = end - datetime.timedelta(hours=1) - - return (begin, end) - - -def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): - """Generate a random password from the supplied symbol groups. - - At least one symbol from each group will be included. Unpredictable - results if length is less than the number of symbol groups. - - Believed to be reasonably secure (with a reasonable password length!) - - """ - if length is None: - length = CONF.password_length - - r = random.SystemRandom() - - # NOTE(jerdfelt): Some password policies require at least one character - # from each group of symbols, so start off with one random character - # from each symbol group - password = [r.choice(s) for s in symbolgroups] - # If length < len(symbolgroups), the leading characters will only - # be from the first length groups. Try our best to not be predictable - # by shuffling and then truncating. 
- r.shuffle(password) - password = password[:length] - length -= len(password) - - # then fill with random characters from all symbol groups - symbols = ''.join(symbolgroups) - password.extend([r.choice(symbols) for _i in xrange(length)]) - - # finally shuffle to ensure first x characters aren't from a - # predictable group - r.shuffle(password) - - return ''.join(password) - - -def get_my_ipv4_address(): - """Run ip route/addr commands to figure out the best ipv4 - """ - LOCALHOST = '127.0.0.1' - try: - out = execute('ip', '-f', 'inet', '-o', 'route', 'show') - - # Find the default route - regex_default = ('default\s*via\s*' - '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - '\s*dev\s*(\w*)\s*') - default_routes = re.findall(regex_default, out[0]) - if not default_routes: - return LOCALHOST - gateway, iface = default_routes[0] - - # Find the right subnet for the gateway/interface for - # the default route - route = ('(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\/(\d{1,2})' - '\s*dev\s*(\w*)\s*') - for match in re.finditer(route, out[0]): - subnet = netaddr.IPNetwork(match.group(1) + "/" + match.group(2)) - if (match.group(3) == iface and - netaddr.IPAddress(gateway) in subnet): - try: - return _get_ipv4_address_for_interface(iface) - except exception.RackException: - pass - except Exception as ex: - LOG.error(_("Couldn't get IPv4 : %(ex)s") % {'ex': ex}) - return LOCALHOST - - -def _get_ipv4_address_for_interface(iface): - """Run ip addr show for an interface and grab its ipv4 addresses - """ - try: - out = execute('ip', '-f', 'inet', '-o', 'addr', 'show', iface) - regexp_address = re.compile('inet\s*' - '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})') - address = [m.group(1) for m in regexp_address.finditer(out[0]) - if m.group(1) != '127.0.0.1'] - if address: - return address[0] - else: - msg = _('IPv4 address is not found.: %s') % out[0] - raise exception.RackException(msg) - except Exception as ex: - msg = _("Couldn't get IPv4 of %(interface)s" - " : %(ex)s") % {'interface': iface, 'ex': ex} - LOG.error(msg) - raise exception.RackException(msg) - - -def get_my_linklocal(interface): - try: - if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) - condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' - links = [re.search(condition, x) for x in if_str[0].split('\n')] - address = [w.group(1) for w in links if w is not None] - if address[0] is not None: - return address[0] - else: - msg = _('Link Local address is not found.:%s') % if_str - raise exception.RackException(msg) - except Exception as ex: - msg = _("Couldn't get Link Local IP of %(interface)s" - " :%(ex)s") % {'interface': interface, 'ex': ex} - raise exception.RackException(msg) - - -class LazyPluggable(object): - - """A pluggable backend loaded lazily based on some value.""" - - def __init__(self, pivot, config_group=None, **backends): - self.__backends = backends - self.__pivot = pivot - self.__backend = None - self.__config_group = config_group - - def __get_backend(self): - if not self.__backend: - if self.__config_group is None: - backend_name = CONF[self.__pivot] - else: - backend_name = CONF[self.__config_group][self.__pivot] - if backend_name not in self.__backends: - msg = _('Invalid backend: %s') % backend_name - raise exception.RackException(msg) - - backend = self.__backends[backend_name] - if isinstance(backend, tuple): - name = backend[0] - fromlist = backend[1] - else: - name = backend - fromlist = backend - - self.__backend = __import__(name, None, None, fromlist) - return self.__backend - - def __getattr__(self, 
key): - backend = self.__get_backend() - return getattr(backend, key) - - -def xhtml_escape(value): - """Escapes a string so it is valid within XML or XHTML. - - """ - return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'}) - - -def utf8(value): - """Try to turn a string into utf-8 if possible. - - Code is directly from the utf8 function in - http://github.com/facebook/tornado/blob/master/tornado/escape.py - - """ - if isinstance(value, unicode): - return value.encode('utf-8') - elif isinstance(value, gettextutils.Message): - return unicode(value).encode('utf-8') - assert isinstance(value, str) - return value - - -def check_isinstance(obj, cls): - """Checks that obj is of type cls, and lets PyLint infer types.""" - if isinstance(obj, cls): - return obj - raise Exception(_('Expected object of type: %s') % (str(cls))) - - -def parse_server_string(server_str): - """Parses the given server_string and returns a list of host and port. - If it's not a combination of host part and port, the port element - is a null string. If the input is invalid expression, return a null - list. - """ - try: - # First of all, exclude pure IPv6 address (w/o port). - if netaddr.valid_ipv6(server_str): - return (server_str, '') - - # Next, check if this is IPv6 address with a port number combination. - if server_str.find("]:") != -1: - (address, port) = server_str.replace('[', '', 1).split(']:') - return (address, port) - - # Third, check if this is a combination of an address and a port - if server_str.find(':') == -1: - return (server_str, '') - - # This must be a combination of an address and a port - (address, port) = server_str.split(':') - return (address, port) - - except Exception: - LOG.error(_('Invalid server_string: %s'), server_str) - return ('', '') - - -def is_int_like(val): - """Check if a value looks like an int.""" - try: - return str(int(val)) == str(val) - except Exception: - return False - - -def is_valid_protocol(protocol): - return protocol in ["tcp", "udp", "icmp"] - - -def is_valid_ipv4(address): - """Verify that address represents a valid IPv4 address.""" - try: - return netaddr.valid_ipv4(address) - except Exception: - return False - - -def is_valid_ipv6(address): - try: - return netaddr.valid_ipv6(address) - except Exception: - return False - - -def is_valid_ip_address(address): - return is_valid_ipv4(address) or is_valid_ipv6(address) - - -def is_valid_ipv6_cidr(address): - try: - str(netaddr.IPNetwork(address, version=6).cidr) - return True - except Exception: - return False - - -def get_shortened_ipv6(address): - addr = netaddr.IPAddress(address, version=6) - return str(addr.ipv6()) - - -def get_shortened_ipv6_cidr(address): - net = netaddr.IPNetwork(address, version=6) - return str(net.cidr) - - -def is_valid_cidr(address): - """Check if address is valid - - The provided address can be a IPv6 or a IPv4 - CIDR address. - """ - try: - # Validate the correct CIDR Address - netaddr.IPNetwork(address) - except netaddr.core.AddrFormatError: - return False - except UnboundLocalError: - # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in - # https://github.com/drkjam/netaddr/issues/2) - return False - - # Prior validation partially verify /xx part - # Verify it here - ip_segment = address.split('/') - - if (len(ip_segment) <= 1 or - ip_segment[1] == ''): - return False - - return True - - -def get_ip_version(network): - """Returns the IP version of a network (IPv4 or IPv6). - - Raises AddrFormatError if invalid network. 
- """ - if netaddr.IPNetwork(network).version == 6: - return "IPv6" - elif netaddr.IPNetwork(network).version == 4: - return "IPv4" - - -def monkey_patch(): - """If the CONF.monkey_patch set as True, - this function patches a decorator - for all functions in specified modules. - You can set decorators for each modules - using CONF.monkey_patch_modules. - The format is "Module path:Decorator function". - Example: - 'rack.api.ec2.cloud:rack.notifications.notify_decorator' - - Parameters of the decorator is as follows. - (See rack.notifications.notify_decorator) - - name - name of the function - function - object of the function - """ - # If CONF.monkey_patch is not True, this function do nothing. - if not CONF.monkey_patch: - return - # Get list of modules and decorators - for module_and_decorator in CONF.monkey_patch_modules: - module, decorator_name = module_and_decorator.split(':') - # import decorator function - decorator = importutils.import_class(decorator_name) - __import__(module) - # Retrieve module information using pyclbr - module_data = pyclbr.readmodule_ex(module) - for key in module_data.keys(): - # set the decorator for the class methods - if isinstance(module_data[key], pyclbr.Class): - clz = importutils.import_class("%s.%s" % (module, key)) - for method, func in inspect.getmembers(clz, inspect.ismethod): - setattr( - clz, - method, - decorator("%s.%s.%s" % (module, key, method), func)) - # set the decorator for the function - if isinstance(module_data[key], pyclbr.Function): - func = importutils.import_class("%s.%s" % (module, key)) - setattr(sys.modules[module], key, - decorator("%s.%s" % (module, key), func)) - - -def convert_to_list_dict(lst, label): - """Convert a value or list into a list of dicts.""" - if not lst: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - - -def make_dev_path(dev, partition=None, base='/dev'): - """Return a path to a particular device. - - >>> make_dev_path('xvdc') - /dev/xvdc - - >>> make_dev_path('xvdc', 1) - /dev/xvdc1 - """ - path = os.path.join(base, dev) - if partition: - path += str(partition) - return path - - -def sanitize_hostname(hostname): - """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" - if isinstance(hostname, unicode): - hostname = hostname.encode('latin-1', 'ignore') - - hostname = re.sub('[ _]', '-', hostname) - hostname = re.sub('[^\w.-]+', '', hostname) - hostname = hostname.lower() - hostname = hostname.strip('.-') - - return hostname - - -def read_cached_file(filename, cache_info, reload_func=None): - """Read from a file if it has been modified. - - :param cache_info: dictionary to hold opaque cache. - :param reload_func: optional function to be called with data when - file is reloaded due to a modification. - - :returns: data from file - - """ - mtime = os.path.getmtime(filename) - if not cache_info or mtime != cache_info.get('mtime'): - LOG.debug(_("Reloading cached file %s") % filename) - with open(filename) as fap: - cache_info['data'] = fap.read() - cache_info['mtime'] = mtime - if reload_func: - reload_func(cache_info['data']) - return cache_info['data'] - - -@contextlib.contextmanager -def temporary_mutation(obj, **kwargs): - """Temporarily set the attr on a particular object to a given value then - revert when finished. 
- - One use of this is to temporarily set the read_deleted flag on a context - object: - - with temporary_mutation(context, read_deleted="yes"): - do_something_that_needed_deleted_objects() - """ - def is_dict_like(thing): - return hasattr(thing, 'has_key') - - def get(thing, attr, default): - if is_dict_like(thing): - return thing.get(attr, default) - else: - return getattr(thing, attr, default) - - def set_value(thing, attr, val): - if is_dict_like(thing): - thing[attr] = val - else: - setattr(thing, attr, val) - - def delete(thing, attr): - if is_dict_like(thing): - del thing[attr] - else: - delattr(thing, attr) - - NOT_PRESENT = object() - - old_values = {} - for attr, new_value in kwargs.items(): - old_values[attr] = get(obj, attr, NOT_PRESENT) - set_value(obj, attr, new_value) - - try: - yield - finally: - for attr, old_value in old_values.items(): - if old_value is NOT_PRESENT: - delete(obj, attr) - else: - set_value(obj, attr, old_value) - - -def generate_mac_address(): - """Generate an Ethernet MAC address.""" - # NOTE(vish): We would prefer to use 0xfe here to ensure that linux - # bridge mac addresses don't change, but it appears to - # conflict with libvirt, so we use the next highest octet - # that has the unicast and locally administered bits set - # properly: 0xfa. - # Discussion: https://bugs.launchpad.net/rack/+bug/921838 - mac = [0xfa, 0x16, 0x3e, - random.randint(0x00, 0xff), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - - -def read_file_as_root(file_path): - """Secure helper to read file as root.""" - try: - out, _err = execute('cat', file_path, run_as_root=True) - return out - except processutils.ProcessExecutionError: - raise exception.FileNotFound(file_path=file_path) - - -@contextlib.contextmanager -def temporary_chown(path, owner_uid=None): - """Temporarily chown a path. - - :param owner_uid: UID of temporary owner (defaults to current user) - """ - if owner_uid is None: - owner_uid = os.getuid() - - orig_uid = os.stat(path).st_uid - - if orig_uid != owner_uid: - execute('chown', owner_uid, path, run_as_root=True) - try: - yield - finally: - if orig_uid != owner_uid: - execute('chown', orig_uid, path, run_as_root=True) - - -def chown(path, owner_uid=None): - """chown a path. - - :param owner_uid: UID of owner (defaults to current user) - """ - if owner_uid is None: - owner_uid = os.getuid() - - orig_uid = os.stat(path).st_uid - - if orig_uid != owner_uid: - execute('chown', owner_uid, path, run_as_root=True) - - -@contextlib.contextmanager -def tempdir(**kwargs): - argdict = kwargs.copy() - if 'dir' not in argdict: - argdict['dir'] = CONF.tempdir - tmpdir = tempfile.mkdtemp(**argdict) - try: - yield tmpdir - finally: - try: - shutil.rmtree(tmpdir) - except OSError as e: - LOG.error(_('Could not remove tmpdir: %s'), str(e)) - - -def walk_class_hierarchy(clazz, encountered=None): - """Walk class hierarchy, yielding most derived classes first.""" - if not encountered: - encountered = [] - for subclass in clazz.__subclasses__(): - if subclass not in encountered: - encountered.append(subclass) - # drill down to leaves first - for subsubclass in walk_class_hierarchy(subclass, encountered): - yield subsubclass - yield subclass - - -class UndoManager(object): - - """Provides a mechanism to facilitate rolling back a series of actions - when an exception is raised. 
- """ - - def __init__(self): - self.undo_stack = [] - - def undo_with(self, undo_func): - self.undo_stack.append(undo_func) - - def _rollback(self): - for undo_func in reversed(self.undo_stack): - undo_func() - - def rollback_and_reraise(self, msg=None, **kwargs): - """Rollback a series of actions then re-raise the exception. - - .. note:: (sirp) This should only be called within an - exception handler. - """ - with excutils.save_and_reraise_exception(): - if msg: - LOG.exception(msg, **kwargs) - - self._rollback() - - -def mkfs(fs, path, label=None, run_as_root=False): - """Format a file or block device - - :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4' - 'btrfs', etc.) - :param path: Path to file or block device to format - :param label: Volume label to use - """ - if fs == 'swap': - args = ['mkswap'] - else: - args = ['mkfs', '-t', fs] - # add -F to force no interactive execute on non-block device. - if fs in ('ext3', 'ext4', 'ntfs'): - args.extend(['-F']) - if label: - if fs in ('msdos', 'vfat'): - label_opt = '-n' - else: - label_opt = '-L' - args.extend([label_opt, label]) - args.append(path) - execute(*args, run_as_root=run_as_root) - - -def last_bytes(file_like_object, num): - """Return num bytes from the end of the file, and remaining byte count. - - :param file_like_object: The file to read - :param num: The number of bytes to return - - :returns (data, remaining) - """ - - try: - file_like_object.seek(-num, os.SEEK_END) - except IOError as e: - if e.errno == 22: - file_like_object.seek(0, os.SEEK_SET) - else: - raise - - remaining = file_like_object.tell() - return (file_like_object.read(), remaining) - - -def metadata_to_dict(metadata): - result = {} - for item in metadata: - if not item.get('deleted'): - result[item['key']] = item['value'] - return result - - -def dict_to_metadata(metadata): - result = [] - for key, value in metadata.iteritems(): - result.append(dict(key=key, value=value)) - return result - - -def instance_meta(instance): - if isinstance(instance['metadata'], dict): - return instance['metadata'] - else: - return metadata_to_dict(instance['metadata']) - - -def instance_sys_meta(instance): - if not instance.get('system_metadata'): - return {} - if isinstance(instance['system_metadata'], dict): - return instance['system_metadata'] - else: - return metadata_to_dict(instance['system_metadata']) - - -def get_wrapped_function(function): - """Get the method at the bottom of a stack of decorators.""" - if not hasattr(function, 'func_closure') or not function.func_closure: - return function - - def _get_wrapped_function(function): - if not hasattr(function, 'func_closure') or not function.func_closure: - return None - - for closure in function.func_closure: - func = closure.cell_contents - - deeper_func = _get_wrapped_function(func) - if deeper_func: - return deeper_func - elif hasattr(closure.cell_contents, '__call__'): - return closure.cell_contents - - return _get_wrapped_function(function) - - -def expects_func_args(*args): - def _decorator_checker(dec): - @functools.wraps(dec) - def _decorator(f): - base_f = get_wrapped_function(f) - arg_names, a, kw, _default = inspect.getargspec(base_f) - if a or kw or set(args) <= set(arg_names): - # NOTE (ndipanov): We can't really tell if correct stuff will - # be passed if it's a function with *args or **kwargs so - # we still carry on and hope for the best - return dec(f) - else: - raise TypeError("Decorated function %(f_name)s does not " - "have the arguments expected by the " - "decorator %(d_name)s" 
% - {'f_name': base_f.__name__, - 'd_name': dec.__name__}) - return _decorator - return _decorator_checker - - -class ExceptionHelper(object): - - """Class to wrap another and translate the ClientExceptions raised by its - function calls to the actual ones. - """ - - def __init__(self, target): - self._target = target - - def __getattr__(self, name): - func = getattr(self._target, name) - - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except messaging.ExpectedException as e: - raise (e.exc_info[1], None, e.exc_info[2]) - return wrapper - - -def check_string_length(value, name, min_length=0, max_length=None): - """Check the length of specified string - :param value: the value of the string - :param name: the name of the string - :param min_length: the min_length of the string - :param max_length: the max_length of the string - """ - if not isinstance(value, six.string_types): - msg = _("%s is not a string or unicode") % name - raise exception.InvalidInput(message=msg) - - if len(value) < min_length: - msg = _("%(name)s has a minimum character requirement of " - "%(min_length)s.") % {'name': name, 'min_length': min_length} - raise exception.InvalidInput(message=msg) - - if max_length and len(value) > max_length: - msg = _("%(name)s has more than %(max_length)s " - "characters.") % {'name': name, 'max_length': max_length} - raise exception.InvalidInput(message=msg) - - -def validate_integer(value, name, min_value=None, max_value=None): - """Make sure that value is a valid integer, potentially within range.""" - try: - value = int(str(value)) - except (ValueError, UnicodeEncodeError): - msg = _('%(value_name)s must be an integer') - raise exception.InvalidInput(reason=( - msg % {'value_name': name})) - - if min_value is not None: - if value < min_value: - msg = _('%(value_name)s must be >= %(min_value)d') - raise exception.InvalidInput( - reason=(msg % {'value_name': name, - 'min_value': min_value})) - if max_value is not None: - if value > max_value: - msg = _('%(value_name)s must be <= %(max_value)d') - raise exception.InvalidInput( - reason=( - msg % {'value_name': name, - 'max_value': max_value}) - ) - return value - - -def spawn_n(func, *args, **kwargs): - """Passthrough method for eventlet.spawn_n. - - This utility exists so that it can be stubbed for testing without - interfering with the service spawns. - """ - eventlet.spawn_n(func, *args, **kwargs) - - -def is_none_string(val): - """Check if a string represents a None value. 
- """ - if not isinstance(val, six.string_types): - return False - - return val.lower() == 'none' - - -def convert_version_to_int(version): - try: - if isinstance(version, six.string_types): - version = convert_version_to_tuple(version) - if isinstance(version, tuple): - return reduce(lambda x, y: (x * 1000) + y, version) - except Exception: - raise exception.RackException(message="Hypervisor version invalid.") - - -def convert_version_to_str(version_int): - version_numbers = [] - factor = 1000 - while version_int != 0: - version_number = version_int - (version_int // factor * factor) - version_numbers.insert(0, str(version_number)) - version_int = version_int / factor - - return reduce(lambda x, y: "%s.%s" % (x, y), version_numbers) - - -def convert_version_to_tuple(version_str): - return tuple(int(part) for part in version_str.split('.')) - -''' -def is_neutron(): - global _IS_NEUTRON - - if _IS_NEUTRON is not None: - return _IS_NEUTRON - - try: - # compatibility with Folsom/Grizzly configs - cls_name = CONF.network_api_class - if cls_name == 'rack.network.quantumv2.api.API': - cls_name = 'rack.network.neutronv2.api.API' - - from rack.network.neutronv2 import api as neutron_api - _IS_NEUTRON = issubclass(importutils.import_class(cls_name), - neutron_api.API) - except ImportError: - _IS_NEUTRON = False - - return _IS_NEUTRON -''' - - -def reset_is_neutron(): - global _IS_NEUTRON - _IS_NEUTRON = None - - -def is_auto_disk_config_disabled(auto_disk_config_raw): - auto_disk_config_disabled = False - if auto_disk_config_raw is not None: - adc_lowered = auto_disk_config_raw.strip().lower() - if adc_lowered == "disabled": - auto_disk_config_disabled = True - return auto_disk_config_disabled - - -def get_auto_disk_config_from_instance(instance=None, sys_meta=None): - if sys_meta is None: - sys_meta = instance_sys_meta(instance) - return sys_meta.get("image_auto_disk_config") - - -def get_auto_disk_config_from_image_props(image_properties): - return image_properties.get("auto_disk_config") - - -def get_system_metadata_from_image(image_meta, flavor=None): - system_meta = {} - prefix_format = SM_IMAGE_PROP_PREFIX + '%s' - - for key, value in image_meta.get('properties', {}).iteritems(): - new_value = unicode(value)[:255] - system_meta[prefix_format % key] = new_value - - for key in SM_INHERITABLE_KEYS: - value = image_meta.get(key) - - if key == 'min_disk' and flavor: - if image_meta.get('disk_format') == 'vhd': - value = flavor['root_gb'] - else: - value = max(value, flavor['root_gb']) - - if value is None: - continue - - system_meta[prefix_format % key] = value - - return system_meta - - -def get_image_from_system_metadata(system_meta): - image_meta = {} - properties = {} - - if not isinstance(system_meta, dict): - system_meta = metadata_to_dict(system_meta) - - for key, value in system_meta.iteritems(): - if value is None: - continue - - # NOTE(xqueralt): Not sure this has to inherit all the properties or - # just the ones we need. Leaving it for now to keep the old behaviour. 
- if key.startswith(SM_IMAGE_PROP_PREFIX): - key = key[len(SM_IMAGE_PROP_PREFIX):] - - if key in SM_INHERITABLE_KEYS: - image_meta[key] = value - else: - # Skip properties that are non-inheritable - if key in CONF.non_inheritable_image_properties: - continue - properties[key] = value - - if properties: - image_meta['properties'] = properties - - return image_meta - - -def get_hash_str(base_str): - """Returns a string that represents the hash of base_str (in hex format).""" - return hashlib.md5(base_str).hexdigest() - - -def cpu_count(): - try: - return multiprocessing.cpu_count() - except NotImplementedError: - return 1 diff --git a/rack/version.py b/rack/version.py deleted file mode 100644 index 00282d0..0000000 --- a/rack/version.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pbr.version - -from rack.openstack.common.gettextutils import _ - -RACK_VENDOR = "OpenStack Foundation" -RACK_PRODUCT = "OpenStack Rack" -RACK_PACKAGE = None # OS distro package version suffix - -loaded = False -version_info = pbr.version.VersionInfo('rack') -version_string = version_info.version_string - - -def _load_config(): - # Don't load in global context, since we can't assume - # these modules are accessible when distutils uses - # this module - import ConfigParser - - from oslo.config import cfg - - from rack.openstack.common import log as logging - - global loaded, RACK_VENDOR, RACK_PRODUCT, RACK_PACKAGE - if loaded: - return - - loaded = True - - cfgfile = cfg.CONF.find_file("release") - if cfgfile is None: - return - - try: - parser = ConfigParser.RawConfigParser() - parser.read(cfgfile) - - if parser.has_option("Rack", "vendor"): - RACK_VENDOR = parser.get("Rack", "vendor") - - if parser.has_option("Rack", "product"): - RACK_PRODUCT = parser.get("Rack", "product") - - if parser.has_option("Rack", "package"): - RACK_PACKAGE = parser.get("Rack", "package") - except Exception as ex: - LOG = logging.getLogger(__name__) - LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"), - {'cfgfile': cfgfile, 'ex': ex}) - - -def vendor_string(): - _load_config() - - return RACK_VENDOR - - -def product_string(): - _load_config() - - return RACK_PRODUCT - - -def package_string(): - _load_config() - - return RACK_PACKAGE - - -def version_string_with_package(): - if package_string() is None: - return version_info.version_string() - else: - return "%s-%s" % (version_info.version_string(), package_string()) diff --git a/rack/wsgi.py b/rack/wsgi.py deleted file mode 100644 index 88c432f..0000000 --- a/rack/wsgi.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utility methods for working with WSGI servers.""" - -from __future__ import print_function - -import os.path -import socket -import sys - -import eventlet -import eventlet.wsgi -import greenlet -from oslo.config import cfg -from paste import deploy -import routes.middleware -import ssl -import webob.dec -import webob.exc - -from rack import exception -from rack.openstack.common import excutils -from rack.openstack.common.gettextutils import _ -from rack.openstack.common import log as logging - -wsgi_opts = [ - cfg.StrOpt('api_paste_config', - default="api-paste.ini", - help='File name for the paste.deploy config for rack-api'), - cfg.StrOpt('wsgi_log_format', - default='%(client_ip)s "%(request_line)s" status:' - '%(status_code)s len: %(body_length)s time: %(wall_seconds).7f', - help='A Python format string that is used as the template to ' - 'generate log lines. The following values can be formatted ' - 'into it: client_ip, date_time, request_line, status_code, ' - 'body_length, wall_seconds.'), - cfg.StrOpt('ssl_ca_file', - help="CA certificate file to use to verify " - "connecting clients"), - cfg.StrOpt('ssl_cert_file', - help="SSL certificate of API server"), - cfg.StrOpt('ssl_key_file', - help="SSL private key of API server"), - cfg.IntOpt('tcp_keepidle', - default=600, - help="Sets the value of TCP_KEEPIDLE in seconds for each " - "server socket. Not supported on OS X."), - cfg.IntOpt('wsgi_default_pool_size', - default=1000, - help="Size of the pool of greenthreads used by wsgi"), - cfg.IntOpt('max_header_line', - default=16384, - help="Maximum line size of message headers to be accepted. " - "max_header_line may need to be increased when using " - "large tokens (typically those generated by the " - "Keystone v3 API with big service catalogs)."), -] -CONF = cfg.CONF -CONF.register_opts(wsgi_opts) - -LOG = logging.getLogger(__name__) - - -class Server(object): - - """Server class to manage a WSGI server, serving a WSGI application.""" - - default_pool_size = CONF.wsgi_default_pool_size - - def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, - protocol=eventlet.wsgi.HttpProtocol, backlog=128, - use_ssl=False, max_url_len=None): - """Initialize, but do not start, a WSGI server. - - :param name: Pretty name for logging. - :param app: The WSGI application to serve. - :param host: IP address to serve the application. - :param port: Port number on which to serve the application. - :param pool_size: Maximum number of eventlets to spawn concurrently. - :param backlog: Maximum number of queued connections. - :param max_url_len: Maximum length of permitted URLs. - :returns: None - :raises: rack.exception.InvalidInput - """ - # Allow operators to customize http requests max header line size.
- eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line - self.name = name - self.app = app - self._server = None - self._protocol = protocol - self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) - self._logger = logging.getLogger("rack.%s.wsgi.server" % self.name) - self._wsgi_logger = logging.WritableLogger(self._logger) - self._use_ssl = use_ssl - self._max_url_len = max_url_len - - if backlog < 1: - raise exception.InvalidInput( - reason='The backlog must be at least 1') - - bind_addr = (host, port) - # TODO(dims): eventlet's green dns/socket module does not actually - # support IPv6 in getaddrinfo(). We need to get around this in the - # future or monitor upstream for a fix - try: - info = socket.getaddrinfo(bind_addr[0], - bind_addr[1], - socket.AF_UNSPEC, - socket.SOCK_STREAM)[0] - family = info[0] - bind_addr = info[-1] - except Exception: - family = socket.AF_INET - - try: - self._socket = eventlet.listen(bind_addr, family, backlog=backlog) - except EnvironmentError: - LOG.error(_("Could not bind to %(host)s:%(port)s"), - {'host': host, 'port': port}) - raise - - (self.host, self.port) = self._socket.getsockname()[0:2] - LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__) - - def start(self): - """Start serving a WSGI application. - - :returns: None - """ - if self._use_ssl: - try: - ca_file = CONF.ssl_ca_file - cert_file = CONF.ssl_cert_file - key_file = CONF.ssl_key_file - - if cert_file and not os.path.exists(cert_file): - raise RuntimeError( - _("Unable to find cert_file : %s") % cert_file) - - if ca_file and not os.path.exists(ca_file): - raise RuntimeError( - _("Unable to find ca_file : %s") % ca_file) - - if key_file and not os.path.exists(key_file): - raise RuntimeError( - _("Unable to find key_file : %s") % key_file) - - if self._use_ssl and (not cert_file or not key_file): - raise RuntimeError( - _("When running server in SSL mode, you must " - "specify both a cert_file and key_file " - "option value in your configuration file")) - ssl_kwargs = { - 'server_side': True, - 'certfile': cert_file, - 'keyfile': key_file, - 'cert_reqs': ssl.CERT_NONE, - } - - if CONF.ssl_ca_file: - ssl_kwargs['ca_certs'] = ca_file - ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED - - self._socket = eventlet.wrap_ssl(self._socket, - **ssl_kwargs) - - self._socket.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1) - # sockets can hang around forever without keepalive - self._socket.setsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE, 1) - - # This option isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE'): - self._socket.setsockopt(socket.IPPROTO_TCP, - socket.TCP_KEEPIDLE, - CONF.tcp_keepidle) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_("Failed to start %(name)s on %(host)s" - ":%(port)s with SSL support") % self.__dict__) - - wsgi_kwargs = { - 'func': eventlet.wsgi.server, - 'sock': self._socket, - 'site': self.app, - 'protocol': self._protocol, - 'custom_pool': self._pool, - 'log': self._wsgi_logger, - 'log_format': CONF.wsgi_log_format, - 'debug': False - } - - if self._max_url_len: - wsgi_kwargs['url_length_limit'] = self._max_url_len - - self._server = eventlet.spawn(**wsgi_kwargs) - - def stop(self): - """Stop this server. - - This is not a very nice action, as currently the method by which a - server is stopped is by killing its eventlet.
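The Server lifecycle is: construct (binds the socket), start() (spawns the eventlet wsgi server), then wait() or stop(). A minimal sketch, assuming eventlet is installed and the placeholder hello_app stands in for a real RACK application:

    from __future__ import print_function

    from rack import wsgi

    def hello_app(environ, start_response):
        # Smallest possible WSGI app: fixed 200 response.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    server = wsgi.Server('demo', hello_app, host='127.0.0.1', port=8080)
    server.start()  # spawns eventlet.wsgi.server on the bound socket
    print('listening on %s:%s' % (server.host, server.port))
    server.wait()   # blocks until stop() kills the server's eventlet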
- - :returns: None - - """ - LOG.info(_("Stopping WSGI server.")) - - if self._server is not None: - # Resize pool to stop new requests from being processed - self._pool.resize(0) - self._server.kill() - - def wait(self): - """Block, until the server has stopped. - - Waits on the server's eventlet to finish, then returns. - - :returns: None - - """ - try: - if self._server is not None: - self._server.wait() - except greenlet.GreenletExit: - LOG.info(_("WSGI server has stopped.")) - - -class Request(webob.Request): - pass - - -class Application(object): - - """Base WSGI application wrapper. Subclasses need to implement __call__.""" - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [app:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [app:wadl] - latest_version = 1.3 - paste.app_factory = rack.api.fancy_api:Wadl.factory - - which would result in a call to the `Wadl` class as - - import rack.api.fancy_api - fancy_api.Wadl(latest_version='1.3') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - return cls(**local_config) - - def __call__(self, environ, start_response): - r"""Subclasses will probably want to implement __call__ like this: - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - # Any of the following objects work as responses: - - # Option 1: simple string - res = 'message\n' - - # Option 2: a nicely formatted HTTP exception page - res = exc.HTTPForbidden(explanation='Nice try') - - # Option 3: a webob Response object (in case you need to play with - # headers, or you want to be treated like an iterable, or or or) - res = Response(); - res.app_iter = open('somefile') - - # Option 4: any wsgi app to be run next - res = self.application - - # Option 5: you can get a Response object for a wsgi app, too, to - # play with headers etc - res = req.get_response(self.application) - - # You can then just return your response... - return res - # ... or set req.response and return None. - req.response = res - - See the end of http://pythonpaste.org/webob/modules/dec.html - for more info. - - """ - raise NotImplementedError(_('You must implement __call__')) - - -class Middleware(Application): - - """Base WSGI middleware. - - These classes require an application to be - initialized that will be called next. By default the middleware will - simply call its wrapped app, or you can override __call__ to customize its - behavior. - - """ - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [filter:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [filter:analytics] - redis_host = 127.0.0.1 - paste.filter_factory = rack.api.analytics:Analytics.factory - - which would result in a call to the `Analytics` class as - - import rack.api.analytics - analytics.Analytics(app_from_paste, redis_host='127.0.0.1') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. 
- - """ - def _factory(app): - return cls(app, **local_config) - return _factory - - def __init__(self, application): - self.application = application - - def process_request(self, req): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - return self.process_response(response) - - -class Debug(Middleware): - - """Helper class for debugging a WSGI application. - - Can be inserted into any WSGI application chain to get information - about the request and response. - - """ - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - print(('*' * 40) + ' REQUEST ENVIRON') - for key, value in req.environ.items(): - print(key, '=', value) - print() - resp = req.get_response(self.application) - - print(('*' * 40) + ' RESPONSE HEADERS') - for (key, value) in resp.headers.iteritems(): - print(key, '=', value) - print() - - resp.app_iter = self.print_generator(resp.app_iter) - - return resp - - @staticmethod - def print_generator(app_iter): - """Iterator that prints the contents of a wrapper string.""" - print(('*' * 40) + ' BODY') - for part in app_iter: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print() - - -class Router(object): - - """WSGI middleware that maps incoming requests to WSGI apps.""" - - def __init__(self, mapper): - """Create a router for the given routes.Mapper. - - Each route in `mapper` must specify a 'controller', which is a - WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be an object that can route - the request to the action-specific method. - - Examples: - mapper = routes.Mapper() - sc = ServerController() - - # Explicit mapping of one route to a controller+action - mapper.connect(None, '/svrlist', controller=sc, action='list') - - # Actions are all implicitly defined - mapper.resource('server', 'servers', controller=sc) - - # Pointing to an arbitrary WSGI app. You can specify the - # {path_info:.*} parameter so the target app can be handed just that - # section of the URL. - mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) - - """ - self.map = mapper - self._router = routes.middleware.RoutesMiddleware(self._dispatch, - self.map) - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Route the incoming request to a controller based on self.map. - - If no match, return a 404. - - """ - return self._router - - @staticmethod - @webob.dec.wsgify(RequestClass=Request) - def _dispatch(req): - """Dispatch the request to the appropriate controller. - - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. Either returns 404 - or the routed WSGI app's response. - - """ - match = req.environ['wsgiorg.routing_args'][1] - if not match: - return webob.exc.HTTPNotFound() - app = match['controller'] - return app - - -class Loader(object): - - """Used to load WSGI applications from paste configurations.""" - - def __init__(self, config_path=None): - """Initialize the loader, and attempt to find the config. 
- - :param config_path: Full or relative path to the paste config. - :returns: None - - """ - self.config_path = None - - config_path = config_path or CONF.api_paste_config - if not os.path.isabs(config_path): - self.config_path = CONF.find_file(config_path) - elif os.path.exists(config_path): - self.config_path = config_path - - if not self.config_path: - raise exception.ConfigNotFound(path=config_path) - - def load_app(self, name): - """Return the paste URLMap wrapped WSGI application. - - :param name: Name of the application to load. - :returns: Paste URLMap object wrapping the requested application. - :raises: `rack.exception.PasteAppNotFound` - - """ - try: - LOG.debug(_("Loading app %(name)s from %(path)s") % - {'name': name, 'path': self.config_path}) - return deploy.loadapp("config:%s" % self.config_path, name=name) - except LookupError as err: - LOG.error(err) - raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 11a92c7..0000000 --- a/requirements.txt +++ /dev/null @@ -1,23 +0,0 @@ -anyjson==0.3.3 -Babel==1.3 -eventlet==0.15.0 -iso8601==0.1.10 -Jinja2==2.7.3 -lxml==3.3.5 -netaddr==0.7.12 -oslo.config==1.3.0 -oslo.messaging==1.3.0 -Paste==1.7.5.1 -pbr==0.9.0 -python-keystoneclient==0.9.0 -python-neutronclient==2.3.5 -python-novaclient==2.18.1 -Routes==2.0 -six==1.7.3 -SQLAlchemy==0.9.6 -sqlalchemy-migrate==0.9.1 -stevedore==0.15 -WebOb==1.4 -PasteDeploy==1.5.2 -gevent==1.0.1 -gevent-websocket==0.9.3 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 888a146..0000000 --- a/setup.cfg +++ /dev/null @@ -1,29 +0,0 @@ -[metadata] -name = rack -version = 0.1.0 -summary = Real Application Centric Kernel -description-file = README.md -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://wiki.openstack.org/wiki/RACK -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] -packages = - rack - -[entry_points] -console_scripts = - rack-api = rack.cmd.api:main - rack-scheduler = rack.cmd.scheduler:main - rack-resourceoperator = rack.cmd.resourceoperator:main - diff --git a/setup.py b/setup.py deleted file mode 100644 index 9ea4d88..0000000 --- a/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
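Tying the Loader above together: it resolves the paste config via CONF.find_file() unless given an absolute path, then hands the named app to paste.deploy. A sketch; 'rack-api' is an assumed app name, the real one being whatever api-paste.ini defines:

    from rack import wsgi

    loader = wsgi.Loader()             # falls back to CONF.api_paste_config
    app = loader.load_app('rack-api')  # PasteAppNotFound on an unknown name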
-import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 8a8079f..0000000 --- a/test-requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -hacking>=0.8.0,<0.9 -coverage>=3.6 -discover -feedparser -fixtures>=0.3.14 -mock>=1.0 -mox>=0.5.3 -MySQL-python -psycopg2 -pylint==0.25.2 -python-subunit>=0.0.18 -sphinx>=1.1.2,<1.2 -oslosphinx -testrepository>=0.0.18 -testtools>=0.9.34 diff --git a/tools/ansible-openstack-log/OpenStackLogDashboard b/tools/ansible-openstack-log/OpenStackLogDashboard deleted file mode 100644 index 0878215..0000000 --- a/tools/ansible-openstack-log/OpenStackLogDashboard +++ /dev/null @@ -1,503 +0,0 @@ -{ - "title": "OpenStack log", - "services": { - "query": { - "list": { - "0": { - "query": "COMPONENT: nova", - "alias": "", - "color": "#7EB26D", - "id": 0, - "pin": false, - "type": "lucene", - "enable": true - }, - "1": { - "id": 1, - "color": "#EAB839", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: neutron" - }, - "2": { - "id": 2, - "color": "#6ED0E0", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: keystone" - }, - "3": { - "id": 3, - "color": "#EF843C", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: cinder" - }, - "4": { - "id": 4, - "color": "#E24D42", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: glance" - }, - "5": { - "id": 5, - "color": "#1F78C1", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: horizon" - }, - "6": { - "id": 6, - "color": "#BA43A9", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: ceilometer" - }, - "7": { - "id": 7, - "color": "#705DA0", - "alias": "", - "pin": false, - "type": "lucene", - "enable": true, - "query": "COMPONENT: heat" - } - }, - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "filter": { - "list": { - "0": { - "type": "time", - "field": "@timestamp", - "from": "now-6h", - "to": "now", - "mandate": "must", - "active": true, - "alias": "", - "id": 0 - } - }, - "ids": [ - 0 - ] - } - }, - "rows": [ - { - "title": "RWR", - "height": "300px", - "editable": true, - "collapse": false, - "collapsable": true, - "panels": [ - { - "error": false, - "span": 4, - "editable": true, - "type": "terms", - "loadingEditor": false, - "field": "COMPONENT", - "exclude": [], - "missing": true, - "other": true, - "size": 10, - "order": "count", - "style": { - "font-size": "10pt" - }, - "donut": false, - "tilt": false, - "labels": true, - "arrangement": "horizontal", - "chart": "pie", - "counter_pos": "none", - "spyable": true, - "queries": { - "mode": "all", - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "tmode": "terms", - "tstat": "total", - "valuefield": "", - "title": "LOG COUNT by COMPONENT" - }, - { - "error": false, - "span": 4, - "editable": true, - "type": "terms", - "loadingEditor": false, - "field": "hostname", - "exclude": [], - "missing": true, - "other": true, - "size": 10, - "order": "count", - "style": { - "font-size": "10pt" - }, - "donut": false, - "tilt": false, - "labels": true, - "arrangement": "horizontal", - "chart": "pie", - "counter_pos": "none", - "spyable": true, - "queries": { - "mode": "all", - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "tmode": "terms", - "tstat": "total", - "valuefield": "", - "title": "LOG COUNT 
by host" - }, - { - "error": false, - "span": 4, - "editable": true, - "type": "terms", - "loadingEditor": false, - "field": "LEVEL", - "exclude": [], - "missing": true, - "other": true, - "size": 10, - "order": "count", - "style": { - "font-size": "10pt" - }, - "donut": false, - "tilt": false, - "labels": true, - "arrangement": "horizontal", - "chart": "pie", - "counter_pos": "none", - "spyable": true, - "queries": { - "mode": "all", - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "tmode": "terms", - "tstat": "total", - "valuefield": "", - "title": "LOG COUNT by Level" - } - ], - "notice": false - }, - { - "title": "RWR-trend", - "height": "150px", - "editable": true, - "collapse": false, - "collapsable": true, - "panels": [ - { - "span": 6, - "editable": true, - "type": "sparklines", - "loadingEditor": false, - "mode": "count", - "time_field": "@timestamp", - "value_field": null, - "interval": "30m", - "spyable": true, - "queries": { - "mode": "all", - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "title": "Trends for each COMPONENT" - } - ], - "notice": false - }, - { - "title": "Graph", - "height": "350px", - "editable": true, - "collapse": false, - "collapsable": true, - "panels": [ - { - "span": 12, - "editable": true, - "group": [ - "default" - ], - "type": "histogram", - "mode": "count", - "time_field": "@timestamp", - "value_field": null, - "auto_int": true, - "resolution": 100, - "interval": "5m", - "fill": 3, - "linewidth": 3, - "timezone": "browser", - "spyable": true, - "zoomlinks": true, - "bars": true, - "stack": true, - "points": false, - "lines": false, - "legend": true, - "x-axis": true, - "y-axis": true, - "percentage": false, - "interactive": true, - "queries": { - "mode": "all", - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "title": "Events over time", - "intervals": [ - "auto", - "1s", - "1m", - "5m", - "10m", - "30m", - "1h", - "3h", - "12h", - "1d", - "1w", - "1M", - "1y" - ], - "options": true, - "tooltip": { - "value_type": "cumulative", - "query_as_alias": true - }, - "scale": 1, - "y_format": "none", - "grid": { - "max": null, - "min": 0 - }, - "annotate": { - "enable": false, - "query": "*", - "size": 20, - "field": "_type", - "sort": [ - "_score", - "desc" - ] - }, - "pointradius": 5, - "show_query": true, - "legend_counts": true, - "zerofill": true, - "derivative": false - } - ], - "notice": false - }, - { - "title": "Events", - "height": "350px", - "editable": true, - "collapse": false, - "collapsable": true, - "panels": [ - { - "title": "All events", - "error": false, - "span": 12, - "editable": true, - "group": [ - "default" - ], - "type": "table", - "size": 100, - "pages": 5, - "offset": 0, - "sort": [ - "time_raw", - "asc" - ], - "style": { - "font-size": "9pt" - }, - "overflow": "min-height", - "fields": [], - "localTime": false, - "timeField": "@timestamp", - "highlight": [], - "sortable": true, - "header": true, - "paging": true, - "spyable": true, - "queries": { - "mode": "all", - "ids": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7 - ] - }, - "field_list": true, - "status": "Stable", - "trimFactor": 300, - "normTimes": true, - "all_fields": false - } - ], - "notice": false - } - ], - "editable": true, - "failover": false, - "index": { - "interval": "day", - "pattern": "[logstash-]YYYY.MM.DD", - "default": "NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED", - "warm_fields": true - }, - "style": "dark", - "panel_hints": true, - "pulldowns": [ - { - "type": "query", - "collapse": false, - "notice": false, - "query": "*", 
- "pinned": true, - "history": [], - "remember": 10, - "enable": true - }, - { - "type": "filtering", - "collapse": false, - "notice": true, - "enable": true - } - ], - "nav": [ - { - "type": "timepicker", - "collapse": false, - "notice": false, - "status": "Stable", - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ], - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "timefield": "@timestamp", - "now": true, - "filter_id": 0, - "enable": true - } - ], - "loader": { - "save_gist": false, - "save_elasticsearch": true, - "save_local": true, - "save_default": true, - "save_temp": true, - "save_temp_ttl_enable": true, - "save_temp_ttl": "30d", - "load_gist": true, - "load_elasticsearch": true, - "load_elasticsearch_size": 20, - "load_local": true, - "hide": false - }, - "refresh": false -} \ No newline at end of file diff --git a/tools/ansible-openstack-log/README.md b/tools/ansible-openstack-log/README.md deleted file mode 100644 index e53894f..0000000 --- a/tools/ansible-openstack-log/README.md +++ /dev/null @@ -1,100 +0,0 @@ -Ansible playbook for OpenStack Log collection and analysis -================= - -# Description -Since OpenStack logs are dispersed across the nodes and components, it is sometimes difficult to troubleshoot the error or simply grasp what is going on in OpenStack. -Using fluentd(td-agent), elasticsearch and Kibana, you can easily locate the error and quickly grasp the status from logs. -As basic UUIDs (userID, tenantID, instanceID, requestID, etc.) are indexed, you can quickly search and corner the cause of error with them, too. - - -This repository contains playbooks to setup fluentd(td-agent) and elasticsearch. -Fluentd collects logs from all OpenStack nodes and you can analyze them with elasticsaerch/Kibana. - - -These playbooks do.. -* install Elasticsearch to elasticsearch node. -* install td-agent to collector node. -* install td-agent to all OpenStack nodes -* provide Kibana dashboard with cool UI. - - -# Requirements -* Ansible 1.6 or later -* CentOS 6.5 or later -* Internet accessible network -* OpenStack (tested on Icehouse) - -# Assumptions of host network -We assume following three networks for OpenStack hosts. -* External network -A network end-users access from the Internet to interact virtual instances, dashboard and api. - -Also employed to network-gateway node's external link. - -* Internal network -OpenStack components talk to each other via this network. - -* Management network -Ansible accesses OpenStack hosts via this network. - -# Before Running -### Automatic ssh login -You can simply copy ssh public key to the remote node with following command. - - ssh-copy-id root@remoteNode - -Also specify ssh private key in ansible.cfg file. - - private_key=/root/.ssh/id_rsa - ask_pass = False - - -### Manual ssh login(unrecommended) -Or if you don't like ssh key, comment out private_key_file=** and change ask_pass=True in ansible.cfg file. - - # private_key_file=** - ask_pass = True - -### Role of each nodes -Determine the role of each nodes in log_hosts file - * [elasticsearch]: elasticsearch+Kibana node - * [fluentd_collector]: Collector node - * [openstack]: All OpenStack nodes - - -### elasticsearch/fluentd/Kibana settings -Edit the group_vars/all file to accomodate with you environment. - - -### Interface mapping -Determine interface mapping of External/Internal/Management for each role nodes. 
-Edit `group_vars/{everything except all}` and specify which interface to use for which network. -By default, eth1 is used for Internal, eth2 for External and eth3 for Management. - - -# How to run -Go to the top directory (where log_hosts and set_log.yml are located), and run `ansible-playbook set_log.yml`. -Or you can use Jenkins to kick off the playbook with the following shell script. - - /usr/bin/ansible-playbook $WORKSPACE/set_log.yml - - -# Load Kibana dashboard -Access the elasticsearch/Kibana node with a browser; you should see the Kibana dashboard. - -Go to _Load_ -> _Advanced_ -> _LocalFile_, and select the `OpenStackLogDashboard` file you downloaded with the playbooks. - - -*** -# Tips -## Faster installation -You can point to your own package repositories to make installation faster. -Place your repository files in the `templates/etc/yum.repos.d` directory and set `use_your_own_repository: true` in `group_vars/all`. - - -*** -## TODO - * Input ceilometer data to analyze - * Use fluent-plugin-multi-format-parser for more understandable regexp definitions. - * Cluster Elasticsearch - diff --git a/tools/ansible-openstack-log/ansible.cfg b/tools/ansible-openstack-log/ansible.cfg deleted file mode 100644 index 75b17b4..0000000 --- a/tools/ansible-openstack-log/ansible.cfg +++ /dev/null @@ -1,30 +0,0 @@ -[defaults] - -# inventory file -hostfile = log_hosts - -# path to custom filter plugins -filter_plugins = filter_plugins - -# remote user for ansible to use during setup -remote_user = root - -# SSH private key to use for accessing remote servers. -# Please make sure that the corresponding public key is installed on the remote servers. -private_key_file = /var/lib/jenkins/.ssh/id_rsa - -# Don't use ask_pass when running from Jenkins -ask_pass = False -#ask_sudo_pass = True - -# uncomment this to disable SSH key host checking -host_key_checking = False - -# SSH timeout -timeout = 20 - -# default forks -forks = 10 - -# log -log_path=log.txt diff --git a/tools/ansible-openstack-log/group_vars/Kibana b/tools/ansible-openstack-log/group_vars/Kibana deleted file mode 100644 index 7e4dc67..0000000 --- a/tools/ansible-openstack-log/group_vars/Kibana +++ /dev/null @@ -1,11 +0,0 @@ -my_int_if: eth1 -my_ext_if: eth2 -my_mng_if: eth3 - -my_int_ip: "{{ ansible_eth1.ipv4.address }}" -my_ext_ip: "{{ ansible_eth2.ipv4.address }}" -my_mng_ip: "{{ ansible_eth3.ipv4.address }}" - -my_int_obj: "{{ ansible_eth1 }}" -my_ext_obj: "{{ ansible_eth2 }}" -my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack-log/group_vars/all b/tools/ansible-openstack-log/group_vars/all deleted file mode 100644 index 7532dbc..0000000 --- a/tools/ansible-openstack-log/group_vars/all +++ /dev/null @@ -1,24 +0,0 @@ ---- - -# Please change here at your discretion -ntp_server: ntp.nict.jp -use_your_own_repository: false -elasticsearch_cluster_name: myCluster -Kibana_public_url: kibana.example.com - -#### You don't have to change from here usually #### - -# ip address mapping. -fluentd_collector_ip: "{{ hostvars[groups['fluentd_collector'][0]]['my_int_ip'] }}" -elasticsearch_ip: "{{ hostvars[groups['elasticsearch'][0]]['my_int_ip'] }}" - -# fluentd port.
-fluentd_collector_port: 29001 -fluentd_server_port: 9200 - -# Kibana url -Kibana_url: https://download.elasticsearch.org/kibana/kibana/kibana-3.1.0.tar.gz -Kibana_version: kibana-3.1.0 - - - diff --git a/tools/ansible-openstack-log/group_vars/elasticsearch b/tools/ansible-openstack-log/group_vars/elasticsearch deleted file mode 100644 index 7e4dc67..0000000 --- a/tools/ansible-openstack-log/group_vars/elasticsearch +++ /dev/null @@ -1,11 +0,0 @@ -my_int_if: eth1 -my_ext_if: eth2 -my_mng_if: eth3 - -my_int_ip: "{{ ansible_eth1.ipv4.address }}" -my_ext_ip: "{{ ansible_eth2.ipv4.address }}" -my_mng_ip: "{{ ansible_eth3.ipv4.address }}" - -my_int_obj: "{{ ansible_eth1 }}" -my_ext_obj: "{{ ansible_eth2 }}" -my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack-log/group_vars/fluentd-collector b/tools/ansible-openstack-log/group_vars/fluentd-collector deleted file mode 100644 index 7e4dc67..0000000 --- a/tools/ansible-openstack-log/group_vars/fluentd-collector +++ /dev/null @@ -1,11 +0,0 @@ -my_int_if: eth1 -my_ext_if: eth2 -my_mng_if: eth3 - -my_int_ip: "{{ ansible_eth1.ipv4.address }}" -my_ext_ip: "{{ ansible_eth2.ipv4.address }}" -my_mng_ip: "{{ ansible_eth3.ipv4.address }}" - -my_int_obj: "{{ ansible_eth1 }}" -my_ext_obj: "{{ ansible_eth2 }}" -my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack-log/group_vars/openstack b/tools/ansible-openstack-log/group_vars/openstack deleted file mode 100644 index 7e4dc67..0000000 --- a/tools/ansible-openstack-log/group_vars/openstack +++ /dev/null @@ -1,11 +0,0 @@ -my_int_if: eth1 -my_ext_if: eth2 -my_mng_if: eth3 - -my_int_ip: "{{ ansible_eth1.ipv4.address }}" -my_ext_ip: "{{ ansible_eth2.ipv4.address }}" -my_mng_ip: "{{ ansible_eth3.ipv4.address }}" - -my_int_obj: "{{ ansible_eth1 }}" -my_ext_obj: "{{ ansible_eth2 }}" -my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack-log/log_hosts b/tools/ansible-openstack-log/log_hosts deleted file mode 100644 index 8508f99..0000000 --- a/tools/ansible-openstack-log/log_hosts +++ /dev/null @@ -1,16 +0,0 @@ -[elasticsearch] -fluentd - -[fluentd_collector] -fluentd - -[Kibana] -fluentd - -[openstack] -testsrv06 -testsrv07 -testsrv08 - - - diff --git a/tools/ansible-openstack-log/playbooks/Kibana/kibana-setup.yml b/tools/ansible-openstack-log/playbooks/Kibana/kibana-setup.yml deleted file mode 100644 index e751385..0000000 --- a/tools/ansible-openstack-log/playbooks/Kibana/kibana-setup.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -- name: Install Kibana - hosts: Kibana - gather_facts: yes - sudo: yes - tasks: - - name: ensure Apache and java are installed - yum: name={{item}} state=latest - with_items: - - java - - httpd - notify: - - ensure apache is restarted - - - name: ensure httpd.conf is configured - lineinfile: > - dest=/etc/httpd/conf/httpd.conf - state=present - line="ServerName {{ ansible_hostname }}:80" - insertafter='#ServerName www.example.com:80' - notify: - - ensure apache is restarted - - - name: download Kibana binary - get_url: - url: "{{ Kibana_url }}" - dest: /tmp/kibana.tar.gz - mode: 0666 - - - name: unarchive Kibana - shell: - /bin/tar zxf /tmp/kibana.tar.gz -C /var/www/html - - # Kibana reverse Proxy - - name: Ensure Kibana redirects to itself with /es for reverse proxy - lineinfile: - dest: /var/www/html/{{ Kibana_version }}/config.js - regexp: "elasticsearch:" - line: "elasticsearch: 'http://{{ Kibana_public_url }}/es/', " - - - name: Ensure httpd virtual host file is created for reverse proxy - template:
src="templates/etc/httpd/conf.d/vhosts.conf" dest="/etc/httpd/conf.d/vhosts.conf" owner=root group=root mode=0644 - notify: - - ensure apache is restarted - - - - name: ensure httpd port 80 is opened - include: ../reusables/open_firewall_port.yml protocol=tcp port=80 - - - name: ensure httpd port 443 is opened - include: ../reusables/open_firewall_port.yml protocol=tcp port=443 - - - name: ensure apache automatically starts after reboot - service: name={{item}} state=started enabled=yes - with_items: - - httpd - - handlers: - - include: ../reusables/handlers.yml - - - name: ensure apache is restarted - service: name={{ item }} state=restarted - with_items: - - httpd - - - - diff --git a/tools/ansible-openstack-log/playbooks/Kibana/main.yml b/tools/ansible-openstack-log/playbooks/Kibana/main.yml deleted file mode 100644 index 42250fc..0000000 --- a/tools/ansible-openstack-log/playbooks/Kibana/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -- include: kibana-setup.yml - diff --git a/tools/ansible-openstack-log/playbooks/Kibana/templates b/tools/ansible-openstack-log/playbooks/Kibana/templates deleted file mode 120000 index 07531b7..0000000 --- a/tools/ansible-openstack-log/playbooks/Kibana/templates +++ /dev/null @@ -1 +0,0 @@ -../../templates \ No newline at end of file diff --git a/tools/ansible-openstack-log/playbooks/common/common.yml b/tools/ansible-openstack-log/playbooks/common/common.yml deleted file mode 100644 index dbd536e..0000000 --- a/tools/ansible-openstack-log/playbooks/common/common.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -- name: System settings for all nodes - hosts: elasticsearch:fluentd_collector:Kibana - sudo: yes - - tasks: - - name: ensure python SELinux library is installed - yum: name={{ item }} state=latest - with_items: - - libselinux-python - - - name: ensure SELinux is congirured permissive - selinux: > - policy=targeted - state=permissive - - - name: ensure epel and rdo-release repository are installed - yum: name={{ item }} state=present - with_items: - - "http://ftp.riken.jp/Linux/fedora/epel/6/x86_64/epel-release-6-8.noarch.rpm" - when: not use_your_own_repository - - - name: ensure local repository file is updated(if any) - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: root - group: root - mode: 0644 - with_items: - - { src: templates/etc/yum.repos.d/CentOS-Base.repo, dest: /etc/yum.repos.d/CentOS-Base.repo } - - { src: templates/etc/yum.repos.d/epel.repo, dest: /etc/yum.repos.d/epel.repo } - - { src: templates/etc/yum.repos.d/epel-testing.repo, dest: /etc/yum.repos.d/epel-testing.repo } - when: use_your_own_repository - - - name: yum clean all - command: /usr/bin/yum clean all - - - name: ensure python keyczar is installed - yum: name={{ item }} state=latest - with_items: - - python-keyczar - - - name: ensure python-keyczar port 5099 is opened - include: ../reusables/open_firewall_port.yml protocol=tcp port=5099 - - - name: ensure additional packages are installed - yum: name={{ item }} state=latest - with_items: - - bash-completion - - iptables - - - name: make sure iptables is automatically booted - service: name=iptables state=started enabled=yes - - - name: "Build hosts file for OpenStack nodes" - lineinfile: dest=/etc/hosts regexp='.*{{hostvars[item].ansible_fqdn}}$' line="{{ hostvars[item].my_int_ip }} {{ hostvars[item].ansible_fqdn }} {{hostvars[item].ansible_fqdn}}" state=present - when: hostvars[item].ansible_default_ipv4.address is defined - with_items: groups['all'] - - - - handlers: - - include: ../reusables/handlers.yml diff --git 
a/tools/ansible-openstack-log/playbooks/common/main.yml b/tools/ansible-openstack-log/playbooks/common/main.yml deleted file mode 100644 index 6b5420b..0000000 --- a/tools/ansible-openstack-log/playbooks/common/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: common.yml - - diff --git a/tools/ansible-openstack-log/playbooks/common/templates b/tools/ansible-openstack-log/playbooks/common/templates deleted file mode 120000 index 07531b7..0000000 --- a/tools/ansible-openstack-log/playbooks/common/templates +++ /dev/null @@ -1 +0,0 @@ -../../templates \ No newline at end of file diff --git a/tools/ansible-openstack-log/playbooks/elastic_search/elasticsearch-head.yml b/tools/ansible-openstack-log/playbooks/elastic_search/elasticsearch-head.yml deleted file mode 100644 index d6f4e4e..0000000 --- a/tools/ansible-openstack-log/playbooks/elastic_search/elasticsearch-head.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: setup elasticsearch-head - hosts: elasticsearch - gather_facts: yes - sudo: yes - tasks: - - name: install elasticsearch-head (not yet idempotent) - command: /usr/share/elasticsearch/bin/plugin -install mobz/elasticsearch-head - ignore_errors: yes - - handlers: - - - diff --git a/tools/ansible-openstack-log/playbooks/elastic_search/elasticsearch.yml b/tools/ansible-openstack-log/playbooks/elastic_search/elasticsearch.yml deleted file mode 100644 index 5766c3e..0000000 --- a/tools/ansible-openstack-log/playbooks/elastic_search/elasticsearch.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- -- name: setup elasticsearch - hosts: elasticsearch - gather_facts: yes - sudo: yes - tasks: - - name: GPG key import - rpm_key: state=present key=http://packages.elasticsearch.org/GPG-KEY-elasticsearch - - - name: ensure repository file for elasticsearch is in place.
- copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: root - group: root - mode: 0644 - with_items: - - { src: templates/etc/yum.repos.d/elasticsearch.repo, dest: /etc/yum.repos.d/elasticsearch.repo } - - - name: yum clean all - command: /usr/bin/yum clean all - - - name: ensure elasticsearch and java are installed - yum: name={{item}} state=latest - with_items: - - java - - elasticsearch - notify: - - ensure elasticsearch is restarted - - - name: ensure elasticsearch listener ports are opened - include: ../reusables/open_firewall_port.yml protocol=tcp port="9200:9300" - - - name: ensure elasticsearch node communication ports are opened - include: ../reusables/open_firewall_port.yml protocol=tcp port="9300:9400" - - - name: ensure multicast transmission is enabled - include: ../reusables/open_firewall_line.yml oneline="-A INPUT -s 224.0.0.0/4 -j ACCEPT" - - - name: ensure multicast transmission is enabled - include: ../reusables/open_firewall_line.yml oneline="-A INPUT -d 224.0.0.0/4 -j ACCEPT" - - - name: ensure multicast transmission is enabled - include: ../reusables/open_firewall_line.yml oneline="-A INPUT -s 240.0.0.0/5 -j ACCEPT" - - - name: ensure multicast transmission is enabled - include: ../reusables/open_firewall_line.yml oneline="-A INPUT -m pkttype --pkt-type multicast -j ACCEPT" - - - name: ensure multicast transmission is enabled - include: ../reusables/open_firewall_line.yml oneline="-A INPUT -m pkttype --pkt-type broadcast -j ACCEPT" - - - name: ensure elasticsearch cluster name is set - lineinfile: - dest: /etc/elasticsearch/elasticsearch.yml - regexp: "^cluster.name:" - line: "cluster.name: {{ elasticsearch_cluster_name }}" - notify: - - ensure elasticsearch is restarted - - - name: ensure elasticsearch template directory is made - file: path=/etc/elasticsearch/templates owner=root group=root mode=0755 state=directory - - - name: ensure mapping templates are installed - copy: src="{{ item.src }}" dest="{{ item.dest }}" owner=root group=root mode=0644 - with_items: - - { src: templates/etc/elasticsearch/templates/tempalte_openstack.json, dest: /etc/elasticsearch/templates/tempalte_openstack.json } - - { src: templates/etc/elasticsearch/templates/tempalte_dmesg.json, dest: /etc/elasticsearch/templates/tempalte_dmesg.json } - - - name: ensure elasticsearch automatically starts after reboot - service: name={{item}} enabled=yes - with_items: - - elasticsearch - - - handlers: - - include: ../reusables/handlers.yml - - - name: ensure elasticsearch is restarted - service: name={{ item }} state=restarted - with_items: - - elasticsearch - - - - - diff --git a/tools/ansible-openstack-log/playbooks/elastic_search/main.yml b/tools/ansible-openstack-log/playbooks/elastic_search/main.yml deleted file mode 100644 index 0b4db0b..0000000 --- a/tools/ansible-openstack-log/playbooks/elastic_search/main.yml +++ /dev/null @@ -1,3 +0,0 @@ -- include: elasticsearch.yml -- include: elasticsearch-head.yml - diff --git a/tools/ansible-openstack-log/playbooks/elastic_search/templates b/tools/ansible-openstack-log/playbooks/elastic_search/templates deleted file mode 120000 index 7cb455a..0000000 --- a/tools/ansible-openstack-log/playbooks/elastic_search/templates +++ /dev/null @@ -1 +0,0 @@ -../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-agent-installation.yml b/tools/ansible-openstack-log/playbooks/fluentd/fluentd-agent-installation.yml deleted file mode 100644 index 971175d..0000000 ---
a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-agent-installation.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: td-agent installation for openstack nodes - hosts: openstack - gather_facts: yes - sudo: yes - tasks: - - - name: ensure necessary plugins are installed - command: /usr/lib64/fluent/ruby/bin/fluent-gem install {{item}} - with_items: - - fluent-plugin-tail-ex - - fluent-plugin-record-reformer - - - name: ensure fluentd configuration file is updated - template: > - src=templates/etc/td-agent/td_agent.conf - dest=/etc/td-agent/td-agent.conf - owner=root - group=root - mode=0644 - backup=yes - notify: - - ensure td-agent is restarted - - - name: ensure fluentd can access openstack log directories - file: > - path={{item}} - mode=0755 - state=directory - with_items: - - /var/log/cinder - - /var/log/glance - - /var/log/httpd - - /var/log/keystone - - /var/log/neutron - - /var/log/nova - - - name: ensure td-agent automatically starts after reboot - service: name={{item}} enabled=yes - with_items: - - td-agent - - - handlers: - - name: ensure td-agent is restarted - service: name={{ item }} state=restarted - with_items: - - td-agent - - - - - diff --git a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-collector-installation.yml b/tools/ansible-openstack-log/playbooks/fluentd/fluentd-collector-installation.yml deleted file mode 100644 index 8d40e4d..0000000 --- a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-collector-installation.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: td-agent setup for the fluentd-collector node - hosts: fluentd_collector - gather_facts: yes - sudo: yes - tasks: - - name: ensure packages needed to build fluentd plugins are installed - yum: name={{item}} state=latest - with_items: - - ruby-devel - - gcc - - libcurl-devel - - - name: ensure necessary plugins are installed - command: /usr/lib64/fluent/ruby/bin/fluent-gem install {{item}} - with_items: - - fluent-plugin-tail-ex - - fluent-plugin-record-reformer - - fluent-plugin-elasticsearch - - - name: ensure fluentd configuration for fluentd_collector is installed - template: > - src=templates/etc/td-agent/td_agent_collector.conf - dest=/etc/td-agent/td-agent.conf - owner=root - group=root - mode=0644 - backup=yes - notify: - - ensure td-agent is restarted - - - name: ensure the fluentd collector port is opened - include: ../reusables/open_firewall_port.yml protocol=tcp port="{{ fluentd_collector_port }}" - - - name: ensure the fluentd collector UDP port is also opened - include: ../reusables/open_firewall_port.yml protocol=udp port="{{ fluentd_collector_port }}" - - - name: ensure td-agent automatically starts after reboot - service: name={{item}} enabled=yes - with_items: - - td-agent - - handlers: - - include: ../reusables/handlers.yml - - - name: ensure td-agent is restarted - service: name={{ item }} state=restarted - with_items: - - td-agent - - - - - diff --git a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-packages.yml b/tools/ansible-openstack-log/playbooks/fluentd/fluentd-packages.yml deleted file mode 100644 index 9f7b888..0000000 --- a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-packages.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: td-agent packages for openstack and fluentd_collector nodes - hosts: openstack:fluentd_collector - gather_facts: yes - sudo: yes - tasks: - # - - name: GPG key import - rpm_key: state=present key=http://packages.treasuredata.com/GPG-KEY-td-agent - - - name: add repository - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: root - group:
root - mode: 0644 - with_items: - - { src: templates/etc/yum.repos.d/treasure-data.repo, dest: /etc/yum.repos.d/treasure-data.repo } - - - - name: yum clean all - command: /usr/bin/yum clean all - - - name: ensure fluentd packages are installed - yum: name={{item}} state=latest - with_items: - - td-agent - - td-libyaml - - - name: ensure td-agent automatically starts after reboot - service: name={{item}} enabled=yes - with_items: - - td-agent - - - handlers: - - include: ../reusables/handlers.yml - - - - diff --git a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-parameters.yml b/tools/ansible-openstack-log/playbooks/fluentd/fluentd-parameters.yml deleted file mode 100644 index 951a9dd..0000000 --- a/tools/ansible-openstack-log/playbooks/fluentd/fluentd-parameters.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: td-agent(fluentd) parameters - hosts: openstack:fluentd_collector - gather_facts: yes - sudo: yes - tasks: - # http://docs.fluentd.org/articles/before-install - - name: Increase Max # of File Descriptors (soft) - lineinfile: - dest: /etc/security/limits.conf - regexp: ^root soft nofile - line: root soft nofile 65536 - - - name: Increase Max # of File Descriptors (hard) - lineinfile: - dest: /etc/security/limits.conf - regexp: ^root hard nofile - line: root hard nofile 65536 - - - # - name: set net.ipv4.tcp_tw_recycle one - # sysctl: name=net.ipv4.tcp_tw_recycle value=1 state=present reload=yes sysctl_set=yes - - # - name: set net.ipv4.tcp_tw_reuse one - # sysctl: name=net.ipv4.tcp_tw_reuse value=1 state=present reload=yes sysctl_set=yes - - # - name: set net.ipv4.ip_local_port_range - # sysctl: name=net.ipv4.ip_local_port_range value="10240 65536" state=present reload=yes sysctl_set=yes - - - - - handlers: - - - - - - diff --git a/tools/ansible-openstack-log/playbooks/fluentd/main.yml b/tools/ansible-openstack-log/playbooks/fluentd/main.yml deleted file mode 100644 index 458c206..0000000 --- a/tools/ansible-openstack-log/playbooks/fluentd/main.yml +++ /dev/null @@ -1,5 +0,0 @@ -- include: fluentd-parameters.yml -- include: fluentd-packages.yml -- include: fluentd-collector-installation.yml -- include: fluentd-agent-installation.yml - diff --git a/tools/ansible-openstack-log/playbooks/fluentd/templates b/tools/ansible-openstack-log/playbooks/fluentd/templates deleted file mode 120000 index 7cb455a..0000000 --- a/tools/ansible-openstack-log/playbooks/fluentd/templates +++ /dev/null @@ -1 +0,0 @@ -../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack-log/playbooks/ntp/main.yml b/tools/ansible-openstack-log/playbooks/ntp/main.yml deleted file mode 100644 index 2f8d14b..0000000 --- a/tools/ansible-openstack-log/playbooks/ntp/main.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: NTP Server - hosts: all - sudo: yes - - tasks: - - name: ensure ntp package is installed - yum: name=ntp state=latest - when: "ntp_server is defined" - - - name: ensure ntp.conf file is configured - template: > - src=templates/etc/ntp.conf - dest=/etc/ntp.conf - owner=root - group=root - mode=0644 - notify: restart ntp - when: "ntp_server is defined" - - - name: synchronize clock now. 
- command: /usr/sbin/ntpdate -bu {{ ntp_server }} - when: "ntp_server is defined" - - - handlers: - - name: restart ntp - service: name=ntpd state=restarted enabled=on - - - - - - diff --git a/tools/ansible-openstack-log/playbooks/ntp/templates b/tools/ansible-openstack-log/playbooks/ntp/templates deleted file mode 120000 index 7cb455a..0000000 --- a/tools/ansible-openstack-log/playbooks/ntp/templates +++ /dev/null @@ -1 +0,0 @@ -../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack-log/playbooks/pre_action/determine_network.yml b/tools/ansible-openstack-log/playbooks/pre_action/determine_network.yml deleted file mode 100644 index b78b66c..0000000 --- a/tools/ansible-openstack-log/playbooks/pre_action/determine_network.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: determine networks of elasticsearch/fluentd/Kibana - hosts: fluentd_collector:elasticsearch:Kibana - gather_facts: yes - sudo: yes - - tasks: - - - name: ensure my_int_ip is a fact variable - action: set_fact - args: - my_int_ip: "{{ my_int_ip }}" - - - - name: ensure my_mng_ip is a fact variable - action: set_fact - args: - my_mng_ip: "{{ my_mng_ip }}" - diff --git a/tools/ansible-openstack-log/playbooks/pre_action/determine_openstack_network.yml b/tools/ansible-openstack-log/playbooks/pre_action/determine_openstack_network.yml deleted file mode 100644 index 9edc8e2..0000000 --- a/tools/ansible-openstack-log/playbooks/pre_action/determine_openstack_network.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: determine host networks - hosts: openstack - gather_facts: yes - sudo: yes - - tasks: - - - name: ensure my_int_ip is a fact variable - action: set_fact - args: - my_int_ip: "{{ my_int_ip }}" - - - - name: ensure my_ext_ip is a fact variable - action: set_fact - args: - my_ext_ip: "{{ my_ext_ip }}" - when: my_ext_obj['ipv4']['address'] is defined - - # When a linuxbridge is constructed for a flat network, it enslaves the eth interface, - # and the interface's IP address is reattached to the bridge. - # The following two tasks determine the IP address attached to the bridge and set it as a fact for later use.
- name: determine ipAddr for linuxbridge linked interface - shell: /sbin/bridge link show | grep {{ my_ext_if }} | awk '{print $10}' | sed -e "s/-/_/" - register: linked_bridge - when: '"ipv4" not in my_ext_obj' - - - name: ensure my_ext_ip is fact variable for linuxbridge linked interface - action: set_fact - args: - my_ext_ip: "{{ hostvars[inventory_hostname]['ansible_' + linked_bridge.stdout]['ipv4']['address'] }}" - when: '"ipv4" not in my_ext_obj' - - - - name: ensure my_mng_ip is a fact variable - action: set_fact - args: - my_mng_ip: "{{ my_mng_ip }}" - - - - name: ensure my_ext_ip is a fact variable (br-ex) - action: set_fact - args: - my_ext_ip: "{{ ansible_br_ex['ipv4']['address'] }}" - when: "ansible_br_ex is defined" diff --git a/tools/ansible-openstack-log/playbooks/pre_action/main.yml b/tools/ansible-openstack-log/playbooks/pre_action/main.yml deleted file mode 100644 index 69b30ce..0000000 --- a/tools/ansible-openstack-log/playbooks/pre_action/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- include: determine_openstack_network.yml -- include: determine_network.yml - - diff --git a/tools/ansible-openstack-log/playbooks/reusables/delete_firewall_rule.yml b/tools/ansible-openstack-log/playbooks/reusables/delete_firewall_rule.yml deleted file mode 100644 index a8ecbb2..0000000 --- a/tools/ansible-openstack-log/playbooks/reusables/delete_firewall_rule.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# delete iptables rule from /etc/sysconfig/iptables. Don't forget to include handlers.yml in the caller script. - - name: ensure iptables rule is deleted. - lineinfile: "dest=/etc/sysconfig/iptables regexp=^'{{ delete_line }}' line='{{ '#' + delete_line }}'" - notify: restart iptables - diff --git a/tools/ansible-openstack-log/playbooks/reusables/handlers.yml b/tools/ansible-openstack-log/playbooks/reusables/handlers.yml deleted file mode 100644 index cab3a95..0000000 --- a/tools/ansible-openstack-log/playbooks/reusables/handlers.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# handlers here - - - name: restart iptables - service: name=iptables state=restarted - - diff --git a/tools/ansible-openstack-log/playbooks/reusables/open_firewall_line.yml b/tools/ansible-openstack-log/playbooks/reusables/open_firewall_line.yml deleted file mode 100644 index 7985376..0000000 --- a/tools/ansible-openstack-log/playbooks/reusables/open_firewall_line.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# add an iptables rule line. Don't forget to include handlers.yml in the caller script. - - name: ensure iptables rule line is added. - lineinfile: dest=/etc/sysconfig/iptables - line="{{ oneline }}" - insertafter="-A INPUT -i lo -j ACCEPT" - state=present - notify: restart iptables - diff --git a/tools/ansible-openstack-log/playbooks/reusables/open_firewall_port.yml b/tools/ansible-openstack-log/playbooks/reusables/open_firewall_port.yml deleted file mode 100644 index 90ee533..0000000 --- a/tools/ansible-openstack-log/playbooks/reusables/open_firewall_port.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# open port for iptables. Don't forget to include handlers.yml in the caller script. - - name: ensure port is opened.
diff --git a/tools/ansible-openstack-log/set_log.yml b/tools/ansible-openstack-log/set_log.yml
deleted file mode 100644
index 923c18c..0000000
--- a/tools/ansible-openstack-log/set_log.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-- include: playbooks/pre_action/main.yml
-- include: playbooks/common/main.yml
-- include: playbooks/ntp/main.yml
-- include: playbooks/elastic_search/main.yml
-- include: playbooks/fluentd/main.yml
-- include: playbooks/Kibana/main.yml
diff --git a/tools/ansible-openstack-log/templates/etc/elasticsearch/templates/tempalte_dmesg.json b/tools/ansible-openstack-log/templates/etc/elasticsearch/templates/tempalte_dmesg.json
deleted file mode 100644
index 4fa604d..0000000
--- a/tools/ansible-openstack-log/templates/etc/elasticsearch/templates/tempalte_dmesg.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "tempalte_dmesg" : {
-    "template" : "logstash*",
-    "mappings" : {
-      "dmesg" : {
-        "properties" : {
-          "hostname": { "type": "string", "index": "not_analyzed" }
-        }
-      }
-    }
-  }
-}
diff --git a/tools/ansible-openstack-log/templates/etc/elasticsearch/templates/tempalte_openstack.json b/tools/ansible-openstack-log/templates/etc/elasticsearch/templates/tempalte_openstack.json
deleted file mode 100644
index a7c5286..0000000
--- a/tools/ansible-openstack-log/templates/etc/elasticsearch/templates/tempalte_openstack.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-  "tempalte_openstack" : {
-    "template" : "logstash*",
-    "mappings" : {
-      "openstack" : {
-        "properties" : {
-          "process": { "type": "string", "index": "not_analyzed" },
-          "LEVEL": { "type": "string", "index": "not_analyzed" },
-          "name": { "type": "string", "index": "not_analyzed" },
-          "requestID": { "type": "string", "index": "not_analyzed" },
-          "userID": { "type": "string", "index": "not_analyzed" },
-          "tenantID": { "type": "string", "index": "not_analyzed" },
-          "instanceID": { "type": "string", "index": "not_analyzed" },
-          "message": { "type": "string", "index": "not_analyzed" },
-          "hostname": { "type": "string", "index": "not_analyzed" },
-          "COMPONENT": { "type": "string", "index": "not_analyzed" },
-          "logfile": { "type": "string", "index": "not_analyzed" },
-          "time_raw": { "type": "date", "index": "not_analyzed", "format": "yyyy-MM-dd HH:mm:ss.SSS" }
-        }
-      }
-    }
-  }
-}
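Both index templates match any index named logstash* and mark fields such as hostname, requestID and LEVEL as not_analyzed strings, so Kibana's facet panels group on whole field values instead of analyzer tokens. Their location under etc/elasticsearch/templates suggests they rely on the 1.x-era Elasticsearch behavior of loading template JSON from the config templates directory at startup, where the top-level key (tempalte_dmesg, tempalte_openstack) names the template. A deployment sketch under that assumption (the task wording and the restart handler name are illustrative, not taken from this tree):

---
# Hypothetical task: drop the template JSON where a 1.x-era
# Elasticsearch auto-loads it on startup. The handler name
# "restart elasticsearch" is an assumption.
  - name: install elasticsearch index templates
    copy: src=templates/etc/elasticsearch/templates/{{ item }}
          dest=/etc/elasticsearch/templates/{{ item }}
    with_items:
    - tempalte_dmesg.json
    - tempalte_openstack.json
    notify: restart elasticsearch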
diff --git a/tools/ansible-openstack-log/templates/etc/httpd/conf.d/vhosts.conf b/tools/ansible-openstack-log/templates/etc/httpd/conf.d/vhosts.conf
deleted file mode 100644
index fc7403a..0000000
--- a/tools/ansible-openstack-log/templates/etc/httpd/conf.d/vhosts.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-NameVirtualHost *:80
-<VirtualHost *:80>
-    DocumentRoot /var/www/html/kibana-3.1.0
-    ServerName {{ hostvars[groups['elasticsearch'][0]].ansible_fqdn }}
-    ProxyPass /es/ http://{{ elasticsearch_ip }}:9200/
-    ProxyPassReverse /es/ http://{{ elasticsearch_ip }}:9200/
-    CustomLog logs/access_log custom
-    ErrorLog logs/error_log
-    Options FollowSymLinks
-</VirtualHost>
-
diff --git a/tools/ansible-openstack-log/templates/etc/ntp.conf b/tools/ansible-openstack-log/templates/etc/ntp.conf
deleted file mode 100644
index 23d410c..0000000
--- a/tools/ansible-openstack-log/templates/etc/ntp.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-driftfile /var/lib/ntp/drift
-restrict default kod nomodify notrap nopeer noquery
-restrict -6 default kod nomodify notrap nopeer noquery
-restrict 127.0.0.1
-restrict -6 ::1
-server {{ ntp_server }}
-includefile /etc/ntp/crypto/pw
-keys /etc/ntp/keys
diff --git a/tools/ansible-openstack-log/templates/etc/td-agent/td_agent.conf b/tools/ansible-openstack-log/templates/etc/td-agent/td_agent.conf
deleted file mode 100644
index 2d67024..0000000
--- a/tools/ansible-openstack-log/templates/etc/td-agent/td_agent.conf
+++ /dev/null
@@ -1,162 +0,0 @@
-# nova log
-<source>
-  type tail_ex
-  format /(^(?