GCE-API support service for OpenStack

Change-Id: I80f6b024cb40ba31ebaacf35d7364f66115da9c6
Implements: blueprint gce-api

parent d2068218da
commit 754725a4aa
.testr.conf (new file, 4 lines)
@@ -0,0 +1,4 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ gceapi/tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
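This configuration is consumed by testrepository. Assuming the ``testrepository`` and ``python-subunit`` packages are installed, the suite defined above can be run with, for example::

    testr run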
HACKING.rst (new file, 43 lines)
@@ -0,0 +1,43 @@
Gceapi Style Commandments
=========================

- Step 1: Read the OpenStack Style Commandments
  https://github.com/openstack-dev/hacking/blob/master/doc/source/index.rst
- Step 2: Read on

Gceapi Specific Commandments
----------------------------

General
-------
- Do not use locals(). Example::

    LOG.debug(_("volume %(vol_name)s: creating size %(vol_size)sG") %
              locals())  # BAD

    LOG.debug(_("volume %(vol_name)s: creating size %(vol_size)sG") %
              {'vol_name': vol_name,
               'vol_size': vol_size})  # OKAY

- Use 'raise' instead of 'raise e' to preserve the original traceback of the exception being re-raised::

    except Exception as e:
        ...
        raise e  # BAD

    except Exception:
        ...
        raise  # OKAY


Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. When submitting a patch for a
bug that had no unit test, a new passing unit test should be added. When
submitting a fix for a bug that does have a unit test, be sure to add a new one
that fails without the patch and passes with it.

For more information on creating unit tests and utilizing the testing
infrastructure in OpenStack Gceapi, please read gceapi/testing/README.rst.
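As an illustration of the guidance above, a minimal, self-contained regression test might look like the following sketch. The function under test is invented for the example; it mirrors the naming convention that the address code in this commit uses for unnamed floating IPs::

    import unittest


    def ip_to_name(ip):
        # Function under test: mirrors the "address-x-x-x-x" naming
        # convention used for unnamed addresses elsewhere in this commit.
        return "address-" + ip.replace(".", "-")


    class TestIpToName(unittest.TestCase):
        def test_ip_to_name(self):
            # Documents the expected convention; would fail if the
            # separator handling regressed.
            self.assertEqual("address-10-0-0-1", ip_to_name("10.0.0.1"))


    if __name__ == "__main__":
        unittest.main()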
LICENSE (new file, 176 lines)
@@ -0,0 +1,176 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.
MANIFEST.in (new file, 20 lines)
@@ -0,0 +1,20 @@
include run_tests.sh ChangeLog
include README.rst builddeb.sh
include MANIFEST.in pylintrc
include AUTHORS
include run_tests.py
include HACKING.rst
include LICENSE
include ChangeLog
include babel.cfg tox.ini
include openstack-common.conf
include gceapi/openstack/common/README
include gceapi/db/sqlalchemy/migrate_repo/README
include gceapi/db/sqlalchemy/migrate_repo/migrate.cfg
include gceapi/db/sqlalchemy/migrate_repo/versions/*.sql
graft doc
graft etc
graft gceapi/locale
graft gceapi/tests
graft tools
global-exclude *.pyc
README.rst (new file, 228 lines)
@@ -0,0 +1,228 @@
OpenStack Nova GCE API README
-----------------------------

Support of GCE-API for OpenStack.

Usage
=====

Download gcloud from Google and install it.
There are two ways to use it:

1. Run the authorization command:
   python google-cloud-sdk/platform/gcutil/gcutil --authorization_uri_base=http://localhost:8777 auth
   and then any other command:
   python google-cloud-sdk/platform/gcutil/gcutil --api_host=http://localhost:8777/ --authorization_uri_base=http://localhost:8777 --project demo listzones

2. You must have a Google account.
   You can activate an already-authorized account with
   gcloud config set account <account>
   or authorize a new account with
   gcloud auth login
   Next, authorize against OpenStack by running the authorization command:
   gcutil --authorization_uri_base=http://localhost:8777 auth
   and then you can run any other command:
   gcutil --api_host=http://localhost:8777/ --authorization_uri_base=http://localhost:8777 --project demo listzones

Make gcutil always use your GCE API endpoint with the '--api_host' flag and your GCE API
authorization endpoint with the '--authorization_uri_base' flag. You can also store these
settings in the "~/.gcutil.flags" file, as shown below.

If it doesn't work for some reason, check that your PYTHONPATH is exported and set correctly to something like
``/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages``.

Limitations
===========

* Names are unique in GCE and are used for identification. Names are not unique in Nova, where IDs are used instead.
  Solution: a GCE-managed OpenStack installation should also maintain unique naming.

* GCE IDs are ulong (8 bytes). OpenStack IDs can be different (int, string), but mostly they are GUIDs (16 bytes).
  Solution: since OpenStack IDs are of different length and nature, and because the GCE API currently never uses an
  ID as a parameter, 8-byte hashes are generated and returned for any reported ID (see the sketch after this list).

* GCE allows per-user SSH key specification, but Nova supports only one key.
  Solution: Nova GCE API just uses the first key.
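A minimal sketch of such an ID mapping, assuming nothing about the actual implementation in this commit: any OpenStack ID (int, string, or GUID) is reduced to a stable 8-byte unsigned integer::

    import hashlib
    import struct


    def gce_id_from_openstack_id(os_id):
        # Illustrative only, not the project's actual algorithm: hash the
        # stringified ID and keep the first 8 bytes as an unsigned 64-bit int.
        digest = hashlib.md5(str(os_id).encode("utf-8")).digest()
        return struct.unpack(">Q", digest[:8])[0]


    print(gce_id_from_openstack_id("9e2a0aa9-dcb4-4a15-9a5a-3d2b2d09f4c1"))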

Authentication specifics
========================

GCE API uses OAuth2.0 for authentication. A simple but sufficient implementation of this
protocol was added to the GCE API service in nova because Keystone lacks one.
The current implementation allows one authenticated user to operate on several OpenStack
projects, just as Google allows. To achieve this, the initial token returned during
authentication does not contain the project information required by Keystone.
Instead, authentication happens again with each request: the incoming project
information is added to the existing user info and a new token is acquired from Keystone.
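To make the per-request re-authentication concrete, here is a hedged sketch of exchanging an unscoped token for a project-scoped one against the Keystone v2.0 API. The endpoint, token, and project name are placeholder values, and the service's real code path may well differ::

    import json
    import urllib2  # Python 2, matching the era of this commit


    def rescope_token(keystone_url, unscoped_token, project_name):
        """Acquire a Keystone token scoped to the project named in the request."""
        body = json.dumps({
            "auth": {
                "token": {"id": unscoped_token},
                "tenantName": project_name,
            }
        })
        req = urllib2.Request(keystone_url + "/tokens", body,
                              {"Content-Type": "application/json"})
        resp = json.load(urllib2.urlopen(req))
        return resp["access"]["token"]["id"]


    # Example (placeholder values):
    # token = rescope_token("http://127.0.0.1:5000/v2.0", "abc123", "demo")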

Supported Features
==================

Standard Query Params (except for fields and prettyPrint) are not supported.

Supported resource types

* Addresses
* Disks
* Firewalls
* Images
* Instances
* MachineTypes
* Networks
* Projects
* Regions
* Zones

Unsupported resource types

* ForwardingRules
* HttpHealthChecks
* TargetPools

In the lists below:
"+" means supported,
"-" means unsupported.

+Addresses

+aggregatedList  GET /project/aggregated/addresses
+delete  DELETE /project/regions/region/addresses/address
+get  GET /project/regions/region/addresses/address
+insert  POST /project/regions/region/addresses
+list  GET /project/regions/region/addresses

+Disks

+aggregatedList  GET /project/aggregated/disks
+createSnapshot  POST /project/zones/zone/disks/disk/createSnapshot
+delete  DELETE /project/zones/zone/disks/disk
+get  GET /project/zones/zone/disks/disk
+insert  POST /project/zones/zone/disks
+list  GET /project/zones/zone/disks

+Firewalls

+delete  DELETE /project/global/firewalls/firewall
+get  GET /project/global/firewalls/firewall
+insert  POST /project/global/firewalls
+list  GET /project/global/firewalls
-patch  PATCH /project/global/firewalls/firewall
-update  PUT /project/global/firewalls/firewall

-ForwardingRules

-aggregatedList  GET /project/aggregated/forwardingRules
-delete  DELETE /project/regions/region/forwardingRules/forwardingRule
-get  GET /project/regions/region/forwardingRules/forwardingRule
-insert  POST /project/regions/region/forwardingRules
-list  GET /project/regions/region/forwardingRules
-setTarget  POST /project/regions/region/forwardingRules/forwardingRule/setTarget

+GlobalOperations

+aggregatedList  GET /project/aggregated/operations
+delete  DELETE /project/global/operations/operation
+get  GET /project/global/operations/operation
+list  GET /project/global/operations

-HttpHealthChecks

-delete  DELETE /project/global/httpHealthChecks/httpHealthCheck
-get  GET /project/global/httpHealthChecks/httpHealthCheck
-insert  POST /project/global/httpHealthChecks
-list  GET /project/global/httpHealthChecks
-patch  PATCH /project/global/httpHealthChecks/httpHealthCheck
-update  PUT /project/global/httpHealthChecks/httpHealthCheck

+Images

+delete  DELETE /project/global/images/image
-deprecate  POST /project/global/images/image/deprecate
+get  GET /project/global/images/image
+insert  POST /project/global/images
+list  GET /project/global/images

+Instances

+addAccessConfig  POST /project/zones/zone/instances/instance/addAccessConfig
+aggregatedList  GET /project/aggregated/instances
+attachDisk  POST /project/zones/zone/instances/instance/attachDisk
+delete  DELETE /project/zones/zone/instances/instance
+deleteAccessConfig  POST /project/zones/zone/instances/instance/deleteAccessConfig
+detachDisk  POST /project/zones/zone/instances/instance/detachDisk
+get  GET /project/zones/zone/instances/instance
-getSerialPortOutput  GET /project/zones/zone/instances/instance/serialPort
+insert  POST /project/zones/zone/instances
+list  GET /project/zones/zone/instances
+reset  POST /project/zones/zone/instances/instance/reset
-setMetadata  POST /project/zones/zone/instances/instance/setMetadata
-setTags  POST /project/zones/zone/instances/instance/setTags
-setScheduling  POST /project/zones/zone/instances/instance/setScheduling

+MachineTypes

+aggregatedList  GET /project/aggregated/machineTypes
+get  GET /project/zones/zone/machineTypes/machineType
+list  GET /project/zones/zone/machineTypes

+Networks

+delete  DELETE /project/global/networks/network
+get  GET /project/global/networks/network
+insert  POST /project/global/networks
+list  GET /project/global/networks

+Projects

+get  GET /project
+setCommonInstanceMetadata  POST /project/setCommonInstanceMetadata

+RegionOperations

+delete  DELETE /project/regions/region/operations/operation
+get  GET /project/regions/region/operations/operation
+list  GET /project/regions/region/operations

+Regions

+get  GET /project/regions/region
+list  GET /project/regions

+Routes

+delete  DELETE /project/global/routes/route
+get  GET /project/global/routes/route
+insert  POST /project/global/routes
+list  GET /project/global/routes

+Snapshots

+delete  DELETE /project/global/snapshots/snapshot
+get  GET /project/global/snapshots/snapshot
+list  GET /project/global/snapshots

-TargetPools

-addHealthCheck  POST /project/regions/region/targetPools/targetPool/addHealthCheck
-addInstance  POST /project/regions/region/targetPools/targetPool/addInstance
-aggregatedList  GET /project/aggregated/targetPools
-delete  DELETE /project/regions/region/targetPools/targetPool
-get  GET /project/regions/region/targetPools/targetPool
-getHealth  POST /project/regions/region/targetPools/targetPool/getHealth
-insert  POST /project/regions/region/targetPools
-list  GET /project/regions/region/targetPools
-removeHealthCheck  POST /project/regions/region/targetPools/targetPool/removeHealthCheck
-removeInstance  POST /project/regions/region/targetPools/targetPool/removeInstance
-setBackup  POST /project/regions/region/targetPools/targetPool/setBackup

+ZoneOperations

+delete  DELETE /project/zones/zone/operations/operation
+get  GET /project/zones/zone/operations/operation
+list  GET /project/zones/zone/operations

+Zones

+get  GET /project/zones/zone
+list  GET /project/zones
bin/gceapi-db-setup (new executable file, 291 lines)
@@ -0,0 +1,291 @@
#!/bin/bash
#
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
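# Example invocation (hypothetical values; the flags are documented in
# usage() below):
#
#   sudo ./bin/gceapi-db-setup deb --password gceapi --rootpw secret --yes
#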

#
# Print --help output and exit.
#
usage() {

cat << EOF
Set up a local MySQL database for use with gceapi.
This script will create a 'gceapi' database that is accessible
only on localhost by user 'gceapi' with password 'gceapi'.

Usage: gceapi-db-setup <rpm|deb> [options]
Options:
	rpm|deb
		Select a distro type (rpm or debian).

	--help | -h
		Print usage information.
	--password <pw> | -p <pw>
		Specify the password for the 'gceapi' MySQL user that gceapi
		will use to connect to the 'gceapi' MySQL database. By default,
		the password 'gceapi' will be used.
	--rootpw <pw> | -r <pw>
		Specify the root MySQL password. If the script installs
		the MySQL server, it will set the root password to this value
		instead of prompting for a password. If the MySQL server is
		already installed, this password will be used to connect to the
		database instead of having to prompt for it.
	--yes | -y
		In cases where the script would normally ask for confirmation
		before doing something, such as installing mysql-server,
		just assume yes. This is useful if you want to run the script
		non-interactively.
EOF

	exit 0
}

install_mysql_server() {
	if [ -z "${ASSUME_YES}" ] ; then
		$PACKAGE_INSTALL mysql-server
	else
		$PACKAGE_INSTALL -y mysql-server
	fi
}

start_mysql_server() {
	$SERVICE_START
}

MYSQL_GCEAPI_PW_DEFAULT="gceapi"
MYSQL_GCEAPI_PW=${MYSQL_GCEAPI_PW_DEFAULT}
GCEAPI_CONFIG="/etc/gceapi/gceapi.conf"
ASSUME_YES=""
ELEVATE=""

# Check for root privileges
if [[ $EUID -ne 0 ]] ; then
	echo "This operation requires superuser privileges, using sudo:"
	if sudo -l > /dev/null ; then
		ELEVATE="sudo"
	else
		exit 1
	fi
fi

case "$1" in
	rpm)
		echo "Installing on an RPM system."
		PACKAGE_INSTALL="$ELEVATE yum install"
		PACKAGE_STATUS="rpm -q"
		SERVICE_MYSQLD="mysqld"
		SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start"
		SERVICE_STATUS="service $SERVICE_MYSQLD status"
		SERVICE_ENABLE="$ELEVATE chkconfig"
		;;
	deb)
		echo "Installing on a Debian system."
		PACKAGE_INSTALL="$ELEVATE apt-get install"
		PACKAGE_STATUS="dpkg-query -s"
		SERVICE_MYSQLD="mysql"
		SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start"
		SERVICE_STATUS="$ELEVATE service $SERVICE_MYSQLD status"
		SERVICE_ENABLE=""
		;;
	*)
		usage
		;;
esac

while [ $# -gt 0 ]
do
	case "$1" in
		-h|--help)
			usage
			;;
		-p|--password)
			shift
			MYSQL_GCEAPI_PW=${1}
			;;
		-r|--rootpw)
			shift
			MYSQL_ROOT_PW=${1}
			;;
		-y|--yes)
			ASSUME_YES="yes"
			;;
		*)
			# ignore
			;;
	esac
	shift
done


# Make sure MySQL is installed.

NEW_MYSQL_INSTALL=0
if ! $PACKAGE_STATUS mysql-server > /dev/null && ! $PACKAGE_STATUS mariadb-server > /dev/null
then
	if [ -z "${ASSUME_YES}" ] ; then
		printf "mysql-server is not installed. Would you like to install it now? (y/n): "
		read response
		case "$response" in
			y|Y)
				;;
			n|N)
				echo "mysql-server must be installed. Please install it before proceeding."
				exit 0
				;;
			*)
				echo "Invalid response."
				exit 1
		esac
	fi

	NEW_MYSQL_INSTALL=1
	install_mysql_server
fi


# Make sure mysqld is running.

if ! $SERVICE_STATUS > /dev/null
then
	if [ -z "${ASSUME_YES}" ] ; then
		printf "$SERVICE_MYSQLD is not running. Would you like to start it now? (y/n): "
		read response
		case "$response" in
			y|Y)
				;;
			n|N)
				echo "$SERVICE_MYSQLD must be running. Please start it before proceeding."
				exit 0
				;;
			*)
				echo "Invalid response."
				exit 1
		esac
	fi

	start_mysql_server

	# If we both installed and started, ensure it starts at boot.
	# (SERVICE_ENABLE is empty on Debian, where packages enable themselves.)
	[ $NEW_MYSQL_INSTALL -eq 1 ] && [ -n "$SERVICE_ENABLE" ] && $SERVICE_ENABLE $SERVICE_MYSQLD on
fi


# Get MySQL root access.

if [ $NEW_MYSQL_INSTALL -eq 1 ]
then
	if [ ! "${MYSQL_ROOT_PW+defined}" ] ; then
		echo "Since this is a fresh installation of MySQL, please set a password for the 'root' mysql user."

		PW_MATCH=0
		while [ $PW_MATCH -eq 0 ]
		do
			printf "Enter new password for 'root' mysql user: "
			read -s MYSQL_ROOT_PW
			echo
			printf "Enter new password again: "
			read -s PW2
			echo
			if [ "${MYSQL_ROOT_PW}" = "${PW2}" ] ; then
				PW_MATCH=1
			else
				echo "Passwords did not match."
			fi
		done
	fi

	echo "UPDATE mysql.user SET password = password('${MYSQL_ROOT_PW}') WHERE user = 'root'; DELETE FROM mysql.user WHERE user = ''; flush privileges;" | mysql -u root
	if ! [ $? -eq 0 ] ; then
		echo "Failed to set password for 'root' MySQL user."
		exit 1
	fi
elif [ ! "${MYSQL_ROOT_PW+defined}" ] ; then
	printf "Please enter the password for the 'root' MySQL user: "
	read -s MYSQL_ROOT_PW
	echo
fi


# Sanity check MySQL credentials.

MYSQL_ROOT_PW_ARG=""
if [ "${MYSQL_ROOT_PW+defined}" ]
then
	MYSQL_ROOT_PW_ARG="--password=${MYSQL_ROOT_PW}"
fi
echo "SELECT 1;" | mysql -u root ${MYSQL_ROOT_PW_ARG} > /dev/null
if ! [ $? -eq 0 ]
then
	echo "Failed to connect to the MySQL server. Please check your root user credentials."
	exit 1
fi
echo "Verified connectivity to MySQL."


# Now create the db.

echo "Creating 'gceapi' database."
cat << EOF | mysql -u root ${MYSQL_ROOT_PW_ARG}
CREATE DATABASE IF NOT EXISTS gceapi;
GRANT ALL ON gceapi.* TO 'gceapi'@'localhost' IDENTIFIED BY '${MYSQL_GCEAPI_PW}';
GRANT ALL ON gceapi.* TO 'gceapi'@'%' IDENTIFIED BY '${MYSQL_GCEAPI_PW}';
flush privileges;
EOF


# Make sure gceapi configuration has the right MySQL password.

if [ "${MYSQL_GCEAPI_PW}" != "${MYSQL_GCEAPI_PW_DEFAULT}" ] ; then
	echo "Updating 'gceapi' database password in ${GCEAPI_CONFIG}"
	sed -i -e "s/mysql:\/\/gceapi:\(.*\)@/mysql:\/\/gceapi:${MYSQL_GCEAPI_PW}@/" ${GCEAPI_CONFIG}
fi

# Override the logging config in gceapi.conf.
log_conf=$(mktemp /tmp/gceapi-logging.XXXXXXXXXX.conf)
cat <<EOF > $log_conf
[loggers]
keys=root

[handlers]
keys=consoleHandler

[formatters]
keys=simpleFormatter

[logger_root]
level=INFO
handlers=consoleHandler

[handler_consoleHandler]
class=StreamHandler
formatter=simpleFormatter
args=(sys.stdout,)

[formatter_simpleFormatter]
format=%(name)s - %(levelname)s - %(message)s
EOF

gce-api-manage --log-config=$log_conf db_sync
rm $log_conf

# Do a final sanity check on the database.

echo "SELECT * FROM migrate_version;" | mysql -u gceapi --password=${MYSQL_GCEAPI_PW} gceapi > /dev/null
if ! [ $? -eq 0 ]
then
	echo "Final sanity check failed."
	exit 1
fi

echo "Complete!"
etc/gceapi/api-paste.ini (new file, 48 lines)
@@ -0,0 +1,48 @@
##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = gceapi.auth:GceapiKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_port = 35357
auth_protocol = http
auth_version = v2.0

#######
# GCE #
#######
[composite:gce]
use = egg:Paste#urlmap
/: gceapi_oauth
/compute/v1/projects/: gceapi
/discovery/v1/apis/compute/: gceapi_discovery

[composite:gceapi]
use = call:gceapi.auth:pipeline_factory
keystone = gceauthtoken authtoken keystonecontext gceexecutor

[filter:gceauthtoken]
paste.filter_factory = gceapi.api.oauth:filter_factory

[app:gceexecutor]
paste.app_factory = gceapi.api:APIRouter.factory

[composite:gceapi_oauth]
use = call:gceapi.auth:pipeline_factory
noauth = gceexecutor_oauth
keystone = gceexecutor_oauth

[app:gceexecutor_oauth]
paste.app_factory = gceapi.api:APIRouterOAuth.factory

[composite:gceapi_discovery]
use = call:gceapi.auth:pipeline_factory
noauth = gceexecutor_discovery
keystone = gceexecutor_discovery

[app:gceexecutor_discovery]
paste.app_factory = gceapi.api:APIRouterDiscovery.factory
####
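The composites above delegate to ``gceapi.auth:pipeline_factory``, which is not part of this excerpt. A typical paste pipeline factory in OpenStack projects of this era (Nova's, for instance) looks roughly like the sketch below; the selection key ('keystone' vs 'noauth') would normally come from an auth-strategy configuration option, which is assumed here::

    def pipeline_factory(loader, global_conf, **local_conf):
        """Assemble a WSGI pipeline from one of the named lists above.

        Illustrative sketch modeled on Nova's factory; gceapi's actual
        code may differ, e.g. in where the strategy name comes from.
        """
        auth_strategy = "keystone"  # assumed to come from configuration
        pipeline = local_conf[auth_strategy].split()
        # The last name is the app; everything before it is a filter.
        filters = [loader.get_filter(n) for n in pipeline[:-1]]
        app = loader.get_app(pipeline[-1])
        # Wrap the app in the filters, innermost last.
        filters.reverse()
        for f in filters:
            app = f(app)
        return app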
gceapi/__init__.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`gceapi` -- Cloud IaaS Platform
====================================

.. automodule:: gceapi
   :platform: Unix
   :synopsis: Infrastructure-as-a-Service Cloud platform.
"""

import gettext


gettext.install('gceapi', unicode=1)
gceapi/api/__init__.py (new file, 253 lines)
@@ -0,0 +1,253 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from gceapi.api import addresses
from gceapi.api import discovery
from gceapi.api import disks
from gceapi.api import firewalls
from gceapi.api import images
from gceapi.api import instances
from gceapi.api import machine_types
from gceapi.api import networks
from gceapi.api import oauth
from gceapi.api import operations
from gceapi.api import projects
from gceapi.api import regions
from gceapi.api import routes
from gceapi.api import snapshots
from gceapi.api import zones
from gceapi.openstack.common import log as logging
from gceapi import wsgi
from gceapi import wsgi_ext as openstack_api


gce_opts = [
    cfg.StrOpt('keystone_gce_url',
               default='http://127.0.0.1:5000/v2.0',
               help='Keystone URL'),
    cfg.IntOpt('gce_port',
               default=8777,
               help='the port of the gce api server'),
    cfg.StrOpt('gce_scheme',
               default='http',
               help='the protocol to use when connecting to the gce api '
                    'server (http, https)'),
    cfg.StrOpt('gce_path',
               default='/compute/v1beta15/projects',
               help='the path prefix used to call the gce api server'),
    cfg.StrOpt('public_network',
               default='public',
               help='name of public network'),
]

CONF = cfg.CONF
CONF.register_opts(gce_opts)

LOG = logging.getLogger(__name__)


class APIRouter(wsgi.Router):
    """Routes requests on the GCE API to the appropriate controller
    and method.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, `gceapi.wsgi.Router` doesn't have one."""
        return cls()

    def __init__(self):
        mapper = openstack_api.ProjectMapper()
        self.resources = {}
        self._setup_routes(mapper)
        super(APIRouter, self).__init__(mapper)

    def _setup_routes(self, mapper):
        mapper.redirect("", "/")

        self.resources['regions'] = regions.create_resource()
        self.resources['firewalls'] = firewalls.create_resource()
        self.resources['disks'] = disks.create_resource()
        self.resources['machineTypes'] = machine_types.create_resource()
        self.resources['instances'] = instances.create_resource()
        self.resources['images'] = images.create_resource()
        self.resources['zones'] = zones.create_resource()
        self.resources['networks'] = networks.create_resource()
        self.resources['projects'] = projects.create_resource()
        self.resources['snapshots'] = snapshots.create_resource()
        self.resources['addresses'] = addresses.create_resource()
        self.resources['routes'] = routes.create_resource()
        self.resources['operations'] = operations.create_resource()

        mapper.resource("disks", "zones/{scope_id}/disks",
                        controller=self.resources['disks'])
        mapper.connect("/{project_id}/aggregated/disks",
                       controller=self.resources['disks'],
                       action="aggregated_list",
                       conditions={"method": ["GET"]})
        mapper.connect("/{project_id}/zones/{scope_id}/disks/{id}/"
                       "createSnapshot",
                       controller=self.resources['disks'],
                       action="create_snapshot",
                       conditions={"method": ["POST"]})

        mapper.resource("machineTypes", "zones/{scope_id}/machineTypes",
                        controller=self.resources['machineTypes'])
        mapper.connect("/{project_id}/aggregated/machineTypes",
                       controller=self.resources['machineTypes'],
                       action="aggregated_list",
                       conditions={"method": ["GET"]})

        mapper.resource("instances", "zones/{scope_id}/instances",
                        controller=self.resources['instances'])
        mapper.connect("/{project_id}/aggregated/instances",
                       controller=self.resources['instances'],
                       action="aggregated_list",
                       conditions={"method": ["GET"]})
        mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/"
                       "addAccessConfig",
                       controller=self.resources['instances'],
                       action="add_access_config",
                       conditions={"method": ["POST"]})
        mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/"
                       "deleteAccessConfig",
                       controller=self.resources['instances'],
                       action="delete_access_config",
                       conditions={"method": ["POST"]})
        mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/reset",
                       controller=self.resources['instances'],
                       action="reset_instance",
                       conditions={"method": ["POST"]})
        mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/"
                       "attachDisk",
                       controller=self.resources['instances'],
                       action="attach_disk",
                       conditions={"method": ["POST"]})
        mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/"
                       "detachDisk",
                       controller=self.resources['instances'],
                       action="detach_disk",
                       conditions={"method": ["POST"]})

        mapper.resource("images", "global/images",
                        controller=self.resources['images'])
        mapper.resource("regions", "regions",
                        controller=self.resources['regions'])
        mapper.resource("zones", "zones",
                        controller=self.resources['zones'])
        mapper.resource("networks", "global/networks",
                        controller=self.resources["networks"])
        mapper.resource("firewalls", "global/firewalls",
                        controller=self.resources["firewalls"])
        mapper.resource("routes", "global/routes",
                        controller=self.resources['routes'])

        mapper.connect("/{project_id}", controller=self.resources['projects'],
                       action="show", conditions={"method": ["GET"]})
        mapper.connect("/{project_id}/setCommonInstanceMetadata",
                       controller=self.resources['projects'],
                       action="set_common_instance_metadata",
                       conditions={"method": ["POST"]})

        mapper.resource("addresses", "regions/{scope_id}/addresses",
                        controller=self.resources['addresses'])
        mapper.connect("/{project_id}/aggregated/addresses",
                       controller=self.resources['addresses'],
                       action="aggregated_list",
                       conditions={"method": ["GET"]})

        mapper.resource("snapshots", "global/snapshots",
                        controller=self.resources['snapshots'])

        mapper.resource("operations", "global/operations",
                        controller=self.resources['operations'])
        mapper.resource("operations", "regions/{scope_id}/operations",
                        controller=self.resources['operations'])
        mapper.resource("operations", "zones/{scope_id}/operations",
                        controller=self.resources['operations'])
        mapper.connect("/{project_id}/aggregated/operations",
                       controller=self.resources['operations'],
                       action="aggregated_list",
                       conditions={"method": ["GET"]})


class APIRouterOAuth(wsgi.Router):
    """Routes requests on the OAuth2.0 to the appropriate controller
    and method.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, `gceapi.wsgi.Router` doesn't have one."""
        return cls()

    def __init__(self):
        mapper = openstack_api.ProjectMapper()
        self.resources = {}
        self._setup_routes(mapper)
        super(APIRouterOAuth, self).__init__(mapper)

    def _setup_routes(self, mapper):
        mapper.redirect("", "/")

        self.resources['oauth'] = oauth.create_resource()

        mapper.connect("/auth",
                       controller=self.resources['oauth'],
                       action="auth",
                       conditions={"method": ["GET"]})
        mapper.connect("/approval",
                       controller=self.resources['oauth'],
                       action="approval",
                       conditions={"method": ["POST"]})
        mapper.connect("/token",
                       controller=self.resources['oauth'],
                       action="token",
                       conditions={"method": ["POST"]})


class APIRouterDiscovery(wsgi.Router):
    """Routes requests on the GCE discovery API to the appropriate controller
    and method.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, `gceapi.wsgi.Router` doesn't have one."""
        return cls()

    def __init__(self):
        mapper = openstack_api.ProjectMapper()
        self.resources = {}
        self._setup_routes(mapper)
        super(APIRouterDiscovery, self).__init__(mapper)

    def _setup_routes(self, mapper):
        mapper.redirect("", "/")

        self.resources['discovery'] = discovery.create_resource()

        mapper.connect("/{version}/rest",
                       controller=self.resources['discovery'],
                       action="discovery",
                       conditions={"method": ["GET"]})
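To see what these mappings do, here is a minimal, self-contained sketch using the ``routes`` library directly. ``ProjectMapper`` presumably prepends the ``/{project_id}`` prefix; this sketch writes it out explicitly, and the URL values are invented for the example::

    import routes

    mapper = routes.Mapper()
    mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/attachDisk",
                   action="attach_disk",
                   conditions={"method": ["POST"]})

    # A POST to a GCE-style URL resolves to the attach_disk action:
    match = mapper.match("/demo/zones/nova/instances/vm-1/attachDisk",
                         environ={"REQUEST_METHOD": "POST"})
    print(match)
    # {'project_id': 'demo', 'scope_id': 'nova', 'id': 'vm-1',
    #  'action': 'attach_disk'}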
gceapi/api/address_api.py (new file, 26 lines)
@@ -0,0 +1,26 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import address_neutron_api
from gceapi.api import address_nova_api
from gceapi.api import base_api


class API(base_api.API):
    """GCE Address API."""

    NEUTRON_API_MODULE = address_neutron_api
    NOVA_API_MODULE = address_nova_api

    __metaclass__ = base_api.NetSingleton
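``base_api.NetSingleton`` is not part of this excerpt. Judging by the two module attributes above, it is presumably a metaclass that instantiates the backend from ``NEUTRON_API_MODULE`` or ``NOVA_API_MODULE`` depending on which network service the deployment uses, and caches a single instance. A purely speculative sketch; everything except the two module attributes is invented::

    def neutron_enabled():
        # Placeholder: the real check likely inspects the deployment's
        # configuration or service catalog.
        return True


    class NetSingleton(type):
        """Hypothetical sketch of a backend-selecting singleton metaclass."""

        _instance = None

        def __call__(cls, *args, **kwargs):
            if cls._instance is None:
                module = (cls.NEUTRON_API_MODULE if neutron_enabled()
                          else cls.NOVA_API_MODULE)
                # Cache one instance of the selected backend's API class.
                cls._instance = module.API(*args, **kwargs)
            return cls._instance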
gceapi/api/address_neutron_api.py (new file, 120 lines)
@@ -0,0 +1,120 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import network_api
from gceapi.api import operation_util
from gceapi.api import region_api
from gceapi.api import scopes
from gceapi import exception
from gceapi.openstack.common.gettextutils import _


CONF = cfg.CONF


class API(base_api.API):
    """GCE Address API - neutron implementation."""

    KIND = "address"
    PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "name", "description"]

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        self._region_api = region_api.API()

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_scopes(self, context, item):
        region = item["scope"]
        if region is not None:
            return [scopes.RegionScope(region)]
        return self._region_api.get_items_as_scopes(context)

    def get_item(self, context, name, scope=None):
        return self._get_floating_ips(context, scope, name)[0]

    def get_items(self, context, scope=None):
        return self._get_floating_ips(context, scope)

    def delete_item(self, context, name, scope=None):
        floating_ip = self._get_floating_ips(context, scope, name)[0]
        operation_util.start_operation(context)
        self._delete_db_item(context, floating_ip)
        clients.neutron(context).delete_floatingip(floating_ip["id"])

    def add_item(self, context, name, body, scope=None):
        if any(x["name"] == name
               for x in self._get_floating_ips(context, scope)):
            raise exception.InvalidInput(
                    _("The resource '%s' already exists.") % name)
        public_network_id = network_api.API().get_public_network_id(context)
        operation_util.start_operation(context)
        floating_ip = clients.neutron(context).create_floatingip(
                {"floatingip": {"floating_network_id": public_network_id}})
        floating_ip = self._prepare_floating_ip(
                clients.nova(context), floating_ip["floatingip"], scope)
        floating_ip["name"] = body["name"]
        if "description" in body:
            floating_ip["description"] = body["description"]
        floating_ip = self._add_db_item(context, floating_ip)
        return floating_ip

    def _get_floating_ips(self, context, scope, name=None):
        results = clients.neutron(context).list_floatingips(
                tenant_id=context.project_id)["floatingips"]
        gce_floating_ips = self._get_db_items_dict(context)
        nova_client = clients.nova(context)
        results = [self._prepare_floating_ip(nova_client, x, scope,
                                             gce_floating_ips.get(x["id"]))
                   for x in results]
        unnamed_ips = self._purge_db(context, results, gce_floating_ips)
        self._add_nonnamed_items(context, unnamed_ips)
        if name is None:
            return results

        for item in results:
            if item["name"] == name:
                return [item]

        raise exception.NotFound

    def _prepare_floating_ip(self, nova_client, floating_ip, scope,
                             db_item=None):
        floating_ip["scope"] = scope
        fixed_ip_address = floating_ip.get("fixed_ip_address")
        floating_ip["status"] = "IN USE" if fixed_ip_address else "RESERVED"

        if fixed_ip_address is not None:
            instances = nova_client.servers.list(
                    search_opts={"fixed_ip": fixed_ip_address})
            if instances:
                floating_ip["instance_name"] = instances[0].name
                floating_ip["instance_zone"] = getattr(
                        instances[0], "OS-EXT-AZ:availability_zone")

        return self._prepare_item(floating_ip, db_item)

    def _add_nonnamed_items(self, context, items):
        for item in items:
            item["name"] = ("address-" +
                            item["floating_ip_address"].replace(".", "-"))
            item["creationTimestamp"] = ""
            self._add_db_item(context, item)
gceapi/api/address_nova_api.py (new file, 124 lines)
@@ -0,0 +1,124 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_util
from gceapi.api import region_api
from gceapi.api import scopes
from gceapi.api import utils
from gceapi import exception
from gceapi.openstack.common.gettextutils import _


class API(base_api.API):
    """GCE Address API - nova-network implementation."""

    KIND = "address"
    PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "name", "description"]

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        self._region_api = region_api.API()

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_scopes(self, context, item):
        region = item["scope"]
        if region is not None:
            return [scopes.RegionScope(region)]
        return self._region_api.get_items_as_scopes(context)

    def get_item(self, context, name, scope=None):
        client = clients.nova(context)
        return self._get_floating_ips(client, context, scope, name)[0]

    def get_items(self, context, scope=None):
        client = clients.nova(context)
        return self._get_floating_ips(client, context, scope)

    def delete_item(self, context, name, scope=None):
        client = clients.nova(context)
        floating_ip = self._get_floating_ips(client, context, scope, name)[0]
        operation_util.start_operation(context)
        self._delete_db_item(context, floating_ip)
        client.floating_ips.delete(floating_ip["id"])

    def add_item(self, context, name, body, scope=None):
        client = clients.nova(context)
        if any(x["name"] == name
               for x in self._get_floating_ips(client, context, scope)):
            raise exception.InvalidInput(
                    _("The resource '%s' already exists.") % name)
        operation_util.start_operation(context)
        result = client.floating_ips.create()
        floating_ip = self._prepare_floating_ip(client, context, result, scope)
        floating_ip["name"] = body["name"]
        if "description" in body:
            floating_ip["description"] = body["description"]
        floating_ip = self._add_db_item(context, floating_ip)
        return floating_ip

    def _get_floating_ips(self, client, context, scope, name=None):
        results = client.floating_ips.list()
        gce_floating_ips = self._get_db_items_dict(context)
        results = [self._prepare_floating_ip(
                           client, context, x, scope,
                           gce_floating_ips.get(str(x.id)))
                   for x in results]
        unnamed_ips = self._purge_db(context, results, gce_floating_ips)
        self._add_nonnamed_items(context, unnamed_ips)

        if name is None:
            return results

        for item in results:
            if item["name"] == name:
                return [item]

        raise exception.NotFound

    def _prepare_floating_ip(self, client, context, floating_ip, scope,
                             db_item=None):
        floating_ip = utils.to_dict(floating_ip)
        fixed_ip = floating_ip.get("fixed_ip")
        # Read instance_id before the dict is rebuilt below; the rebuilt
        # dict no longer carries it.
        instance_id = floating_ip.get("instance_id")
        floating_ip = {
            "fixed_ip_address": fixed_ip if fixed_ip else None,
            "floating_ip_address": floating_ip["ip"],
            "id": floating_ip["id"],
            "port_id": None,
            "tenant_id": context.project_id,
            "scope": scope,
            "status": "IN USE" if fixed_ip else "RESERVED",
        }

        if instance_id is not None:
            instance = client.servers.get(instance_id)
            floating_ip["instance_name"] = instance.name
            floating_ip["instance_zone"] = getattr(
                instance, "OS-EXT-AZ:availability_zone")

        return self._prepare_item(floating_ip, db_item)

    def _add_nonnamed_items(self, context, items):
        for item in items:
            item["name"] = ("address-" +
                            item["floating_ip_address"].replace(".", "-"))
            item["creationTimestamp"] = ""
            self._add_db_item(context, item)
49
gceapi/api/addresses.py
Normal file
@@ -0,0 +1,49 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import address_api
from gceapi.api import common as gce_common
from gceapi.api import scopes
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Address controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(address_api.API(),
                                         *args, **kwargs)

    def format_item(self, request, floating_ip, scope):
        result_dict = {
            "creationTimestamp": floating_ip.get("creationTimestamp", ""),
            "status": floating_ip["status"],
            "name": floating_ip["name"],
            "address": floating_ip["floating_ip_address"],
        }
        if "description" in floating_ip:
            result_dict["description"] = floating_ip["description"]
        else:
            result_dict["description"] = ""

        if "instance_name" in floating_ip:
            result_dict["users"] = [self._qualify(
                request, "instances", floating_ip["instance_name"],
                scopes.ZoneScope(floating_ip["instance_zone"]))]

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
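As a usage illustration, format_item above yields a GCE-style address resource; the values below are hypothetical, and the kind/selfLink/id header fields are filled in by _format_item::

    {
        "kind": "compute#address",
        "name": "address-192-0-2-10",
        "status": "RESERVED",
        "address": "192.0.2.10",
        "description": "",
        "creationTimestamp": "2013-12-06T03:34:31Z",
    }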
213
gceapi/api/base_api.py
Normal file
@@ -0,0 +1,213 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base classes of GCE API conversion layer.

Classes in this layer aggregate functionality of OpenStack necessary
and sufficient to handle supported GCE API requests.
"""

from oslo.config import cfg

from gceapi import db
from gceapi import exception
from gceapi.openstack.common import timeutils

FLAGS = cfg.CONF


class Singleton(type):
    """Singleton metaclass.

    KIND must be overridden in classes based on this type.
    """
    _instances = {}
    KIND = ""

    def __call__(self, *args, **kwargs):
        if not self.KIND:
            raise NotImplementedError
        if self.KIND not in self._instances:
            singleton = super(Singleton, self).__call__(*args, **kwargs)
            self._instances[self.KIND] = singleton
        return self._instances[self.KIND]

    @classmethod
    def get_instance(cls, kind):
        """Get singleton by name."""

        return cls._instances.get(kind)


class NetSingleton(Singleton):
    """Proxy loader for network-dependent API.

    NEUTRON_API_MODULE and NOVA_API_MODULE must be overridden in classes
    based on this type.
    """

    NEUTRON_API_MODULE = None
    NOVA_API_MODULE = None

    def __call__(self):
        net_api = FLAGS.get("network_api")
        # NOTE(Alex): Initializing proper network singleton
        if net_api is None or ("quantum" in net_api
                               or "neutron" in net_api):
            return self.NEUTRON_API_MODULE.API()
        else:
            return self.NOVA_API_MODULE.API()


class API(object):
    """Base GCE API abstraction class

    Inherited classes should implement one class of GCE API functionality.
    There should be enough public methods implemented to cover necessary
    methods of GCE API in the class. Other public methods can exist to be
    invoked from other APIs of this layer.
    Classes in this layer should use each other's functionality instead of
    calling corresponding low-level routines.
    Basic methods should be named including "item(s)" instead of specific
    functional names.

    Descendants are stateless singletons.
    Supports callbacks for interaction of APIs in this layer
    """
    # TODO(Alex): Now action methods get body of parameters straight from GCE
    # request while returning results in terms of OpenStack to be converted
    # to GCE terms in controller. In next version this layer should be revised
    # to work symmetrically with incoming and outgoing data.

    __metaclass__ = Singleton

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        self._callbacks = []

    def _get_type(self):
        """GCE API object type method. Should be overridden."""

        raise NotImplementedError

    def _get_persistent_attributes(self):
        """Iterable of names of columns stored in GCE API database.

        Should be overridden.
        """

        raise NotImplementedError

    def get_item(self, context, name, scope=None):
        """Returns fully filled item for particular inherited API."""

        raise exception.NotFound

    def get_items(self, context, scope=None):
        """Returns list of items."""

        return []

    def delete_item(self, context, name, scope=None):
        """Deletes an item."""

        raise exception.NotFound

    def add_item(self, context, name, body, scope=None):
        """Creates an item. It returns created item."""

        raise exception.NotFound

    def get_scopes(self, context, item):
        """Returns which zones/regions the item belongs to."""

        return []

    def _process_callbacks(self, context, reason, item, **kwargs):
        for cb_reason, cb_func in self._callbacks:
            if cb_reason == reason:
                cb_func(context, item, **kwargs)

    def _register_callback(self, reason, func):
        """Callbacks registration

        Callbacks can be registered by one API to be called by another before
        some action for checking possibility of the action or to process
        pre-actions
        """

        self._callbacks.append((reason, func))

    def _prepare_item(self, item, db_item):
        if db_item is not None:
            item.update(db_item)
        return item

    def _add_db_item(self, context, item):
        db_item = dict((key, item.get(key))
                       for key in self._get_persistent_attributes()
                       if key in item)
        if ("creationTimestamp" in self._get_persistent_attributes() and
                "creationTimestamp" not in db_item):
            # TODO(ft): Google does not return microseconds but does return
            # server time zone: 2013-12-06T03:34:31.340-08:00
            utcnow = timeutils.isotime(None, True)
            db_item["creationTimestamp"] = utcnow
            item["creationTimestamp"] = utcnow
        db.add_item(context, self._get_type(), db_item)
        return item

    def _delete_db_item(self, context, item):
        return db.delete_item(context, self._get_type(), item["id"])

    def _update_db_item(self, context, item):
        db_item = dict((key, item.get(key))
                       for key in self._get_persistent_attributes()
                       if key in item)
        db.update_item(context, self._get_type(), db_item)

    def _get_db_items(self, context):
        return db.get_items(context, self._get_type())

    def _get_db_items_dict(self, context):
        return dict((item["id"], item) for item in self._get_db_items(context))

    def _get_db_item_by_id(self, context, item_id):
        return db.get_item_by_id(context, self._get_type(), item_id)

    def _get_db_item_by_name(self, context, name):
        return db.get_item_by_name(context, self._get_type(), name)

    def _purge_db(self, context, os_items, db_items_dict):
        only_os_items = []
        existed_db_items = set()
        for item in os_items:
            db_item = db_items_dict.get(str(item["id"]))
            if db_item is None:
                only_os_items.append(item)
            else:
                existed_db_items.add(db_item["id"])
        for item in db_items_dict.itervalues():
            if item["id"] not in existed_db_items:
                self._delete_db_item(context, item)
        return only_os_items


class _CallbackReasons(object):
    check_delete = 1
    pre_delete = 2
    post_add = 3


_callback_reasons = _CallbackReasons()
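A minimal sketch (the DemoAPI name is hypothetical, not part of this module) of how a concrete API builds on these base classes::

    from gceapi.api import base_api

    class DemoAPI(base_api.API):
        KIND = "demo"
        PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "name"]

        def _get_type(self):
            return self.KIND

        def _get_persistent_attributes(self):
            return self.PERSISTENT_ATTRIBUTES

    # The Singleton metaclass keys instances by KIND, so repeated
    # construction returns the same stateless object (Python 2 semantics,
    # since API declares __metaclass__):
    assert DemoAPI() is DemoAPI()

    # One API may subscribe to another's lifecycle events, e.g.:
    # other_api._register_callback(base_api._callback_reasons.pre_delete,
    #                              demo_pre_delete_handler)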
141
gceapi/api/clients.py
Normal file
@@ -0,0 +1,141 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneclient.v2_0 import client as kc
from novaclient import client as novaclient
from novaclient import shell as novashell
from oslo.config import cfg

from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging

logger = logging.getLogger(__name__)

CONF = cfg.CONF


try:
    from neutronclient.v2_0 import client as neutronclient
except ImportError:
    neutronclient = None
    logger.info(_('neutronclient not available'))
try:
    from cinderclient import client as cinderclient
except ImportError:
    cinderclient = None
    logger.info(_('cinderclient not available'))
try:
    from glanceclient import client as glanceclient
except ImportError:
    glanceclient = None
    logger.info(_('glanceclient not available'))


def nova(context, service_type='compute'):
    computeshell = novashell.OpenStackComputeShell()
    extensions = computeshell._discover_extensions("1.1")

    args = {
        'project_id': context.project_id,
        'auth_url': CONF.keystone_gce_url,
        'service_type': service_type,
        'username': None,
        'api_key': None,
        'extensions': extensions,
    }

    client = novaclient.Client(1.1, **args)

    management_url = _url_for(context, service_type=service_type)
    client.client.auth_token = context.auth_token
    client.client.management_url = management_url

    return client


def neutron(context):
    if neutronclient is None:
        return None

    args = {
        'auth_url': CONF.keystone_gce_url,
        'service_type': 'network',
        'token': context.auth_token,
        'endpoint_url': _url_for(context, service_type='network'),
    }

    return neutronclient.Client(**args)


def glance(context):
    if glanceclient is None:
        return None

    args = {
        'auth_url': CONF.keystone_gce_url,
        'service_type': 'image',
        'token': context.auth_token,
    }

    return glanceclient.Client(
        "1", endpoint=_url_for(context, service_type='image'), **args)


def cinder(context):
    if cinderclient is None:
        return nova(context, 'volume')

    args = {
        'service_type': 'volume',
        'auth_url': CONF.keystone_gce_url,
        'username': None,
        'api_key': None,
    }

    _cinder = cinderclient.Client('1', **args)
    management_url = _url_for(context, service_type='volume')
    _cinder.client.auth_token = context.auth_token
    _cinder.client.management_url = management_url

    return _cinder


def keystone(context):
    _keystone = kc.Client(
        token=context.auth_token,
        tenant_id=context.project_id,
        auth_url=CONF.keystone_gce_url)

    return _keystone


def _url_for(context, **kwargs):
    service_catalog = context.service_catalog
    if not service_catalog:
        catalog = keystone(context).service_catalog.catalog
        service_catalog = catalog["serviceCatalog"]
        context.service_catalog = service_catalog

    service_type = kwargs["service_type"]
    for service in service_catalog:
        if service["type"] != service_type:
            continue
        for endpoint in service["endpoints"]:
            if "publicURL" in endpoint:
                return endpoint["publicURL"]
        else:
            return None

    return None
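For reference, _url_for above walks a Keystone v2-style service catalog and returns the first publicURL whose service type matches; a hypothetical minimal catalog entry looks like::

    service_catalog = [
        {
            "type": "compute",
            "endpoints": [
                {"publicURL": "http://controller:8774/v2/<project_id>"},
            ],
        },
    ]

If the matching service has no endpoint with a publicURL, or no service of that type exists, the helper returns None.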
389
gceapi/api/common.py
Normal file
@@ -0,0 +1,389 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base GCE API controller"""

import os.path
import re
from webob import exc

from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import scopes
from gceapi.api import utils
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import timeutils


class Controller(object):
    """Base controller

    Implements base CRUD methods.
    Individual GCE controllers should inherit this and:
    - implement format_item() method,
    - override _get_type() method,
    - add necessary specific request handlers,
    - use _api to hold instance of related GCE API (see base_api.py).
    """

    _api = None

    # Initialization
    def __init__(self, api):
        """Base initialization.

        Inherited classes should init _api and call super().
        """

        self._api = api
        self._type_name = self._api._get_type()
        self._collection_name = utils.get_collection_name(self._type_name)
        self._type_kind = utils.get_type_kind(self._type_name)
        self._list_kind = utils.get_list_kind(self._type_name)
        self._aggregated_kind = utils.get_aggregated_kind(self._type_name)
        self._operation_api = operation_api.API()

    def process_result(self, request, action, action_result):
        context = self._get_context(request)
        operation = operation_util.save_operaton(context, action_result)
        if operation is not None:
            scope = self._operation_api.get_scopes(context, operation)[0]
            action_result = self._format_operation(request, operation, scope)

        if isinstance(action_result, Exception):
            return self._format_error(action_result)
        if action_result is None:
            return None, 204
        return self._format_output(request, action, action_result), 200

    # Base methods, should be overridden

    def format_item(self, request, image, scope):
        """Main item resource conversion routine

        Overridden in inherited classes, it should implement conversion of
        an OpenStack resource into a GCE resource.
        """

        raise exc.HTTPNotImplemented

    # Actions
    def index(self, req, scope_id=None):
        """GCE list requests, global or with zone/region specified."""

        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)

        items = self._api.get_items(context, scope)
        items = [{
            "scope": scope,
            "item": self.format_item(req, i, scope)
        } for i in items]
        items = self._filter_items(req, items)
        items, next_page_token = self._page_items(req, items)
        items = [i["item"] for i in items]

        return self._format_list(req, items, next_page_token, scope)

    def show(self, req, id=None, scope_id=None):
        """GCE get requests, global or zone/region specified."""

        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)
        try:
            item = self._api.get_item(context, id, scope)
            return self.format_item(req, item, scope)
        except (exception.NotFound, KeyError, IndexError):
            msg = _("Resource '%s' could not be found") % id
            raise exc.HTTPNotFound(explanation=msg)

    def aggregated_list(self, req):
        """GCE aggregated list requests for all zones/regions."""

        context = self._get_context(req)
        items = list()
        for item in self._api.get_items(context, None):
            for scope in self._api.get_scopes(context, item):
                items.append({
                    "scope": scope,
                    "item": self.format_item(req, item, scope)
                })
        items = self._filter_items(req, items)
        items, next_page_token = self._page_items(req, items)

        items_by_scopes = {}
        for item in items:
            scope_path = item["scope"].get_path()
            items_by_scope = items_by_scopes.setdefault(scope_path,
                {self._collection_name: []})[self._collection_name]
            items_by_scope.append(item["item"])

        return self._format_list(req, items_by_scopes, next_page_token,
                                 scopes.AggregatedScope())

    def delete(self, req, id, scope_id=None):
        """GCE delete requests."""

        scope = self._get_scope(req, scope_id)
        context = self._get_context(req)
        operation_util.init_operation(context, "delete",
                                      self._type_name, id, scope)
        try:
            self._api.delete_item(context, id, scope)
        except (exception.NotFound, KeyError, IndexError):
            msg = _("Resource '%s' could not be found") % id
            raise exc.HTTPNotFound(explanation=msg)

    def create(self, req, body, scope_id=None):
        """GCE add requests."""

        scope = self._get_scope(req, scope_id)
        context = self._get_context(req)
        operation_util.init_operation(context, "insert",
                                      self._type_name, body["name"], scope)
        self._api.add_item(context, body['name'], body, scope)

    # Filtering
    def _filter_items(self, req, items):
        """Filtering result list

        Only one filter is supported (i.e. by one field)
        Only two comparison strings are supported: 'eq' and 'ne'
        There are no logical expressions with fields
        """
        if not items:
            return items
        if "filter" not in req.params:
            return items

        filter_def = req.params["filter"].split()
        if len(filter_def) != 3:
            # TODO(apavlov): raise exception
            return items
        if filter_def[1] != "eq" and filter_def[1] != "ne":
            # TODO(apavlov): raise exception
            return items
        if filter_def[0] not in items[0]["item"]:
            # TODO(apavlov): raise exception
            return items

        filter_field = filter_def[0]
        filter_cmp = filter_def[1] == "eq"
        filter_pattern = filter_def[2]
        if filter_pattern[0] == "'" and filter_pattern[-1] == "'":
            filter_pattern = filter_pattern[1:-1]

        result_list = list()
        for item in items:
            field = item["item"][filter_field]
            result = re.match(filter_pattern, field)
            if filter_cmp != (result is None):
                result_list.append(item)

        return result_list

    # Paging
    def _page_items(self, req, items):
        if not items:
            return items, None
        if "maxResults" not in req.params:
            return items, None

        limit = int(req.params["maxResults"])
        if limit >= len(items):
            return items, None

        page_index = int(req.params.get("pageToken", 0))
        if page_index < 0 or page_index * limit > len(items):
            # TODO(apavlov): raise exception
            return [], None

        items.sort(None, lambda x: x["item"].get("name"))
        start = limit * page_index
        if start + limit >= len(items):
            return items[start:], None

        return items[start:start + limit], str(page_index + 1)

    # Utility
    def _get_context(self, req):
        return req.environ['gceapi.context']

    def _get_scope(self, req, scope_id):
        scope = scopes.construct_from_path(req.path_info, scope_id)
        if scope is None:
            return None
        scope_api = scope.get_scope_api()
        if scope_api is not None:
            try:
                context = self._get_context(req)
                scope_api.get_item(context, scope.get_name(), None)
            except ValueError as ex:
                raise exc.HTTPNotFound(detail=ex)

        return scope

    # Result formatting
    def _format_date(self, date_string):
        """Returns standard format for given date."""
        if date_string is None:
            return None
        if isinstance(date_string, basestring):
            date_string = timeutils.parse_isotime(date_string)
        return date_string.strftime('%Y-%m-%dT%H:%M:%SZ')

    def _get_id(self, link):
        hashed_link = hash(link)
        if hashed_link < 0:
            hashed_link = -hashed_link
        return str(hashed_link)

    def _qualify(self, request, controller, identifier, scope):
        """Creates fully qualified selfLink for an item or collection

        Specific formatting for projects and zones/regions,
        'global' prefix for global resources,
        'zones/zone_id' prefix for zone (similar for regions) resources.
        """

        result = os.path.join(
            request.application_url, self._get_context(request).project_name)
        if controller:
            if scope:
                result = os.path.join(result, scope.get_path())
            result = os.path.join(result, controller)
            if identifier:
                result = os.path.join(result, identifier)
        return result

    def _format_item(self, request, result_dict, scope):
        return self._add_item_header(request, result_dict, scope,
                                     self._type_kind, self._collection_name)

    def _format_operation(self, request, operation, scope):
        result_dict = {
            "name": operation["name"],
            "operationType": operation["type"],
            "insertTime": operation["insert_time"],
            "startTime": operation["start_time"],
            "status": operation["status"],
            "progress": operation["progress"],
            "user": operation["user"],
        }
        result_dict["targetLink"] = self._qualify(
            request, utils.get_collection_name(operation["target_type"]),
            operation["target_name"], scope)
        result_dict["targetId"] = self._get_id(result_dict["targetLink"])
        if "end_time" in operation:
            result_dict["endTime"] = operation["end_time"]
        if "error_code" in operation:
            result_dict.update({
                "httpErrorStatusCode": operation["error_code"],
                "httpErrorMessage": operation["error_message"],
                "error": {"errors": operation["errors"]},
            })
        type_name = self._operation_api._get_type()
        return self._add_item_header(request, result_dict, scope,
                                     utils.get_type_kind(type_name),
                                     utils.get_collection_name(type_name))

    def _add_item_header(self, request, result_dict, scope,
                         _type_kind, _collection_name):
        if scope is not None and scope.get_name() is not None:
            result_dict[scope.get_type()] = self._qualify(
                request, scope.get_collection(), scope.get_name(), None)
        result_dict["kind"] = _type_kind
        result_dict["selfLink"] = self._qualify(
            request, _collection_name, result_dict.get("name"), scope)
        result_dict["id"] = self._get_id(result_dict["selfLink"])
        return result_dict

    def _format_list(self, request, result_list, next_page_token, scope):
        result_dict = {}
        result_dict["items"] = result_list
        if next_page_token:
            result_dict["nextPageToken"] = next_page_token
        result_dict["kind"] = (self._aggregated_kind
            if scope and isinstance(scope, scopes.AggregatedScope)
            else self._list_kind)

        context = self._get_context(request)
        list_id = os.path.join("projects", context.project_name)
        if scope:
            list_id = os.path.join(list_id, scope.get_path())
        list_id = os.path.join(list_id, self._collection_name)
        result_dict["id"] = list_id

        result_dict["selfLink"] = self._qualify(
            request, self._collection_name, None, scope)
        return result_dict

    def _format_error(self, ex_value):
        if isinstance(ex_value, exception.NotAuthorized):
            msg = _('Unauthorized')
            code = 401
        elif isinstance(ex_value, exc.HTTPException):
            msg = ex_value.explanation
            code = ex_value.code
        elif isinstance(ex_value, exception.GceapiException):
            msg = ex_value.args[0]
            code = ex_value.code
        else:
            msg = _('Internal server error')
            code = 500

        return {
            'error': {'errors': [{'message': msg}]},
            'code': code,
            'message': msg
        }, code

    def _format_output(self, request, action, action_result):
        # TODO(ft): this method must be safe and ignore unknown fields
        fields = request.params.get('fields', None)
        # TODO(ft): GCE can also format results of other action
        if action not in ('index', 'show') or fields is None:
            return action_result

        if action == 'show':
            action_result = utils.apply_template(fields, action_result)
            return action_result
        sp = utils.split_by_comma(fields)
        top_level = []
        items = []
        for string in sp:
            if 'items' in string:
                items.append(string)
            else:
                top_level.append(string)
        res = {}
        if len(items) > 0:
            res['items'] = []
        for string in top_level:
            dct = utils.apply_template(string, action_result)
            for key, val in dct.items():
                res[key] = val
        for string in items:
            if '(' in string:
                dct = utils.apply_template(string, action_result)
                for key, val in dct.items():
                    res[key] = val
            elif string.startswith('items/'):
                string = string[len('items/'):]
                for element in action_result['items']:
                    dct = utils.apply_template(string, element)
                    res['items'].append(dct)

        return res
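To illustrate the filter semantics above (a standalone sketch, not code from this module): re.match anchors the pattern at the start of the field value, and 'ne' simply inverts the result::

    import re

    def matches(field_value, op, pattern):
        # mirrors _filter_items: "eq" keeps matches, "ne" keeps non-matches
        found = re.match(pattern, field_value) is not None
        return found if op == "eq" else not found

    assert matches("default", "eq", "def.*")
    assert matches("default", "ne", "other")

A hypothetical request could combine both mechanisms, e.g. filter=name eq 'default' together with maxResults=10 and a numeric pageToken; per _page_items, the returned nextPageToken is simply the incremented page index.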
6734
gceapi/api/compute/v1.json
Normal file
File diff suppressed because it is too large
63
gceapi/api/discovery.py
Normal file
@@ -0,0 +1,63 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect
import os
import threading
import webob

from gceapi.openstack.common import log as logging
from gceapi import wsgi_ext as openstack_wsgi

LOG = logging.getLogger(__name__)


class Controller(object):

    _lock = threading.RLock()
    _files = {}

    def discovery(self, req, version):
        """Returns appropriate json by its version."""

        if version in self._files:
            return self._files[version]

        self._lock.acquire()
        try:
            if version in self._files:
                return self._files[version]

            jfile = self._load_file(version)
            jfile = jfile.replace("{HOST_URL}", req.host_url)
            self._files[version] = jfile
            return jfile
        finally:
            self._lock.release()

    def _load_file(self, version):
        current_file = os.path.abspath(inspect.getsourcefile(lambda _: None))
        current_dir = os.path.dirname(current_file)
        file_name = os.path.join(current_dir, "compute", version + ".json")
        try:
            f = open(file_name)
        except Exception as ex:
            raise webob.exc.HTTPNotFound(ex)
        result = f.read()
        f.close()
        return result


def create_resource():
    return openstack_wsgi.Resource(Controller())
161
gceapi/api/disk_api.py
Normal file
@@ -0,0 +1,161 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import image_api
from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import scopes
from gceapi.api import utils
from gceapi import exception


GB = 1024 ** 3


class API(base_api.API):
    """GCE Disk API."""

    KIND = "disk"
    _status_map = {
        "creating": "CREATING",
        "downloading": "CREATING",
        "available": "READY",
        "attaching": "READY",
        "in-use": "READY",
        # "deleting": "",
        "error": "FAILED",
        # "error_deleting": "",
        "backing-up": "READY",
        "restoring-backup": "READY",
        # "error_restoring": ""
    }

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        operation_api.API().register_get_progress_method(
            "disk-add",
            self._get_add_item_progress)
        operation_api.API().register_get_progress_method(
            "disk-delete",
            self._get_delete_item_progress)

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope=None):
        client = clients.cinder(context)
        volumes = client.volumes.list(search_opts={"display_name": name})
        volumes = self._filter_volumes_by_zone(volumes, scope)
        volumes = [utils.to_dict(item) for item in volumes]
        if not volumes or len(volumes) != 1:
            raise exception.NotFound
        return self._prepare_item(client, volumes[0])

    def get_items(self, context, scope=None):
        client = clients.cinder(context)
        volumes = client.volumes.list()
        volumes = self._filter_volumes_by_zone(volumes, scope)
        volumes = [utils.to_dict(item) for item in volumes]
        for volume in volumes:
            self._prepare_item(client, volume)
        return volumes

    def get_scopes(self, context, item):
        return [scopes.ZoneScope(item["availability_zone"])]

    def _prepare_item(self, client, item):
        snapshot = None
        snapshot_id = item["snapshot_id"]
        if snapshot_id:
            snapshot = utils.to_dict(client.volume_snapshots.get(snapshot_id))
        item["snapshot"] = snapshot
        item["status"] = self._status_map.get(item["status"], item["status"])
        item["name"] = item["display_name"]
        image = item.get("volume_image_metadata")
        if image:
            item["image_name"] = image["image_name"]
        return item

    def _filter_volumes_by_zone(self, volumes, scope):
        if scope is None:
            return volumes
        return filter(
            lambda volume: volume.availability_zone == scope.get_name(),
            volumes)

    def delete_item(self, context, name, scope=None):
        client = clients.cinder(context).volumes
        volumes = client.list(search_opts={"display_name": name})
        if not volumes or len(volumes) != 1:
            raise exception.NotFound
        operation_util.start_operation(context,
                                       self._get_delete_item_progress,
                                       volumes[0].id)
        client.delete(volumes[0])

    def add_item(self, context, name, body, scope=None):
        sizeGb = int(body['sizeGb']) if 'sizeGb' in body else None

        snapshot_uri = body.get("sourceSnapshot")
        image_uri = body.get("sourceImage")
        snapshot_id = None
        image_id = None

        client = clients.cinder(context)
        if snapshot_uri:
            snapshot_name = utils._extract_name_from_url(snapshot_uri)
            snapshots = client.volume_snapshots.list(
                search_opts={"display_name": snapshot_name})
            if not snapshots or len(snapshots) != 1:
                raise exception.NotFound
            snapshot_id = snapshots[0].id
        elif image_uri:
            image_name = utils._extract_name_from_url(image_uri)
            image = image_api.API().get_item(context, image_name, scope)
            image_id = image['id']
            # Cinder API doesn't get size from image, so we do this
            image_size_in_gb = (int(image['size']) + GB - 1) / GB
            if not sizeGb or sizeGb < image_size_in_gb:
                sizeGb = image_size_in_gb

        operation_util.start_operation(context, self._get_add_item_progress)
        volume = client.volumes.create(
            sizeGb, snapshot_id=snapshot_id,
            display_name=body.get('name'),
            display_description=body.get('description'),
            imageRef=image_id,
            availability_zone=scope.get_name())
        operation_util.set_item_id(context, volume.id)

        return self._prepare_item(client, utils.to_dict(volume))

    def _get_add_item_progress(self, context, volume_id):
        client = clients.cinder(context)
        try:
            volume = client.volumes.get(volume_id)
        except clients.cinderclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if volume.status not in ["creating", "downloading"]:
            return operation_api.gef_final_progress(volume.status == "error")

    def _get_delete_item_progress(self, context, volume_id):
        client = clients.cinder(context)
        try:
            volume = client.volumes.get(volume_id)
        except clients.cinderclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if volume.status not in ["deleting", "deleted"]:
            return operation_api.gef_final_progress(True)
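The image-to-volume sizing in add_item above is a ceiling division in Python 2 integer arithmetic; a standalone check (values hypothetical)::

    GB = 1024 ** 3
    image_size = 1500 * 1024 ** 2          # a 1500 MB image
    size_gb = (image_size + GB - 1) / GB   # rounds up, never down
    assert size_gb == 2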
69
gceapi/api/disks.py
Normal file
@@ -0,0 +1,69 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import disk_api
from gceapi.api import operation_util
from gceapi.api import scopes
from gceapi.api import snapshot_api
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Disk controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(disk_api.API(),
                                         *args, **kwargs)

    def format_item(self, request, volume, scope):
        result_dict = {
            "creationTimestamp": self._format_date(volume["created_at"]),
            "status": volume["status"],
            "name": volume["display_name"],
            "description": volume["display_description"],
            "sizeGb": volume["size"],
        }
        snapshot = volume["snapshot"]
        if snapshot:
            result_dict["sourceSnapshot"] = self._qualify(request,
                "snapshots", snapshot["display_name"],
                scopes.GlobalScope())
            result_dict["sourceSnapshotId"] = snapshot["id"]
        image_name = volume.get("image_name")
        if image_name:
            result_dict["sourceImage"] = self._qualify(request,
                "images", image_name, scopes.GlobalScope())
            result_dict["sourceImageId"] = self._get_id(
                result_dict["sourceImage"])

        return self._format_item(request, result_dict, scope)

    def create(self, req, body, scope_id):
        source_image = req.params.get("sourceImage")
        if source_image is not None:
            body["sourceImage"] = source_image
        return super(Controller, self).create(req, body, scope_id)

    def create_snapshot(self, req, body, scope_id, id):
        body["disk_name"] = id
        scope = self._get_scope(req, scope_id)
        context = self._get_context(req)
        operation_util.init_operation(context, "createSnapshot",
                                      self._type_name, id, scope)
        snapshot_api.API().add_item(context, body, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
256
gceapi/api/firewall_api.py
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
# Copyright 2013 Cloudscaling Group, Inc
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import copy
|
||||||
|
|
||||||
|
from gceapi.api import base_api
|
||||||
|
from gceapi.api import clients
|
||||||
|
from gceapi.api import network_api
|
||||||
|
from gceapi.api import operation_util
|
||||||
|
from gceapi.api import utils
|
||||||
|
from gceapi import exception
|
||||||
|
from gceapi.openstack.common.gettextutils import _
|
||||||
|
from gceapi.openstack.common import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
PROTOCOL_MAP = {
|
||||||
|
'1': 'icmp',
|
||||||
|
'6': 'tcp',
|
||||||
|
'17': 'udp',
|
||||||
|
}
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class API(base_api.API):
|
||||||
|
"""GCE Firewall API."""
|
||||||
|
|
||||||
|
KIND = "firewall"
|
||||||
|
PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "network_name"]
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super(API, self).__init__(*args, **kwargs)
|
||||||
|
network_api.API()._register_callback(
|
||||||
|
base_api._callback_reasons.pre_delete,
|
||||||
|
self.delete_network_firewalls)
|
||||||
|
|
||||||
|
def _get_type(self):
|
||||||
|
return self.KIND
|
||||||
|
|
||||||
|
def _get_persistent_attributes(self):
|
||||||
|
return self.PERSISTENT_ATTRIBUTES
|
||||||
|
|
||||||
|
def get_item(self, context, name, scope=None):
|
||||||
|
client = clients.nova(context)
|
||||||
|
try:
|
||||||
|
firewall = client.security_groups.find(name=name)
|
||||||
|
except (clients.novaclient.exceptions.NotFound,
|
||||||
|
clients.novaclient.exceptions.NoUniqueMatch):
|
||||||
|
raise exception.NotFound()
|
||||||
|
firewall = self._prepare_firewall(utils.to_dict(firewall))
|
||||||
|
db_firewall = self._get_db_item_by_id(context, firewall["id"])
|
||||||
|
self._prepare_item(firewall, db_firewall)
|
||||||
|
return firewall
|
||||||
|
|
||||||
|
def get_items(self, context, scope=None):
|
||||||
|
client = clients.nova(context)
|
||||||
|
firewalls = client.security_groups.list()
|
||||||
|
items = list()
|
||||||
|
gce_firewalls = self._get_db_items_dict(context)
|
||||||
|
for firewall in firewalls:
|
||||||
|
item = self._prepare_firewall(utils.to_dict(firewall))
|
||||||
|
self._prepare_item(item, gce_firewalls.get(item["id"]))
|
||||||
|
items.append(item)
|
||||||
|
self._purge_db(context, items, gce_firewalls)
|
||||||
|
return items
|
||||||
|
|
||||||
|
def add_item(self, context, name, body, scope=None):
|
||||||
|
network = self._get_network_by_url(context, body['network'])
|
||||||
|
self._check_rules(body)
|
||||||
|
group_description = body.get("description", "")
|
||||||
|
client = clients.nova(context)
|
||||||
|
operation_util.start_operation(context)
|
||||||
|
sg = client.security_groups.create(body['name'], group_description)
|
||||||
|
try:
|
||||||
|
rules = self._convert_to_secgroup_rules(body)
|
||||||
|
for rule in rules:
|
||||||
|
client.security_group_rules.create(
|
||||||
|
sg.id, ip_protocol=rule["protocol"],
|
||||||
|
from_port=rule["from_port"], to_port=rule["to_port"],
|
||||||
|
cidr=rule["cidr"], )
|
||||||
|
except Exception:
|
||||||
|
client.security_groups.delete(sg)
|
||||||
|
raise
|
||||||
|
new_firewall = utils.to_dict(client.security_groups.get(sg.id))
|
||||||
|
new_firewall = self._prepare_firewall(new_firewall)
|
||||||
|
new_firewall["creationTimestamp"] = 1
|
||||||
|
new_firewall["network_name"] = network["name"]
|
||||||
|
new_firewall = self._add_db_item(context, new_firewall)
|
||||||
|
self._process_callbacks(
|
||||||
|
context, base_api._callback_reasons.post_add, new_firewall)
|
||||||
|
return new_firewall
|
||||||
|
|
||||||
|
def delete_item(self, context, name, scope=None):
|
||||||
|
firewall = self.get_item(context, name)
|
||||||
|
operation_util.start_operation(context)
|
||||||
|
self._process_callbacks(
|
||||||
|
context, base_api._callback_reasons.pre_delete, firewall)
|
||||||
|
client = clients.nova(context)
|
||||||
|
try:
|
||||||
|
client.security_groups.delete(firewall["id"])
|
||||||
|
self._delete_db_item(context, firewall)
|
||||||
|
except clients.novaclient.exceptions.ClientException as ex:
|
||||||
|
raise exception.GceapiException(message=ex.message, code=ex.code)
|
||||||
|
|
||||||
|
def _prepare_firewall(self, firewall):
|
||||||
|
# NOTE(ft): OpenStack security groups are more powerful than
|
||||||
|
# gce firewalls so when we cannot completely convert secgroup
|
||||||
|
# we add prefixes to firewall description
|
||||||
|
# [*] - cidr rules too complex to convert
|
||||||
|
# [+] - non-cidr rules presents
|
||||||
|
|
||||||
|
non_cidr_rule_exists = False
|
||||||
|
too_complex_for_gce = False
|
||||||
|
|
||||||
|
# NOTE(ft): group OpenStack rules by cidr and proto
|
||||||
|
# cidr group must be comparable object
|
||||||
|
def _ports_to_str(rule):
|
||||||
|
if rule['from_port'] == rule['to_port']:
|
||||||
|
return str(rule['from_port'])
|
||||||
|
else:
|
||||||
|
return "%s-%s" % (rule['from_port'], rule['to_port'])
|
||||||
|
|
||||||
|
grouped_rules = {}
|
||||||
|
for rule in firewall["rules"]:
|
||||||
|
if "cidr" not in rule["ip_range"] or not rule["ip_range"]["cidr"]:
|
||||||
|
non_cidr_rule_exists = True
|
||||||
|
continue
|
||||||
|
cidr = rule.get("ip_range", {}).get("cidr")
|
||||||
|
proto = rule["ip_protocol"]
|
||||||
|
cidr_group = grouped_rules.setdefault(cidr, {})
|
||||||
|
proto_ports = cidr_group.setdefault(proto, set())
|
||||||
|
proto_ports.add(_ports_to_str(rule))
|
||||||
|
|
||||||
|
# NOTE(ft): compare cidr grups to understand
|
||||||
|
# whether OpenStack rules are too complex or not
|
||||||
|
common_rules = None
|
||||||
|
for cidr in grouped_rules:
|
||||||
|
if common_rules:
|
||||||
|
if common_rules != grouped_rules[cidr]:
|
||||||
|
too_complex_for_gce = True
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
common_rules = grouped_rules[cidr]
|
||||||
|
|
||||||
|
# NOTE(ft): check icmp rules:
|
||||||
|
# if per icmp type rule present then rules are too complex
|
||||||
|
if not too_complex_for_gce and common_rules and "icmp" in common_rules:
|
||||||
|
icmp_rules = common_rules["icmp"]
|
||||||
|
if len(icmp_rules) == 1:
|
||||||
|
icmp_rule = icmp_rules.pop()
|
||||||
|
if icmp_rule != "-1":
|
||||||
|
too_complex_for_gce = True
|
||||||
|
else:
|
||||||
|
too_complex_for_gce = True
|
||||||
|
|
||||||
|
# NOTE(ft): build gce rules if possible
|
||||||
|
def _build_gce_port_rule(proto, rules):
|
||||||
|
gce_rule = {"IPProtocol": proto}
|
||||||
|
if proto != "icmp":
|
||||||
|
gce_rule["ports"] = rules
|
||||||
|
return gce_rule
|
||||||
|
|
||||||
|
sourceRanges = []
|
||||||
|
allowed = []
|
||||||
|
if not too_complex_for_gce:
|
||||||
|
sourceRanges = [cidr for cidr in grouped_rules] or ["0.0.0.0/0"]
|
||||||
|
if common_rules:
|
||||||
|
allowed = [_build_gce_port_rule(proto, common_rules[proto])
|
||||||
|
for proto in common_rules]
|
||||||
|
firewall["sourceRanges"] = sourceRanges
|
||||||
|
firewall["allowed"] = allowed
|
||||||
|
|
||||||
|
# NOTE(ft): add prefixes to description
|
||||||
|
description = firewall.get("description")
|
||||||
|
prefixes = []
|
||||||
|
if too_complex_for_gce:
|
||||||
|
prefixes.append("[*]")
|
||||||
|
if non_cidr_rule_exists:
|
||||||
|
prefixes.append("[+]")
|
||||||
|
if prefixes:
|
||||||
|
if description is not None:
|
||||||
|
prefixes.append(description)
|
||||||
|
description = "".join(prefixes)
|
||||||
|
firewall["description"] = description
|
||||||
|
|
||||||
|
return firewall
|
||||||
|
|
||||||
|
def _get_network_by_url(self, context, url):
|
||||||
|
# NOTE(apavlov): Check existence of such network
|
||||||
|
network_name = utils._extract_name_from_url(url)
|
||||||
|
return network_api.API().get_item(context, network_name)
|
||||||
|
|
||||||
|
def _check_rules(self, firewall):
|
||||||
|
if not firewall.get('sourceRanges') or firewall.get('sourceTags'):
|
||||||
|
msg = _("Not 'sourceRange' neither 'sourceTags' is provided")
|
||||||
|
raise exception.InvalidRequest(msg)
|
||||||
|
for allowed in firewall.get('allowed', []):
|
||||||
|
proto = allowed.get('IPProtocol')
|
||||||
|
proto = PROTOCOL_MAP.get(proto, proto)
|
||||||
|
if not proto or proto not in PROTOCOL_MAP.values():
|
||||||
|
msg = _("Invlaid protocol")
|
||||||
|
raise exception.InvalidRequest(msg)
|
||||||
|
if proto == 'icmp' and allowed.get('ports'):
|
||||||
|
msg = _("Invalid options for icmp protocol")
|
||||||
|
raise exception.InvalidRequest(msg)
|
||||||
|
|
||||||
|
def _convert_to_secgroup_rules(self, firewall):
|
||||||
|
rules = []
|
||||||
|
for source_range in firewall['sourceRanges']:
|
||||||
|
for allowed in firewall.get('allowed', []):
|
||||||
|
proto = allowed['IPProtocol']
|
||||||
|
proto = PROTOCOL_MAP.get(proto, proto)
|
||||||
|
rule = {
|
||||||
|
"protocol": proto,
|
||||||
|
"cidr": source_range,
|
||||||
|
}
|
||||||
|
if proto == "icmp":
|
||||||
|
rule["from_port"] = -1
|
||||||
|
rule["to_port"] = -1
|
||||||
|
rules.append(rule)
|
||||||
|
else:
|
||||||
|
for port in allowed.get('ports', []):
|
||||||
|
if "-" in port:
|
||||||
|
from_port, to_port = port.split("-")
|
||||||
|
else:
|
||||||
|
from_port = to_port = port
|
||||||
|
rule["from_port"] = from_port
|
||||||
|
rule["to_port"] = to_port
|
||||||
|
rules.append(copy.copy(rule))
|
||||||
|
return rules
|
||||||
|
|
||||||
|
def get_network_firewalls(self, context, network_name):
|
||||||
|
firewalls = self.get_items(context, None)
|
||||||
|
return [f for f in firewalls
|
||||||
|
if f.get("network_name", None) == network_name]
|
||||||
|
|
||||||
|
def delete_network_firewalls(self, context, network):
|
||||||
|
network_name = network["name"]
|
||||||
|
client = clients.nova(context)
|
||||||
|
for secgroup in self.get_network_firewalls(context, network_name):
|
||||||
|
try:
|
||||||
|
client.security_groups.delete(secgroup["id"])
|
||||||
|
except Exception:
|
||||||
|
LOG.exception(("Failed to delete security group (%s) while"
|
||||||
|
"delete network (%s))"),
|
||||||
|
secgroup["name"], network_name)
|
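
For orientation, here is a runnable sketch of the rule expansion performed
by _convert_to_secgroup_rules above; the sample PROTOCOL_MAP and firewall
body are illustrative assumptions, not part of this commit::

    import copy

    # Assumed stand-in for the module's PROTOCOL_MAP (numbers to names).
    PROTOCOL_MAP = {"1": "icmp", "6": "tcp", "17": "udp"}

    def convert_to_secgroup_rules(firewall):
        rules = []
        for source_range in firewall["sourceRanges"]:
            for allowed in firewall.get("allowed", []):
                proto = PROTOCOL_MAP.get(allowed["IPProtocol"],
                                         allowed["IPProtocol"])
                rule = {"protocol": proto, "cidr": source_range}
                if proto == "icmp":
                    # nova uses -1/-1 to mean "all ICMP types"
                    rule["from_port"] = -1
                    rule["to_port"] = -1
                    rules.append(rule)
                else:
                    for port in allowed.get("ports", []):
                        from_port, _sep, to_port = port.partition("-")
                        rule["from_port"] = from_port
                        rule["to_port"] = to_port or from_port
                        rules.append(copy.copy(rule))
        return rules

    # One firewall with two port specs becomes two nova rules:
    print(convert_to_secgroup_rules({
        "sourceRanges": ["10.0.0.0/24"],
        "allowed": [{"IPProtocol": "tcp", "ports": ["80", "8000-8080"]}],
    }))
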
45
gceapi/api/firewalls.py
Normal file
@@ -0,0 +1,45 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import firewall_api
from gceapi.api import scopes
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Firewall controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(firewall_api.API(), *args, **kwargs)

    def format_item(self, request, firewall, scope):
        result_dict = {
            "creationTimestamp": firewall.get("creationTimestamp", ""),
            "name": firewall["name"],
            "description": firewall["description"],
            "sourceRanges": firewall["sourceRanges"],
            "allowed": firewall["allowed"],
        }

        network = firewall.get("network_name")
        if network:
            result_dict["network"] = self._qualify(request,
                "networks", network, scopes.GlobalScope())

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
142
gceapi/api/image_api.py
Normal file
@@ -0,0 +1,142 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

try:
    from glanceclient import exc as glanceclient_exc
except ImportError:
    glanceclient_exc = None

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception
from gceapi.openstack.common.gettextutils import _


class API(base_api.API):
    """GCE Image API."""

    KIND = "image"
    PERSISTENT_ATTRIBUTES = ["id", "description", "image_ref"]

    _status_map = {
        "queued": "PENDING",
        "saving": "PENDING",
        "active": "READY",
        "killed": "FAILED",
        # "deleted": "",
        # "pending_delete": ""
    }

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        operation_api.API().register_get_progress_method(
            "image-add",
            self._get_add_item_progress)
        operation_api.API().register_get_progress_method(
            "image-delete",
            self._get_delete_item_progress)

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, name, scope=None):
        image_service = clients.glance(context).images
        images = image_service.list(
            filters={"name": name, "disk_format": "raw"})
        result = None
        for image in images:
            if image.status == "deleted":
                continue
            if result:
                msg = _("Image resource '%s' found more than once") % name
                raise exception.NotFound(msg)
            result = self._prepare_image(utils.to_dict(image))
            db_image = self._get_db_item_by_id(context, result["id"])
            self._prepare_item(result, db_image)
        if not result:
            msg = _("Image resource '%s' could not be found") % name
            raise exception.NotFound(msg)
        return result

    def get_items(self, context, scope=None):
        image_service = clients.glance(context).images
        images = image_service.list(filters={"disk_format": "raw"})
        items = list()
        gce_images = self._get_db_items_dict(context)
        for image in images:
            result = self._prepare_image(utils.to_dict(image))
            self._prepare_item(result, gce_images.get(result["id"]))
            items.append(result)
        self._purge_db(context, items, gce_images)
        return items

    def _prepare_image(self, item):
        item["status"] = self._status_map.get(item["status"], item["status"])
        return item

    def delete_item(self, context, name, scope=None):
        """Delete an image, if allowed."""
        image = self.get_item(context, name, scope)
        image_service = clients.glance(context).images
        operation_util.start_operation(context,
                                       self._get_delete_item_progress,
                                       image["id"])
        image_service.delete(image["id"])
        self._delete_db_item(context, image)

    def add_item(self, context, name, body, scope=None):
        name = body['name']
        image_ref = body['rawDisk']['source']
        meta = {
            'name': name,
            'disk_format': 'raw',
            'container_format': 'bare',
            'min_disk': 0,
            'min_ram': 0,
            'copy_from': image_ref,
        }
        image_service = clients.glance(context).images
        operation_util.start_operation(context, self._get_add_item_progress)
        image = image_service.create(**meta)
        operation_util.set_item_id(context, image.id)

        new_image = self._prepare_image(utils.to_dict(image))
        new_image["description"] = body.get("description", "")
        new_image["image_ref"] = image_ref
        new_image = self._add_db_item(context, new_image)
        return new_image

    def _get_add_item_progress(self, context, image_id):
        image_service = clients.glance(context).images
        try:
            image = image_service.get(image_id)
        except glanceclient_exc.HTTPNotFound:
            return operation_api.gef_final_progress()
        if image.status not in ["queued", "saving"]:
            return operation_api.gef_final_progress(image.status == "killed")

    def _get_delete_item_progress(self, context, image_id):
        image_service = clients.glance(context).images
        try:
            image = image_service.get(image_id)
        except glanceclient_exc.HTTPNotFound:
            return operation_api.gef_final_progress()
        if image.status not in ["pending_delete", "deleted"]:
            return operation_api.gef_final_progress(True)
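
For reference, a minimal sketch of the request body that add_item above
consumes, and the glance create() arguments derived from it; all values
here are illustrative assumptions::

    # Hypothetical request body for the add_item handler above; only
    # 'name' and 'rawDisk.source' are read, 'description' is optional.
    body = {
        "name": "my-raw-image",
        "description": "imported from an HTTP-accessible raw disk",
        "rawDisk": {"source": "http://example.com/images/disk.raw"},
    }

    # add_item maps this onto glance create() arguments:
    meta = {
        "name": body["name"],
        "disk_format": "raw",
        "container_format": "bare",
        "min_disk": 0,
        "min_ram": 0,
        "copy_from": body["rawDisk"]["source"],
    }
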
44
gceapi/api/images.py
Normal file
@@ -0,0 +1,44 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import image_api
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Image controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(image_api.API(), *args, **kwargs)

    def format_item(self, request, image, scope):
        result_dict = {
            "creationTimestamp": self._format_date(image["created_at"]),
            "name": image["name"],
            "sourceType": image["disk_format"].upper(),
            "rawDisk": {
                "containerType": "TAR",
                "source": image.get("image_ref", ""),
            },
            "status": image["status"],
            "archiveSizeBytes": image["size"],
            "description": image.get("description", "")
        }

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
162
gceapi/api/instance_address_api.py
Normal file
@@ -0,0 +1,162 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_util
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class API(base_api.API):
    """GCE Access config API."""

    KIND = "access_config"
    PERSISTENT_ATTRIBUTES = ["id", "instance_name",
                             "nic", "name", "type", "addr"]
    DEFAULT_ACCESS_CONFIG_TYPE = "ONE_TO_ONE_NAT"
    DEFAULT_ACCESS_CONFIG_NAME = "External NAT"

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, instance_name, name):
        items = self._get_db_items(context)
        items = [i for i in items
                 if i["instance_name"] == instance_name and i["name"] == name]
        if len(items) != 1:
            raise exception.NotFound
        return items[0]

    def get_items(self, context, instance_name):
        items = self._get_db_items(context)
        return [i for i in items if i["instance_name"] == instance_name]

    def add_item(self, context, instance_name, nic, addr, addr_type, name):
        if not nic:
            msg = _("Network interface is invalid or empty")
            raise exception.InvalidRequest(msg)

        if addr_type is None:
            addr_type = self.DEFAULT_ACCESS_CONFIG_TYPE
        elif addr_type != self.DEFAULT_ACCESS_CONFIG_TYPE:
            msg = _("Only '%s' type of access config currently supported.")\
                % self.DEFAULT_ACCESS_CONFIG_TYPE
            raise exception.InvalidRequest(msg)

        client = clients.nova(context)
        instances = client.servers.list(search_opts={"name": instance_name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]

        fixed_ip = None
        for network in instance.addresses:
            if nic != network:
                continue
            for address in instance.addresses[network]:
                atype = address["OS-EXT-IPS:type"]
                if atype == "floating":
                    msg = _('At most one access config currently supported.')
                    raise exception.InvalidRequest(msg)
                if atype == "fixed":
                    fixed_ip = address["addr"]

        if not fixed_ip:
            msg = _('Network interface not found')
            raise exception.InvalidRequest(msg)

        floating_ips = client.floating_ips.list()
        if addr is None:
            # NOTE(apavlov): try to find an unused floating IP
            for floating_ip in floating_ips:
                if floating_ip.instance_id is None:
                    addr = floating_ip.ip
                    break
            else:
                msg = _('There are no unused floating IPs.')
                raise exception.InvalidRequest(msg)
        else:
            for floating_ip in floating_ips:
                if floating_ip.ip != addr:
                    continue
                if floating_ip.instance_id is None:
                    break
                msg = (_("Floating IP '%s' is already associated")
                       % floating_ip.ip)
                raise exception.InvalidRequest(msg)
            else:
                msg = _("There is no such floating IP '%s'.") % addr
                raise exception.InvalidRequest(msg)

        operation_util.start_operation(context)
        instance.add_floating_ip(addr, fixed_ip)

        return self.register_item(context, instance_name,
                                  nic, addr, addr_type, name)

    def register_item(self, context, instance_name,
                      nic, addr, addr_type, name):
        if not nic:
            msg = _("Network interface is invalid or empty")
            raise exception.InvalidRequest(msg)

        if addr_type is None:
            addr_type = self.DEFAULT_ACCESS_CONFIG_TYPE
        elif addr_type != self.DEFAULT_ACCESS_CONFIG_TYPE:
            msg = _("Only '%s' type of access config currently supported.")\
                % self.DEFAULT_ACCESS_CONFIG_TYPE
            raise exception.InvalidRequest(msg)

        if name is None:
            name = self.DEFAULT_ACCESS_CONFIG_NAME
        if not addr:
            msg = _("There is no address to assign.")
            raise exception.InvalidRequest(msg)

        new_item = {
            "id": instance_name + "-" + addr,
            "instance_name": instance_name,
            "nic": nic,
            "name": name,
            "type": addr_type,
            "addr": addr
        }
        new_item = self._add_db_item(context, new_item)
        return new_item

    def delete_item(self, context, instance_name, name):
        client = clients.nova(context)
        instances = client.servers.list(search_opts={"name": instance_name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]

        item = self.get_item(context, instance_name, name)
        floating_ip = item["addr"]
        operation_util.start_operation(context)
        instance.remove_floating_ip(floating_ip)
        self._delete_db_item(context, item)

    def unregister_item(self, context, instance_name, name):
        item = self.get_item(context, instance_name, name)
        self._delete_db_item(context, item)
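
The unused-address search in add_item above leans on Python's for/else,
where the else branch runs only if the loop finished without a break; a
self-contained sketch, with a stand-in for novaclient's floating IP
objects (an assumption)::

    import collections

    # Stand-in for novaclient's floating IP objects.
    FloatingIP = collections.namedtuple("FloatingIP", ["ip", "instance_id"])

    def pick_unused(floating_ips):
        for fip in floating_ips:
            if fip.instance_id is None:
                addr = fip.ip
                break
        else:
            # Runs only when the loop finished without hitting 'break'.
            raise ValueError("There are no unused floating IPs.")
        return addr

    pool = [FloatingIP("198.51.100.1", "abc123"),
            FloatingIP("198.51.100.2", None)]
    print(pick_unused(pool))  # -> 198.51.100.2
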
377
gceapi/api/instance_api.py
Normal file
@@ -0,0 +1,377 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import string

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import disk_api
from gceapi.api import firewall_api
from gceapi.api import instance_address_api
from gceapi.api import instance_disk_api
from gceapi.api import machine_type_api
from gceapi.api import network_api
from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import project_api
from gceapi.api import scopes
from gceapi.api import utils
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class API(base_api.API):
    """GCE Instance API."""

    KIND = "instance"
    PERSISTENT_ATTRIBUTES = ["id", "description"]

    _status_map = {
        "UNKNOWN": "STOPPED",
        "ACTIVE": "RUNNING",
        "REBOOT": "RUNNING",
        "HARD_REBOOT": "RUNNING",
        "PASSWORD": "RUNNING",
        "REBUILD": "RUNNING",
        "MIGRATING": "RUNNING",
        "RESIZE": "RUNNING",
        "BUILD": "PROVISIONING",
        "SHUTOFF": "STOPPED",
        "VERIFY_RESIZE": "RUNNING",
        "REVERT_RESIZE": "RUNNING",
        "PAUSED": "STOPPED",
        "SUSPENDED": "STOPPED",
        "RESCUE": "RUNNING",
        "ERROR": "STOPPED",
        "DELETED": "TERMINATED",
        "SOFT_DELETED": "TERMINATED",
        "SHELVED": "STOPPED",
        "SHELVED_OFFLOADED": "STOPPED",
    }

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        network_api.API()._register_callback(
            base_api._callback_reasons.check_delete,
            self._can_delete_network)
        firewall_api.API()._register_callback(
            base_api._callback_reasons.post_add,
            self._add_secgroup_to_instances)
        firewall_api.API()._register_callback(
            base_api._callback_reasons.pre_delete,
            self._remove_secgroup_from_instances)
        operation_api.API().register_get_progress_method(
            "instance-add",
            self._get_add_item_progress)
        operation_api.API().register_get_progress_method(
            "instance-delete",
            self._get_delete_item_progress)
        operation_api.API().register_get_progress_method(
            "instance-reset",
            self._get_reset_instance_progress)

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, name, scope=None):
        return self.search_items(context, {"name": name}, scope)[0]

    def get_items(self, context, scope=None):
        return self.search_items(context, None, scope)

    def get_scopes(self, context, item):
        return [scopes.ZoneScope(item["OS-EXT-AZ:availability_zone"])]

    def search_items(self, context, search_opts, scope):
        client = clients.nova(context)
        instances = client.servers.list(search_opts=search_opts)

        filtered_instances = []
        for instance in instances:
            iscope = getattr(instance, "OS-EXT-AZ:availability_zone")
            if scope is not None and scope.get_name() != iscope:
                continue

            instance = utils.to_dict(instance)
            instance = self._prepare_instance(client, context, instance)
            db_instance = self._get_db_item_by_id(context, instance["id"])
            self._prepare_item(instance, db_instance)
            filtered_instances.append(instance)

        if len(filtered_instances) == len(instances) and not search_opts:
            gce_instances = self._get_db_items_dict(context)
            self._purge_db(context, filtered_instances, gce_instances)

        return filtered_instances

    def _prepare_instance(self, client, context, instance):
        instance["statusMessage"] = instance["status"]
        instance["status"] = self._status_map.get(
            instance["status"], "STOPPED")
        instance["flavor"]["name"] = machine_type_api.API().get_item_by_id(
            context, instance["flavor"]["id"])["name"]

        cinder_client = clients.cinder(context)
        volumes = instance["os-extended-volumes:volumes_attached"]
        instance["volumes"] = [utils.to_dict(
            cinder_client.volumes.get(v["id"])) for v in volumes]
        ads = instance_disk_api.API().get_items(context, instance["name"])
        ads = dict((ad["volume_id"], ad) for ad in ads)
        for volume in instance["volumes"]:
            ad = ads.pop(volume["id"], None)
            if not ad:
                name = volume["display_name"]
                ad = instance_disk_api.API().register_item(context,
                    instance["name"], volume["id"], name)
            volume["device_name"] = ad["name"]
        # NOTE(apavlov): clean up unused entries from db for this instance
        for ad in ads:
            ad = instance_disk_api.API().unregister_item(context,
                instance["name"], ads[ad]["name"])

        acs = instance_address_api.API().get_items(context, instance["name"])
        acs = dict((ac["addr"], ac) for ac in acs)
        for network in instance["addresses"]:
            for address in instance["addresses"][network]:
                if address["OS-EXT-IPS:type"] == "floating":
                    ac = acs.pop(address["addr"], None)
                    if not ac:
                        ac = instance_address_api.API().register_item(context,
                            instance["name"], network, address["addr"],
                            None, None)
                    address["name"] = ac["name"]
                    address["type"] = ac["type"]
        # NOTE(apavlov): clean up unused entries from db for this instance
        for ac in acs:
            ac = instance_address_api.API().unregister_item(context,
                instance["name"], acs[ac]["name"])

        return instance

    def _can_delete_network(self, context, network):
        client = clients.nova(context)
        instances = client.servers.list(search_opts=None)
        for instance in instances:
            if network["name"] in instance.networks:
                raise exception.NetworkInUse(network_id=network["id"])

    def _get_instances_with_network(self, context, network_name, scope):
        affected_instances = []
        client = clients.nova(context)
        instances = client.servers.list(search_opts=None)
        for instance in instances:
            if network_name in instance.networks:
                affected_instances.append(instance)
        return affected_instances

    def _add_secgroup_to_instances(self, context, secgroup, **kwargs):
        network_name = secgroup.get("network_name")
        if not network_name:
            return
        affected_instances = self._get_instances_with_network(
            context, network_name, kwargs.get("scope"))
        # TODO(ft): implement a common safe method
        # to run add/remove with exception logging
        for instance in affected_instances:
            try:
                instance.add_security_group(secgroup["name"])
            except Exception:
                LOG.exception(("Failed to add instance "
                               "(%s) to security group (%s)"),
                              instance.id, secgroup["name"])

    def _remove_secgroup_from_instances(self, context, secgroup, **kwargs):
        network_name = secgroup.get("network_name")
        if not network_name:
            return
        affected_instances = self._get_instances_with_network(
            context, network_name, kwargs.get("scope"))
        # TODO(ft): implement a common safe method
        # to run add/remove with exception logging
        for instance in affected_instances:
            try:
                instance.remove_security_group(secgroup["name"])
            except Exception:
                LOG.exception(("Failed to remove security group (%s) "
                               "from instance (%s)"),
                              secgroup["name"], instance.id)

    def reset_instance(self, context, scope, name):
        client = clients.nova(context)
        instances = client.servers.list(search_opts={"name": name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]
        operation_util.start_operation(context,
                                       self._get_reset_instance_progress,
                                       instance.id)
        instance.reboot("HARD")

    def delete_item(self, context, name, scope=None):
        client = clients.nova(context)
        instances = client.servers.list(search_opts={"name": name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]
        operation_util.start_operation(context,
                                       self._get_delete_item_progress,
                                       instance.id)
        instance.delete()
        instance = utils.to_dict(instance)
        instance = self._prepare_instance(client, context, instance)
        self._delete_db_item(context, instance)

        ads = instance_disk_api.API().get_items(context, instance["name"])
        for ad in ads:
            ad = instance_disk_api.API().unregister_item(context,
                instance["name"], ad["name"])

        acs = instance_address_api.API().get_items(context, instance["name"])
        for ac in acs:
            ac = instance_address_api.API().unregister_item(context,
                instance["name"], ac["name"])

    def add_item(self, context, name, body, scope=None):
        name = body['name']
        client = clients.nova(context)

        flavor_name = utils._extract_name_from_url(body['machineType'])
        flavor_id = machine_type_api.API().get_item(
            context, flavor_name, scope)["id"]

        try:
            metadatas = body['metadata']['items']
        except KeyError:
            metadatas = []
        instance_metadata = dict([(x['key'], x['value']) for x in metadatas])

        ssh_keys = instance_metadata.pop('sshKeys', None)
        if ssh_keys is not None:
            key_name = ssh_keys.split('\n')[0].split(":")[0]
        else:
            key_name = project_api.API().get_gce_user_keypair_name(context)

        disks = body.get('disks', [])
        disks.sort(None, lambda x: x.get("boot", False), True)
        bdm = dict()
        diskDevice = 0
        for disk in disks:
            device_name = "vd" + string.ascii_lowercase[diskDevice]
            volume_name = utils._extract_name_from_url(disk["source"])
            volume = disk_api.API().get_item(context, volume_name, scope)
            disk["id"] = volume["id"]
            bdm[device_name] = volume["id"]
            diskDevice += 1

        nics = []
        # NOTE(ft): the 'default' security group contains egress rules, but
        # egress rules are not configurable through the GCE API; all outgoing
        # traffic is permitted, so we keep this behaviour
        groups_names = set(['default'])
        acs = dict()
        for net_iface in body['networkInterfaces']:
            net_name = utils._extract_name_from_url(net_iface["network"])
            ac = net_iface.get("accessConfigs")
            if ac:
                if len(ac) > 1:
                    msg = _('At most one access config currently supported.')
                    raise exception.InvalidRequest(msg)
                else:
                    acs[net_name] = ac[0]

            network = network_api.API().get_item(context, net_name, None)
            nics.append({"net-id": network["id"]})
            for sg in firewall_api.API().get_network_firewalls(
                    context, net_name):
                groups_names.add(sg["name"])
        groups_names = list(groups_names)

        operation_util.start_operation(context, self._get_add_item_progress)
        instance = client.servers.create(name, None, flavor_id,
            meta=instance_metadata, min_count=1, max_count=1,
            security_groups=groups_names, key_name=key_name,
            availability_zone=scope.get_name(), block_device_mapping=bdm,
            nics=nics)
        if not acs:
            operation_util.set_item_id(context, instance.id)

        for disk in disks:
            instance_disk_api.API().register_item(context, name,
                disk["id"], disk["deviceName"])

        instance = utils.to_dict(client.servers.get(instance.id))
        instance = self._prepare_instance(client, context, instance)
        if "description" in body:
            instance["description"] = body["description"]
        instance = self._add_db_item(context, instance)

        if acs:
            operation_util.continue_operation(
                context,
                lambda: self._add_access_config(context, instance,
                                                scope, acs))

        return instance

    def _add_access_config(self, context, instance, scope, acs):
        progress = self._get_add_item_progress(context, instance["id"])
        if progress is None or not operation_api.is_final_progress(progress):
            return progress

        client = clients.nova(context)
        try:
            instance = client.servers.get(instance["id"])
        except clients.novaclient.exceptions.NotFound:
            return operation_api.gef_final_progress()

        for net in acs:
            ac = acs[net]
            instance_address_api.API().add_item(context, instance.name,
                net, ac.get("natIP"), ac.get("type"), ac.get("name"))
        return operation_api.gef_final_progress()

    def _get_add_item_progress(self, context, instance_id):
        client = clients.nova(context)
        try:
            instance = client.servers.get(instance_id)
        except clients.novaclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if instance.status != "BUILD":
            return operation_api.gef_final_progress(
                instance.status == "ERROR")

    def _get_delete_item_progress(self, context, instance_id):
        client = clients.nova(context)
        try:
            instance = client.servers.get(instance_id)
        except clients.novaclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if getattr(instance, "OS-EXT-STS:task_state") != "deleting":
            return operation_api.gef_final_progress(
                instance.status != "DELETED")

    def _get_reset_instance_progress(self, context, instance_id):
        client = clients.nova(context)
        try:
            instance = client.servers.get(instance_id)
        except clients.novaclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if instance.status != "HARD_REBOOT":
            return operation_api.gef_final_progress()
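
add_item above sorts the disks so the boot disk receives the first device
letter; the Python 2 three-argument sort call it uses is equivalent to this
sketch (the sample disks are illustrative)::

    import string

    disks = [
        {"deviceName": "data"},
        {"deviceName": "root", "boot": True},
    ]

    # Modern equivalent of disks.sort(None, lambda x: x.get("boot", False),
    # True): boot disks sort first, the rest keep their relative order.
    disks.sort(key=lambda x: x.get("boot", False), reverse=True)

    # Device names are then handed out in order: vda, vdb, ...
    for index, disk in enumerate(disks):
        print("vd" + string.ascii_lowercase[index], disk["deviceName"])
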
146
gceapi/api/instance_disk_api.py
Normal file
@@ -0,0 +1,146 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import string

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import disk_api
from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class API(base_api.API):
    """GCE Attached disk API."""

    KIND = "attached_disk"
    PERSISTENT_ATTRIBUTES = ["id", "instance_name", "volume_id", "name"]

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        operation_api.API().register_get_progress_method(
            "attached_disk-add",
            self._get_add_item_progress)
        operation_api.API().register_get_progress_method(
            "attached_disk-delete",
            self._get_delete_item_progress)

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, instance_name, name):
        items = self._get_db_items(context)
        items = [i for i in items
                 if i["instance_name"] == instance_name and i["name"] == name]
        if len(items) != 1:
            raise exception.NotFound
        return items[0]

    def get_items(self, context, instance_name):
        items = self._get_db_items(context)
        return [i for i in items if i["instance_name"] == instance_name]

    def add_item(self, context, instance_name, source, name):
        if not name:
            msg = _("There is no name to assign.")
            raise exception.InvalidRequest(msg)

        volume_name = utils._extract_name_from_url(source)
        if not volume_name:
            msg = _("There is no volume to assign.")
            raise exception.NotFound(msg)
        volume = disk_api.API().get_item(context, volume_name, None)

        nova_client = clients.nova(context)
        instances = nova_client.servers.list(
            search_opts={"name": instance_name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]

        devices = list()
        volumes_client = nova_client.volumes
        for server_volume in volumes_client.get_server_volumes(instance.id):
            devices.append(server_volume.device)
        device_name = None
        for letter in string.ascii_lowercase[1:]:
            device_name = "vd" + letter
            for device in devices:
                if device_name in device:
                    break
            else:
                break
        else:
            raise exception.OverQuota

        operation_util.start_operation(context, self._get_add_item_progress)
        volumes_client.create_server_volume(
            instance.id, volume["id"], "/dev/" + device_name)

        item = self.register_item(context, instance_name, volume["id"], name)
        operation_util.set_item_id(context, item["id"])

    def register_item(self, context, instance_name, volume_id, name):
        if not name:
            msg = _("There is no name to assign.")
            raise exception.InvalidRequest(msg)
        if not volume_id:
            msg = _("There is no volume_id to assign.")
            raise exception.InvalidRequest(msg)

        new_item = {
            "id": instance_name + "-" + volume_id,
            "instance_name": instance_name,
            "volume_id": volume_id,
            "name": name,
        }
        new_item = self._add_db_item(context, new_item)
        return new_item

    def delete_item(self, context, instance_name, name):
        item = self.get_item(context, instance_name, name)
        volume_id = item["volume_id"]

        nova_client = clients.nova(context)
        instances = nova_client.servers.list(
            search_opts={"name": instance_name})
        if not instances or len(instances) != 1:
            raise exception.NotFound
        instance = instances[0]

        operation_util.start_operation(context,
                                       self._get_delete_item_progress,
                                       item["id"])
        nova_client.volumes.delete_server_volume(instance.id, volume_id)

        self._delete_db_item(context, item)

    def unregister_item(self, context, instance_name, name):
        item = self.get_item(context, instance_name, name)
        self._delete_db_item(context, item)

    def _get_add_item_progress(self, context, dummy_id):
        return operation_api.gef_final_progress()

    def _get_delete_item_progress(self, context, dummy_id):
        return operation_api.gef_final_progress()
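
The device-name scan in add_item above walks vdb..vdz and takes the first
letter that no attached volume already uses; a condensed, runnable
equivalent (the sample device list is an assumption)::

    import string

    def pick_device_name(devices):
        # devices: paths of volumes already attached, e.g. "/dev/vdb"
        for letter in string.ascii_lowercase[1:]:
            candidate = "vd" + letter
            if not any(candidate in device for device in devices):
                return candidate
        raise RuntimeError("no free device letter left")

    print(pick_device_name(["/dev/vda", "/dev/vdb"]))  # -> vdc
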
157
gceapi/api/instances.py
Normal file
@@ -0,0 +1,157 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob

from gceapi.api import common as gce_common
from gceapi.api import instance_address_api
from gceapi.api import instance_api
from gceapi.api import instance_disk_api
from gceapi.api import operation_util
from gceapi.api import scopes
from gceapi.api import wsgi as gce_wsgi
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging

logger = logging.getLogger(__name__)


class Controller(gce_common.Controller):
    """GCE Instance controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(instance_api.API(),
                                         *args, **kwargs)
        self._instance_address_api = instance_address_api.API()
        self._instance_disk_api = instance_disk_api.API()

    def format_item(self, request, instance, scope):
        result_dict = {
            "creationTimestamp": self._format_date(instance["created"]),
            "status": instance["status"],
            "statusMessage": instance["statusMessage"],
            "name": instance["name"],
            "machineType": self._qualify(request,
                "machineTypes", instance["flavor"]["name"], scope),
            "networkInterfaces": [],
            "disks": [],
            "metadata": {
                "kind": "compute#metadata",
                "items": list(),
            },
        }

        description = instance.get("description", "")
        if description:
            result_dict["description"] = description

        metadata = instance.get("metadata", {})
        for i in metadata:
            result_dict["metadata"]["items"].append(
                {"key": i, "value": metadata[i]})

        for network in instance["addresses"]:
            ni = dict()
            ni["network"] = self._qualify(request,
                                          "networks", network,
                                          scopes.GlobalScope())
            # NOTE(apavlov): The name of the network interface is generated
            # by the server. For network devices these are eth0, eth1, etc.,
            # but we provide the network name here because OpenStack doesn't
            # have a device name.
            ni["name"] = network
            ni["accessConfigs"] = []
            for address in instance["addresses"][network]:
                atype = address["OS-EXT-IPS:type"]
                if atype == "fixed" and "networkIP" not in ni:
                    ni["networkIP"] = address["addr"]
                    continue
                if atype == "floating":
                    ni["accessConfigs"].append({
                        "kind": "compute#accessConfig",
                        "name": address["name"],
                        "type": address["type"],
                        "natIP": address["addr"]
                    })
                    continue
                logger.warn(_("Unexpected address for instance '%(i)s' in "
                              "network '%(n)s'") % {"i": instance["name"],
                                                    "n": network})
            result_dict["networkInterfaces"].append(ni)

        disk_index = 0
        for volume in instance["volumes"]:
            readonly = volume.get("metadata", {}).get("readonly", "False")
            google_disk = {
                "kind": "compute#attachedDisk",
                "index": disk_index,
                "type": "PERSISTENT",
                "mode": "READ_ONLY" if readonly == "True" else "READ_WRITE",
                "source": self._qualify(request,
                    "disks", volume["display_name"], scope),
                "deviceName": volume["device_name"],
                "boot": True if volume["bootable"] == "true" else False
            }
            result_dict["disks"].append(google_disk)
            disk_index += 1

        return self._format_item(request, result_dict, scope)

    def reset_instance(self, req, scope_id, id):
        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)
        operation_util.init_operation(context, "reset",
                                      self._type_name, id, scope)
        try:
            self._api.reset_instance(context, scope, id)
        except (exception.NotFound, KeyError, IndexError):
            msg = _("Instance %s could not be found") % id
            raise webob.exc.HTTPNotFound(explanation=msg)

    def add_access_config(self, req, body, scope_id, id):
        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)
        operation_util.init_operation(context, "addAccessConfig",
                                      self._type_name, id, scope)
        self._instance_address_api.add_item(context, id,
            req.params.get('networkInterface'), body.get("natIP"),
            body.get("type"), body.get("name"))

    def delete_access_config(self, req, scope_id, id):
        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)
        operation_util.init_operation(context, "deleteAccessConfig",
                                      self._type_name, id, scope)
        self._instance_address_api.delete_item(context, id,
            req.params.get('accessConfig'))

    def attach_disk(self, req, body, scope_id, id):
        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)
        operation_util.init_operation(context, "attachDisk",
                                      self._type_name, id, scope)
        self._instance_disk_api.add_item(context, id,
            body["source"], body.get("deviceName"))

    def detach_disk(self, req, scope_id, id):
        context = self._get_context(req)
        scope = self._get_scope(req, scope_id)
        operation_util.init_operation(context, "detachDisk",
                                      self._type_name, id, scope)
        self._instance_disk_api.delete_item(context, id,
            req.params.get('deviceName'))


def create_resource():
    return gce_wsgi.GCEResource(Controller())
68
gceapi/api/machine_type_api.py
Normal file
@@ -0,0 +1,68 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import utils
from gceapi.api import zone_api
from gceapi import exception


class API(base_api.API):
    """GCE Machine types API."""

    KIND = "machineType"

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        self._zone_api = zone_api.API()

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope=None):
        client = clients.nova(context)
        try:
            item = client.flavors.find(name=self._from_gce(name))
        except (clients.novaclient.exceptions.NotFound,
                clients.novaclient.exceptions.NoUniqueMatch):
            raise exception.NotFound
        if not item:
            raise exception.NotFound
        return self._prepare_item(utils.to_dict(item))

    def get_items(self, context, scope=None):
        client = clients.nova(context)
        items = client.flavors.list()
        return [self._prepare_item(utils.to_dict(item))
                for item in items]

    def get_scopes(self, context, item):
        # TODO(apavlov): too slow for all...
        return self._zone_api.get_items_as_scopes(context)

    def get_item_by_id(self, context, machine_type_id):
        client = clients.nova(context)
        item = client.flavors.get(machine_type_id)
        return self._prepare_item(utils.to_dict(item))

    def _prepare_item(self, item):
        item["name"] = self._to_gce(item["name"])
        return item

    def _from_gce(self, name):
        return name.replace("-", ".")

    def _to_gce(self, name):
        return name.replace(".", "-")
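
Since GCE resource names cannot contain dots, the mapping above swaps dots
and dashes in nova flavor names; note the round trip is only safe for
flavor names that contain no dashes of their own::

    def to_gce(name):
        return name.replace(".", "-")

    def from_gce(name):
        return name.replace("-", ".")

    print(to_gce("m1.small"))    # -> m1-small
    print(from_gce("m1-small"))  # -> m1.small
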
49
gceapi/api/machine_types.py
Normal file
@@ -0,0 +1,49 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import machine_type_api
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Machine types controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(machine_type_api.API(),
                                         *args, **kwargs)

    def format_item(self, request, flavor, scope):
        result_dict = {
            "name": flavor["name"],
            "description": "",
            "guestCpus": flavor["vcpus"],
            "memoryMb": flavor["ram"],
            "imageSpaceGb": flavor["disk"],
            # NOTE(Alex): Not supported by OpenStack
            "maximumPersistentDisks": 0,
            # NOTE(Alex): Not supported by OpenStack
            "maximumPersistentDisksSizeGb": 0,
        }

        if "OS-FLV-EXT-DATA:ephemeral" in flavor:
            size = flavor["OS-FLV-EXT-DATA:ephemeral"]
            if size > 0:
                result_dict["scratchDisks"] = [{"diskGb": size}]

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
26
gceapi/api/network_api.py
Normal file
@@ -0,0 +1,26 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import network_neutron_api
from gceapi.api import network_nova_api


class API(base_api.API):
    """GCE Network API."""

    NEUTRON_API_MODULE = network_neutron_api
    NOVA_API_MODULE = network_nova_api

    __metaclass__ = base_api.NetSingleton
143
gceapi/api/network_neutron_api.py
Normal file
@@ -0,0 +1,143 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from oslo.config import cfg

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_util
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class API(base_api.API):
    """GCE Network API - neutron implementation."""

    KIND = "network"
    PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "description"]

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        self._public_network_name = CONF.public_network

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, name, scope=None):
        client = clients.neutron(context)
        networks = client.list_networks(
            tenant_id=context.project_id, name=name)["networks"]
        if not networks:
            msg = _("Network resource '%s' could not be found.") % name
            raise exception.NotFound(msg)
        else:
            # NOTE(Alex) There might be more than one network with this name.
            # TODO(Alex) We have to decide if we should support IDs as
            # parameters for names as well and return error if we have
            # multi-results when addressed by name.
            network = networks[0]
            gce_network = self._get_db_item_by_id(context, network["id"])
            return self._prepare_network(client, network, gce_network)

    def get_items(self, context, scope=None):
        client = clients.neutron(context)
        networks = client.list_networks(tenant_id=context.project_id)
        networks = networks["networks"]
        gce_networks = self._get_db_items_dict(context)
        result_networks = []
        for network in networks:
            network = self._prepare_network(client, network,
                                            gce_networks.get(network["id"]))
            result_networks.append(network)
        self._purge_db(context, result_networks, gce_networks)
        return result_networks

    def delete_item(self, context, name, scope=None):
        client = clients.neutron(context)
        network = self.get_item(context, name)

        self._process_callbacks(
            context, base_api._callback_reasons.check_delete, network)
        operation_util.start_operation(context)
        self._delete_db_item(context, network)
        self._process_callbacks(
            context, base_api._callback_reasons.pre_delete, network)

        client.delete_network(network["id"])

    def add_item(self, context, name, body, scope=None):
        ip_range = body['IPv4Range']
        gateway = body.get('gatewayIPv4')
        if gateway is None:
            network_cidr = netaddr.IPNetwork(ip_range)
            gateway_ip = netaddr.IPAddress(network_cidr.first + 1)
            gateway = str(gateway_ip)
        client = clients.neutron(context)
        network = None
        try:
            network = self.get_item(context, name)
        except exception.NotFound:
            pass
        if network is not None:
            raise exception.DuplicateVlan
        network_body = {}
        network_body["network"] = {"name": name}
        operation_util.start_operation(context)
        network = client.create_network(network_body)
        network = network["network"]
        if ip_range:
            subnet_body = {}
            subnet_body["subnet"] = {
                # NOTE(Alex) "name": name + ".default_subnet",
                # Won't give it a name for now
                "network_id": network["id"],
                "ip_version": "4",
                "cidr": ip_range,
                "gateway_ip": gateway}
            result_data = client.create_subnet(subnet_body)
            subnet_id = result_data["subnet"]["id"]
        network = self._prepare_network(client, network)
        network["description"] = body.get("description")
        network = self._add_db_item(context, network)
        self._process_callbacks(
            context, base_api._callback_reasons.post_add,
            network, subnet_id=subnet_id)
        return network

    def _prepare_network(self, client, network, db_network=None):
        subnets = network['subnets']
        if subnets and len(subnets) > 0:
            subnet = client.show_subnet(subnets[0])
            subnet = subnet["subnet"]
            network["subnet_id"] = subnet["id"]
            network["IPv4Range"] = subnet.get("cidr", None)
            network["gatewayIPv4"] = subnet.get("gateway_ip", None)
        return self._prepare_item(network, db_network)

    def get_public_network_id(self, context):
        """Get id of public network appointed to GCE in config."""
        client = clients.neutron(context)
        search_opts = {"name": self._public_network_name,
                       "router:external": True}
        networks = client.list_networks(**search_opts)["networks"]
        return networks[0]["id"]
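One detail worth calling out in add_item() above: when the request omits gatewayIPv4, the first usable address of the CIDR becomes the gateway. A standalone netaddr check of that arithmetic (not part of the commit):

    import netaddr

    cidr = netaddr.IPNetwork("10.240.0.0/16")
    # .first is the network address as an integer; +1 is the first host.
    gateway = str(netaddr.IPAddress(cidr.first + 1))
    assert gateway == "10.240.0.1"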
94
gceapi/api/network_nova_api.py
Normal file
@ -0,0 +1,94 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception


class API(base_api.API):
    """GCE Network API - nova-network implementation."""

    KIND = "network"
    PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "description"]

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, name, scope=None):
        client = clients.nova(context)
        network = client.networks.find(label=name)
        gce_network = self._get_db_item_by_id(context, network.id)
        return self._prepare_network(utils.to_dict(network), gce_network)

    def get_items(self, context, scope=None):
        client = clients.nova(context)
        networks = client.networks.list()
        gce_networks = self._get_db_items_dict(context)
        result_networks = []
        for network in networks:
            result_networks.append(
                self._prepare_network(utils.to_dict(network),
                                      gce_networks.get(network.id)))
        self._purge_db(context, result_networks, gce_networks)
        return result_networks

    def delete_item(self, context, name, scope=None):
        network = self.get_item(context, name)
        self._process_callbacks(
            context, base_api._callback_reasons.check_delete, network)
        operation_util.start_operation(context)
        self._delete_db_item(context, network)
        self._process_callbacks(
            context, base_api._callback_reasons.pre_delete, network)
        client = clients.nova(context)
        client.networks.delete(network["id"])

    def add_item(self, context, name, body, scope=None):
        ip_range = body['IPv4Range']
        gateway = body.get('gatewayIPv4')
        if gateway is None:
            network_cidr = netaddr.IPNetwork(ip_range)
            gateway_ip = netaddr.IPAddress(network_cidr.first + 1)
            gateway = str(gateway_ip)
        network = None
        try:
            network = self.get_item(context, name)
        except clients.novaclient.exceptions.NotFound:
            pass
        if network is not None:
            raise exception.DuplicateVlan
        kwargs = {'label': name, 'cidr': ip_range, 'gateway': gateway}
        client = clients.nova(context)
        operation_util.start_operation(context)
        network = client.networks.create(**kwargs)
        network = self._prepare_network(utils.to_dict(network))
        if "description" in body:
            network["description"] = body["description"]
        return self._add_db_item(context, network)

    def _prepare_network(self, network, db_data=None):
        return self._prepare_item({
            'name': network['label'],
            'IPv4Range': network['cidr'],
            'gatewayIPv4': network['gateway'],
            'id': network['id']},
            db_data)
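Both backends consume the same GCE-style request body in add_item(); an illustrative example (field names from the code above, values made up):

    body = {
        "IPv4Range": "192.168.5.0/24",
        # "gatewayIPv4" omitted: the handler derives 192.168.5.1 itself.
        "description": "demo network",
    }
    # The network name arrives separately as the 'name' argument,
    # e.g. "my-net".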
40
gceapi/api/networks.py
Normal file
@ -0,0 +1,40 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import network_api
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Network controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(network_api.API(), *args, **kwargs)

    def format_item(self, request, network, scope):
        result_dict = {
            "name": network["name"],
            "IPv4Range": network.get("IPv4Range", ""),
            "gatewayIPv4": network.get("gatewayIPv4", ""),
            "creationTimestamp": network.get("creationTimestamp", ""),
        }
        if "description" in network:
            result_dict["description"] = network["description"]

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
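For reference, the dict built by format_item() looks roughly like the following before _format_item() wraps it (illustrative values; the kind/selfLink envelope is assumed to be added by the common layer):

    {
        "name": "my-net",
        "IPv4Range": "192.168.5.0/24",
        "gatewayIPv4": "192.168.5.1",
        "creationTimestamp": "2013-12-10T12:00:00Z",
        "description": "demo network",
    }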
239
gceapi/api/oauth.py
Normal file
@ -0,0 +1,239 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import json
import time
import uuid

from keystoneclient import exceptions
from keystoneclient.v2_0 import client as keystone_client
from oslo.config import cfg
import webob

from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging
from gceapi.openstack.common import timeutils
from gceapi import wsgi_ext as openstack_wsgi


FLAGS = cfg.CONF
LOG = logging.getLogger(__name__)


INTERNAL_GCUTIL_PROJECTS = ["debian-cloud", "centos-cloud", "google"]


class OAuthFault(openstack_wsgi.Fault):
    """Fault compliant with RFC 6749.

    Prevents the extra info that the openstack.wsgi.Fault class adds
    to the response, which would not be compliant with RFC 6749.
    """
    @webob.dec.wsgify(RequestClass=openstack_wsgi.Request)
    def __call__(self, req):
        return self.wrapped_exc


class Controller(object):
    """Simple OAuth2.0 Controller

    If you need other apps to work with the GCE API you should add them
    here in VALID_CLIENTS.
    Based on https://developers.google.com/accounts/docs/OAuth2InstalledApp
    and on RFC 6749 (section 4.1).
    """

    AUTH_TIMEOUT = 300
    VALID_CLIENTS = {
        "32555940559.apps.googleusercontent.com": "ZmssLNjJy2998hD4CTg2ejr2"}

    INTERNAL_REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
    AUTH_PAGE_TEMPLATE = \
        "<!DOCTYPE html>"\
        "<html xmlns=\"http://www.w3.org/1999/xhtml\"><body>"\
        "Enter Openstack username and password to access GCE API<br/>"\
        "<br/>"\
        "<form action=\"approval\" name=\"approval\" method=\"post\">"\
        "<input type=\"hidden\" name=\"redirect_uri\" value=\""\
        "{redirect_uri}\"/>"\
        "<input type=\"hidden\" name=\"code\" value=\"{code}\"/>"\
        "<input type=\"text\" name=\"username\" value=\"\"/><br/>"\
        "<input type=\"password\" name=\"password\" value=\"\"/><br/>"\
        "<input type=\"submit\" value=\"Login\"/>"\
        "</form>"\
        "</body></html>"

    class Client:
        auth_start_time = 0
        auth_token = None
        expires_in = 1

    # NOTE(apavlov): there is no cleaning of the dictionary
    _clients = {}

    def _check_redirect_uri(self, uri):
        if uri is None:
            msg = _("redirect_uri should be present")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if "localhost" not in uri and uri != self.INTERNAL_REDIRECT_URI:
            msg = _("redirect_uri has invalid format. "
                    "It must conform to the installed application URI "
                    "of GCE.")
            json_body = {"error": "invalid_request",
                         "error_description": msg}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

    def auth(self, req):
        """OAuth protocol authorization endpoint handler.

        Returns the login authorization webpage, invoked for example by
        'gcutil auth'.
        """
        client_id = req.GET.get("client_id")
        if client_id is None or client_id not in self.VALID_CLIENTS:
            json_body = {"error": "unauthorized_client"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

        if req.GET.get("response_type") != "code":
            json_body = {"error": "unsupported_response_type"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))
        self._check_redirect_uri(req.GET.get("redirect_uri"))

        code = base64.urlsafe_b64encode(uuid.uuid4().bytes).replace('=', '')
        self._clients[code] = self.Client()
        self._clients[code].auth_start_time = time.time()

        html_page = self.AUTH_PAGE_TEMPLATE.format(
            redirect_uri=req.GET.get("redirect_uri"),
            code=code)
        return html_page

    def approval(self, req):
        """OAuth protocol authorization endpoint handler, second part.

        Returns a webpage with the verification code or redirects to the
        redirect_uri specified in the auth request.
        """
        code = req.POST.get("code")
        if code is None:
            json_body = {"error": "invalid_request"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

        client = self._clients.get(code)
        if client is None:
            json_body = {"error": "invalid_client"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

        if time.time() - client.auth_start_time > self.AUTH_TIMEOUT:
            raise webob.exc.HTTPRequestTimeout()

        redirect_uri = req.POST.get("redirect_uri")
        self._check_redirect_uri(redirect_uri)

        username = req.POST.get("username")
        password = req.POST.get("password")

        try:
            keystone = keystone_client.Client(
                username=username,
                password=password,
                auth_url=FLAGS.keystone_gce_url)
            token = keystone.auth_ref["token"]
            client.auth_token = token["id"]
            s = timeutils.parse_isotime(token["issued_at"])
            e = timeutils.parse_isotime(token["expires"])
            client.expires_in = (e - s).seconds
        except Exception as ex:
            return webob.exc.HTTPUnauthorized(ex)

        if redirect_uri == self.INTERNAL_REDIRECT_URI:
            return ("<html><body>Verification code is: "
                    + code + "</body></html>")

        uri = redirect_uri + "?code=" + code
        raise webob.exc.HTTPFound(location=uri)

    def token(self, req):
        """OAuth protocol token endpoint handler.

        Returns JSON with tokens (access_token and optionally
        refresh_token).
        """
        client_id = req.POST.get("client_id")
        if client_id is None or client_id not in self.VALID_CLIENTS:
            json_body = {"error": "unauthorized_client"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))
        valid_secret = self.VALID_CLIENTS[client_id]
        client_secret = req.POST.get("client_secret")
        if client_secret is None or client_secret != valid_secret:
            json_body = {"error": "unauthorized_client"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

        if req.POST.get("grant_type") != "authorization_code":
            json_body = {"error": "unsupported_grant_type"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

        code = req.POST.get("code")
        client = self._clients.get(code)
        if client is None:
            json_body = {"error": "invalid_client"}
            raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body))

        result = {"access_token": client.auth_token,
                  "expires_in": client.expires_in,
                  "token_type": "Bearer"}
        return json.dumps(result)


class AuthProtocol(object):
    """Filter for translating oauth token to keystone token."""
    def __init__(self, app):
        self.app = app
        self.keystone_url = FLAGS.keystone_gce_url

    def __call__(self, env, start_response):
        auth_token = env.get("HTTP_AUTHORIZATION")
        if auth_token is None:
            return self._reject_request(start_response)

        project = env["PATH_INFO"].split("/")[1]
        try:
            keystone = keystone_client.Client(
                token=auth_token.split()[1],
                tenant_name=project,
                force_new_token=True,
                auth_url=self.keystone_url)
            env["HTTP_X_AUTH_TOKEN"] = keystone.auth_ref["token"]["id"]
            return self.app(env, start_response)
        except exceptions.Unauthorized:
            if project in INTERNAL_GCUTIL_PROJECTS:
                # NOTE(apavlov): return empty if no such projects(by gcutil)
                headers = [('Content-type', 'application/json;charset=UTF-8')]
                start_response('200 OK', headers)
                return ["{}"]

            return self._reject_request(start_response)

    def _reject_request(self, start_response):
        headers = [('Content-type', 'application/json;charset=UTF-8')]
        start_response('401 Unauthorized', headers)
        json_body = {"error": "access_denied"}
        return [json.dumps(json_body)]


def filter_factory(global_conf, **local_conf):
    def auth_filter(app):
        return AuthProtocol(app)
    return auth_filter


def create_resource():
    return openstack_wsgi.Resource(Controller())
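The three endpoints above form the installed-application flow: /auth serves the login form, /approval validates credentials against Keystone, and /token exchanges the code for a bearer token. A minimal sketch of driving it by hand with the requests library, assuming the service listens at a hypothetical http://localhost:8787/oauth:

    import requests

    BASE = "http://localhost:8787/oauth"  # hypothetical endpoint root
    CLIENT_ID = "32555940559.apps.googleusercontent.com"
    CLIENT_SECRET = "ZmssLNjJy2998hD4CTg2ejr2"

    # Step 1: fetch the login form; it embeds a one-time code.
    requests.get(BASE + "/auth", params={
        "client_id": CLIENT_ID,
        "response_type": "code",
        "redirect_uri": "urn:ietf:wg:oauth:2.0:oob"})

    # Step 2 posts username/password plus that code to /approval (omitted);
    # Step 3 exchanges the verification code for a bearer token:
    token = requests.post(BASE + "/token", data={
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
        "grant_type": "authorization_code",
        "code": "<code returned by /approval>"}).json()
    print(token["access_token"], token["expires_in"])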
168
gceapi/api/operation_api.py
Normal file
@ -0,0 +1,168 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from gceapi.api import base_api
from gceapi.api import scopes
from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import timeutils


class API(base_api.API):
    """GCE operation API."""

    KIND = "operation"
    PERSISTENT_ATTRIBUTES = ["id", "insert_time", "start_time", "end_time",
                             "name", "type", "user", "status", "progress",
                             "scope_type", "scope_name",
                             "target_type", "target_name",
                             "method_key", "item_id",
                             "error_code", "error_message", "errors"]

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        self._method_keys = {}
        self._get_progress_methods = {}

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def register_get_progress_method(self, method_key, method):
        if method_key in self._get_progress_methods:
            raise exception.Invalid()
        # TODO(ft): check 'method' formal arguments
        self._method_keys[method] = method_key
        self._get_progress_methods[method_key] = method

    def get_scopes(self, context, item):
        return [scopes.construct(item["scope_type"], item["scope_name"])]

    def get_item(self, context, name, scope=None):
        operation = self._get_db_item_by_name(context, name)
        if (operation is None or
                operation["scope_type"] != scope.get_type() or
                operation["scope_name"] != scope.get_name()):
            raise exception.NotFound
        operation = self._update_operation_progress(context, operation)
        return operation

    def get_items(self, context, scope=None):
        operations = self._get_db_items(context)
        if scope is not None:
            operations = [operation for operation in operations
                          if (operation["scope_type"] == scope.get_type() and
                              operation["scope_name"] == scope.get_name())]
        for operation in operations:
            operation = self._update_operation_progress(context, operation)
        return operations

    def delete_item(self, context, name, scope=None):
        # NOTE(ft): Google deletes an operation with no check of its scope
        item = self._get_db_item_by_name(context, name)
        if item is None:
            raise exception.NotFound
        self._delete_db_item(context, item)

    def _update_operation_progress(self, context, operation):
        if operation["status"] == "DONE" or not operation.get("item_id"):
            return operation
        method_key = operation["method_key"]
        get_progress = self._get_progress_methods[method_key]
        operation_progress = get_progress(context, operation["item_id"])
        if operation_progress is None:
            return operation
        operation.update(operation_progress)
        if operation["progress"] == 100:
            operation["status"] = "DONE"
            operation["end_time"] = timeutils.isotime(None, True)
        self._update_db_item(context, operation)
        return operation

    def construct_operation(self, context, op_type, target_type, target_name,
                            scope):
        operation_id = str(uuid.uuid4())
        operation = {
            "id": operation_id,
            "name": "operation-" + operation_id,
            "insert_time": timeutils.isotime(context.timestamp, True),
            "user": context.user_name,
            "type": op_type,
            "target_type": target_type,
            "target_name": target_name,
            "scope_type": scope.get_type(),
            "scope_name": scope.get_name(),
        }
        return operation

    def save_operation(self, context, operation, start_time,
                       get_progress_method, item_id, operation_result):
        if isinstance(operation_result, Exception):
            operation.update(_error_from_exception(operation_result))
        operation["start_time"] = start_time
        method_key = self._method_keys.get(get_progress_method)
        if method_key is None or "error_code" in operation:
            operation["progress"] = 100
            operation["status"] = "DONE"
            operation["end_time"] = timeutils.isotime(None, True)
        else:
            operation["progress"] = 0
            operation["status"] = "RUNNING"
            operation["method_key"] = method_key
            if item_id is not None:
                operation["item_id"] = item_id
        return self._add_db_item(context, operation)

    def update_operation(self, context, operation_id, operation_result):
        operation = self._get_db_item_by_id(context, operation_id)
        if operation is None:
            # NOTE(ft): it may lead to a hung up unfinished operation in DB
            return
        if isinstance(operation_result, Exception):
            operation.update(_error_from_exception(operation_result))
        else:
            operation.update(operation_result)
        if operation["progress"] == 100 or "error_code" in operation:
            operation["status"] = "DONE"
            operation["end_time"] = timeutils.isotime(None, True)
        self._update_db_item(context, operation)


def gef_final_progress(with_error=False):
    progress = {"progress": 100}
    if with_error:
        progress["error_code"] = 500
        progress["error_message"] = _('Internal server error')
        progress["errors"] = [{
            "code": "UNKNOWN_OS_ERROR",
            "message": _("Operation finished with unknown error. "
                         "See OpenStack logs.")
        }]
    return progress


def is_final_progress(progress):
    return progress is not None and (progress.get("progress") == 100 or
                                     progress.get("error_code") is not None)


def _error_from_exception(ex):
    return {"errors": [{"code": ex.__class__.__name__, "message": str(ex)}],
            "error_code": 500,
            "error_message": _('Internal server error')}
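As an example of the error path, _error_from_exception() turns any raised exception into the GCE-style error block that save_operation()/update_operation() merge into the operation record:

    # _error_from_exception(ValueError("disk is busy")) produces:
    {
        "errors": [{"code": "ValueError", "message": "disk is busy"}],
        "error_code": 500,
        "error_message": "Internal server error",
    }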
73
gceapi/api/operation_util.py
Normal file
@ -0,0 +1,73 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import threading

from gceapi.api import operation_api
from gceapi.openstack.common import timeutils


def init_operation(context, op_type, target_type, target_name, scope):
    if context.operation is not None:
        return
    operation = operation_api.API().construct_operation(
        context, op_type, target_type, target_name, scope)
    context.operation = operation
    return operation


def save_operation(context, action_result):
    if context.operation is None or context.operation_start_time is None:
        return None
    return operation_api.API().save_operation(
        context,
        context.operation,
        context.operation_start_time,
        context.operation_get_progress_method,
        context.operation_item_id,
        action_result)


def start_operation(context, get_progress_method=None, item_id=None):
    if context.operation is None or context.operation_start_time is not None:
        return
    context.operation_start_time = timeutils.isotime(None, True)
    context.operation_get_progress_method = get_progress_method
    context.operation_item_id = item_id
    set_item_id(context, item_id)


def set_item_id(context, item_id):
    if context.operation is None or context.operation_start_time is None:
        return
    context.operation_item_id = item_id


def continue_operation(context, func, timeout=5):
    threading.Timer(timeout, _continue_operation, [context, func]).start()


def _continue_operation(context, func):
    operation = context.operation
    try:
        operation_result = func()
    except Exception as ex:
        operation_result = ex
    if operation is None:
        return
    if operation_result is None:
        continue_operation(context, func, timeout=2)
    else:
        operation_api.API().update_operation(context, operation["id"],
                                             operation_result)
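continue_operation()/_continue_operation() above implement a simple re-arming timer: poll func() every few seconds until it returns a result, then record it. The same pattern in isolation:

    import threading

    def poll(get_result, on_done, timeout=5):
        """Re-arm a one-shot timer until get_result() yields a value."""
        def _tick():
            result = get_result()
            if result is None:
                poll(get_result, on_done, timeout=2)
            else:
                on_done(result)
        threading.Timer(timeout, _tick).start()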
31
gceapi/api/operations.py
Normal file
@ -0,0 +1,31 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import operation_api
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Operation controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(operation_api.API(), *args, **kwargs)

    def format_item(self, request, operation, scope):
        return self._format_operation(request, operation, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
103
gceapi/api/project_api.py
Normal file
@ -0,0 +1,103 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception


class API(base_api.API):
    """GCE Projects API."""

    KIND = "project"

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope=None):
        project_name = context.project_name

        keystone = clients.keystone(context)
        project = [t for t in keystone.tenants.list()
                   if t.name == project_name][0]

        result = utils.to_dict(project)
        result["keypair"] = self._get_gce_keypair(context)
        project_id = project.id

        nova_limits = clients.nova(context).limits.get(tenant_id=project_id)
        result["nova_limits"] = dict((l.name, l.value)
                                     for l in nova_limits.absolute)

        cinder_client = clients.cinder(context)
        result["cinder_quotas"] = utils.to_dict(
            cinder_client.quotas.get(project_id, usage=True))

        neutron_client = clients.neutron(context)
        result["neutron_quota"] = (
            neutron_client.show_quota(project_id)["quota"])
        result["neutron_quota"]["network_used"] = len(neutron_client
            .list_networks(tenant_id=project_id)["networks"])
        result["neutron_quota"]["floatingip_used"] = len(neutron_client
            .list_floatingips(tenant_id=project_id)["floatingips"])
        result["neutron_quota"]["security_group_used"] = len(neutron_client
            .list_security_groups(tenant_id=project_id)["security_groups"])
        return result

    def get_items(self, context, scope=None):
        raise exception.NotFound

    def set_common_instance_metadata(self, context, metadata_list):
        instance_metadata = dict(
            [(x['key'], x['value']) for x in metadata_list])
        operation_util.start_operation(context)
        ssh_keys = instance_metadata.pop('sshKeys', None)
        if ssh_keys:
            nova_client = clients.nova(context)
            for key_data in ssh_keys.split('\n'):
                user_name, ssh_key = key_data.split(":")
                self._update_key(nova_client, user_name, ssh_key)

    def get_gce_user_keypair_name(self, context):
        client = clients.nova(context)
        for keypair in client.keypairs.list():
            if keypair.name == context.user_name:
                return keypair.name

        return None

    def _get_gce_keypair(self, context):
        client = clients.nova(context)
        key_datas = []
        for keypair in client.keypairs.list():
            key_datas.append(keypair.name + ':' + keypair.public_key)

        if not key_datas:
            return None

        return {'key': 'sshKeys', 'value': "\n".join(key_datas)}

    def _update_key(self, nova_client, user_name, ssh_key):
        try:
            keypair = nova_client.keypairs.get(user_name)
            if keypair.public_key == ssh_key:
                return

            keypair.delete()
        except clients.novaclient.exceptions.NotFound:
            pass

        keypair = nova_client.keypairs.create(user_name, ssh_key)
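The sshKeys metadata value that _get_gce_keypair() builds and set_common_instance_metadata() parses is one user:public_key pair per line; for example (keys truncated):

    ssh_keys = ("alice:ssh-rsa AAAAB3Nza... alice@host\n"
                "bob:ssh-rsa AAAAB3Nza... bob@host")
    for key_data in ssh_keys.split('\n'):
        # yields ("alice", "ssh-rsa AAAAB3Nza... alice@host"), then bob's
        user_name, ssh_key = key_data.split(":")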
104
gceapi/api/projects.py
Normal file
@ -0,0 +1,104 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob

from gceapi import exception

from gceapi.api import common as gce_common
from gceapi.api import operation_util
from gceapi.api import project_api
from gceapi.api import scopes
from gceapi.api import wsgi as gce_wsgi
from gceapi.openstack.common.gettextutils import _


class Controller(gce_common.Controller):
    """GCE Projects controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(project_api.API(), *args, **kwargs)

    def format_item(self, request, project, scope):
        desc = project["description"]
        result_dict = {
            "name": project["name"],
            "description": desc if desc else "",
            "commonInstanceMetadata": {
                "kind": "compute#metadata",
                "items": [project["keypair"]]
            } if project["keypair"] else {
                "kind": "compute#metadata",
            },
            "quotas": []
        }

        self._add_quota(result_dict["quotas"], "CPU",
                        project["nova_limits"].get("maxTotalCores", -1),
                        project["nova_limits"].get("totalCoresUsed", -1))
        self._add_quota(result_dict["quotas"], "INSTANCES",
                        project["nova_limits"].get("maxTotalInstances", -1),
                        project["nova_limits"].get("totalInstancesUsed", -1))

        quota = project["cinder_quotas"].get("gigabytes", {})
        self._add_quota(result_dict["quotas"], "DISKS_TOTAL_GB",
                        quota.get("limit", -1), quota.get("in_use", -1))
        quota = project["cinder_quotas"].get("snapshots", {})
        self._add_quota(result_dict["quotas"], "SNAPSHOTS",
                        quota.get("limit", -1), quota.get("in_use", -1))
        quota = project["cinder_quotas"].get("volumes", {})
        self._add_quota(result_dict["quotas"], "DISKS",
                        quota.get("limit", -1), quota.get("in_use", -1))

        self._add_quota(result_dict["quotas"], "FIREWALLS",
                        project["neutron_quota"].get("security_group", -1),
                        project["neutron_quota"].get("security_group_used",
                                                     -1))
        self._add_quota(result_dict["quotas"], "STATIC_ADDRESSES",
                        project["neutron_quota"].get("floatingip", -1),
                        project["neutron_quota"].get("floatingip_used", -1))
        self._add_quota(result_dict["quotas"], "NETWORKS",
                        project["neutron_quota"].get("network", -1),
                        project["neutron_quota"].get("network_used", -1))

        return self._format_item(request, result_dict, scope)

    def set_common_instance_metadata(self, req, body):
        context = self._get_context(req)
        operation_util.init_operation(context, "setMetadata", self._type_name,
                                      None, scopes.GlobalScope())
        try:
            self._api.set_common_instance_metadata(
                context, body.get("items", []))
        except exception.KeypairLimitExceeded:
            msg = _("Quota exceeded, too many key pairs.")
            raise webob.exc.HTTPRequestEntityTooLarge(
                explanation=msg,
                headers={'Retry-After': 0})
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.KeyPairExists:
            msg = _("Key pair already exists.")
            raise webob.exc.HTTPConflict(explanation=msg)

    def _add_quota(self, quotas, metric, limit, usage):
        quotas.append({
            "metric": metric,
            "limit": float(limit),
            "usage": float(usage),
        })


def create_resource():
    return gce_wsgi.GCEResource(Controller())
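Each _add_quota() call above appends one GCE quota entry; for instance, the CPU quota of a project with 20 cores allowed and 2 in use becomes:

    {"metric": "CPU", "limit": 20.0, "usage": 2.0}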
43
gceapi/api/region_api.py
Normal file
@ -0,0 +1,43 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import scopes
from gceapi import exception


class API(base_api.API):
    """GCE Regions API

    Stubbed for now to support only one predefined region, nova.
    """

    KIND = "region"
    _REGIONS = ["nova"]

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope=None):
        regions = self.get_items(context)
        for region in regions:
            if region["name"] == name:
                return region
        raise exception.NotFound

    def get_items(self, context, scope=None):
        return [{"name": region} for region in self._REGIONS]

    def get_items_as_scopes(self, context):
        return [scopes.RegionScope(region) for region in self._REGIONS]
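With the comprehension in get_items() written as above, each configured region yields its own dict:

    >>> [{"name": region} for region in ["nova"]]
    [{'name': 'nova'}]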
41
gceapi/api/regions.py
Normal file
@ -0,0 +1,41 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import region_api
from gceapi.api import wsgi as gce_wsgi
from gceapi.api import zone_api


class Controller(gce_common.Controller):
    """GCE Regions controller."""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(region_api.API(), *args, **kwargs)
        self._zone_api = zone_api.API()

    def format_item(self, req, region, scope):
        zones = self._zone_api.get_items(self._get_context(req), scope)
        result_dict = {
            "name": region["name"],
            "status": "UP",
            "zones": [self._qualify(req, "zones", zone["name"], None)
                      for zone in zones]
        }

        return self._format_item(req, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
26
gceapi/api/route_api.py
Normal file
@ -0,0 +1,26 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import route_neutron_api
from gceapi.api import route_nova_api


class API(base_api.API):
    """GCE Route API."""

    NEUTRON_API_MODULE = route_neutron_api
    NOVA_API_MODULE = route_nova_api

    __metaclass__ = base_api.NetSingleton
409
gceapi/api/route_neutron_api.py
Normal file
@ -0,0 +1,409 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import string

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import network_api
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception
from gceapi.openstack.common.gettextutils import _


ALL_IP_CIDR = "0.0.0.0/0"


class API(base_api.API):
    """GCE Route API - neutron implementation."""

    KIND = "route"
    PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "description",
                             "is_default"]
    TRANS_TABLE = string.maketrans("./", "--")

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        network_api.API()._register_callback(
            base_api._callback_reasons.post_add,
            self._create_network_router)
        network_api.API()._register_callback(
            base_api._callback_reasons.check_delete,
            self._check_delete_network)
        network_api.API()._register_callback(
            base_api._callback_reasons.pre_delete,
            self._delete_network_router)

    def _get_type(self):
        return self.KIND

    def _get_persistent_attributes(self):
        return self.PERSISTENT_ATTRIBUTES

    def get_item(self, context, name, scope=None):
        routes, dummy = self._sync_routes(context)
        return routes[name]

    def get_items(self, context, scope=None):
        routes, dummy = self._sync_routes(context)
        return routes.values()

    def delete_item(self, context, name, scope=None):
        routes, aliased_routes = self._sync_routes(context)
        route = routes[name]
        if route.get("nexthop") is None:
            raise exception.InvalidInput(
                _("The local route cannot be deleted."))
        destination = route["destination"]
        nexthop = route["nexthop"]
        # NOTE(ft): delete OS route only if it doesn't have aliases
        # at the moment
        client = clients.neutron(context)
        operation_util.start_operation(context)
        if self._get_route_key(route) not in aliased_routes:
            dummy, router = self._get_network_objects(client,
                                                      route["network"])
            if "external_gateway_info" in route:
                client.remove_gateway_router(router["id"])
            else:
                routes = [r for r in router["routes"]
                          if (destination != r["destination"] or
                              nexthop != r["nexthop"])]
                client.update_router(
                    router["id"],
                    {"router": {"routes": routes, }, })
        self._delete_db_item(context, route)

    def add_item(self, context, name, body, scope=None):
        routes, dummy = self._sync_routes(context)
        if name in routes:
            raise exception.InvalidInput(
                _("The resource '%s' already exists.") % name)

        # NOTE(ft): check network is plugged to router
        network_name = utils._extract_name_from_url(body["network"])
        network = network_api.API().get_item(context, network_name)

        nexthop = body.get("nextHopGateway")
        if (nexthop is not None and
                (utils._extract_name_from_url(nexthop) ==
                 "default-internet-gateway") and
                # NOTE(ft): OS doesn't support IP mask for external gateway
                body.get("destRange") == ALL_IP_CIDR):
            operation_util.start_operation(context)
            return self._create_internet_route(context, network, body)

        nexthop = body.get("nextHopIp")
        if nexthop is not None:
            operation_util.start_operation(context)
            return self._create_custom_route(context, network, body)

        raise exception.InvalidInput(_("Unsupported route."))

    def _create_internet_route(self, context, network, body):
        client = clients.neutron(context)
        port, router = self._get_network_objects(client, network)
        public_network_id = network_api.API().get_public_network_id(context)
        external_gateway_info = {"network_id": public_network_id}
        router = client.add_gateway_router(
            router["id"],
            external_gateway_info)["router"]
        gateway_port = client.list_ports(
            device_id=router["id"],
            device_owner="network:router_gateway")["ports"][0]
        route = self._add_gce_route(context, network, port, body,
                                    is_default=False,
                                    destination=ALL_IP_CIDR,
                                    nexthop=gateway_port["id"])
        route["network"] = network
        route["port"] = port
        route["external_gateway_info"] = external_gateway_info
        return route

    def _create_custom_route(self, context, network, body):
        client = clients.neutron(context)
        port, router = self._get_network_objects(client, network)
        destination = body.get("destRange")
        nexthop = body.get("nextHopIp")
        routes = router["routes"]
        if all(r["destination"] != destination or r["nexthop"] != nexthop
               for r in routes):
            routes.append({
                "destination": destination,
                "nexthop": nexthop,
            })
            client.update_router(
                router["id"],
                {"router": {"routes": router["routes"], }, })
        route = self._add_gce_route(context, network, port, body,
                                    is_default=False, destination=destination,
                                    nexthop=nexthop)
        route["network"] = network
        route["port"] = port
        return route

    def _sync_routes(self, context):
        os_routes = self._get_os_routes(context)
        gce_routes = self._get_gce_routes(context)
        aliased_routes = {}
        routes = {}
        for (key, os_route) in os_routes.items():
            gce_route_list = gce_routes.pop(key, None)
            if gce_route_list is None:
                continue
            for gce_route in gce_route_list:
                routes[gce_route["name"]] = dict(os_route, **dict(gce_route))
            os_routes.pop(key)
            if len(gce_route_list) > 1:
                aliased_routes[key] = gce_route_list

        # NOTE(ft): add new named routes
        for os_route in os_routes.itervalues():
            network = os_route["network"]
            port = os_route["port"]
            route = self._add_gce_route(context, network, port, os_route,
                                        is_default=True,
                                        creationTimestamp="")
            os_route.update(route)
            routes[os_route["name"]] = os_route

        # NOTE(ft): delete obsolete named routes
        for gce_route_list in gce_routes.itervalues():
            for gce_route in gce_route_list:
                self._delete_db_item(context, gce_route)
        return (routes, aliased_routes)

    def _get_gce_routes(self, context):
        gce_routes = self._get_db_items(context)
        gce_routes_dict = {}
        for route in gce_routes:
            route = self._unpack_route_from_db_format(route)
            key = self._get_route_key(route)
            val_array = gce_routes_dict.get(key)
            if val_array is None:
                gce_routes_dict[key] = [route]
            else:
                val_array.append(route)
        return gce_routes_dict

    def _get_route_key(self, route):
        if route["port_id"] is None:
            return route["network_id"]
        else:
            return (route["network_id"] + route["port_id"] +
                    route["destination"] + route["nexthop"])

    def _get_os_routes(self, context):
        client = clients.neutron(context)
        routers = client.list_routers(tenant_id=context.project_id)["routers"]
        routers = dict((r["id"], r) for r in routers)
        ports = client.list_ports(
            tenant_id=context.project_id,
            device_owner="network:router_interface")["ports"]
        ports = dict((p["network_id"], p) for p in ports)
        gateway_ports = client.list_ports(
            device_owner="network:router_gateway")["ports"]
        gateway_ports = dict((p["device_id"], p) for p in gateway_ports)
        routes = {}
        networks = network_api.API().get_items(context)
        for network in networks:
            # NOTE(ft): append local route
            network_id = network["id"]
            routes[network_id] = self._init_local_route(network)

            port = ports.get(network_id)
            if port is None:
                continue
            router = routers.get(port["device_id"])
            if router is None:
                continue
            key_prefix = network_id + port["id"]

            # NOTE(ft): append internet route
            external_gateway_info = router.get("external_gateway_info")
            gateway_port = gateway_ports.get(router["id"])
            if (external_gateway_info is not None and
                    gateway_port is not None):
                key = key_prefix + ALL_IP_CIDR + gateway_port["id"]
                routes[key] = self._init_internet_route(
                    network, port, gateway_port["id"],
                    external_gateway_info)

            # NOTE(ft): append other routes
            for route in router["routes"]:
                destination = route["destination"]
                nexthop = route["nexthop"]
                key = key_prefix + destination + nexthop
                routes[key] = self._init_custom_route(
                    network, port, destination, nexthop)
        return routes

    def _get_network_objects(self, client, network):
        subnet_id = network.get("subnet_id")
        if subnet_id is None:
            raise exception.PortNotFound(_("Network has no router."))
        ports = client.list_ports(
            network_id=network["id"],
            device_owner="network:router_interface")["ports"]
        port = next((p for p in ports
                     if any(fip["subnet_id"] == subnet_id
                            for fip in p["fixed_ips"])), None)
        if port is None:
            raise exception.PortNotFound(_("Network has no router."))
        router = client.show_router(port["device_id"])["router"]
        return (port, router)

    def _create_network_router(self, context, network, subnet_id):
        public_network_id = network_api.API().get_public_network_id(context)
        client = clients.neutron(context)
        router = client.create_router(body={"router": {
            "name": network["name"],
            "admin_state_up": True,
            "external_gateway_info": {"network_id": public_network_id},
        }})["router"]
        client.add_interface_router(router["id"], {"subnet_id": subnet_id})

    def _check_delete_network(self, context, network):
        network_id = network["id"]
        # NOTE(ft): check that non-default routes no longer exist;
        # must be done for internet routes
        routes, dummy = self._sync_routes(context)
        for route in routes.itervalues():
            if (route["network_id"] == network_id and
                    not route["is_default"]):
                raise exception.InvalidInput(_("Network contains routes"))
        # NOTE(ft): check that invisible routes no longer exist;
        # must be done for routes on non-default subnets and other
        # non-GCE stuff
        client = clients.neutron(context)
        checked_routers = set()
        subnets = client.list_subnets(network_id=network_id)["subnets"]
        cidrs = [netaddr.IPNetwork(subnet["cidr"]) for subnet in subnets]
        ports = client.list_ports(
            network_id=network["id"],
            device_owner="network:router_interface")["ports"]
        for port in ports:
            if port["device_id"] in checked_routers:
                continue
            checked_routers.add(port["device_id"])
            router = client.show_router(port["device_id"])["router"]
            for route in router["routes"]:
                nexthop = netaddr.IPAddress(route["nexthop"])
                if any(nexthop in cidr for cidr in cidrs):
                    raise exception.InvalidInput(_("Network contains routes"))
        # TODO(ft): here is the good place to create default routes in DB;
        # now they will be created on the next 'route' request,
        # but 'creationTimestamp' will be absent

    def _delete_network_router(self, context, network):
        client = clients.neutron(context)
        ports = client.list_ports(
            network_id=network["id"],
            device_owner="network:router_interface")["ports"]
        router_ids = set()
        for port in ports:
            if port["device_owner"] == "network:router_interface":
                router_ids.add(port["device_id"])
                client.remove_interface_router(port["device_id"],
                                               {"port_id": port["id"]})
        # NOTE(ft): leave routers if network is plugged to more than one
        # router, because that looks like some non-GCE setup, so we don't
        # want to decide whether we can delete the router or not
        if len(router_ids) != 1:
            return
        router = router_ids.pop()
        # NOTE(ft): leave router if other subnets are plugged to it
        ports = client.list_ports(
            device_id=router,
            device_owner="network:router_interface")["ports"]
        if len(ports) == 0:
            client.delete_router(router)
        # TODO(ft): here is the good place to purge DB from routes

    def _add_gce_route(self, context, network, port, route, **kwargs):
        db_route = {}
        for key in self.PERSISTENT_ATTRIBUTES:
            value = route.get(key)
            if value is None:
                value = kwargs.get(key)
            if value is not None or key in kwargs:
db_route[key] = value
|
||||||
|
|
||||||
|
def get_from_dicts(key, dict1, dict2, default=None):
|
||||||
|
value = dict1.get(key)
|
||||||
|
if value is None:
|
||||||
|
value = dict2.get(key)
|
||||||
|
return value if value is not None else default
|
||||||
|
|
||||||
|
route_id = "//".join([network["id"],
|
||||||
|
port["id"] if port is not None else "",
|
||||||
|
get_from_dicts("destination", route, kwargs),
|
||||||
|
get_from_dicts("nexthop", route, kwargs, ""),
|
||||||
|
get_from_dicts("name", route, kwargs)])
|
||||||
|
db_route["id"] = route_id
|
||||||
|
db_route = self._add_db_item(context, db_route)
|
||||||
|
return self._unpack_route_from_db_format(db_route)
|
||||||
|
|
||||||
|
def _unpack_route_from_db_format(self, route):
|
||||||
|
parts = route["id"].split("//")
|
||||||
|
route["network_id"] = parts[0]
|
||||||
|
route["port_id"] = parts[1] if parts[1] != "" else None
|
||||||
|
route["destination"] = parts[2]
|
||||||
|
route["nexthop"] = parts[3] if parts[3] != "" else None
|
||||||
|
route["name"] = parts[4]
|
||||||
|
return route
|
||||||
|
|
||||||
|
def _init_local_route(self, network):
|
||||||
|
return {
|
||||||
|
"id": None,
|
||||||
|
"name": "default-route-%s-local" % network["id"],
|
||||||
|
"description": "Default route to the virtual network.",
|
||||||
|
"network": network,
|
||||||
|
"port": None,
|
||||||
|
"destination": network.get("IPv4Range", ""),
|
||||||
|
"nexthop": None,
|
||||||
|
"is_default": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _init_internet_route(self, network, port, nexthop, gateway_info):
|
||||||
|
return {
|
||||||
|
"id": None,
|
||||||
|
"name": "default-route-%s-internet" % network["id"],
|
||||||
|
"description": "Default route to the Internet.",
|
||||||
|
"network": network,
|
||||||
|
"port": port,
|
||||||
|
"destination": ALL_IP_CIDR,
|
||||||
|
"nexthop": nexthop,
|
||||||
|
"is_default": True,
|
||||||
|
"external_gateway_info": gateway_info,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _init_custom_route(self, network, port, destination, nexthop):
|
||||||
|
name = ("custom-route-%(nw)s-dst-%(dst)s-gw-%(nh)s" %
|
||||||
|
{
|
||||||
|
"nw": network["id"],
|
||||||
|
"dst": destination,
|
||||||
|
"nh": nexthop,
|
||||||
|
})
|
||||||
|
name = str(name).translate(self.TRANS_TABLE)
|
||||||
|
return {
|
||||||
|
"id": None,
|
||||||
|
"name": name,
|
||||||
|
"network": network,
|
||||||
|
"port": port,
|
||||||
|
"destination": destination,
|
||||||
|
"nexthop": nexthop,
|
||||||
|
"is_default": False,
|
||||||
|
}
|
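The "//"-joined id built in _add_gce_route and decoded in _unpack_route_from_db_format is all the DB stores about a route's placement, so the pack and unpack sides must stay in sync. A minimal sketch of the round trip (plain Python, no gceapi imports; the sample ids are made up):

# Pack: network id, optional port id, destination, optional nexthop, name.
route_id = "//".join(["net-1", "port-1", "10.0.0.0/24", "10.0.0.1", "r1"])

# Unpack mirrors _unpack_route_from_db_format: empty fields become None.
parts = route_id.split("//")
route = {
    "network_id": parts[0],
    "port_id": parts[1] if parts[1] != "" else None,
    "destination": parts[2],
    "nexthop": parts[3] if parts[3] != "" else None,
    "name": parts[4],
}
print(route["nexthop"])  # 10.0.0.1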
41
gceapi/api/route_nova_api.py
Normal file
@ -0,0 +1,41 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi import exception
from gceapi.openstack.common.gettextutils import _


NOT_SUPPORTED_MESSAGE = _("Routes are not supported with nova network")


class API(base_api.API):
    """GCE Route API - nova-network implementation."""

    KIND = "route"

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope_id=None):
        raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE)

    def get_items(self, context, scope_id=None):
        raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE)

    def delete_item(self, context, name, scope_id=None):
        raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE)

    def add_item(self, context, name, body, scope_id=None):
        raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE)
52
gceapi/api/routes.py
Normal file
@ -0,0 +1,52 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import route_api
from gceapi.api import wsgi as gce_wsgi


class Controller(gce_common.Controller):
    """GCE Route controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(route_api.API(), *args, **kwargs)

    def format_item(self, request, route, scope):
        network_name = self._qualify(
            request, "networks", route["network"]["name"], None)
        result_dict = {
            "name": route["name"],
            "network": network_name,
            "destRange": route.get("destination"),
            "creationTimestamp": route.get("creationTimestamp", ""),
            "priority": 1000,
        }
        if "external_gateway_info" in route:
            result_dict["nextHopGateway"] = self._qualify(
                request, "gateways", "default-internet-gateway", scope)
        else:
            nextHop = route.get("nexthop")
            if nextHop is not None:
                result_dict["nextHopIp"] = nextHop
            else:
                result_dict["nextHopNetwork"] = network_name
        if "description" in route:
            result_dict["description"] = route["description"]

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
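For reference, format_item above emits a GCE-style route resource. A hypothetical custom route with a next-hop IP would come out roughly as follows (values are illustrative only; the qualified link format and the kind/selfLink decoration are supplied by _qualify and _format_item):

{
    "name": "custom-route-net-1-dst-10-240-0-0-16-gw-10-0-0-1",
    "network": "<qualified network link from self._qualify>",
    "destRange": "10.240.0.0/16",
    "creationTimestamp": "",
    "priority": 1000,
    "nextHopIp": "10.0.0.1",  # present because the route has a nexthop
}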
120
gceapi/api/scopes.py
Normal file
@ -0,0 +1,120 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from webob import exc

from gceapi.api import base_api
from gceapi.api import utils


class Scope(object):
    """Scope that contains a resource.

    The following scopes exist: global, aggregated, zones, regions.
    """

    _type = None
    _collection = None
    _name = None

    @abc.abstractmethod
    def __init__(self, scope_name):
        self._name = scope_name

    def get_type(self):
        return self._type

    def get_name(self):
        return self._name

    def get_collection(self):
        return self._collection

    def get_path(self):
        if self._collection is not None and self._name is not None:
            return "/".join([self._collection, self._name])
        else:
            return self._type

    def get_scope_api(self):
        return base_api.Singleton.get_instance(self.get_type())


class GlobalScope(Scope):

    _type = "global"

    def __init__(self):
        super(GlobalScope, self).__init__(None)


class AggregatedScope(Scope):

    _type = "aggregated"

    def __init__(self):
        super(AggregatedScope, self).__init__(None)


class ZoneScope(Scope):

    _type = "zone"
    _collection = utils.get_collection_name(_type)

    def __init__(self, scope_name):
        super(ZoneScope, self).__init__(scope_name)


class RegionScope(Scope):

    _type = "region"
    _collection = utils.get_collection_name(_type)

    def __init__(self, scope_name):
        super(RegionScope, self).__init__(scope_name)


def construct(scope_type, scope_id):
    if scope_type == "zone":
        return ZoneScope(scope_id)
    elif scope_type == "region":
        return RegionScope(scope_id)
    elif scope_type == "global":
        return GlobalScope()
    elif scope_type == "aggregated":
        return AggregatedScope()
    return None


def construct_from_path(path, scope_id):
    path_info = [item for item in path.split("/") if item]
    path_count = len(path_info)
    if path_count == 0:
        raise exc.HTTPBadRequest(comment="Bad path %s" % path)
    if path_count < 3:
        return None
    collection_or_type = path_info[1]
    if collection_or_type in ("zones", "regions") and scope_id is None:
        return None
    if collection_or_type == "zones":
        return ZoneScope(scope_id)
    elif collection_or_type == "regions":
        return RegionScope(scope_id)
    elif collection_or_type == "global":
        return GlobalScope()
    elif collection_or_type == "aggregated":
        return AggregatedScope()
    raise exc.HTTPBadRequest(comment="Bad path %s" % path)
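A short usage sketch for the helpers above (the request path is a made-up example in GCE URL style):

from gceapi.api import scopes

scope = scopes.construct_from_path(
    "/demo-project/zones/nova/instances", "nova")
print(scope.get_type())  # "zone"
print(scope.get_path())  # "zones/nova"

# Paths without a scope segment yield None rather than a Scope object.
print(scopes.construct_from_path("/demo-project/global", None))  # None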
113
gceapi/api/snapshot_api.py
Normal file
@ -0,0 +1,113 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import operation_api
from gceapi.api import operation_util
from gceapi.api import utils
from gceapi import exception


class API(base_api.API):
    """GCE Snapshot API."""

    KIND = "snapshot"
    _status_map = {
        'creating': 'CREATING',
        'available': 'READY',
        'active': 'READY',
        'deleting': 'DELETING',
        'deleted': 'DELETING',
        'error': 'FAILED'}

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)
        operation_api.API().register_get_progress_method(
            "snapshot-add",
            self._get_add_item_progress)
        operation_api.API().register_get_progress_method(
            "snapshot-delete",
            self._get_delete_item_progress)

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope=None):
        client = clients.cinder(context)
        snapshots = client.volume_snapshots.list(
            search_opts={"display_name": name})
        if snapshots and len(snapshots) == 1:
            return self._prepare_item(client, utils.to_dict(snapshots[0]))
        raise exception.NotFound

    def get_items(self, context, scope=None):
        client = clients.cinder(context)
        snapshots = [utils.to_dict(item)
                     for item in client.volume_snapshots.list()]
        for snapshot in snapshots:
            self._prepare_item(client, snapshot)
        return snapshots

    def delete_item(self, context, name, scope=None):
        client = clients.cinder(context).volume_snapshots
        snapshots = client.list(search_opts={"display_name": name})
        if not snapshots or len(snapshots) != 1:
            raise exception.NotFound
        operation_util.start_operation(context,
                                       self._get_delete_item_progress,
                                       snapshots[0].id)
        client.delete(snapshots[0])

    def add_item(self, context, body, scope=None):
        name = body["name"]
        disk_name = body["disk_name"]
        client = clients.cinder(context)
        volumes = client.volumes.list(search_opts={"display_name": disk_name})
        if not volumes or len(volumes) != 1:
            raise exception.NotFound

        operation_util.start_operation(context, self._get_add_item_progress)
        snapshot = client.volume_snapshots.create(
            volumes[0].id, True, name, body["description"])
        operation_util.set_item_id(context, snapshot.id)

        return self._prepare_item(client, utils.to_dict(snapshot))

    def _prepare_item(self, client, item):
        item["name"] = item["display_name"]
        try:
            item["disk"] = utils.to_dict(client.volumes.get(item["volume_id"]))
        except Exception:
            pass
        item["status"] = self._status_map.get(item["status"], item["status"])
        return item

    def _get_add_item_progress(self, context, snapshot_id):
        client = clients.cinder(context)
        try:
            snapshot = client.volume_snapshots.get(snapshot_id)
        except clients.cinderclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if snapshot.status != "creating":
            return operation_api.gef_final_progress(snapshot.status == "error")

    def _get_delete_item_progress(self, context, snapshot_id):
        client = clients.cinder(context)
        try:
            snapshot = client.volume_snapshots.get(snapshot_id)
        except clients.cinderclient.exceptions.NotFound:
            return operation_api.gef_final_progress()
        if snapshot.status not in ["deleting", "deleted"]:
            return operation_api.gef_final_progress(True)
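The request body consumed by add_item above needs exactly the three keys it reads. A hedged usage sketch; "context" stands for a gceapi RequestContext carrying cinder credentials (its construction is not shown here), and the names are examples:

from gceapi.api import snapshot_api

api = snapshot_api.API()
snapshot = api.add_item(context, {
    "name": "snap-1",          # display name of the new snapshot
    "disk_name": "disk-1",     # must match exactly one cinder volume
    "description": "daily backup",
})
print(snapshot["status"])  # e.g. "CREATING", mapped via _status_map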
51
gceapi/api/snapshots.py
Normal file
@ -0,0 +1,51 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import scopes
from gceapi.api import snapshot_api
from gceapi.api import wsgi as gce_wsgi
from gceapi import exception


class Controller(gce_common.Controller):
    """GCE Snapshot controller"""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(snapshot_api.API(), *args, **kwargs)

    def format_item(self, request, snapshot, scope):
        result_dict = {
            "creationTimestamp": self._format_date(snapshot["created_at"]),
            "status": snapshot["status"],
            "diskSizeGb": snapshot["size"],
            "name": snapshot["name"],
            "description": snapshot["display_description"],
        }
        disk = snapshot.get("disk")
        if disk is not None:
            result_dict["sourceDisk"] = self._qualify(
                request, "disks", disk["display_name"],
                scopes.ZoneScope(disk["availability_zone"]))
            result_dict["sourceDiskId"] = self._get_id(
                result_dict["sourceDisk"])

        return self._format_item(request, result_dict, scope)

    def create(self, req, body, scope):
        raise exception.NotFound


def create_resource():
    return gce_wsgi.GCEResource(Controller())
160
gceapi/api/utils.py
Normal file
@ -0,0 +1,160 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities and helper functions."""


def _parse_slash(string):
    res = ''
    sp = string.split('/')
    for element in reversed(sp):
        res = {element: res}
    return res


def split_by_comma(string):
    between = 0
    last_split = 0
    sp = []

    i = 0
    while i < len(string):
        if string[i] == '(':
            between += 1
        elif string[i] == ')':
            between -= 1
        elif string[i] == ',' and not between:
            sp.append(string[last_split:i])
            last_split = i + 1
        i += 1
    sp.append(string[last_split:])
    return sp


def _parse_template(string):
    sp = split_by_comma(string)

    i = 0
    while i < len(sp):
        if '(' in sp[i]:
            sp[i] = sp[i].replace('(', ' ').replace(')', ' ').split()
        i += 1

    json = {}
    i = 0
    while i < len(sp):
        if isinstance(sp[i], list):
            fields = sp[i][1].split(',')
            json[sp[i][0]] = [{}]
            for field in fields:
                dct = _parse_slash(field)
                key = dct.keys()[0]
                json[sp[i][0]][0][key] = dct[key]
        else:
            field = _parse_slash(sp[i])
            key = field.keys()[0]
            json[key] = field[key]
        i += 1

    return json


def apply_template(template_string, json):

    def apply_recursive(template, json):
        res = {}
        if template == '':
            return json
        for key, val in template.items():
            if key in json and val == '':
                res[key] = json[key]
            elif key in json and val == '*':
                pass
            elif key in json and isinstance(val, list):
                if not isinstance(json[key], list):
                    raise ValueError()
                array = []
                for element in json[key]:
                    r = apply_recursive(val[0], element)
                    array.append(r)
                res[key] = array
            elif key in json and isinstance(val, dict):
                r = apply_recursive(val, json[key])
                res[key] = r
            elif key not in json and key == '*':
                for k, v in json.items():
                    try:
                        r = apply_recursive(val, v)
                    except ValueError:
                        continue
                    res[k] = r
            elif key not in json:
                raise ValueError()
        return res

    return apply_recursive(_parse_template(template_string), json)


def to_dict(obj, recursive=False, classkey=None):
    if hasattr(obj, "__dict__"):
        data = dict()
        for key in dir(obj):
            try:
                value = getattr(obj, key)
                if not callable(value) and not key.startswith('_'):
                    data[key] = (value if not recursive
                                 else to_dict(value, recursive, classkey))
            except AttributeError:
                pass
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data

    if not recursive:
        return obj

    if isinstance(obj, dict):
        for k in obj.keys():
            obj[k] = to_dict(obj[k], recursive, classkey)
        return obj
    elif hasattr(obj, "__iter__"):
        return [to_dict(v, recursive, classkey) for v in obj]

    return obj


def _extract_name_from_url(url):
    """Get object name from fully qualified link."""
    return url.split('/')[-1]


def get_collection_name(type_name):
    if type_name == "project":
        return None
    elif type_name.endswith("s"):
        return "%ses" % type_name
    else:
        return "%ss" % type_name


def get_type_kind(type_name):
    return "compute#%s" % type_name


def get_list_kind(type_name):
    return "compute#%sList" % type_name


def get_aggregated_kind(type_name):
    return "compute#%sAggregatedList" % type_name
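apply_template implements a GCE-style "fields" projection: the template string decides which keys survive in the response. A small self-contained demo of the helpers above (Python 2, matching the dct.keys()[0] idiom used in the module):

from gceapi.api import utils

data = {
    "name": "instance-1",
    "status": "RUNNING",
    "items": [{"key": "k1", "value": "v1", "internal": True}],
}
# Keep only "name" and, inside each element of "items", "key" and "value".
print(utils.apply_template("name,items(key,value)", data))
# {'name': 'instance-1', 'items': [{'key': 'k1', 'value': 'v1'}]}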
223
gceapi/api/wsgi.py
Normal file
@ -0,0 +1,223 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob

from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import jsonutils
from gceapi.openstack.common import log as logging
from gceapi import wsgi_ext as openstack_wsgi

LOG = logging.getLogger(__name__)


class JSONDictSerializer(openstack_wsgi.DictSerializer):
    """JSON request body serialization."""

    def serialize(self, data, request):
        params = {'false': False, 'true': True}
        pretty_print = request.params.get("prettyPrint", True)
        if pretty_print in params:
            pretty_print = params[pretty_print]
        ident = None
        if pretty_print:
            ident = 4
        ret = jsonutils.dumps(data,
                              default=jsonutils.to_primitive, indent=ident)
        return ret


class GCEResponse(openstack_wsgi.ResponseObject):
    """GCE Response body serialization."""

    def serialize(self, request, content_type, default_serializers=None):
        if self.serializer:
            serializer = self.serializer
        else:
            _mtype, _serializer = self.get_serializer(content_type,
                                                      default_serializers)
            serializer = _serializer()

        response = webob.Response()
        response.status_int = self.code
        for hdr, value in self._headers.items():
            response.headers[hdr] = value
        response.headers['Content-Type'] = content_type
        if self.obj is not None:
            response.body = serializer.serialize(self.obj, request)

        return response


class GCEFault(webob.exc.HTTPException):
    """Wrap webob.exc.HTTPException to provide API friendly response."""

    def __init__(self, exception):
        """
        Create a Fault for the given webob.exc.exception or gceapi.exception.
        """
        self.wrapped_exc = exception
        for key, value in self.wrapped_exc.headers.items():
            self.wrapped_exc.headers[key] = str(value)


class GCEResourceExceptionHandler(object):
    """Context manager to handle Resource exceptions.

    Used when processing exceptions generated by API implementation
    methods (or their extensions). Converts most exceptions to Fault
    exceptions, with the appropriate logging.
    """

    def __enter__(self):
        return None

    def __exit__(self, ex_type, ex_value, ex_traceback):
        if not ex_value:
            return True

        if isinstance(ex_value, exception.NotAuthorized):
            msg = unicode(ex_value)
            raise GCEFault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.Invalid):
            msg = unicode(ex_value)
            raise GCEFault(exception.ConvertedException(
                code=ex_value.code, explanation=msg))

        # Under python 2.6, TypeError's exception value is actually a string,
        # so test here via ex_type instead:
        # http://bugs.python.org/issue7853
        elif issubclass(ex_type, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_('Exception handling resource: %s') % ex_value,
                      exc_info=exc_info)
            raise GCEFault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, GCEFault):
            LOG.info(_("Fault thrown: %s"), unicode(ex_value))
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
            raise GCEFault(ex_value)
        elif isinstance(ex_value, exception.GceapiException):
            LOG.info(_("Gceapi exception thrown: %s"), unicode(ex_value))
            raise GCEFault(ex_value)
        else:
            msg = unicode(ex_value)
            raise GCEFault(exception.ConvertedException(
                code=500, title=ex_type.__name__, explanation=msg))


class GCEResource(openstack_wsgi.Resource):
    """Common GCE resource response formatter"""

    def __init__(self, *args, **kwargs):
        super(GCEResource, self).__init__(*args, **kwargs)
        self.default_serializers = dict(json=JSONDictSerializer)

    def _check_requested_project(self, project_id, context):
        if (not context or project_id is None
                or (project_id not in [context.project_id,
                                       context.project_name])):
            msg = _("Project '%s' could not be found") % project_id \
                if project_id is not None \
                else _("Project hasn't been provided")

            raise GCEFault(webob.exc.HTTPBadRequest(
                explanation=msg))

    def _process_stack(self, request, action, action_args,
                       content_type, body, accept):
        """Implement the processing stack."""
        method = None
        try:
            # Get the implementing method
            try:
                method = self.get_method(request, action, content_type, body)
            except (AttributeError, TypeError):
                msg = _("There is no such action: %s") % action
                raise GCEFault(webob.exc.HTTPNotFound(
                    explanation=msg))
            except KeyError as ex:
                msg = _("There is no such action: %s") % ex.args[0]
                raise GCEFault(webob.exc.HTTPBadRequest(
                    explanation=msg))
            except exception.MalformedRequestBody:
                msg = _("Malformed request body")
                raise GCEFault(webob.exc.HTTPBadRequest(
                    explanation=msg))

            # Now, deserialize the request body...
            try:
                if content_type:
                    contents = self.deserialize(method, content_type, body)
                else:
                    contents = {}
            except exception.InvalidContentType:
                msg = _("Unsupported Content-Type")
                raise GCEFault(webob.exc.HTTPBadRequest(
                    explanation=msg))
            except exception.MalformedRequestBody:
                msg = _("Malformed request body")
                raise GCEFault(webob.exc.HTTPBadRequest(
                    explanation=msg))

            # Update the action args
            action_args.update(contents)

            # Check project
            project_id = action_args.pop("project_id", None)
            context = request.environ.get('gceapi.context')
            action_result = self._check_requested_project(project_id, context)

            if action_result is None:
                with GCEResourceExceptionHandler():
                    action_result = self.dispatch(method, request, action_args)

        except GCEFault as ex:
            action_result = ex.wrapped_exc

        response = None
        resp_obj = None
        if (action_result is None or type(action_result) is dict or
                isinstance(action_result, Exception)):
            action_result, result_code = self.controller.process_result(
                request, action, action_result)
            resp_obj = GCEResponse(action_result, code=result_code)
        elif isinstance(action_result, GCEResponse):
            resp_obj = action_result
        else:
            response = action_result

        # Serialize response object
        if resp_obj:
            if method is not None:
                serializers = getattr(method, 'wsgi_serializers', {})
            else:
                serializers = {}
            resp_obj._bind_method_serializers(serializers)
            if method is not None and hasattr(method, 'wsgi_code'):
                resp_obj._default_code = method.wsgi_code
            resp_obj.preserialize(accept, self.default_serializers)
            response = resp_obj.serialize(request, accept,
                                          self.default_serializers)

        try:
            msg_dict = dict(url=request.url, status=response.status_int)
            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
            msg_dict = dict(url=request.url, e=e)
            msg = _("%(url)s returned a fault: %(e)s") % msg_dict

        LOG.info(msg)
        return response
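A quick sketch of how GCEResourceExceptionHandler converts exceptions raised by controller code (here a plain webob HTTP error, which the handler rewraps) into GCEFault:

import webob.exc

from gceapi.api import wsgi as gce_wsgi

try:
    with gce_wsgi.GCEResourceExceptionHandler():
        raise webob.exc.HTTPNotFound()
except gce_wsgi.GCEFault as fault:
    # _process_stack catches the fault and feeds wrapped_exc to
    # controller.process_result for GCE-style error serialization.
    print(fault.wrapped_exc.status_int)  # 404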
68
gceapi/api/zone_api.py
Normal file
@ -0,0 +1,68 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import base_api
from gceapi.api import clients
from gceapi.api import scopes
from gceapi import exception


class API(base_api.API):
    """GCE Zones API."""

    KIND = "zone"
    COMPUTE_SERVICE = "nova-compute"

    def _get_type(self):
        return self.KIND

    def get_item(self, context, name, scope=None):
        zones = self.get_items(context)
        for zone in zones:
            if zone["name"] == name:
                return zone
        raise exception.NotFound

    def get_items(self, context, scope=None):
        client = clients.nova(context)
        try:
            nova_zones = client.availability_zones.list()
        except clients.novaclient.exceptions.Forbidden as e:
            try:
                nova_zones = client.availability_zones.list(detailed=False)
            except Exception:
                raise e

        filtered_zones = list()
        for zone in nova_zones:
            if not zone.hosts:
                filtered_zones.append(zone)
                continue
            for host in zone.hosts:
                if self.COMPUTE_SERVICE in zone.hosts[host]:
                    filtered_zones.append(zone)
                    break
        zones = list()
        for zone in filtered_zones:
            zones.append({
                "name": zone.zoneName,
                "status": "UP" if zone.zoneState["available"] else "DOWN",
                "hosts": [host for host in zone.hosts]
                         if zone.hosts else list()
            })
        return zones

    def get_items_as_scopes(self, context):
        return [scopes.ZoneScope(zone["name"])
                for zone in self.get_items(context)]
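The dictionaries returned by get_items above have a fixed shape; a hypothetical one-zone deployment would produce something like the following (zone and host names depend entirely on the deployment):

[{"name": "nova",
  "status": "UP",
  "hosts": ["compute-host-1"]}]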
38
gceapi/api/zones.py
Normal file
@ -0,0 +1,38 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.api import common as gce_common
from gceapi.api import region_api
from gceapi.api import wsgi as gce_wsgi
from gceapi.api import zone_api


class Controller(gce_common.Controller):
    """GCE Zones controller."""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(zone_api.API(), *args, **kwargs)

    def format_item(self, request, zone, scope):
        result_dict = {
            "name": zone["name"],
            "status": zone["status"],
            "region": region_api.API().get_items(None)[0]["name"],
        }

        return self._format_item(request, result_dict, scope)


def create_resource():
    return gce_wsgi.GCEResource(Controller())
142
gceapi/auth.py
Normal file
@ -0,0 +1,142 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.

"""

from oslo.config import cfg
import webob.dec
import webob.exc

from gceapi import context
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import jsonutils
from gceapi.openstack.common import log as logging
from gceapi import wsgi


auth_opts = [
    cfg.BoolOpt('api_rate_limit',
                default=False,
                help='whether to use per-user rate limiting for the api.'),
    cfg.StrOpt('auth_strategy',
               default='noauth',
               help='The strategy to use for auth: noauth or keystone.'),
    cfg.BoolOpt('use_forwarded_for',
                default=False,
                help='Treat X-Forwarded-For as the canonical remote address. '
                     'Only enable this if you have a sanitizing proxy.'),
]

CONF = cfg.CONF
CONF.register_opts(auth_opts)

LOG = logging.getLogger(__name__)


def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    pipeline = local_conf[CONF.auth_strategy]
    if not CONF.api_rate_limit:
        limit_name = CONF.auth_strategy + '_nolimit'
        pipeline = local_conf.get(limit_name, pipeline)
    pipeline = pipeline.split()
    filters = [loader.get_filter(n) for n in pipeline[:-1]]
    app = loader.get_app(pipeline[-1])
    filters.reverse()
    for filter_func in filters:
        app = filter_func(app)
    return app


class InjectContext(wsgi.Middleware):
    """Add a 'gceapi.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        req.environ['gceapi.context'] = self.context
        return self.application


class GceapiKeystoneContext(wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()

        roles = self._get_roles(req)

        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = req.headers['X_TENANT']
        project_name = req.headers.get('X_TENANT_NAME')
        user_name = req.headers.get('X_USER_NAME')

        # Get the auth token
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))

        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        service_catalog = None
        if req.headers.get('X_SERVICE_CATALOG') is not None:
            try:
                catalog_header = req.headers.get('X_SERVICE_CATALOG')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     user_name=user_name,
                                     project_name=project_name,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address,
                                     service_catalog=service_catalog)

        req.environ['gceapi.context'] = ctx
        return self.application

    def _get_roles(self, req):
        """Get the list of roles."""

        if 'X_ROLES' in req.headers:
            roles = req.headers.get('X_ROLES', '')
        else:
            # Fallback to deprecated role header:
            roles = req.headers.get('X_ROLE', '')
            if roles:
                LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
                           "header"))
        return [r.strip() for r in roles.split(',')]
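GceapiKeystoneContext only reads a handful of headers set by the keystone auth_token middleware. A sketch of the header-to-context mapping, with a request built by hand and example values:

import webob

req = webob.Request.blank("/compute/v1beta15/projects/demo-project")
req.headers["X_USER_ID"] = "user-1"
req.headers["X_TENANT_ID"] = "demo-project"
req.headers["X_ROLES"] = "admin,member"
req.headers["X_AUTH_TOKEN"] = "token-1"
# Passed through GceapiKeystoneContext, this request would get a
# RequestContext with user_id="user-1", project_id="demo-project" and
# roles=["admin", "member"] stored in req.environ['gceapi.context'].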
19
gceapi/cmd/__init__.py
Normal file
@ -0,0 +1,19 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gceapi.openstack.common import gettextutils
gettextutils.install('gceapi')
51
gceapi/cmd/api.py
Executable file
@ -0,0 +1,51 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Gceapi API Server
"""

import eventlet
import sys

eventlet.patcher.monkey_patch(os=False)

from oslo.config import cfg

from gceapi import config
from gceapi.openstack.common import log as logging
from gceapi import service

CONF = cfg.CONF
CONF.import_opt('use_ssl', 'gceapi.service')


def main():
    config.parse_args(sys.argv)
    logging.setup('gceapi')

    server = service.WSGIService(
        'gce', use_ssl=CONF.use_ssl, max_url_len=16384)
    service.serve(server)
    service.wait()


if __name__ == '__main__':
    main()
75
gceapi/cmd/manage.py
Normal file
@ -0,0 +1,75 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
CLI interface for GCE API management.
"""

import sys

from oslo.config import cfg

from gceapi.db import migration
from gceapi.openstack.common import log
from gceapi import version


CONF = cfg.CONF


def do_db_version():
    """Print database's current migration level."""
    print(migration.db_version())


def do_db_sync():
    """
    Place a database under migration control and upgrade,
    creating first if necessary.
    """
    migration.db_sync(CONF.command.version)


def add_command_parsers(subparsers):
    parser = subparsers.add_parser('db_version')
    parser.set_defaults(func=do_db_version)

    parser = subparsers.add_parser('db_sync')
    parser.set_defaults(func=do_db_sync)
    parser.add_argument('version', nargs='?')
    parser.add_argument('current_version', nargs='?')


command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)


def main():
    CONF.register_cli_opt(command_opt)
    try:
        default_config_files = cfg.find_config_files('gceapi', 'gceapi-engine')
        CONF(sys.argv[1:], project='gceapi', prog='gceapi-manage',
             version=version.version_info.version_string(),
             default_config_files=default_config_files)
        log.setup("gceapi")
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)

    try:
        CONF.command.func()
    except Exception as e:
        sys.exit("ERROR: %s" % e)
gceapi/config.py
Normal file
35
gceapi/config.py
Normal file
@ -0,0 +1,35 @@
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from gceapi.openstack.common.db.sqlalchemy import session as db_session
from gceapi import paths
from gceapi import version

_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')


def parse_args(argv, default_config_files=None):
    db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
                            sqlite_db='gceapi.sqlite')
    cfg.CONF(argv[1:],
             project='gceapi',
             version=version.version_string(),
             default_config_files=default_config_files)
gceapi/context.py
Normal file
162
gceapi/context.py
Normal file
@ -0,0 +1,162 @@
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""RequestContext: context for requests that persist through all of gceapi."""

import uuid

from gceapi import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import local
from gceapi.openstack.common import log as logging
from gceapi.openstack.common import timeutils


LOG = logging.getLogger(__name__)


def generate_request_id():
    return 'req-' + str(uuid.uuid4())


class RequestContext(object):
    """Security context and request information.

    Represents the user taking a given action within the system.

    """

    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 user_name=None, project_name=None,
                 service_catalog=None, **kwargs):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(_('Arguments dropped when creating context: %s') %
                     str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, basestring):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        self.service_catalog = service_catalog

        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        #if self.is_admin is None:
        #    self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
        self.operation = None
        self.operation_start_time = None
        self.operation_get_progress_method = None
        self.operation_item_id = None

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def update_store(self):
        local.store.context = self

    def to_dict(self):
        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'user_name': self.user_name,
                'service_catalog': self.service_catalog,
                'project_name': self.project_name,
                'tenant': self.tenant,
                'user': self.user}

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    # NOTE(sirp): the openstack/common version of RequestContext uses
    # tenant/user whereas the gceapi version uses project_id/user_id. We need
    # this shim in order to use context-aware code from openstack/common, like
    # logging, until we make the switch to using openstack/common's version of
    # RequestContext.
    @property
    def tenant(self):
        return self.project_id

    @property
    def user(self):
        return self.user_id


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def require_context(ctxt):
    """Raise exception.NotAuthorized() if context is not a user or an
    admin context.
    """
    if not ctxt.is_admin and not is_user_context(ctxt):
        raise exception.NotAuthorized()
|
raise exception.NotAuthorized()
|
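A short usage sketch (the values are illustrative, not part of this commit): a context is built per API request and can be round-tripped through to_dict()/from_dict(), e.g. when handed over RPC::

    from gceapi import context

    ctxt = context.RequestContext('fake-user', 'fake-project',
                                  remote_address='127.0.0.1')
    payload = ctxt.to_dict()
    restored = context.RequestContext.from_dict(payload)
    # The tenant/user shims mirror project_id/user_id for openstack/common.
    assert restored.tenant == 'fake-project'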
19 gceapi/db/__init__.py Normal file
@@ -0,0 +1,19 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
DB abstraction for Gceapi
"""

from gceapi.db.api import *
58 gceapi/db/api.py Normal file
@@ -0,0 +1,58 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Defines interface for DB access.

Functions in this module are imported into the gceapi.db namespace. Call these
functions from gceapi.db namespace, not the gceapi.db.api namespace.

**Related Flags**

:backend:  string to look up in the list of LazyPluggable backends.
           `sqlalchemy` is the only supported backend right now.

:connection:  string specifying the sqlalchemy connection to use, like:
              `sqlite:///var/lib/gceapi/gceapi.sqlite`.

"""

from gceapi.openstack.common.db import api as db_api


_BACKEND_MAPPING = {'sqlalchemy': 'gceapi.db.sqlalchemy.api'}
IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)


def add_item(context, kind, data):
    IMPL.add_item(context, kind, data)


def delete_item(context, kind, item_id):
    IMPL.delete_item(context, kind, item_id)


def update_item(context, kind, item):
    IMPL.update_item(context, kind, item)


def get_items(context, kind):
    return IMPL.get_items(context, kind)


def get_item_by_id(context, kind, item_id):
    return IMPL.get_item_by_id(context, kind, item_id)


def get_item_by_name(context, kind, name):
    return IMPL.get_item_by_name(context, kind, name)
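For illustration, callers go through the gceapi.db namespace as the docstring above directs; the 'instance' kind and the data values here are made up::

    from gceapi import db

    db.add_item(ctxt, 'instance', {'id': 'i-1', 'name': 'vm-1'})
    items = db.get_items(ctxt, 'instance')
    item = db.get_item_by_name(ctxt, 'instance', 'vm-1')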
73 gceapi/db/migration.py Normal file
@@ -0,0 +1,73 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database setup and migration commands."""

from oslo.config import cfg

from gceapi import exception
from gceapi.openstack.common.gettextutils import _

CONF = cfg.CONF


class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value."""

    def __init__(self, pivot, config_group=None, **backends):
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None
        self.__config_group = config_group

    def __get_backend(self):
        if not self.__backend:
            if self.__config_group is None:
                backend_name = CONF[self.__pivot]
            else:
                backend_name = CONF[self.__config_group][self.__pivot]
            if backend_name not in self.__backends:
                msg = _('Invalid backend: %s') % backend_name
                raise exception.GceapiException(msg)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name, None, None, fromlist)
        return self.__backend

    def __getattr__(self, key):
        backend = self.__get_backend()
        return getattr(backend, key)

IMPL = LazyPluggable('backend',
                     config_group='database',
                     sqlalchemy='gceapi.db.sqlalchemy.migration')

INIT_VERSION = 0


def db_sync(version=None):
    """Migrate the database to `version` or the most recent version."""
    return IMPL.db_sync(INIT_VERSION, version=version)


def db_version():
    """Display the current database version."""
    return IMPL.db_version(INIT_VERSION)
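LazyPluggable defers importing the backend module until the first attribute access, so merely importing this module does not pull in the sqlalchemy backend. A minimal sketch of driving a schema sync through it (illustrative only)::

    from gceapi.db import migration

    migration.db_sync()            # first IMPL access imports the backend
    print(migration.db_version())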
13 gceapi/db/sqlalchemy/__init__.py Normal file
@@ -0,0 +1,13 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
133 gceapi/db/sqlalchemy/api.py Normal file
@@ -0,0 +1,133 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of SQLAlchemy backend."""

import ast
import functools
import sys

from oslo.config import cfg

import gceapi.context
from gceapi.db.sqlalchemy import models
from gceapi.openstack.common.db.sqlalchemy import session as db_session

CONF = cfg.CONF
CONF.import_opt('connection',
                'gceapi.openstack.common.db.sqlalchemy.session',
                group='database')

get_session = db_session.get_session


def get_backend():
    """The backend is this module itself."""
    return sys.modules[__name__]


def require_context(f):
    """Decorator to require *any* user or admin context.

    The first argument to the wrapped function must be the context.
    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        gceapi.context.require_context(args[0])
        return f(*args, **kwargs)
    return wrapper


def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param session: if present, the session to use
    """
    session = kwargs.get('session') or get_session()

    return session.query(model, *args).\
        filter_by(project_id=context.project_id)


@require_context
def add_item(context, kind, data):
    item_ref = models.Item()
    item_ref.update({
        "project_id": context.project_id,
        "kind": kind,
    })
    item_ref.update(_pack_item_data(data))
    item_ref.save()


@require_context
def delete_item(context, kind, item_id):
    model_query(context, models.Item).\
        filter_by(kind=kind,
                  id=item_id).\
        delete()


@require_context
def update_item(context, kind, item):
    item_ref = model_query(context, models.Item).\
        filter_by(kind=kind,
                  id=item["id"]).\
        one()
    item_ref.update(_pack_item_data(item))
    item_ref.save()


@require_context
def get_items(context, kind):
    return [_unpack_item_data(item)
            for item in model_query(context, models.Item).
            filter_by(kind=kind).
            all()]


@require_context
def get_item_by_id(context, kind, item_id):
    return _unpack_item_data(model_query(context, models.Item).
                             filter_by(kind=kind,
                                       id=item_id).
                             first())


@require_context
def get_item_by_name(context, kind, name):
    return _unpack_item_data(model_query(context, models.Item).
                             filter_by(kind=kind,
                                       name=name).
                             first())


def _pack_item_data(item_data):
    return {
        "id": item_data.pop("id"),
        "name": item_data.pop("name", None),
        "data": str(item_data),
    }


def _unpack_item_data(item_ref):
    if item_ref is None:
        return None
    data = ast.literal_eval(item_ref.data)
    data["id"] = item_ref.id
    if item_ref.name is not None:
        data["name"] = item_ref.name
    return data
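Note how the pair _pack_item_data()/_unpack_item_data() flattens every field other than id and name into the str() of a dict, which ast.literal_eval() later parses back, so the stored values must be Python literals. A round-trip sketch with made-up values::

    data = {"id": "i-1", "name": "vm-1", "flavor": "m1.small"}
    packed = _pack_item_data(dict(data))
    # packed == {"id": "i-1", "name": "vm-1",
    #            "data": "{'flavor': 'm1.small'}"}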
4 gceapi/db/sqlalchemy/migrate_repo/README Normal file
@@ -0,0 +1,4 @@
This is a database migration repository.

More information at
http://code.google.com/p/sqlalchemy-migrate/
13 gceapi/db/sqlalchemy/migrate_repo/__init__.py Normal file
@@ -0,0 +1,13 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
19 gceapi/db/sqlalchemy/migrate_repo/manage.py Normal file
@@ -0,0 +1,19 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate.versioning.shell import main


if __name__ == '__main__':
    main(debug='False', repository='.')
20 gceapi/db/sqlalchemy/migrate_repo/migrate.cfg Normal file
@@ -0,0 +1,20 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=gceapi

# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version

# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
38 gceapi/db/sqlalchemy/migrate_repo/versions/001_icehouse.py Normal file
@@ -0,0 +1,38 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column, Index, MetaData, PrimaryKeyConstraint
from sqlalchemy import String, Table, Text


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    items = Table('items', meta,
        Column("id", String(length=255)),
        Column("project_id", String(length=255)),
        Column("kind", String(length=50)),
        Column("name", String(length=63)),
        Column("data", Text()),
        PrimaryKeyConstraint('kind', 'id'),
        Index('items_project_kind_name_idx', 'project_id', 'kind', 'name'),
        mysql_engine="InnoDB",
        mysql_charset="utf8"
    )
    items.create()


def downgrade(migrate_engine):
    raise NotImplementedError("Downgrade from Icehouse is unsupported.")
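A sketch of applying this initial migration to a scratch database with the sqlalchemy-migrate API (the database URL and repository path are illustrative)::

    from migrate.versioning import api as versioning_api

    repo = 'gceapi/db/sqlalchemy/migrate_repo'
    url = 'sqlite:///test_gceapi.sqlite'
    versioning_api.version_control(url, repo, version=0)
    versioning_api.upgrade(url, repo)  # runs upgrade() above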
13 gceapi/db/sqlalchemy/migrate_repo/versions/__init__.py Normal file
@@ -0,0 +1,13 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
30 gceapi/db/sqlalchemy/migration.py Normal file
@@ -0,0 +1,30 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from gceapi.openstack.common.db.sqlalchemy import migration


def db_sync(init_version, version=None):
    return migration.db_sync(_get_repo_path(), version, init_version)


def db_version(init_version):
    return migration.db_version(_get_repo_path(), init_version)


def _get_repo_path():
    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
37 gceapi/db/sqlalchemy/models.py Normal file
@@ -0,0 +1,37 @@
# Copyright 2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
SQLAlchemy models for gceapi data.
"""

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Index, PrimaryKeyConstraint, String, Text

from gceapi.openstack.common.db.sqlalchemy import models

BASE = declarative_base()


class Item(BASE, models.ModelBase):
    __tablename__ = 'items'
    __table_args__ = (
        PrimaryKeyConstraint('kind', 'id'),
        Index('items_project_kind_name_idx', 'project_id', 'kind', 'name'),
    )
    id = Column(String(length=255))
    project_id = Column(String(length=255))
    kind = Column(String(length=50))
    name = Column(String(length=63))
    data = Column(Text())
1423 gceapi/exception.py Normal file
File diff suppressed because it is too large
0 gceapi/openstack/__init__.py Normal file
0 gceapi/openstack/common/__init__.py Normal file
0 gceapi/openstack/common/db/__init__.py Normal file
57 gceapi/openstack/common/db/api.py Normal file
@@ -0,0 +1,57 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multiple DB API backend support.

Supported configuration options:

The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.

A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""

from oslo.config import cfg

from gceapi.openstack.common import importutils


db_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')


class DBAPI(object):
    def __init__(self, backend_mapping=None):
        if backend_mapping is None:
            backend_mapping = {}
        backend_name = CONF.database.backend
        # Import the untranslated name if we don't have a
        # mapping.
        backend_path = backend_mapping.get(backend_name, backend_name)
        backend_mod = importutils.import_module(backend_path)
        self.__backend = backend_mod.get_backend()

    def __getattr__(self, key):
        return getattr(self.__backend, key)
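For illustration, DBAPI resolves a short backend name through the mapping, imports that module, and then proxies every attribute lookup to it; the mapping shown below is the one gceapi/db/api.py actually passes::

    from gceapi.openstack.common.db import api as db_api

    IMPL = db_api.DBAPI(
        backend_mapping={'sqlalchemy': 'gceapi.db.sqlalchemy.api'})
    # Delegates to gceapi.db.sqlalchemy.api.get_items via __getattr__;
    # the backend module must expose get_backend().
    items = IMPL.get_items(ctxt, 'instance')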
54 gceapi/openstack/common/db/exception.py Normal file
@@ -0,0 +1,54 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

from gceapi.openstack.common.gettextutils import _


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific exception."""
    def __init__(self, columns=[], inner_exception=None):
        self.columns = columns
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


class DbMigrationError(DBError):
    """Wraps migration specific exception."""
    def __init__(self, message=None):
        super(DbMigrationError, self).__init__(str(message))


class DBConnectionError(DBError):
    """Wraps connection specific exception."""
    pass
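A hedged sketch of handling these wrappers at a call site (the save() call and the handler are illustrative, not part of this commit)::

    from gceapi.openstack.common.db import exception as db_exc

    try:
        item_ref.save()
    except db_exc.DBDuplicateEntry as e:
        # e.columns lists the conflicting columns when the backend
        # reports them.
        handle_duplicate(e.columns)  # hypothetical handler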
0 gceapi/openstack/common/db/sqlalchemy/__init__.py Normal file
265 gceapi/openstack/common/db/sqlalchemy/migration.py Normal file
@@ -0,0 +1,265 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Based on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import re

from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint

from gceapi.openstack.common.db import exception
from gceapi.openstack.common.db.sqlalchemy import session as db_session
from gceapi.openstack.common.gettextutils import _


get_engine = db_session.get_engine


def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    """

    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]


def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    """

    table_name = self.preparer.format_table(table)

    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()

    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])

    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()

    insertion_string = self._modify_table(table, column, delta)

    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()


def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error

    """

    self.recreate_table(p[0].table, omit_uniques=[p[0].name])


def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

    - information about unique constraints of a table
      is not retrieved. So if you have a table with one
      unique constraint and a migration adding another one
      you will end up with a table that has only the
      latter unique constraint, and the former will be lost

    - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.

    """

    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints

    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)


def db_sync(abs_path, version=None, init_version=0):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    _db_schema_sanity_check()
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository,
                                        version)


def _db_schema_sanity_check():
    engine = get_engine()
    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
                                                        engine.url.database)]
        if len(table_names) > 0:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))


def db_version(abs_path, init_version):
    """Show the current version of the repository.

    :param abs_path:     Absolute path to migrate repository
    :param init_version: Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(abs_path, init_version)
            return versioning_api.db_version(get_engine(), repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))


def db_version_control(abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    versioning_api.version_control(get_engine(), repository, version)
    return version


def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    """
    if not os.path.exists(abs_path):
        raise exception.DbMigrationError("Path %s not found" % abs_path)
    return Repository(abs_path)
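As the patch_migrate() docstring explains, the monkey-patch must be installed before SQLite migrations that touch unique constraints run; a sketch of the expected call order (the repository path is illustrative)::

    from gceapi.openstack.common.db.sqlalchemy import migration

    migration.patch_migrate()  # patch sqlalchemy-migrate for SQLite
    migration.db_sync('/path/to/migrate_repo', init_version=0)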
117 gceapi/openstack/common/db/sqlalchemy/models.py Normal file
@@ -0,0 +1,117 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""

import six

from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper

from gceapi.openstack.common.db.sqlalchemy import session as sa
from gceapi.openstack.common import timeutils


class ModelBase(object):
    """Base class for models."""
    __table_initialized__ = False

    def save(self, session=None):
        """Save this object."""
        if not session:
            session = sa.get_session()
        # NOTE(boris-42): This part of code should look like:
        #                   session.add(self)
        #                   session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create transaction
        #                 explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    @property
    def _extra_keys(self):
        """Specifies custom fields

        Subclasses can override this property to return a list
        of custom fields that should be included in their dict
        representation.

        For reference check tests/db/sqlalchemy/test_models.py
        """
        return []

    def __iter__(self):
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns. An example would be the 'name'
        # property for an Instance.
        columns.extend(self._extra_keys)
        self._i = iter(columns)
        return self

    def next(self):
        n = six.advance_iterator(self._i)
        return n, getattr(self, n)

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in six.iteritems(values):
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                      if not k[0] == '_'])
        local.update(joined)
        return six.iteritems(local)


class TimestampMixin(object):
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())


class SoftDeleteMixin(object):
    deleted_at = Column(DateTime)
    deleted = Column(Integer, default=0)

    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
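The mixins compose with ModelBase in a declarative model; a sketch with a hypothetical Foo table (gceapi's own Item model uses ModelBase alone)::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    from gceapi.openstack.common.db.sqlalchemy import models

    BASE = declarative_base()


    class Foo(BASE, models.ModelBase, models.TimestampMixin,
              models.SoftDeleteMixin):
        __tablename__ = 'foos'
        id = Column(Integer, primary_key=True)
        name = Column(String(255))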
187 gceapi/openstack/common/db/sqlalchemy/provision.py Normal file
@@ -0,0 +1,187 @@
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Provision test environment for specific DB backends"""

import argparse
import os
import random
import string

from six import moves
import sqlalchemy

from gceapi.openstack.common.db import exception as exc


SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')


def _gen_credentials(*names):
    """Generate credentials."""
    auth_dict = {}
    for name in names:
        val = ''.join(random.choice(string.ascii_lowercase)
                      for i in moves.range(10))
        auth_dict[name] = val
    return auth_dict


def _get_engine(uri=SQL_CONNECTION):
    """Engine creation

    By default the uri is SQL_CONNECTION which is admin credentials.
    Call the function without arguments to get admin connection. Admin
    connection required to create temporary user and database for each
    particular test. Otherwise use existing connection to recreate connection
    to the temporary database.
    """
    return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)


def _execute_sql(engine, sql, driver):
    """Initialize connection, execute sql query and close it."""
    try:
        with engine.connect() as conn:
            if driver == 'postgresql':
                conn.connection.set_isolation_level(0)
            for s in sql:
                conn.execute(s)
    except sqlalchemy.exc.OperationalError:
        msg = ('%s does not match database admin '
               'credentials or database does not exist.')
        raise exc.DBConnectionError(msg % SQL_CONNECTION)


def create_database(engine):
    """Provide temporary user and database for each particular test."""
    driver = engine.name

    auth = _gen_credentials('database', 'user', 'passwd')

    sqls = {
        'mysql': [
            "drop database if exists %(database)s;",
            "grant all on %(database)s.* to '%(user)s'@'localhost'"
            " identified by '%(passwd)s';",
            "create database %(database)s;",
        ],
        'postgresql': [
            "drop database if exists %(database)s;",
            "drop user if exists %(user)s;",
            "create user %(user)s with password '%(passwd)s';",
            "create database %(database)s owner %(user)s;",
        ]
    }

    if driver == 'sqlite':
        return 'sqlite:////tmp/%s' % auth['database']

    try:
        sql_rows = sqls[driver]
    except KeyError:
        raise ValueError('Unsupported RDBMS %s' % driver)
    sql_query = map(lambda x: x % auth, sql_rows)

    _execute_sql(engine, sql_query, driver)

    params = auth.copy()
    params['backend'] = driver
    return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params


def drop_database(engine, current_uri):
    """Drop temporary database and user after each particular test."""
    engine = _get_engine(current_uri)
    admin_engine = _get_engine()
    driver = engine.name
    auth = {'database': engine.url.database, 'user': engine.url.username}

    if driver == 'sqlite':
        try:
            os.remove(auth['database'])
        except OSError:
            pass
        return

    sqls = {
        'mysql': [
            "drop database if exists %(database)s;",
            "drop user '%(user)s'@'localhost';",
        ],
        'postgresql': [
            "drop database if exists %(database)s;",
            "drop user if exists %(user)s;",
        ]
    }

    try:
        sql_rows = sqls[driver]
    except KeyError:
        raise ValueError('Unsupported RDBMS %s' % driver)
    sql_query = map(lambda x: x % auth, sql_rows)

    _execute_sql(admin_engine, sql_query, driver)


def main():
    """Controller to handle commands

    ::create: Create test user and database with random names.
    ::drop: Drop user and database created by previous command.
    """
    parser = argparse.ArgumentParser(
        description='Controller to handle database creation and dropping'
        ' commands.',
        epilog='Under normal circumstances is not used directly.'
               ' Used in .testr.conf to automate test database creation'
               ' and dropping processes.')
    subparsers = parser.add_subparsers(
        help='Subcommands to manipulate temporary test databases.')

    create = subparsers.add_parser(
        'create',
        help='Create temporary test '
             'databases and users.')
    create.set_defaults(which='create')
    create.add_argument(
        'instances_count',
        type=int,
        help='Number of databases to create.')

    drop = subparsers.add_parser(
        'drop',
        help='Drop temporary test databases and users.')
    drop.set_defaults(which='drop')
    drop.add_argument(
        'instances',
        nargs='+',
        help='List of databases uri to be dropped.')

    args = parser.parse_args()

    engine = _get_engine()
    which = args.which

    if which == "create":
        for i in range(int(args.instances_count)):
            print(create_database(engine))
    elif which == "drop":
        for db in args.instances:
            drop_database(engine, db)


if __name__ == "__main__":
    main()
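Normally the create/drop subcommands of main() are invoked from .testr.conf, but the same flow can be sketched programmatically (the test-runner call is hypothetical)::

    from gceapi.openstack.common.db.sqlalchemy import provision

    admin = provision._get_engine()         # admin engine from env var
    uri = provision.create_database(admin)  # temporary DB; returns its URI
    try:
        run_tests_against(uri)              # hypothetical test runner
    finally:
        provision.drop_database(admin, uri)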
867 gceapi/openstack/common/db/sqlalchemy/session.py Normal file
@@ -0,0 +1,867 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Session Handling for SQLAlchemy backend.

Initializing:

* Call set_defaults with at least the following kwargs:
    sql_connection, sqlite_db

  Example::

    session.set_defaults(
        sql_connection="sqlite:///var/lib/gceapi/sqlite.db",
        sqlite_db="/var/lib/gceapi/sqlite.db")

Recommended ways to use sessions within this framework:

* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
  model_query() will implicitly use a session when called without one
  supplied. This is the ideal situation because it will allow queries
  to be automatically retried if the database connection is interrupted.

    Note: Automatic retry will be enabled in a future patch.

  It is generally fine to issue several queries in a row like this. Even though
  they may be run in separate transactions and/or separate sessions, each one
  will see the data from the prior calls. If needed, undo- or rollback-like
  functionality should be handled at a logical level. For an example, look at
  the code around quotas and reservation_rollback().

  Examples::

    def get_foo(context, foo):
        return (model_query(context, models.Foo).
                filter_by(foo=foo).
                first())

    def update_foo(context, id, newfoo):
        (model_query(context, models.Foo).
                filter_by(id=id).
                update({'foo': newfoo}))

    def create_foo(context, values):
        foo_ref = models.Foo()
        foo_ref.update(values)
        foo_ref.save()
        return foo_ref


* Within the scope of a single method, keep all the reads and writes within
  the context managed by a single session. In this way, the session's __exit__
  handler will take care of calling flush() and commit() for you.
  If using this approach, you should not explicitly call flush() or commit().
  Any error within the context of the session will cause the session to emit
  a ROLLBACK. Database errors like IntegrityError will be raised in the
  session's __exit__ handler, and any try/except within the context managed
  by the session will not be triggered. Catching other, non-database errors in
  the session will not trigger the ROLLBACK, so exception handlers should
  always be outside the session, unless the developer wants to do a partial
  commit on purpose. If the connection is dropped before this is possible,
  the database will implicitly roll back the transaction.

    Note: statements in the session scope will not be automatically retried.

  If you create models within the session, they need to be added, but you
  do not need to call model.save():

  ::

    def create_many_foo(context, foos):
        session = get_session()
        with session.begin():
            for foo in foos:
                foo_ref = models.Foo()
                foo_ref.update(foo)
                session.add(foo_ref)

    def update_bar(context, foo_id, newbar):
        session = get_session()
        with session.begin():
            foo_ref = (model_query(context, models.Foo, session).
                       filter_by(id=foo_id).
                       first())
            (model_query(context, models.Bar, session).
                filter_by(id=foo_ref['bar_id']).
                update({'bar': newbar}))

  Note: update_bar is a trivially simple example of using "with session.begin".
  Whereas create_many_foo is a good example of when a transaction is needed,
  it is always best to use as few queries as possible. The two queries in
  update_bar can be better expressed using a single query which avoids
  the need for an explicit transaction. It can be expressed like so::

    def update_bar(context, foo_id, newbar):
        subq = (model_query(context, models.Foo.id).
                filter_by(id=foo_id).
                limit(1).
                subquery())
        (model_query(context, models.Bar).
            filter_by(id=subq.as_scalar()).
            update({'bar': newbar}))

  For reference, this emits approximately the following SQL statement::

    UPDATE bar SET bar = ${newbar}
        WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);

  Note: create_duplicate_foo is a trivially simple example of catching an
  exception while using "with session.begin". Here we create two duplicate
  instances with the same primary key; the exception must be caught outside
  of the context managed by a single session::

    def create_duplicate_foo(context):
        foo1 = models.Foo()
        foo2 = models.Foo()
        foo1.id = foo2.id = 1
        session = get_session()
        try:
            with session.begin():
                session.add(foo1)
                session.add(foo2)
        except exception.DBDuplicateEntry as e:
            handle_error(e)

* Passing an active session between methods. Sessions should only be passed
  to private methods. The private method must use a subtransaction; otherwise
  SQLAlchemy will throw an error when you call session.begin() on an existing
  transaction. Public methods should not accept a session parameter and should
  not be involved in sessions within the caller's scope.

  Note that this incurs more overhead in SQLAlchemy than the above means
  due to nesting transactions, and it is not possible to implicitly retry
  failed database operations when using this approach.

  This also makes code somewhat more difficult to read and debug, because a
  single database transaction spans more than one method. Error handling
  becomes less clear in this situation. When this is needed for code clarity,
  it should be clearly documented.

  ::

    def myfunc(foo):
        session = get_session()
        with session.begin():
            # do some database things
            bar = _private_func(foo, session)
        return bar

    def _private_func(foo, session=None):
        if not session:
            session = get_session()
        with session.begin(subtransactions=True):
            # do some other database things
        return bar


There are some things which it is best to avoid:

* Don't keep a transaction open any longer than necessary.

  This means that your "with session.begin()" block should be as short
  as possible, while still containing all the related calls for that
  transaction.

* Avoid "with_lockmode('UPDATE')" when possible.

  In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
  any rows, it will take a gap-lock. This is a form of write-lock on the
  "gap" where no rows exist, and prevents any other writes to that space.
  This can effectively prevent any INSERT into a table by locking the gap
  at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
  has an overly broad WHERE clause, or doesn't properly use an index.

  One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
  number of rows matching a query, and if only one row is returned,
  then issue the SELECT FOR UPDATE.

  The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
  However, this can not be done until the "deleted" columns are removed and
  proper UNIQUE constraints are added to the tables.


Enabling soft deletes:

* To use/enable soft-deletes, the SoftDeleteMixin must be added
  to your model class. For example::

      class NovaBase(models.SoftDeleteMixin, models.ModelBase):
          pass


Efficient use of soft deletes:

* There are two possible ways to mark a record as deleted:
    model.soft_delete() and query.soft_delete().

  The model.soft_delete() method works with a single, already-fetched entry.
  query.soft_delete() makes only one db request for all entries that
  correspond to the query.

* In almost all cases you should use query.soft_delete(). Some examples::

        def soft_delete_bar():
            count = model_query(BarModel).find(some_condition).soft_delete()
            if count == 0:
                raise Exception("0 entries were soft deleted")

        def complex_soft_delete_with_synchronization_bar(session=None):
            if session is None:
                session = get_session()
            with session.begin(subtransactions=True):
                count = (model_query(BarModel).
                         find(some_condition).
                         soft_delete(synchronize_session=True))
                # Here synchronize_session is required, because we
                # don't know what is going on in the outer session.
                if count == 0:
                    raise Exception("0 entries were soft deleted")

* There is only one situation where model.soft_delete() is appropriate: when
  you fetch a single record, work with it, and mark it as deleted in the same
  transaction.

  ::

        def soft_delete_bar_model():
            session = get_session()
            with session.begin():
                bar_ref = model_query(BarModel).find(some_condition).first()
                # Work with bar_ref
                bar_ref.soft_delete(session=session)

  However, if you need to work with all entries that correspond to a query
  and then soft delete them, you should use the query.soft_delete() method::

        def soft_delete_multi_models():
            session = get_session()
            with session.begin():
                query = (model_query(BarModel, session=session).
                         find(some_condition))
                model_refs = query.all()
                # Work with model_refs
                query.soft_delete(synchronize_session=False)
                # synchronize_session=False should be set if there is no
                # outer session and these entries are not used after this.

  When working with many rows, it is very important to use query.soft_delete,
  which issues a single query. Using model.soft_delete(), as in the following
  example, is very inefficient.

  ::

        for bar_ref in bar_refs:
            bar_ref.soft_delete(session=session)
        # This will produce count(bar_refs) db requests.
"""

import functools
import os.path
import re
import time

from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column

from gceapi.openstack.common.db import exception
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging
from gceapi.openstack.common import timeutils

sqlite_db_opts = [
    cfg.StrOpt('sqlite_db',
               default='gceapi.sqlite',
               help='The file name to use with SQLite'),
    cfg.BoolOpt('sqlite_synchronous',
                default=True,
                help='If True, SQLite uses synchronous mode'),
]

database_opts = [
    cfg.StrOpt('connection',
               default='sqlite:///' +
                       os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../', '$sqlite_db')),
               help='The SQLAlchemy connection string used to connect to the '
                    'database',
               secret=True,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_connection',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('connection',
                                                  group='sql'), ]),
    cfg.StrOpt('slave_connection',
               default='',
               secret=True,
               help='The SQLAlchemy connection string used to connect to the '
                    'slave database'),
    cfg.IntOpt('idle_timeout',
               default=3600,
               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('idle_timeout',
                                                  group='sql')],
               help='Timeout before idle sql connections are reaped'),
    cfg.IntOpt('min_pool_size',
               default=1,
               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DATABASE')],
               help='Minimum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_pool_size',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DATABASE')],
               help='Maximum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_retries',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DATABASE')],
               help='Maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('retry_interval',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('reconnect_interval',
                                                  group='DATABASE')],
               help='Interval between retries of opening a sql connection'),
    cfg.IntOpt('max_overflow',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
                                                  group='DATABASE')],
               help='If set, use this value for max_overflow with sqlalchemy'),
    cfg.IntOpt('connection_debug',
               default=0,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
                                                  group='DEFAULT')],
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('connection_trace',
                default=False,
                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
                                                   group='DEFAULT')],
                help='Add python stack traces to SQL as comment strings'),
    cfg.IntOpt('pool_timeout',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
                                                  group='DATABASE')],
               help='If set, use this value for pool_timeout with sqlalchemy'),
]

CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')

LOG = logging.getLogger(__name__)

_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None


def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
                 max_overflow=None, pool_timeout=None):
    """Set defaults for configuration variables."""
    cfg.set_defaults(database_opts,
                     connection=sql_connection)
    cfg.set_defaults(sqlite_db_opts,
                     sqlite_db=sqlite_db)
    # Update the QueuePool defaults
    if max_pool_size is not None:
        cfg.set_defaults(database_opts,
                         max_pool_size=max_pool_size)
    if max_overflow is not None:
        cfg.set_defaults(database_opts,
                         max_overflow=max_overflow)
    if pool_timeout is not None:
        cfg.set_defaults(database_opts,
                         pool_timeout=pool_timeout)


def cleanup():
    global _ENGINE, _MAKER
    global _SLAVE_ENGINE, _SLAVE_MAKER

    if _MAKER:
        _MAKER.close_all()
        _MAKER = None
    if _ENGINE:
        _ENGINE.dispose()
        _ENGINE = None
    if _SLAVE_MAKER:
        _SLAVE_MAKER.close_all()
        _SLAVE_MAKER = None
    if _SLAVE_ENGINE:
        _SLAVE_ENGINE.dispose()
        _SLAVE_ENGINE = None


class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    The foreign key constraints are disabled by default in SQLite,
    so the foreign key constraints will be enabled here for every
    database connection
    """
    def connect(self, dbapi_con, con_record):
        dbapi_con.execute('pragma foreign_keys=ON')


def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
                slave_session=False, mysql_traditional_mode=False):
    """Return a SQLAlchemy session."""
    global _MAKER
    global _SLAVE_MAKER
    maker = _MAKER

    if slave_session:
        maker = _SLAVE_MAKER

    if maker is None:
        engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
                            mysql_traditional_mode=mysql_traditional_mode)
        maker = get_maker(engine, autocommit, expire_on_commit)

        if slave_session:
            _SLAVE_MAKER = maker
        else:
            _MAKER = maker

    session = maker()
    return session
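
# Usage sketch (illustrative, not part of the original module; `MyModel` is
# a hypothetical mapped class):
#
#     session = get_session()
#     with session.begin():
#         session.add(MyModel(name='example'))
#
# The session's __exit__ handler flushes and commits, as described in the
# module docstring above.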

# note(boris-42): In current versions of DB backends unique constraint
#                 violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
#               constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
#               constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
#               'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
#               with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
}
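
# Illustrative matches (derived from the message formats listed above): the
# first sqlite pattern pulls " c1, c2 " out of
#     (IntegrityError) column c1, c2 are not unique
# and the mysql pattern pulls "name_of_our_constraint" out of
#     (IntegrityError) (1062, "Duplicate entry 'x-y' for key
#     'name_of_our_constraint'")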


def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    Raises a DBDuplicateEntry exception if the integrity error wraps a
    unique constraint violation.
    """

    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` is the table name and the columns `c1`,
        #                  `c2` are in the UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]

    if engine_name not in ["mysql", "sqlite", "postgresql"]:
        return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        return

    columns = match.group(1)

    if engine_name == "sqlite":
        columns = columns.strip().split(", ")
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)


# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
#                     'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}


def _raise_if_deadlock_error(operational_error, engine_name):
    """Raise exception on deadlock condition.

    Raise DBDeadlock exception if OperationalError contains a Deadlock
    condition.
    """
    re = _DEADLOCK_RE_DB.get(engine_name)
    if re is None:
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    m = re.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)


def _wrap_db_error(f):
    @functools.wraps(f)
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        # note(boris-42): We should catch unique constraint violation and
        #                 wrap it by our own DBDuplicateEntry exception.
        #                 Unique constraint violation is wrapped by
        #                 IntegrityError.
        except sqla_exc.OperationalError as e:
            _raise_if_deadlock_error(e, get_engine().name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            #                 DBs so we must do this. Also in some tables
            #                 (for example instance_types) there is more than
            #                 one unique constraint. This means we should get
            #                 the names of the columns whose values violate a
            #                 unique constraint from the error message.
            _raise_if_duplicate_entry_error(e, get_engine().name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
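
# _wrap_db_error is applied to the Session methods further below: it turns
# unique constraint violations into DBDuplicateEntry, MySQL deadlocks into
# DBDeadlock, and wraps any other unexpected failure in DBError, while plain
# OperationalErrors are re-raised unchanged.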


def get_engine(sqlite_fk=False, slave_engine=False,
               mysql_traditional_mode=False):
    """Return a SQLAlchemy engine."""
    global _ENGINE
    global _SLAVE_ENGINE
    engine = _ENGINE
    db_uri = CONF.database.connection

    if slave_engine:
        engine = _SLAVE_ENGINE
        db_uri = CONF.database.slave_connection

    if engine is None:
        engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
                               mysql_traditional_mode=mysql_traditional_mode)
        if slave_engine:
            _SLAVE_ENGINE = engine
        else:
            _ENGINE = engine

    return engine


def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    dbapi_conn.execute("PRAGMA synchronous = OFF")


def _add_regexp_listener(dbapi_con, con_record):
    """Add REGEXP function to sqlite connections."""

    def regexp(expr, item):
        reg = re.compile(expr)
        return reg.search(six.text_type(item)) is not None
    dbapi_con.create_function('regexp', 2, regexp)


def _thread_yield(dbapi_con, con_record):
    """Ensure other greenthreads get a chance to be executed.

    If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
    execute instead of time.sleep(0).
    Force a context switch. With common database backends (eg MySQLdb and
    sqlite), there is no implicit yield caused by network I/O since they are
    implemented by C libraries that eventlet cannot monkey patch.
    """
    time.sleep(0)


def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL and DB2 connections are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    cursor = dbapi_conn.cursor()
    try:
        ping_sql = 'select 1'
        if engine.name == 'ibm_db_sa':
            # DB2 requires a table expression
            ping_sql = 'select 1 from (values (1)) AS t1'
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
            msg = _('Database server has gone away: %s') % ex
            LOG.warning(msg)
            raise sqla_exc.DisconnectionError(msg)
        else:
            raise


def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
    """Set engine mode to 'traditional'.

    Required to prevent silent truncates at insert or update operations
    under MySQL. By default MySQL truncates an inserted string that is
    longer than the declared field and issues only a warning, which is
    fraught with data corruption.
    """
    dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")


def _is_db_connection_error(args):
    """Return True if error in connecting to db."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    #               to support Postgres and others.
    # For db2, the error code is -30081 since db2 is still not ready
    conn_err_codes = ('2002', '2003', '2006', '-30081')
    for err_code in conn_err_codes:
        if args.find(err_code) != -1:
            return True
    return False
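
# Illustrative input (an assumed DBAPI message, not taken from the original
# code): a failed MySQL connect typically stringifies to something like
#     (2003, "Can't connect to MySQL server on 'db.example.com'")
# in which the substring search above finds '2003' and returns True.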


def create_engine(sql_connection, sqlite_fk=False,
                  mysql_traditional_mode=False):
    """Return a new SQLAlchemy engine."""
    # NOTE(geekinutah): At this point we could be connecting to the normal
    #                   db handle or the slave db handle. Things like
    #                   _wrap_db_error aren't going to work well if their
    #                   backends don't match. Let's check.
    _assert_matching_drivers()
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if CONF.database.max_pool_size is not None:
            engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow
        if CONF.database.pool_timeout is not None:
            engine_args['pool_timeout'] = CONF.database.pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
        callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', callback)
        if mysql_traditional_mode:
            sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
        else:
            LOG.warning(_("This application has not enabled MySQL traditional"
                          " mode, which means silent data corruption may"
                          " occur. Please encourage the application"
                          " developers to enable this mode."))
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine


class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)
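
# A note on the values used in soft_delete() (an interpretation, not from
# the original code): `deleted` is presumably set to the row's own id rather
# than a boolean so that UNIQUE constraints which include the `deleted`
# column still allow several soft-deleted rows with the same natural key,
# and `updated_at` is assigned to itself so this UPDATE leaves it unchanged.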


class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)
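
# Usage sketch (illustrative): get_maker() is normally reached through
# get_session(), but a maker can also be bound to an explicit engine:
#
#     maker = get_maker(get_engine())
#     session = maker()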


def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for filename, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if filename.endswith('session.py') and method == '_do_query':
                continue
            if filename.endswith('api.py') and method == 'wrapper':
                continue
            if filename.endswith('utils.py') and method == '_inner':
                continue
            if filename.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if filename.endswith('db/api.py'):
                continue
            # only trace inside gceapi
            index = filename.rfind('gceapi')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (filename[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)


def _assert_matching_drivers():
    """Make sure slave handle and normal handle have the same driver."""
    # NOTE(geekinutah): There's no use case for writing to one backend and
    #                   reading from another. Who knows what the future holds?
    if CONF.database.slave_connection == '':
        return

    normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
    slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
    assert normal.drivername == slave.drivername
269 gceapi/openstack/common/db/sqlalchemy/test_migrations.py Normal file
@@ -0,0 +1,269 @@
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import os
import subprocess

import lockfile
from six import moves
import sqlalchemy
import sqlalchemy.exc

from gceapi.openstack.common.db.sqlalchemy import utils
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging
from gceapi.openstack.common.py3kcompat import urlutils
from gceapi.openstack.common import test

LOG = logging.getLogger(__name__)


def _have_mysql(user, passwd, database):
    present = os.environ.get('TEST_MYSQL_PRESENT')
    if present is None:
        return utils.is_backend_avail(backend='mysql',
                                      user=user,
                                      passwd=passwd,
                                      database=database)
    return present.lower() in ('', 'true')


def _have_postgresql(user, passwd, database):
    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
    if present is None:
        return utils.is_backend_avail(backend='postgres',
                                      user=user,
                                      passwd=passwd,
                                      database=database)
    return present.lower() in ('', 'true')


def _set_db_lock(lock_path=None, lock_prefix=None):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                path = lock_path or os.environ.get("GCEAPI_LOCK_PATH")
                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
                with lock:
                    LOG.debug(_('Got lock "%s"') % f.__name__)
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Lock released "%s"') % f.__name__)
        return wrapper
    return decorator
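
# The decorator above serializes database resets across concurrently running
# test processes via a file lock. Note that when no lock_path is given, the
# GCEAPI_LOCK_PATH environment variable must be set, since
# os.path.join(None, ...) would fail otherwise.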


class BaseMigrationTestCase(test.BaseTestCase):
    """Base class for testing of migration utils."""

    def __init__(self, *args, **kwargs):
        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        self.test_databases = {}
        self.migration_api = None

    def setUp(self):
        super(BaseMigrationTestCase, self).setUp()

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = moves.configparser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
            except moves.configparser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(BaseMigrationTestCase, self).tearDown()

    def execute_cmd(self, cmd=None):
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output = process.communicate()[0]
        LOG.debug(output)
        self.assertEqual(0, process.returncode,
                         "Failed to run: %s\n%s" % (cmd, output))

    def _reset_pg(self, conn_pieces):
        (user,
         password,
         database,
         host) = utils.get_db_connection_info(conn_pieces)
        os.environ['PGPASSWORD'] = password
        os.environ['PGUSER'] = user
        # note(boris-42): We must create and drop the database; we can't
        #                 drop the database we are connected to, so for such
        #                 operations there is a special database, template1.
        sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                  " '%(sql)s' -d template1")

        sql = ("drop database if exists %s;") % database
        droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(droptable)

        sql = ("create database %s;") % database
        createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
        self.execute_cmd(createtable)

        os.unsetenv('PGPASSWORD')
        os.unsetenv('PGUSER')

    @_set_db_lock(lock_prefix='migration_tests-')
    def _reset_databases(self):
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = urlutils.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MySQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                (user, password, database, host) = \
                    utils.get_db_connection_info(conn_pieces)
                sql = ("drop database if exists %(db)s; "
                       "create database %(db)s;") % {'db': database}
                cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
                       "-e \"%(sql)s\"") % {'user': user,
                                            'password': password,
                                            'host': host, 'sql': sql}
                self.execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                self._reset_pg(conn_pieces)


class WalkVersionsMixin(object):
    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        # Determine the latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        self.migration_api.version_control(engine, self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))

        LOG.debug('latest version is %s' % self.REPOSITORY.latest)
        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

        for version in versions:
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                downgraded = self._migrate_down(
                    engine, version - 1, with_data=True)
                if downgraded:
                    self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self._migrate_down(engine, version - 1)

                if snake_walk and downgraded:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

    def _migrate_down(self, engine, version, with_data=False):
        try:
            self.migration_api.downgrade(engine, self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False

        self.assertEqual(
            version, self.migration_api.db_version(engine, self.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the
        # 'target' version). So if we have any downgrade checks, they need
        # to be run for the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(engine)

        return True

    def _migrate_up(self, engine, version, with_data=False):
        """Migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)

            self.migration_api.upgrade(engine, self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error("Failed to migrate to version %s on engine %s" %
                      (version, engine))
            raise
548 gceapi/openstack/common/db/sqlalchemy/utils.py Normal file
@@ -0,0 +1,548 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType

from gceapi.openstack.common.gettextutils import _

from gceapi.openstack.common import log as logging
from gceapi.openstack.common import timeutils


LOG = logging.getLogger(__name__)

_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")


def sanitize_db_url(url):
    match = _DBURL_REGEX.match(url)
    if match:
        return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
    return url
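
# Worked example (follows directly from the regex above):
#
#     sanitize_db_url('mysql://user:secret@db.example.com/gceapi')
#
# returns 'mysql://****:****@db.example.com/gceapi', masking only the
# credential portion of the URL.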


class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")


# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is id
        LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
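
# Usage sketch (illustrative; `MyModel` and `last_seen` are hypothetical):
#
#     query = session.query(MyModel)
#     page = paginate_query(query, MyModel, limit=50,
#                           sort_keys=['created_at', 'id'],
#                           marker=last_seen).all()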


def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.
    """
    metadata = MetaData()
    metadata.bind = engine
    return Table(name, metadata, autoload=True)


class InsertFromSelect(UpdateBase):
    """Form the base for `INSERT INTO table (SELECT ... )` statement."""
    def __init__(self, table, select):
        self.table = table
        self.select = select


@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    """Form the `INSERT INTO table (SELECT ... )` statement."""
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))
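
# Usage sketch (illustrative): copy rows between two reflected tables with
# compatible columns using a single INSERT ... SELECT statement:
#
#     src = get_table(migrate_engine, 'bar_backup')
#     dst = get_table(migrate_engine, 'bar')
#     migrate_engine.execute(InsertFromSelect(dst, src.select()))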
|
||||||
|
|
||||||
|
|
||||||
|
class ColumnError(Exception):
|
||||||
|
"""Error raised when no column or an invalid column is found."""
|
||||||
|
|
||||||
|
|
||||||
|
def _get_not_supported_column(col_name_col_instance, column_name):
|
||||||
|
try:
|
||||||
|
column = col_name_col_instance[column_name]
|
||||||
|
except KeyError:
|
||||||
|
msg = _("Please specify column %s in col_name_col_instance "
|
||||||
|
"param. It is required because column has unsupported "
|
||||||
|
"type by sqlite).")
|
||||||
|
raise ColumnError(msg % column_name)
|
||||||
|
|
||||||
|
if not isinstance(column, Column):
|
||||||
|
msg = _("col_name_col_instance param has wrong type of "
|
||||||
|
"column instance for column %s It should be instance "
|
||||||
|
"of sqlalchemy.Column.")
|
||||||
|
raise ColumnError(msg % column_name)
|
||||||
|
return column
|
||||||
|
|
||||||
|
|
||||||
|
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
|
||||||
|
**col_name_col_instance):
|
||||||
|
"""Drop unique constraint from table.
|
||||||
|
|
||||||
|
This method drops UC from table and works for mysql, postgresql and sqlite.
|
||||||
|
In mysql and postgresql we are able to use "alter table" construction.
|
||||||
|
Sqlalchemy doesn't support some sqlite column types and replaces their
|
||||||
|
type with NullType in metadata. We process these columns and replace
|
||||||
|
NullType with the correct column type.
|
||||||
|
|
||||||
|
:param migrate_engine: sqlalchemy engine
|
||||||
|
:param table_name: name of table that contains uniq constraint.
|
||||||
|
:param uc_name: name of uniq constraint that will be dropped.
|
||||||
|
:param columns: columns that are in uniq constraint.
|
||||||
|
:param col_name_col_instance: contains pair column_name=column_instance.
|
||||||
|
column_instance is instance of Column. These params
|
||||||
|
are required only for columns that have unsupported
|
||||||
|
types by sqlite. For example BigInteger.
|
||||||
|
"""
|
||||||
|
|
||||||
|
meta = MetaData()
|
||||||
|
meta.bind = migrate_engine
|
||||||
|
t = Table(table_name, meta, autoload=True)
|
||||||
|
|
||||||
|
if migrate_engine.name == "sqlite":
|
||||||
|
override_cols = [
|
||||||
|
_get_not_supported_column(col_name_col_instance, col.name)
|
||||||
|
for col in t.columns
|
||||||
|
if isinstance(col.type, NullType)
|
||||||
|
]
|
||||||
|
for col in override_cols:
|
||||||
|
t.columns.replace(col)
|
||||||
|
|
||||||
|
uc = UniqueConstraint(*columns, table=t, name=uc_name)
|
||||||
|
uc.drop()


def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
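
For illustration, a hedged sketch of how a migration might call this before adding a unique constraint (table and column names hypothetical):

    # Keep the newest row in each ("project_id", "key") group and
    # soft-delete the older duplicates.
    drop_old_duplicate_entries_from_table(migrate_engine, 'metadata',
                                          True, 'project_id', 'key')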


def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise ColumnError(_("Unsupported id column type"))


def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
    table = get_table(migrate_engine, table_name)

    insp = reflection.Inspector.from_engine(migrate_engine)
    real_indexes = insp.get_indexes(table_name)
    existing_index_names = dict(
        [(index['name'], index['column_names']) for index in real_indexes])

    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        if name in existing_index_names:
            column_names = [table.c[c] for c in existing_index_names[name]]
            old_index = Index(name, *column_names, unique=index["unique"])
            old_index.drop(migrate_engine)

        column_names = [table.c[c] for c in index['column_names']]
        new_index = Index(index["name"], *column_names,
                          unique=index["unique"])
        new_index.create(migrate_engine)


def change_deleted_column_type_to_boolean(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    old_deleted.create(table, populate_default=False)

    table.update().\
        where(table.c.deleted == table.c.id).\
        values(old_deleted=True).\
        execute()

    table.c.deleted.drop()
    table.c.old_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    insp = reflection.Inspector.from_engine(migrate_engine)
    table = get_table(migrate_engine, table_name)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    constraints = [constraint.copy() for constraint in table.constraints]

    meta = table.metadata
    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    c_select = []
    for c in table.c:
        if c.name != "deleted":
            c_select.append(c)
        else:
            c_select.append(table.c.deleted == table.c.id)

    ins = InsertFromSelect(new_table, select(c_select))
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == new_table.c.id).\
        values(deleted=True).\
        execute()


def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            migrate_engine, table_name, **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    deleted = True  # workaround for pyflakes
    table.update().\
        where(table.c.deleted == deleted).\
        values(new_deleted=table.c.id).\
        execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)


def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
    #                 constraints in sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                    and indexes (except the deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted
        #                 column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        return (sqltext.endswith("deleted in (0, 1)") or
                sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))

    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"], *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    deleted = True  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    deleted = False  # workaround for pyflakes
    new_table.update().\
        where(new_table.c.deleted == deleted).\
        values(deleted=default_deleted_value).\
        execute()


def get_connect_string(backend, database, user=None, passwd=None):
    """Build a database connection string.

    Try to get a connection with a very specific set of values; if we get
    these then we'll run the tests, otherwise they are skipped.
    """
    args = {'backend': backend,
            'user': user,
            'passwd': passwd,
            'database': database}
    if backend == 'sqlite':
        template = '%(backend)s:///%(database)s'
    else:
        template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
    return template % args
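
For example (credentials and database names illustrative):

    # get_connect_string('mysql', 'openstack_citest',
    #                    user='citest', passwd='secret')
    # -> 'mysql://citest:secret@localhost/openstack_citest'
    # get_connect_string('sqlite', '/tmp/test.db')
    # -> 'sqlite:////tmp/test.db'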


def is_backend_avail(backend, database, user=None, passwd=None):
    try:
        connect_uri = get_connect_string(backend=backend,
                                         database=database,
                                         user=user,
                                         passwd=passwd)
        engine = sqlalchemy.create_engine(connect_uri)
        connection = engine.connect()
    except Exception:
        # intentionally catch all to handle exceptions even if we don't
        # have any backend code loaded.
        return False
    else:
        connection.close()
        engine.dispose()
        return True


def get_db_connection_info(conn_pieces):
    database = conn_pieces.path.strip('/')
    loc_pieces = conn_pieces.netloc.split('@')
    host = loc_pieces[1]

    auth_pieces = loc_pieces[0].split(':')
    user = auth_pieces[0]
    password = ""
    if len(auth_pieces) > 1:
        password = auth_pieces[1].strip()

    return (user, password, database, host)
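
A minimal sketch of the expected input, assuming conn_pieces is a urlparse result (Python 2 import shown; values are illustrative):

    import urlparse  # urllib.parse on Python 3

    pieces = urlparse.urlparse('mysql://citest:secret@localhost/citest_db')
    user, password, database, host = get_db_connection_info(pieces)
    # -> ('citest', 'secret', 'citest_db', 'localhost')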
144  gceapi/openstack/common/eventlet_backdoor.py  Normal file
@@ -0,0 +1,144 @@
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import print_function

import errno
import gc
import os
import pprint
import socket
import sys
import traceback

import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg

from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import log as logging

help_for_backdoor_port = (
    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
    "in listening on a random tcp port number; <port> results in listening "
    "on the specified port number (and not enabling backdoor if that port "
    "is in use); and <start>:<end> results in listening on the smallest "
    "unused port number within the specified range of port numbers. The "
    "chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               default=None,
               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)


class EventletBackdoorConfigValueError(Exception):
    def __init__(self, port_range, help_msg, ex):
        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
               '%(help)s' %
               {'range': port_range, 'ex': ex, 'help': help_msg})
        super(EventletBackdoorConfigValueError, self).__init__(msg)
        self.port_range = port_range


def _dont_use_this():
    print("Don't use this, just disconnect instead")


def _find_objects(t):
    return [o for o in gc.get_objects() if isinstance(o, t)]


def _print_greenthreads():
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
        print(i, gt)
        traceback.print_stack(gt.gr_frame)
        print()


def _print_nativethreads():
    for threadId, stack in sys._current_frames().items():
        print(threadId)
        traceback.print_stack(stack)
        print()


def _parse_port_range(port_range):
    if ':' not in port_range:
        start, end = port_range, port_range
    else:
        start, end = port_range.split(':', 1)
    try:
        start, end = int(start), int(end)
        if end < start:
            raise ValueError
        return start, end
    except ValueError as ex:
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
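
For example:

    # _parse_port_range('8000')      -> (8000, 8000)
    # _parse_port_range('8000:9000') -> (8000, 9000)
    # _parse_port_range('9000:8000') raises EventletBackdoorConfigValueError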


def _listen(host, start_port, end_port, listen_func):
    try_port = start_port
    while True:
        try:
            return listen_func((host, try_port))
        except socket.error as exc:
            if (exc.errno != errno.EADDRINUSE or
                    try_port >= end_port):
                raise
            try_port += 1


def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port

99  gceapi/openstack/common/excutils.py  Normal file
@@ -0,0 +1,99 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exception related utilities.
"""

import logging
import sys
import time
import traceback

import six

from gceapi.openstack.common.gettextutils import _


class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.

    In some cases the caller may not want to re-raise the exception, and
    for those circumstances this context provides a reraise flag that
    can be used to suppress the exception. For example::

      except Exception:
          with save_and_reraise_exception() as ctxt:
              decide_if_need_reraise()
              if not should_be_reraised:
                  ctxt.reraise = False
    """
    def __init__(self):
        self.reraise = True

    def __enter__(self):
        self.type_, self.value, self.tb = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            logging.error(_('Original exception being dropped: %s'),
                          traceback.format_exception(self.type_,
                                                     self.value,
                                                     self.tb))
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)
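
A minimal sketch of the context manager in action (the cleanup call is hypothetical); the original traceback survives the cleanup step:

    def delete_with_rollback(instance):
        try:
            instance.delete()
        except Exception:
            with save_and_reraise_exception():
                rollback(instance)  # hypothetical cleanup; the original
                                    # exception is re-raised afterwards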


def forever_retry_uncaught_exceptions(infunc):
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = six.u(str(exc))
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
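
A hedged usage sketch (the worker function is hypothetical): the decorator keeps the loop alive across uncaught exceptions, logging repeats at most once a minute:

    @forever_retry_uncaught_exceptions
    def run_periodic_tasks():
        while True:
            poll_backend_once()  # hypothetical call that may raise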

440  gceapi/openstack/common/gettextutils.py  Normal file
@@ -0,0 +1,440 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
gettext for openstack-common modules.

Usual usage in an openstack.common module:

    from gceapi.openstack.common.gettextutils import _
"""

import copy
import gettext
import locale
from logging import handlers
import os
import re

from babel import localedata
import six

_localedir = os.environ.get('gceapi'.upper() + '_LOCALEDIR')
_t = gettext.translation('gceapi', localedir=_localedir, fallback=True)

_AVAILABLE_LANGUAGES = {}
USE_LAZY = False


def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True


def _(msg):
    if USE_LAZY:
        return Message(msg, domain='gceapi')
    else:
        if six.PY3:
            return _t.gettext(msg)
        return _t.ugettext(msg)


def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): Lazy gettext functionality.
        #
        # The following introduces a deferred way to do translations on
        # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
        # later be translated when we have more information.
        def _lazy_gettext(msg):
            """Create and return a Message object.

            Lazy gettext function for a given domain, it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. nova, glance, cinder, etc.)

            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain=domain)

        from six import moves
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)


class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='gceapi', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """

        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            params = self._trim_dictionary_parameters(other)
        else:
            params = self._copy_param(other)
        return params

    def _trim_dictionary_parameters(self, dict_param):
        """Return a dict that only has matching entries in the msgid."""
        # NOTE(luisg): Here we trim down the dictionary passed as parameters
        # to avoid carrying a lot of unnecessary weight around in the message
        # object, for example if someone passes in Message() % locals() but
        # only some params are used, and additionally we prevent errors for
        # non-deepcopyable objects by unicoding() them.

        # Look for %(param) keys in msgid;
        # Skip %% and deal with the case where % is first character on the line
        keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)

        # If we don't find any %(param) keys but have a %s
        if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
            # Apparently the full dictionary is the parameter
            params = self._copy_param(dict_param)
        else:
            params = {}
            # Save our existing parameters as defaults to protect
            # ourselves from losing values if we are called through an
            # (erroneous) chain that builds a valid Message with
            # arguments, and then does something like "msg % kwds"
            # where kwds is an empty dictionary.
            src = {}
            if isinstance(self.params, dict):
                src.update(self.params)
            src.update(dict_param)
            for key in keys:
                params[key] = self._copy_param(src[key])

        return params

    def _copy_param(self, param):
        try:
            return copy.deepcopy(param)
        except TypeError:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    def __str__(self):
        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
        # and it expects specifically a UnicodeError in order to proceed.
        msg = _('Message objects do not support str() because they may '
                'contain non-ascii characters. '
                'Please use unicode() or translate() instead.')
        raise UnicodeError(msg)
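
A minimal sketch of the lazy-translation flow; 'es' is an arbitrary locale, and with no message catalogs installed the fallback simply returns the msgid:

    msg = Message('Instance %(id)s not found')
    formatted = msg % {'id': 'abc'}    # still a Message; params are saved
    formatted.translate('es')          # translates the msgid, then
                                       # re-applies the saved parameters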


def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale, alias) in six.iteritems(aliases):
        if locale in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)


def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    message = obj
    if not isinstance(message, Message):
        # If the object to translate is not already translatable,
        # let's first get its unicode representation
        message = six.text_type(obj)
    if isinstance(message, Message):
        # Even after unicoding() we still need to check if we are
        # running with translatable unicode before translating
        return message.translate(desired_locale)
    return obj


def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        translated_dict = {}
        for (k, v) in six.iteritems(args):
            translated_v = translate(v, desired_locale)
            translated_dict[k] = translated_v
        return translated_dict
    return translate(args, desired_locale)


class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        self.target.setFormatter(fmt)

    def emit(self, record):
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args

        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = original_msg
            record.args = original_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)

        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one)
        record.args = _translate_args(record.args, self.locale)

        self.target.emit(record)

66  gceapi/openstack/common/importutils.py  Normal file
@@ -0,0 +1,66 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Import related utilities and helper functions.
"""

import sys
import traceback


def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))


def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    return import_class(import_str)(*args, **kwargs)


def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then falling back to
    a full path if not found in the default namespace.
    """
    import_value = "%s.%s" % (name_space, import_str)
    try:
        return import_class(import_value)(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)


def import_module(import_str):
    """Import a module."""
    __import__(import_str)
    return sys.modules[import_str]


def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        return import_module(import_str)
    except ImportError:
        return default
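
Minimal usage sketches (module names illustrative):

    json_mod = import_module('json')
    missing = try_import('no_such_module', default=None)   # -> None
    # import_object('pkg.mod.Cls', a, b=1) is equivalent to
    # import_class('pkg.mod.Cls')(a, b=1)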

182  gceapi/openstack/common/jsonutils.py  Normal file
@@ -0,0 +1,182 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
JSON related utilities.

This module provides a few things:

1) A handy function for getting an object down to something that can be
   JSON serialized. See to_primitive().

2) Wrappers around loads() and dumps(). The dumps() wrapper will
   automatically use to_primitive() for you if needed.

3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
   is available.
'''


import datetime
import functools
import inspect
import itertools
import json
try:
    import xmlrpclib
except ImportError:
    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
    #                 however the function and object call signatures
    #                 remained the same. This whole try/except block should
    #                 be removed and replaced with a call to six.moves once
    #                 six 1.4.2 is released. See http://bit.ly/1bqrVzu
    import xmlrpc.client as xmlrpclib

import six

from gceapi.openstack.common import gettextutils
from gceapi.openstack.common import importutils
from gceapi.openstack.common import timeutils

netaddr = importutils.try_import("netaddr")

_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
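
For example, a nested structure with a datetime flattens to JSON-friendly primitives (output shown for the default timeutils.strtime() format):

    import datetime

    to_primitive({'when': datetime.datetime(2014, 1, 1), 'ids': (1, 2, 3)})
    # -> {'when': '2014-01-01T00:00:00.000000', 'ids': [1, 2, 3]}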


def dumps(value, default=to_primitive, **kwargs):
    return json.dumps(value, default=default, **kwargs)


def loads(s):
    return json.loads(s)


def load(s):
    return json.load(s)


try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)

45  gceapi/openstack/common/local.py  Normal file
@@ -0,0 +1,45 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    def __getattribute__(self, attr):
        rval = super(WeakLocal, self).__getattribute__(attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        value = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
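
A minimal sketch of the difference; under CPython's reference counting the weak store's value disappears as soon as the last strong reference does:

    class Context(object):
        pass

    ctx = Context()
    weak_store.context = ctx   # stored as a weak reference
    del ctx                    # no strong references remain...
    weak_store.context         # ...so this now returns None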

657  gceapi/openstack/common/log.py  Normal file
@@ -0,0 +1,657 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Openstack logging handler.

This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.

It also allows setting of formatting information through conf.

"""

import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback

from oslo.config import cfg
import six
from six import moves

from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import importutils
from gceapi.openstack.common import jsonutils
from gceapi.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']

for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
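
A hedged sketch of the patterns at work (payload and mask are illustrative; the module's own masking helper is not shown in this hunk):

    payload = '{"auth": {"password": "secret"}}'
    for pattern in _SANITIZE_PATTERNS:
        payload = pattern.sub(r'\1***\2', payload)
    # payload is now '{"auth": {"password": "***"}}'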


common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and then will be changed in J to honor RFC5424'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                #    syslog format deprecation in J
                default=False,
                help='(Optional) Use syslog rfc5424 format for logging. '
                     'If enabled, will add APP-NAME (RFC5424) before the '
                     'MSG part of the syslog message. The old format '
                     'without APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
|
||||||
|
'suds=INFO',
|
||||||
|
'iso8601=WARN',
|
||||||
|
'requests.packages.urllib3.connectionpool=WARN'
|
||||||
|
],
|
||||||
|
help='List of logger=LEVEL pairs'),
|
||||||
|
cfg.BoolOpt('publish_errors',
|
||||||
|
default=False,
|
||||||
|
help='Publish error events'),
|
||||||
|
cfg.BoolOpt('fatal_deprecations',
|
||||||
|
default=False,
|
||||||
|
help='Make deprecations fatal'),
|
||||||
|
|
||||||
|
# NOTE(mikal): there are two options here because sometimes we are handed
|
||||||
|
# a full instance (and could include more information), and other times we
|
||||||
|
# are just handed a UUID for the instance.
|
||||||
|
cfg.StrOpt('instance_format',
|
||||||
|
default='[instance: %(uuid)s] ',
|
||||||
|
help='If an instance is passed with the log message, format '
|
||||||
|
'it like this'),
|
||||||
|
cfg.StrOpt('instance_uuid_format',
|
||||||
|
default='[instance: %(uuid)s] ',
|
||||||
|
help='If an instance UUID is passed with the log message, '
|
||||||
|
'format it like this'),
|
||||||
|
]
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
CONF.register_cli_opts(common_cli_opts)
|
||||||
|
CONF.register_cli_opts(logging_cli_opts)
|
||||||
|
CONF.register_opts(generic_log_opts)
|
||||||
|
CONF.register_opts(log_opts)
|
||||||
|
|
||||||
|
# our new audit level
|
||||||
|
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
|
||||||
|
# module aware of it so it acts like other levels.
|
||||||
|
logging.AUDIT = logging.INFO + 1
|
||||||
|
logging.addLevelName(logging.AUDIT, 'AUDIT')
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
NullHandler = logging.NullHandler
|
||||||
|
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
|
||||||
|
class NullHandler(logging.Handler):
|
||||||
|
def handle(self, record):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def emit(self, record):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def createLock(self):
|
||||||
|
self.lock = None
|
||||||
|
|
||||||
|
|
||||||
|
def _dictify_context(context):
|
||||||
|
if context is None:
|
||||||
|
return None
|
||||||
|
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
|
||||||
|
context = context.to_dict()
|
||||||
|
return context
|
||||||
|
|
||||||
|
|
||||||
|
def _get_binary_name():
|
||||||
|
return os.path.basename(inspect.stack()[-1][1])
|
||||||
|
|
||||||
|
|
||||||
|
def _get_log_file_path(binary=None):
|
||||||
|
logfile = CONF.log_file
|
||||||
|
logdir = CONF.log_dir
|
||||||
|
|
||||||
|
if logfile and not logdir:
|
||||||
|
return logfile
|
||||||
|
|
||||||
|
if logfile and logdir:
|
||||||
|
return os.path.join(logdir, logfile)
|
||||||
|
|
||||||
|
if logdir:
|
||||||
|
binary = binary or _get_binary_name()
|
||||||
|
return '%s.log' % (os.path.join(logdir, binary),)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def mask_password(message, secret="***"):
|
||||||
|
"""Replace password with 'secret' in message.
|
||||||
|
|
||||||
|
:param message: The string which includes security information.
|
||||||
|
:param secret: value with which to replace passwords.
|
||||||
|
:returns: The unicode value of message with the password fields masked.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
>>> mask_password("'adminPass' : 'aaaaa'")
|
||||||
|
"'adminPass' : '***'"
|
||||||
|
>>> mask_password("'admin_pass' : 'aaaaa'")
|
||||||
|
"'admin_pass' : '***'"
|
||||||
|
>>> mask_password('"password" : "aaaaa"')
|
||||||
|
'"password" : "***"'
|
||||||
|
>>> mask_password("'original_password' : 'aaaaa'")
|
||||||
|
"'original_password' : '***'"
|
||||||
|
>>> mask_password("u'original_password' : u'aaaaa'")
|
||||||
|
"u'original_password' : u'***'"
|
||||||
|
"""
|
||||||
|
message = six.text_type(message)
|
||||||
|
|
||||||
|
# NOTE(ldbragst): Check to see if anything in message contains any key
|
||||||
|
# specified in _SANITIZE_KEYS, if not then just return the message since
|
||||||
|
# we don't have to mask any passwords.
|
||||||
|
if not any(key in message for key in _SANITIZE_KEYS):
|
||||||
|
return message
|
||||||
|
|
||||||
|
secret = r'\g<1>' + secret + r'\g<2>'
|
||||||
|
for pattern in _SANITIZE_PATTERNS:
|
||||||
|
message = re.sub(pattern, secret, message)
|
||||||
|
return message
|
||||||
|
|
||||||
|
|
||||||
|
class BaseLoggerAdapter(logging.LoggerAdapter):
|
||||||
|
|
||||||
|
def audit(self, msg, *args, **kwargs):
|
||||||
|
self.log(logging.AUDIT, msg, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class LazyAdapter(BaseLoggerAdapter):
|
||||||
|
def __init__(self, name='unknown', version='unknown'):
|
||||||
|
self._logger = None
|
||||||
|
self.extra = {}
|
||||||
|
self.name = name
|
||||||
|
self.version = version
|
||||||
|
|
||||||
|
@property
|
||||||
|
def logger(self):
|
||||||
|
if not self._logger:
|
||||||
|
self._logger = getLogger(self.name, self.version)
|
||||||
|
return self._logger
|
||||||
|
|
||||||
|
|
||||||
|
class ContextAdapter(BaseLoggerAdapter):
|
||||||
|
warn = logging.LoggerAdapter.warning
|
||||||
|
|
||||||
|
def __init__(self, logger, project_name, version_string):
|
||||||
|
self.logger = logger
|
||||||
|
self.project = project_name
|
||||||
|
self.version = version_string
|
||||||
|
|
||||||
|
@property
|
||||||
|
def handlers(self):
|
||||||
|
return self.logger.handlers
|
||||||
|
|
||||||
|
def deprecated(self, msg, *args, **kwargs):
|
||||||
|
stdmsg = _("Deprecated: %s") % msg
|
||||||
|
if CONF.fatal_deprecations:
|
||||||
|
self.critical(stdmsg, *args, **kwargs)
|
||||||
|
raise DeprecatedConfig(msg=stdmsg)
|
||||||
|
else:
|
||||||
|
self.warn(stdmsg, *args, **kwargs)
|
||||||
|
|
||||||
|
def process(self, msg, kwargs):
|
||||||
|
# NOTE(mrodden): catch any Message/other object and
|
||||||
|
# coerce to unicode before they can get
|
||||||
|
# to the python logging and possibly
|
||||||
|
# cause string encoding trouble
|
||||||
|
if not isinstance(msg, six.string_types):
|
||||||
|
msg = six.text_type(msg)
|
||||||
|
|
||||||
|
if 'extra' not in kwargs:
|
||||||
|
kwargs['extra'] = {}
|
||||||
|
extra = kwargs['extra']
|
||||||
|
|
||||||
|
context = kwargs.pop('context', None)
|
||||||
|
if not context:
|
||||||
|
context = getattr(local.store, 'context', None)
|
||||||
|
if context:
|
||||||
|
extra.update(_dictify_context(context))
|
||||||
|
|
||||||
|
instance = kwargs.pop('instance', None)
|
||||||
|
instance_uuid = (extra.get('instance_uuid', None) or
|
||||||
|
kwargs.pop('instance_uuid', None))
|
||||||
|
instance_extra = ''
|
||||||
|
if instance:
|
||||||
|
instance_extra = CONF.instance_format % instance
|
||||||
|
elif instance_uuid:
|
||||||
|
instance_extra = (CONF.instance_uuid_format
|
||||||
|
% {'uuid': instance_uuid})
|
||||||
|
extra['instance'] = instance_extra
|
||||||
|
|
||||||
|
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
|
||||||
|
|
||||||
|
extra['project'] = self.project
|
||||||
|
extra['version'] = self.version
|
||||||
|
extra['extra'] = extra.copy()
|
||||||
|
return msg, kwargs
|
||||||
|
|
||||||
|
|
||||||
|
class JSONFormatter(logging.Formatter):
|
||||||
|
def __init__(self, fmt=None, datefmt=None):
|
||||||
|
# NOTE(jkoelker) we ignore the fmt argument, but its still there
|
||||||
|
# since logging.config.fileConfig passes it.
|
||||||
|
self.datefmt = datefmt
|
||||||
|
|
||||||
|
def formatException(self, ei, strip_newlines=True):
|
||||||
|
lines = traceback.format_exception(*ei)
|
||||||
|
if strip_newlines:
|
||||||
|
lines = [moves.filter(
|
||||||
|
lambda x: x,
|
||||||
|
line.rstrip().splitlines()) for line in lines]
|
||||||
|
lines = list(itertools.chain(*lines))
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def format(self, record):
|
||||||
|
message = {'message': record.getMessage(),
|
||||||
|
'asctime': self.formatTime(record, self.datefmt),
|
||||||
|
'name': record.name,
|
||||||
|
'msg': record.msg,
|
||||||
|
'args': record.args,
|
||||||
|
'levelname': record.levelname,
|
||||||
|
'levelno': record.levelno,
|
||||||
|
'pathname': record.pathname,
|
||||||
|
'filename': record.filename,
|
||||||
|
'module': record.module,
|
||||||
|
'lineno': record.lineno,
|
||||||
|
'funcname': record.funcName,
|
||||||
|
'created': record.created,
|
||||||
|
'msecs': record.msecs,
|
||||||
|
'relative_created': record.relativeCreated,
|
||||||
|
'thread': record.thread,
|
||||||
|
'thread_name': record.threadName,
|
||||||
|
'process_name': record.processName,
|
||||||
|
'process': record.process,
|
||||||
|
'traceback': None}
|
||||||
|
|
||||||
|
if hasattr(record, 'extra'):
|
||||||
|
message['extra'] = record.extra
|
||||||
|
|
||||||
|
if record.exc_info:
|
||||||
|
message['traceback'] = self.formatException(record.exc_info)
|
||||||
|
|
||||||
|
return jsonutils.dumps(message)
|
||||||
|
|
||||||
|
|
||||||
|
def _create_logging_excepthook(product_name):
|
||||||
|
def logging_excepthook(exc_type, value, tb):
|
||||||
|
extra = {}
|
||||||
|
if CONF.verbose or CONF.debug:
|
||||||
|
extra['exc_info'] = (exc_type, value, tb)
|
||||||
|
getLogger(product_name).critical(
|
||||||
|
"".join(traceback.format_exception_only(exc_type, value)),
|
||||||
|
**extra)
|
||||||
|
return logging_excepthook
|
||||||
|
|
||||||
|
|
||||||
|
class LogConfigError(Exception):
|
||||||
|
|
||||||
|
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
|
||||||
|
|
||||||
|
def __init__(self, log_config, err_msg):
|
||||||
|
self.log_config = log_config
|
||||||
|
self.err_msg = err_msg
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.message % dict(log_config=self.log_config,
|
||||||
|
err_msg=self.err_msg)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_log_config(log_config_append):
|
||||||
|
try:
|
||||||
|
logging.config.fileConfig(log_config_append,
|
||||||
|
disable_existing_loggers=False)
|
||||||
|
except moves.configparser.Error as exc:
|
||||||
|
raise LogConfigError(log_config_append, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
def setup(product_name):
|
||||||
|
"""Setup logging."""
|
||||||
|
if CONF.log_config_append:
|
||||||
|
_load_log_config(CONF.log_config_append)
|
||||||
|
else:
|
||||||
|
_setup_logging_from_conf()
|
||||||
|
sys.excepthook = _create_logging_excepthook(product_name)
|
||||||
|
|
||||||
|
|
||||||
|
def set_defaults(logging_context_format_string):
|
||||||
|
cfg.set_defaults(log_opts,
|
||||||
|
logging_context_format_string=
|
||||||
|
logging_context_format_string)
|
||||||
|
|
||||||
|
|
||||||
|
def _find_facility_from_conf():
|
||||||
|
facility_names = logging.handlers.SysLogHandler.facility_names
|
||||||
|
facility = getattr(logging.handlers.SysLogHandler,
|
||||||
|
CONF.syslog_log_facility,
|
||||||
|
None)
|
||||||
|
|
||||||
|
if facility is None and CONF.syslog_log_facility in facility_names:
|
||||||
|
facility = facility_names.get(CONF.syslog_log_facility)
|
||||||
|
|
||||||
|
if facility is None:
|
||||||
|
valid_facilities = facility_names.keys()
|
||||||
|
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
|
||||||
|
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
|
||||||
|
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
|
||||||
|
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
|
||||||
|
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
|
||||||
|
valid_facilities.extend(consts)
|
||||||
|
raise TypeError(_('syslog facility must be one of: %s') %
|
||||||
|
', '.join("'%s'" % fac
|
||||||
|
for fac in valid_facilities))
|
||||||
|
|
||||||
|
return facility
|
||||||
|
|
||||||
|
|
||||||
|
class RFCSysLogHandler(logging.handlers.SysLogHandler):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.binary_name = _get_binary_name()
|
||||||
|
super(RFCSysLogHandler, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
def format(self, record):
|
||||||
|
msg = super(RFCSysLogHandler, self).format(record)
|
||||||
|
msg = self.binary_name + ' ' + msg
|
||||||
|
return msg
|
||||||
|
|
||||||
|
|
||||||
|
def _setup_logging_from_conf():
|
||||||
|
log_root = getLogger(None).logger
|
||||||
|
for handler in log_root.handlers:
|
||||||
|
log_root.removeHandler(handler)
|
||||||
|
|
||||||
|
if CONF.use_syslog:
|
||||||
|
facility = _find_facility_from_conf()
|
||||||
|
# TODO(bogdando) use the format provided by RFCSysLogHandler
|
||||||
|
# after existing syslog format deprecation in J
|
||||||
|
if CONF.use_syslog_rfc_format:
|
||||||
|
syslog = RFCSysLogHandler(address='/dev/log',
|
||||||
|
facility=facility)
|
||||||
|
else:
|
||||||
|
syslog = logging.handlers.SysLogHandler(address='/dev/log',
|
||||||
|
facility=facility)
|
||||||
|
log_root.addHandler(syslog)
|
||||||
|
|
||||||
|
logpath = _get_log_file_path()
|
||||||
|
if logpath:
|
||||||
|
filelog = logging.handlers.WatchedFileHandler(logpath)
|
||||||
|
log_root.addHandler(filelog)
|
||||||
|
|
||||||
|
if CONF.use_stderr:
|
||||||
|
streamlog = ColorHandler()
|
||||||
|
log_root.addHandler(streamlog)
|
||||||
|
|
||||||
|
elif not logpath:
|
||||||
|
# pass sys.stdout as a positional argument
|
||||||
|
# python2.6 calls the argument strm, in 2.7 it's stream
|
||||||
|
streamlog = logging.StreamHandler(sys.stdout)
|
||||||
|
log_root.addHandler(streamlog)
|
||||||
|
|
||||||
|
if CONF.publish_errors:
|
||||||
|
handler = importutils.import_object(
|
||||||
|
"gceapi.openstack.common.log_handler.PublishErrorsHandler",
|
||||||
|
logging.ERROR)
|
||||||
|
log_root.addHandler(handler)
|
||||||
|
|
||||||
|
datefmt = CONF.log_date_format
|
||||||
|
for handler in log_root.handlers:
|
||||||
|
# NOTE(alaski): CONF.log_format overrides everything currently. This
|
||||||
|
# should be deprecated in favor of context aware formatting.
|
||||||
|
if CONF.log_format:
|
||||||
|
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
|
||||||
|
datefmt=datefmt))
|
||||||
|
log_root.info('Deprecated: log_format is now deprecated and will '
|
||||||
|
'be removed in the next release')
|
||||||
|
else:
|
||||||
|
handler.setFormatter(ContextFormatter(datefmt=datefmt))
|
||||||
|
|
||||||
|
if CONF.debug:
|
||||||
|
log_root.setLevel(logging.DEBUG)
|
||||||
|
elif CONF.verbose:
|
||||||
|
log_root.setLevel(logging.INFO)
|
||||||
|
else:
|
||||||
|
log_root.setLevel(logging.WARNING)
|
||||||
|
|
||||||
|
for pair in CONF.default_log_levels:
|
||||||
|
mod, _sep, level_name = pair.partition('=')
|
||||||
|
level = logging.getLevelName(level_name)
|
||||||
|
logger = logging.getLogger(mod)
|
||||||
|
logger.setLevel(level)
|
||||||
|
|
||||||
|
_loggers = {}
|
||||||
|
|
||||||
|
|
||||||
|
def getLogger(name='unknown', version='unknown'):
|
||||||
|
if name not in _loggers:
|
||||||
|
_loggers[name] = ContextAdapter(logging.getLogger(name),
|
||||||
|
name,
|
||||||
|
version)
|
||||||
|
return _loggers[name]
|
||||||
|
|
||||||
|
|
||||||
|
def getLazyLogger(name='unknown', version='unknown'):
|
||||||
|
"""Returns lazy logger.
|
||||||
|
|
||||||
|
Creates a pass-through logger that does not create the real logger
|
||||||
|
until it is really needed and delegates all calls to the real logger
|
||||||
|
once it is created.
|
||||||
|
"""
|
||||||
|
return LazyAdapter(name, version)
|
||||||
|
|
||||||
|
|
||||||
|
class WritableLogger(object):
|
||||||
|
"""A thin wrapper that responds to `write` and logs."""
|
||||||
|
|
||||||
|
def __init__(self, logger, level=logging.INFO):
|
||||||
|
self.logger = logger
|
||||||
|
self.level = level
|
||||||
|
|
||||||
|
def write(self, msg):
|
||||||
|
self.logger.log(self.level, msg.rstrip())
|
||||||
|
|
||||||
|
|
||||||
|
class ContextFormatter(logging.Formatter):
|
||||||
|
"""A context.RequestContext aware formatter configured through flags.
|
||||||
|
|
||||||
|
The flags used to set format strings are: logging_context_format_string
|
||||||
|
and logging_default_format_string. You can also specify
|
||||||
|
logging_debug_format_suffix to append extra formatting if the log level is
|
||||||
|
debug.
|
||||||
|
|
||||||
|
For information about what variables are available for the formatter see:
|
||||||
|
http://docs.python.org/library/logging.html#formatter
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
def format(self, record):
|
||||||
|
"""Uses contextstring if request_id is set, otherwise default."""
|
||||||
|
# NOTE(sdague): default the fancier formatting params
|
||||||
|
# to an empty string so we don't throw an exception if
|
||||||
|
# they get used
|
||||||
|
for key in ('instance', 'color'):
|
||||||
|
if key not in record.__dict__:
|
||||||
|
record.__dict__[key] = ''
|
||||||
|
|
||||||
|
if record.__dict__.get('request_id', None):
|
||||||
|
self._fmt = CONF.logging_context_format_string
|
||||||
|
else:
|
||||||
|
self._fmt = CONF.logging_default_format_string
|
||||||
|
|
||||||
|
if (record.levelno == logging.DEBUG and
|
||||||
|
CONF.logging_debug_format_suffix):
|
||||||
|
self._fmt += " " + CONF.logging_debug_format_suffix
|
||||||
|
|
||||||
|
# Cache this on the record, Logger will respect our formatted copy
|
||||||
|
if record.exc_info:
|
||||||
|
record.exc_text = self.formatException(record.exc_info, record)
|
||||||
|
return logging.Formatter.format(self, record)
|
||||||
|
|
||||||
|
def formatException(self, exc_info, record=None):
|
||||||
|
"""Format exception output with CONF.logging_exception_prefix."""
|
||||||
|
if not record:
|
||||||
|
return logging.Formatter.formatException(self, exc_info)
|
||||||
|
|
||||||
|
stringbuffer = moves.StringIO()
|
||||||
|
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
||||||
|
None, stringbuffer)
|
||||||
|
lines = stringbuffer.getvalue().split('\n')
|
||||||
|
stringbuffer.close()
|
||||||
|
|
||||||
|
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
|
||||||
|
record.asctime = self.formatTime(record, self.datefmt)
|
||||||
|
|
||||||
|
formatted_lines = []
|
||||||
|
for line in lines:
|
||||||
|
pl = CONF.logging_exception_prefix % record.__dict__
|
||||||
|
fl = '%s%s' % (pl, line)
|
||||||
|
formatted_lines.append(fl)
|
||||||
|
return '\n'.join(formatted_lines)
|
||||||
|
|
||||||
|
|
||||||
|
class ColorHandler(logging.StreamHandler):
|
||||||
|
LEVEL_COLORS = {
|
||||||
|
logging.DEBUG: '\033[00;32m', # GREEN
|
||||||
|
logging.INFO: '\033[00;36m', # CYAN
|
||||||
|
logging.AUDIT: '\033[01;36m', # BOLD CYAN
|
||||||
|
logging.WARN: '\033[01;33m', # BOLD YELLOW
|
||||||
|
logging.ERROR: '\033[01;31m', # BOLD RED
|
||||||
|
logging.CRITICAL: '\033[01;31m', # BOLD RED
|
||||||
|
}
|
||||||
|
|
||||||
|
def format(self, record):
|
||||||
|
record.color = self.LEVEL_COLORS[record.levelno]
|
||||||
|
return logging.StreamHandler.format(self, record)
|
||||||
|
|
||||||
|
|
||||||
|
class DeprecatedConfig(Exception):
|
||||||
|
message = _("Fatal call to deprecated config: %(msg)s")
|
||||||
|
|
||||||
|
def __init__(self, msg):
|
||||||
|
super(Exception, self).__init__(self.message % dict(msg=msg))
|
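
A minimal usage sketch for this module (an editor's illustration, not part
of the commit; the messages and the instance uuid are placeholders)::

    import sys

    from gceapi.openstack.common import log as logging

    # Parsing the (empty) command line registers the option defaults.
    logging.CONF(sys.argv[1:], project='gceapi')
    logging.setup('gceapi')

    LOG = logging.getLogger(__name__)
    LOG.audit('service starting')  # AUDIT is the level synthesized above
    # 'instance_uuid' is consumed by ContextAdapter.process() and rendered
    # through the instance_uuid_format option.
    LOG.info('booting', instance_uuid='00000000-0000-0000-0000-000000000000')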
0
gceapi/openstack/common/py3kcompat/__init__.py
Normal file
67
gceapi/openstack/common/py3kcompat/urlutils.py
Normal file
@ -0,0 +1,67 @@
#
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Python2/Python3 compatibility layer for OpenStack
"""

import six

if six.PY3:
    # python3
    import urllib.error
    import urllib.parse
    import urllib.request

    urlencode = urllib.parse.urlencode
    urljoin = urllib.parse.urljoin
    quote = urllib.parse.quote
    quote_plus = urllib.parse.quote_plus
    parse_qsl = urllib.parse.parse_qsl
    unquote = urllib.parse.unquote
    unquote_plus = urllib.parse.unquote_plus
    urlparse = urllib.parse.urlparse
    urlsplit = urllib.parse.urlsplit
    urlunsplit = urllib.parse.urlunsplit
    SplitResult = urllib.parse.SplitResult

    urlopen = urllib.request.urlopen
    URLError = urllib.error.URLError
    pathname2url = urllib.request.pathname2url
else:
    # python2
    import urllib
    import urllib2
    import urlparse

    urlencode = urllib.urlencode
    quote = urllib.quote
    quote_plus = urllib.quote_plus
    unquote = urllib.unquote
    unquote_plus = urllib.unquote_plus

    parse = urlparse
    parse_qsl = parse.parse_qsl
    urljoin = parse.urljoin
    urlparse = parse.urlparse
    urlsplit = parse.urlsplit
    urlunsplit = parse.urlunsplit
    SplitResult = parse.SplitResult

    urlopen = urllib2.urlopen
    URLError = urllib2.URLError
    pathname2url = urllib.pathname2url
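
The compatibility module is meant to be imported in place of urllib and
friends; a small sketch (illustrative, not part of the commit; the URL and
query values are placeholders)::

    from gceapi.openstack.common.py3kcompat import urlutils

    url = urlutils.urljoin('http://localhost:8777/', 'compute/v1/projects')
    parts = urlutils.urlparse(url)
    query = urlutils.urlencode({'filter': 'name eq instance'})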
88
gceapi/openstack/common/test.py
Normal file
@ -0,0 +1,88 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Common utilities used in testing"""

import logging
import os
import tempfile

import fixtures
import testtools

_TRUE_VALUES = ('True', 'true', '1', 'yes')
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"


class BaseTestCase(testtools.TestCase):

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self._set_timeout()
        self._fake_output()
        self._fake_logs()
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.tempdirs = []

    def _set_timeout(self):
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    def _fake_output(self):
        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    def _fake_logs(self):
        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
            level = logging.DEBUG
        else:
            level = logging.INFO
        capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES
        if capture_logs:
            self.useFixture(
                fixtures.FakeLogger(
                    format=_LOG_FORMAT,
                    level=level,
                    nuke_handlers=capture_logs,
                )
            )
        else:
            logging.basicConfig(format=_LOG_FORMAT, level=level)

    def create_tempfiles(self, files, ext='.conf'):
        tempfiles = []
        for (basename, contents) in files:
            if not os.path.isabs(basename):
                (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
            else:
                path = basename + ext
                fd = os.open(path, os.O_CREAT | os.O_WRONLY)
            tempfiles.append(path)
            try:
                os.write(fd, contents)
            finally:
                os.close(fd)
        return tempfiles
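
A sketch of how a test might build on this base class (illustrative, not
part of the commit; the class and test names are hypothetical)::

    from gceapi.openstack.common import test


    class TempfileTest(test.BaseTestCase):

        def test_create_tempfiles(self):
            # create_tempfiles() writes and closes the files, returning paths
            paths = self.create_tempfiles([('sample', '[DEFAULT]\n')])
            self.assertTrue(paths[0].endswith('.conf'))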
210
gceapi/openstack/common/timeutils.py
Normal file
@ -0,0 +1,210 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Time related utilities and helper functions.
"""

import calendar
import datetime
import time

import iso8601
import six


# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    st += ('Z' if tz == 'UTC' else tz)
    return st


def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
        raise ValueError(six.text_type(e))
    except TypeError as e:
        raise ValueError(six.text_type(e))


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)


def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    return datetime.datetime.strptime(timestr, fmt)


def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
    else:
        before = before.replace(tzinfo=None)

    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
    else:
        after = after.replace(tzinfo=None)

    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is None:
        # NOTE(kgriffs): This is several times faster
        # than going through calendar.timegm(...)
        return int(time.time())

    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow."""
    if utcnow.override_time:
        try:
            return utcnow.override_time.pop(0)
        except AttributeError:
            return utcnow.override_time
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formatted date from timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    :param override_time: datetime instance or list thereof. If not
                          given, defaults to the current UTC time.
    """
    utcnow.override_time = override_time or datetime.datetime.utcnow()


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        # Rebinding the loop variable would leave a list of datetimes
        # unchanged, so build the advanced list explicitly.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))


def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None


def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
                minute=now.minute, second=now.second,
                microsecond=now.microsecond)


def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    return datetime.datetime(day=tyme['day'],
                             month=tyme['month'],
                             year=tyme['year'],
                             hour=tyme['hour'],
                             minute=tyme['minute'],
                             second=tyme['second'],
                             microsecond=tyme['microsecond'])


def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    return total_seconds(delta)


def total_seconds(delta):
    """Return the total seconds of datetime.timedelta object.

    Compute total seconds of datetime.timedelta, datetime.timedelta
    doesn't have method total_seconds in Python2.6, calculate it manually.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :param dt: the time
    :param window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
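
The override hooks above are intended for tests; a sketch of the intended
flow (illustrative, not part of the commit)::

    from gceapi.openstack.common import timeutils

    timeutils.set_time_override()      # freeze "now" at the current UTC time
    start = timeutils.utcnow()
    timeutils.advance_time_seconds(61)
    assert timeutils.delta_seconds(start, timeutils.utcnow()) == 61.0
    timeutils.clear_time_override()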
68
gceapi/paths.py
Normal file
@ -0,0 +1,68 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo.config import cfg

path_opts = [
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the gceapi python module is installed'),
    cfg.StrOpt('bindir',
               default='$pybasedir/bin',
               help='Directory where gceapi binaries are installed'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining gceapi's state"),
]

CONF = cfg.CONF
CONF.register_opts(path_opts)


def basedir_def(*args):
    """Return an uninterpolated path relative to $pybasedir."""
    return os.path.join('$pybasedir', *args)


def bindir_def(*args):
    """Return an uninterpolated path relative to $bindir."""
    return os.path.join('$bindir', *args)


def state_path_def(*args):
    """Return an uninterpolated path relative to $state_path."""
    return os.path.join('$state_path', *args)


def basedir_rel(*args):
    """Return a path relative to $pybasedir."""
    return os.path.join(CONF.pybasedir, *args)


def bindir_rel(*args):
    """Return a path relative to $bindir."""
    return os.path.join(CONF.bindir, *args)


def state_path_rel(*args):
    """Return a path relative to $state_path."""
    return os.path.join(CONF.state_path, *args)
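
The *_def helpers deliberately leave '$pybasedir' and friends
uninterpolated so oslo.config can substitute them later, while the *_rel
helpers resolve against the live CONF; for example (illustrative, not part
of the commit; the file names are placeholders)::

    from gceapi import paths

    default = paths.basedir_def('gceapi', 'api')     # '$pybasedir/gceapi/api'
    db_file = paths.state_path_rel('gceapi.sqlite')  # interpolated via CONF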
263
gceapi/service.py
Normal file
@ -0,0 +1,263 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""

import signal
import sys

import eventlet
import greenlet
from oslo.config import cfg

from gceapi.openstack.common import eventlet_backdoor
from gceapi.openstack.common.gettextutils import _
from gceapi.openstack.common import importutils
from gceapi.openstack.common import log as logging
from gceapi import wsgi

LOG = logging.getLogger(__name__)

service_opts = [
    cfg.BoolOpt('use_ssl',
                default=False,
                help='Enable ssl connections or not'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='maximum time since last check-in for up service'),
    cfg.StrOpt('gce_listen',
               default="0.0.0.0",
               help='IP address for gce api to listen'),
    cfg.IntOpt('gce_listen_port',
               default=8777,
               help='port for gce api to listen'),
    cfg.StrOpt('network_api',
               default="neutron",
               help='Name of network API. neutron(quantum) or nova'),
]

CONF = cfg.CONF
CONF.register_opts(service_opts)


class SignalExit(SystemExit):
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self._services = []
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_server(server):
        """Start and wait for a server to finish.

        :param service: Server to run and wait for.
        :returns: None

        """
        server.start()
        server.wait()

    def launch_server(self, server):
        """Load and start the given server.

        :param server: The server you would like to start.
        :returns: None

        """
        if self.backdoor_port is not None:
            server.backdoor_port = self.backdoor_port
        gt = eventlet.spawn(self.run_server, server)
        self._services.append(gt)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        for service in self._services:
            service.kill()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        for service in self._services:
            try:
                service.wait()
            except greenlet.GreenletExit:
                pass


class ServiceLauncher(Launcher):
    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        raise SignalExit(signo)

    def wait(self):
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        for flag in CONF:
            flag_get = CONF.get(flag, None)
            # hide flag contents from log if contains a password
            # should use secret flag when switch over to openstack-common
            if ("_password" in flag or "_key" in flag or
                    (flag == "sql_connection" and "mysql:" in flag_get)):
                LOG.debug(_('%(flag)s : FLAG SET ') % {'flag': flag})
            else:
                LOG.debug('%(flag)s : %(flag_get)s' % {'flag': flag,
                                                       'flag_get': flag_get})

        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()

        if status is not None:
            sys.exit(status)


class WSGIService(object):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        self.use_ssl = use_ssl
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port,
                                  use_ssl=self.use_ssl,
                                  max_url_len=max_url_len)
        # Pull back actual port used
        self.port = self.server.port
        self.backdoor_port = None

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.

        """
        fl = '%s_manager' % self.name
        if fl not in CONF:
            return None

        manager_class_name = CONF.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None

        """
        if self.manager:
            self.manager.init_host()
            self.manager.pre_start_hook()
            if self.backdoor_port is not None:
                self.manager.backdoor_port = self.backdoor_port
        self.server.start()
        if self.manager:
            self.manager.post_start_hook()

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None

        """
        self.server.wait()


# NOTE(vish): the global launcher is to maintain the existing
#             functionality of calling service.serve +
#             service.wait
_launcher = None


def serve(server):
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = ServiceLauncher()
    _launcher.launch_server(server)


def wait():
    _launcher.wait()
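
How a console entry point might wire these pieces together (a sketch under
assumptions: the command line has been parsed and a paste configuration for
the 'gce' WSGI app is installed; not part of the commit)::

    import sys

    from oslo.config import cfg

    from gceapi import service

    cfg.CONF(sys.argv[1:], project='gceapi')
    server = service.WSGIService('gce')  # binds gce_listen:gce_listen_port
    service.serve(server)
    service.wait()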
180
gceapi/test.py
Normal file
@ -0,0 +1,180 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base classes for our unit tests.

Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.

"""

import os
import shutil

import collections
import eventlet
import fixtures
import mox
from oslo.config import cfg
import stubout
import testtools

from gceapi.openstack.common import log as logging
from gceapi import paths


test_opts = [
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'),
    cfg.StrOpt('network_api',
               default="neutron",
               help='Name of network API. neutron(quantum) or nova'),
]

CONF = cfg.CONF
CONF.register_opts(test_opts)
CONF.import_opt('connection',
                'gceapi.openstack.common.db.sqlalchemy.session',
                group='database')
CONF.set_override('use_stderr', False)

logging.setup('gceapi')
LOG = logging.getLogger(__name__)

eventlet.monkey_patch(os=False)

_DB_CACHE = None


class Database(fixtures.Fixture):

    def __init__(self, db_session, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db

        self.engine = db_session.get_engine()
        self.engine.dispose()
        conn = self.engine.connect()
        if sql_connection == "sqlite://":
            if db_migrate.db_version() > db_migrate.INIT_VERSION:
                return
        else:
            testdb = paths.state_path_rel(sqlite_db)
            if os.path.exists(testdb):
                return
        db_migrate.db_sync()
        self.post_migrations()
        if sql_connection == "sqlite://":
            conn = self.engine.connect()
            self._DB = "".join(line for line in conn.connection.iterdump())
            self.engine.dispose()
        else:
            cleandb = paths.state_path_rel(sqlite_clean_db)
            shutil.copyfile(testdb, cleandb)

    def setUp(self):
        super(Database, self).setUp()

        if self.sql_connection == "sqlite://":
            conn = self.engine.connect()
            conn.connection.executescript(self._DB)
            self.addCleanup(self.engine.dispose)
        else:
            shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
                            paths.state_path_rel(self.sqlite_db))


class MoxStubout(fixtures.Fixture):
    """Deal with code around mox and stubout as a fixture."""

    def setUp(self):
        super(MoxStubout, self).setUp()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)


class TestCase(testtools.TestCase):
    """Test case base class for all unit tests."""

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.log_fixture = self.useFixture(fixtures.FakeLogger('gceapi'))

        mox_fixture = self.useFixture(MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        CONF.set_override('fatal_exception_format_errors', True)

    def _clear_attrs(self):
        # Delete attributes that don't start with _ so they don't pin
        # memory around unnecessarily for the duration of the test
        # suite
        for key in [k for k in self.__dict__.keys() if k[0] != '_']:
            del self.__dict__[key]

    def flags(self, **kw):
        """Override flag variables for a test."""
        group = kw.pop('group', None)
        for k, v in kw.iteritems():
            CONF.set_override(k, v, group)

    def assertDictEqual(self, d1, d2, msg=None):
        for k, v1 in d1.iteritems():
            self.assertIn(k, d2)
            v2 = d2[k]
            if(isinstance(v1, collections.Iterable) and
               not isinstance(v1, basestring)):
                self.assertItemsEqual(v1, v2, msg)
            else:
                self.assertEqual(v1, v2, msg)
        return True

    def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
        self.assertEqual(sorted(expected_seq), sorted(actual_seq), msg)
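
A sketch of a unit test built on this base class using the flags() helper
(illustrative, not part of the commit; the class and test names are
hypothetical)::

    from gceapi import test


    class NetworkApiFlagTestCase(test.TestCase):

        def test_flags_override(self):
            self.flags(network_api='nova')
            self.assertEqual('nova', test.CONF.network_api)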
Some files were not shown because too many files have changed in this diff