Add source code to Tricircle
Initial PoC source code for Tricircle, the project for OpenStack cascading solution. Change-Id: I8abc93839a26446cb61c8d9004dfd812bd91de6e
This commit is contained in:
45
.gitignore
vendored
Normal file
45
.gitignore
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
*.DS_Store
|
||||
*.egg*
|
||||
*.log
|
||||
*.mo
|
||||
*.pyc
|
||||
*.swo
|
||||
*.swp
|
||||
*.sqlite
|
||||
*.iml
|
||||
*~
|
||||
.autogenerated
|
||||
.coverage
|
||||
.nova-venv
|
||||
.project
|
||||
.pydevproject
|
||||
.ropeproject
|
||||
.testrepository/
|
||||
.tox
|
||||
.idea
|
||||
.venv
|
||||
AUTHORS
|
||||
Authors
|
||||
build-stamp
|
||||
build/*
|
||||
CA/
|
||||
ChangeLog
|
||||
coverage.xml
|
||||
cover/*
|
||||
covhtml
|
||||
dist/*
|
||||
doc/source/api/*
|
||||
doc/build/*
|
||||
etc/nova/nova.conf.sample
|
||||
instances
|
||||
keeper
|
||||
keys
|
||||
local_settings.py
|
||||
MANIFEST
|
||||
nosetests.xml
|
||||
nova/tests/cover/*
|
||||
nova/vcsversion.py
|
||||
tools/conf/nova.conf*
|
||||
tools/lintstack.head.py
|
||||
tools/pylint_exceptions
|
||||
etc/nova/nova.conf.sample
|
201
LICENSE
Normal file
201
LICENSE
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
397
README.md
Normal file
397
README.md
Normal file
@ -0,0 +1,397 @@
|
||||
Tricircle
|
||||
===============================
|
||||
|
||||
Tricircle is a project for [Openstack cascading solution](https://wiki.openstack.org/wiki/OpenStack_cascading_solution), including the source code of Nova Proxy, Cinder Proxy, Neutron L2/L3 Proxy, Glance sync manager and Ceilometer Proxy(not implemented yet).
|
||||
|
||||
The project name "Tricircle" comes from a fractal. See the blog ["OpenStack cascading and fractal"](https://www.linkedin.com/today/post/article/20140729022031-23841540-openstack-cascading-and-fractal) for more information.
|
||||
|
||||
Important to know
|
||||
-----------
|
||||
* The initial source code is for PoC only. Refactoring will be done constantly to reach the OpenStack acceptance standard.
|
||||
* The PoC source code is based on the IceHouse version, while Neutron is a master branch snapshot from July 1, 2014 which includes the DVR feature, not the IceHouse version. The Neutron code was downloaded from GitHub while it was still in development and review status. The source code of the DVR part is not stable, and not all DVR features are included; for example, N-S functions are not ready.
|
||||
* Neutron cascading uses the provider network feature, but Horizon doesn't support provider networks very well. So you have to use the Neutron CLI to create a network, or set the default provider network type to VxLAN, or remove the "local", "flat", "VLAN" and "GRE" network type drivers from the ML2 plugin configuration.
|
||||
* For Neutron L2/L3 features, only VxLAN/L3 across cascaded OpenStack is supported in the current source code. VLAN2VLAN, VLAN2VxLAN and VxLAN2VxLAN across cascaded OpenStack are also implemented with the IceHouse version but the patch is not ready yet; the source code is in the VLAN2VLAN folder.
|
||||
* The tunneling network for the cross-OpenStack piggyback data path uses VxLAN. This leads to modifications on the L2 agent and L3 agent; we will refactor it to use GRE for the tunneling network to reduce the patch size for the Juno version.
|
||||
* If you want to experience VLAN2VLAN, VLAN2VxLAN and VxLAN2VxLAN across cascaded OpenStack, please ask help from PoC team member, see the wiki page [Openstack cascading solution](https://wiki.openstack.org/wiki/OpenStack_cascading_solution) for contact information.
|
||||
* Glance cascading using Glance V2 API. Only CLI/pythonclient support V2 API, the Horizon doesn't support that version. So image management should be done through CLI, and using V2 only. Otherwise, the glance cascading cannot work properly.
|
||||
* Glance cascading is not used by default, e.g., the global Glance is used by default. If Glance cascading is required, configuration is required.
|
||||
* Refactoring of the Tricircle source code based on the Juno version will start as soon as the Juno version is available.
|
||||
|
||||
|
||||
Key modules
|
||||
-----------
|
||||
|
||||
* Nova proxy
|
||||
|
||||
Similar role like Nova-Compute. Transfer the VM operation to cascaded Nova. Also responsible for attach volume and network to the VM in the cascaded OpenStack.
|
||||
|
||||
* Cinder proxy
|
||||
|
||||
Similar role like Cinder-Volume. Transfer the volume operation to cascaded Cinder.
|
||||
|
||||
* Neutron proxy
|
||||
|
||||
Including L2 proxy and L3 proxy, Similar role like OVS-Agent/L3-Agent. Finish L2/L3-networking in the cascaded OpenStack, including cross OpenStack networking.
|
||||
|
||||
* Glance sync
|
||||
|
||||
Synchronize image among the cascading and policy determined Cascaded OpenStacks
|
||||
|
||||
Patches required
|
||||
------------------
|
||||
|
||||
* IceHouse-Patches
|
||||
|
||||
Patches for the OpenStack IceHouse version, including patches for the cascading level and the cascaded level.
|
||||
|
||||
Feature Supported
|
||||
------------------
|
||||
|
||||
* Nova cascading
|
||||
Launch/Reboot/Terminate/Resize/Rescue/Pause/Un-pause/Suspend/Resume/VNC Console/Attach Volume/Detach Volume/Snapshot/KeyPair/Flavor
|
||||
|
||||
* Cinder cascading
|
||||
Create Volume/Delete Volume/Attach Volume/Detach Volume/Extend Volume/Create Snapshot/Delete Snapshot/List Snapshots/Create Volume from Snapshot/Create Volume from Image/Create Volume from Volume (Clone)/Create Image from Volume
|
||||
|
||||
* Neutron cascading
|
||||
Network/Subnet/Port/Router
|
||||
|
||||
* Glance cascading
|
||||
Only support V2 api. Create Image/Delete Image/List Image/Update Image/Upload Image/Patch Location/VM Snapshot/Image Synchronization
|
||||
|
||||
Known Issues
|
||||
------------------
|
||||
* Use "admin" role to experience these feature first, multi-tenancy has not been tested well.
|
||||
* Launch VM only support "boot from image", "boot from volume", "boot from snapshot"
|
||||
* Flavor only support new created flavor synchronized to the cascaded OpenStack, does not support flavor update synchronization to cascaded OpenStack yet.
|
||||
* Must make a patch for "Create a volume from image", the patch link: https://bugs.launchpad.net/cinder/+bug/1308058
|
||||
|
||||
Installation without Glance cascading
|
||||
------------
|
||||
|
||||
* **Prerequisites**
|
||||
- the minimal installation requires three OpenStack IceHouse installations to experience the cross-cascaded-OpenStack L2/L3 functions. The minimal setup needs four nodes; see the following picture:
|
||||
|
||||

|
||||
|
||||
- the cascading OpenStack needs two node, Node1 and Node 2. Add Node1 to AZ1, Node2 to AZ2 in the cascading OpenStack for both Nova and Cinder.
|
||||
|
||||
- It's recommended to name the cascading Openstack region to "Cascading_OpenStack" or "Region1"
|
||||
|
||||
- Node1 is all-in-one OpenStack installation with KeyStone and Glance, Node1 also function as Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent node, and will be replaced to be the proxy node for AZ1.
|
||||
|
||||
- Node2 is general Nova-Compute node with Cinder-Volume, Neutron OVS-Agent/L3-Agent function installed. And will be replaced to be the proxy node for AZ2
|
||||
|
||||
- the all-in-one cascaded OpenStack installed in Node3 function as the AZ1. Node3 will also function as the Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent in order to be able to create VMs/Volume/Networking in this AZ1. Glance is only required to be installed if Glance cascading needed. Add Node3 to AZ1 in the cascaded OpenStack both for Nova and Cinder. It's recommended to name the cascaded Openstack region for Node3 to "AZ1"
|
||||
|
||||
- the all-in-one cascaded OpenStack installed in Node4 function as the AZ2. Node3 will also function as the Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent in order to be able to create VMs/Volume/Networking in this AZ2. Glance is only required to be installed if Glance cascading needed.Add Node4 to AZ2 in the cascaded OpenStack both for Nova and Cinder.It's recommended to name the cascaded Openstack region for Node4 to "AZ2"
|
||||
|
||||
Make sure the time of these four nodes are synchronized. Because the Nova Proxy/Cinder Proxy/Neutron L2/L3 Proxy will query the cascaded OpenStack using timestamp, incorrect time will lead to VM/Volume/Port status synchronization not work properly.
|
||||
|
||||
Register all services endpoint in the global shared KeyStone.
|
||||
|
||||
Make sure the 3 OpenStack can work independently before cascading introduced, eg. you can boot VM with network, create volume and attach volume in each OpenStack. After verify that 3 OpenStack can work independently, clean all created resources VM/Volume/Network.
|
||||
|
||||
After all OpenStack installations are ready, it's time to install the IceHouse patches both for the cascading OpenStack and the cascaded OpenStack, and then replace the Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent with the Nova Proxy / Cinder Proxy / Neutron L2/L3 Proxy.
|
||||
|
||||
* **IceHouse patches installation step by step**
|
||||
|
||||
1. Node1
|
||||
- Patches for Nova - instance_mapping_uuid_patch
|
||||
|
||||
This patch makes the Nova proxy able to translate the cascading-level VM's UUID to the cascaded-level VM's UUID
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/nova/instance_mapping_uuid_patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Cinder - Volume/SnapShot/Backup UUID mapping patch
|
||||
|
||||
This patch makes the Cinder proxy able to translate the cascading-level (Volume/Snapshot/Backup) UUID to the cascaded-level (Volume/Snapshot/Backup) UUID
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/cinder/instance_mapping_uuid_patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - DVR patch
|
||||
|
||||
This patch gives Neutron the DVR (distributed virtual router) feature. Through DVR, all L2/L3 proxy nodes at the cascading level can receive the corresponding RPC messages, and then convert the commands into RESTful API calls to the cascaded Neutron.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/dvr-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - ml2-mech-driver-cascading patch
|
||||
|
||||
This patch is to make L2 population driver being able to populate the VM's host IP which stored in the port binding profile in the cascaded OpenStack to another cascaded OpenStack.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/ml2-mech-driver-cascading-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
2. Node3
|
||||
- Patches for Nova - port binding profile update bug: https://bugs.launchpad.net/neutron/+bug/1338202.
|
||||
|
||||
because ml2-mech-driver-cascaded-patch will update the binding profile in the port, and will be flushed to null if you don't fix the bug.
|
||||
|
||||
You can also fix the bug via:
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/
|
||||
cp api.py $python_installation_path/site-packages/nova/network/neutronv2/
|
||||
|
||||
```
|
||||
the patch will reserve what has been saved in the port binding profile
|
||||
|
||||
- Patches for Cinder - timestamp-query-patch patch
|
||||
|
||||
This patch is to make the cascaded Cinder being able to execute query with timestamp filter, but not to return all objects.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/cinder/timestamp-query-patch_patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - DVR patch
|
||||
|
||||
This patch gives Neutron the DVR (distributed virtual router) feature.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/dvr-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - ml2-mech-driver-cascaded patch
|
||||
|
||||
This patch is to make L2 population driver being able to populate the virtual remote port where the VM located in another OpenStack.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - openvswitch-agent patch
|
||||
|
||||
This patch is to get dvr mac crossing openstack for cross OpenStack L3 networking for VLAN-VLAN/VLAN-VxLAN/VxLAN-VxLAN.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/openvswitch-agent-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
3. Node4
|
||||
- Patches for Nova - port binding profile update bug: https://bugs.launchpad.net/neutron/+bug/1338202.
|
||||
|
||||
because ml2-mech-driver-cascaded-patch will update the binding profile in the port, and will be flushed to null if you don't fix the bug.
|
||||
|
||||
You can also fix the bug via:
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/
|
||||
cp api.py $python_installation_path/site-packages/nova/network/neutronv2/
|
||||
|
||||
```
|
||||
the patch will reserve what has been saved in the port binding profile
|
||||
|
||||
- Patches for Cinder - timestamp-query-patch patch
|
||||
|
||||
This patch is to make the cascaded Cinder being able to execute query with timestamp filter, but not to return all objects.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/cinder/timestamp-query-patch_patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - DVR patch
|
||||
|
||||
This patch is to make the Neutron has the DVR(distributed virtual router) feature.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/dvr-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - ml2-mech-driver-cascaded patch
|
||||
|
||||
This patch is to make L2 population driver being able to populate the virtual remote port where the VM located in another OpenStack.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Patches for Neutron - openvswitch-agent patch
|
||||
|
||||
This patch is to get dvr mac crossing openstack for cross OpenStack L3 networking for VLAN-VLAN/VLAN-VxLAN/VxLAN-VxLAN.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/neutron/openvswitch-agent-patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
* **Proxy installation step by step**
|
||||
|
||||
1. Node1
|
||||
- Nova proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/novaproxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
- Cinder proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/cinderproxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
- L2 proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/neutronproxy/l2-proxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
- L3 proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/neutronproxy/l3-proxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
2. Node2
|
||||
- Nova proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/novaproxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/nova/instance_mapping_uuid_patch/nova/objects
|
||||
cp instance.py $python_installation_path/site-packages/nova/objects/
|
||||
```
|
||||
This file is a patch for instance UUID mapping used in the proxy nodes.
|
||||
|
||||
- Cinder proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/cinderproxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy
|
||||
cp models.py $python_installation_path/site-packages/cinder/db/sqlalchemy
|
||||
```
|
||||
This file is a patch for instance UUID mapping used in the proxy nodes.
|
||||
|
||||
|
||||
- L2 proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/neutronproxy/l2-proxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
- L3 proxy
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/neutronproxy/l3-proxy
|
||||
```
|
||||
follow README.md instruction to install the proxy. Please change the configuration value in the install.sh according to your environment setting
|
||||
|
||||
|
||||
Upgrade to Glance cascading
|
||||
------------
|
||||
|
||||
* **Prerequisites**
|
||||
- To experience the glance cascading feature, you can simply upgrade the current installation with several step, see the following picture:
|
||||
|
||||

|
||||
|
||||
1. Node1
|
||||
- Patches for Glance - glance_location_patch
|
||||
|
||||
This patch is to make the glance being able to handle http url location. The patch also insert the sync manager to the chain of responsibility.
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/icehouse-patches/glance/glance_location_patch
|
||||
```
|
||||
follow README.md instruction to install the patch
|
||||
|
||||
- Sync Manager
|
||||
|
||||
Navigate to the folder
|
||||
```
|
||||
cd ./tricircle/glancesync
|
||||
```
|
||||
|
||||
modify the storage scheme configuration for cascading and cascaded level
|
||||
```
|
||||
vi ./tricircle/glancesync/etc/glance/glance_store.yaml
|
||||
```
|
||||
|
||||
follow the README.md instructions to install the sync manager. Please change the configuration values in install.sh according to your environment settings, especially for the configuration:
|
||||
sync_enabled=True
|
||||
sync_server_port=9595
|
||||
sync_server_host=127.0.0.1
|
||||
|
||||
2. Node3
|
||||
- Glance Installation
|
||||
|
||||
Please install Glance in Node3 as the cascaded Glance.
|
||||
Register the service endpoint in the KeyStone.
|
||||
Change the glance endpoint in nova.conf and cinder.conf to the Glance located in Node3
|
||||
|
||||
3. Node4
|
||||
- Glance Installation
|
||||
|
||||
Please install Glance in Node4 as the cascaded Glance.
|
||||
Register the service endpoint in the KeyStone
|
||||
Change the glance endpoint in nova.conf and cinder.conf to the Glance located in Node4
|
||||
|
||||
4. Configuration
|
||||
- Change the Nova proxy configuration on Node1, setting "cascaded_glance_flag" to True and adding the "cascaded_glance_url" of Node3, configured according to the Nova-proxy README.MD instructions
|
||||
- Change the Cinder proxy configuration on Node1, setting "glance_cascading_flag" to True and adding the "cascaded_glance_url" of Node3, configured according to the Cinder-proxy README.MD instructions
|
||||
|
||||
- Change Nova proxy configuration on Node2, setting the "cascaded_glance_flag" to True and add "cascaded_glance_url" of Node4 configurantion according to Nova-proxy README.MD instruction
|
||||
- Change Cinder proxy configuration on Node2, setting the "glance_cascading_flag" to True and add "cascaded_glance_url" of Node4 configurantion according to Nova-proxy README.MD instruction
|
||||
|
||||
5. Experience Glance cascading
|
||||
- Restart all related service
|
||||
- Use Glance V2 api to create Image, Upload Image or patch location for Image. Image should be able to sync to distributed Glance if sync_enabled is setting to True
|
||||
- Sync image only during first time usage but not uploading or patch location is still in testing phase, may not work properly.
|
||||
- Create VM/Volume/etc from Horizon
|
||||
|
||||
|
148
cinderproxy/README.md
Normal file
148
cinderproxy/README.md
Normal file
@ -0,0 +1,148 @@
|
||||
Openstack Cinder Proxy
|
||||
===============================
|
||||
|
||||
Cinder-Proxy acts as the same role of Cinder-Volume in cascading OpenStack.
|
||||
Cinder-Proxy treats cascaded Cinder as its cinder volume, convert the internal request message from the message bus to restful API calling to cascaded Cinder.
|
||||
|
||||
|
||||
Key modules
|
||||
-----------
|
||||
|
||||
* The new cinder proxy module cinder_proxy,which treats cascaded Cinder as its cinder volume, convert the internal request message from the message bus to restful API calling to cascaded Cinder:
|
||||
|
||||
cinder/volume/cinder_proxy.py
|
||||
|
||||
Requirements
|
||||
------------
|
||||
* openstack-cinder-volume-2014.1-14.1 has been installed
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
We provide two ways to install the cinder proxy code. In this section, we will guide you through installing the cinder proxy with the minimum configuration.
|
||||
|
||||
* **Note:**
|
||||
|
||||
- Make sure you have an existing installation of **Openstack Icehouse**.
|
||||
- We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified:
|
||||
$CINDER_CONFIG_PARENT_DIR/cinder.conf
|
||||
(replace the $... with actual directory names.)
|
||||
|
||||
* **Manual Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
|
||||
- Navigate to the local repository and copy the contents in 'cinder' sub-directory to the corresponding places in existing cinder, e.g.
|
||||
```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR```
|
||||
(replace the $... with actual directory name.)
|
||||
|
||||
- Update the cinder configuration file (e.g. /etc/cinder/cinder.conf) with the minimum option below. If the option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide.
|
||||
```
|
||||
[DEFAULT]
|
||||
...
|
||||
###configuration for Cinder cascading ###
|
||||
volume_manager=cinder.volume.cinder_proxy.CinderProxy
|
||||
volume_sync_interval=5
|
||||
cinder_tenant_name=$CASCADED_ADMIN_TENANT
|
||||
cinder_username=$CASCADED_ADMIN_NAME
|
||||
cinder_password=$CASCADED_ADMIN_PASSWORD
|
||||
keystone_auth_url=http://$GLOBAL_KEYSTONE_IP:5000/v2.0/
|
||||
cascading_glance_url=$CASCADING_GLANCE
|
||||
cascaded_glance_url=http://$CASCADED_GLANCE
|
||||
cascaded_available_zone=$CASCADED_AVAILABLE_ZONE
|
||||
cascaded_region_name=$CASCADED_REGION_NAME
|
||||
```
|
||||
|
||||
- Restart the cinder proxy.
|
||||
```service openstack-cinder-volume restart```
|
||||
|
||||
- Done. The cinder proxy should be working with a demo configuration.
|
||||
|
||||
* **Automatic Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
|
||||
- Navigate to the installation directory and run installation script.
|
||||
```
|
||||
cd $LOCAL_REPOSITORY_DIR/installation
|
||||
sudo bash ./install.sh
|
||||
```
|
||||
(replace the $... with actual directory name.)
|
||||
|
||||
- Done. The installation code should setup the cinder proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide.
|
||||
```
|
||||
[DEFAULT]
|
||||
...
|
||||
###cascade info ###
|
||||
...
|
||||
###configuration for Cinder cascading ###
|
||||
volume_manager=cinder.volume.cinder_proxy.CinderProxy
|
||||
volume_sync_interval=5
|
||||
cinder_tenant_name=$CASCADED_ADMIN_TENANT
|
||||
cinder_username=$CASCADED_ADMIN_NAME
|
||||
cinder_password=$CASCADED_ADMIN_PASSWORD
|
||||
keystone_auth_url=http://$GLOBAL_KEYSTONE_IP:5000/v2.0/
|
||||
cascading_glance_url=$CASCADING_GLANCE
|
||||
cascaded_glance_url=http://$CASCADED_GLANCE
|
||||
cascaded_available_zone=$CASCADED_AVAILABLE_ZONE
|
||||
cascaded_region_name=$CASCADED_REGION_NAME
|
||||
```
|
||||
|
||||
* **Troubleshooting**
|
||||
|
||||
In case the automatic installation process is not complete, please check the followings:
|
||||
|
||||
- Make sure your OpenStack version is Icehouse.
|
||||
|
||||
- Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide.
|
||||
|
||||
- The installation code will automatically add the related codes to $CINDER_PARENT_DIR/cinder and modify the related configuration.
|
||||
|
||||
- In case the automatic installation does not work, try to install manually.
|
||||
|
||||
Configurations
|
||||
--------------
|
||||
|
||||
* This is a (default) configuration sample for the cinder proxy. Please add/modify these options in /etc/cinder/cinder.conf.
|
||||
* Note:
|
||||
- Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name.
|
||||
- Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints.
|
||||
|
||||
```
|
||||
[DEFAULT]
|
||||
|
||||
...
|
||||
|
||||
#
|
||||
#Options defined in cinder.volume.manager
|
||||
#
|
||||
|
||||
# Default driver to use for the cinder proxy (string value)
|
||||
volume_manager=cinder.volume.cinder_proxy.CinderProxy
|
||||
|
||||
|
||||
#The cascading level keystone component service url, by which the cinder proxy
|
||||
#can access to cascading level keystone service
|
||||
keystone_auth_url=$keystone_auth_url
|
||||
|
||||
#The cascading level glance component service url, by which the cinder proxy
|
||||
#can access to cascading level glance service
|
||||
cascading_glance_url=$CASCADING_GLANCE
|
||||
|
||||
#The cascaded level glance component service url, by which the cinder proxy
|
||||
#can judge whether the cascading glance image has a location for this cascaded glance
|
||||
cascaded_glance_url=http://$CASCADED_GLANCE
|
||||
|
||||
#The cascaded level region name, which will be set as a parameter when
|
||||
#the cascaded level component services register endpoint to keystone
|
||||
cascaded_region_name=$CASCADED_REGION_NAME
|
||||
|
||||
#The cascaded level available zone name, which will be set as a parameter when
|
||||
#forward request to cascaded level cinder. Please pay attention to that value of
|
||||
#cascaded_available_zone of cinder-proxy must be the same as storage_availability_zone in
|
||||
#the cascaded level node. And cinder-proxy should be configured to the same storage_availability_zone.
|
||||
#this configuration could be removed in the future to just use the cinder-proxy storage_availability_zone
|
||||
#configuration item, but it is up to the admin to make sure the storage_availability_zone in cinder-proxy
#and the cascaded cinder keep the same value.
|
||||
cascaded_available_zone=$CASCADED_AVAILABLE_ZONE
|
||||
|
||||
|
1099
cinderproxy/cinder/volume/cinder_proxy.py
Normal file
1099
cinderproxy/cinder/volume/cinder_proxy.py
Normal file
File diff suppressed because it is too large
Load Diff
130
cinderproxy/installation/install.sh
Normal file
130
cinderproxy/installation/install.sh
Normal file
@ -0,0 +1,130 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# Copyright (c) 2014 Huawei Technologies.
|
||||
|
||||
_CINDER_CONF_DIR="/etc/cinder"
|
||||
_CINDER_CONF_FILE="cinder.conf"
|
||||
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
|
||||
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"
|
||||
|
||||
# please set the option list set in cinder configure file
|
||||
_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 periodic_interval=5 cinder_tenant_name=admin cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1")
|
||||
|
||||
# if you did not make changes to the installation files,
|
||||
# please do not edit the following directories.
|
||||
_CODE_DIR="../cinder/"
|
||||
_BACKUP_DIR="${_CINDER_DIR}/cinder-proxy-installation-backup"
|
||||
|
||||
|
||||
function log()
|
||||
{
|
||||
if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then
|
||||
mkdir -p `dirname ${_CINDER_INSTALL_LOG}`
|
||||
touch $_CINDER_INSTALL_LOG
|
||||
chmod 777 $_CINDER_INSTALL_LOG
|
||||
fi
|
||||
echo "$@"
|
||||
echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG
|
||||
}
|
||||
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
log "Please run as root."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
cd `dirname $0`
|
||||
|
||||
|
||||
log "checking installation directories..."
|
||||
if [ ! -d "${_CINDER_DIR}" ] ; then
|
||||
log "Could not find the cinder installation. Please check the variables in the beginning of the script."
|
||||
log "aborted."
|
||||
exit 1
|
||||
fi
|
||||
if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then
|
||||
log "Could not find cinder config file. Please check the variables in the beginning of the script."
|
||||
log "aborted."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "checking previous installation..."
|
||||
if [ -d "${_BACKUP_DIR}/cinder" ] ; then
|
||||
log "It seems cinder-proxy has already been installed!"
|
||||
log "Please check README for solution if this is not true."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "backing up current files that might be overwritten..."
|
||||
mkdir -p "${_BACKUP_DIR}/cinder"
|
||||
mkdir -p "${_BACKUP_DIR}/etc/cinder"
|
||||
cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/cinder/"
|
||||
if [ $? -ne 0 ] ; then
|
||||
rm -r "${_BACKUP_DIR}/cinder"
|
||||
log "Error in code backup, aborted."
|
||||
exit 1
|
||||
fi
|
||||
cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/etc/cinder/"
|
||||
if [ $? -ne 0 ] ; then
|
||||
rm -r "${_BACKUP_DIR}/cinder"
|
||||
rm -r "${_BACKUP_DIR}/etc"
|
||||
log "Error in config backup, aborted."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "copying in new files..."
|
||||
cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}`
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Error in copying, aborted."
|
||||
log "Recovering original files..."
|
||||
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder"
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Recovering failed! Please install manually."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "updating config file..."
|
||||
sed -i.backup -e "/volume_manager *=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}"
|
||||
sed -i.backup -e "/periodic_interval *=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}"
|
||||
for option in $_CINDER_CONF_OPTION
|
||||
do
|
||||
sed -i -e "/\[DEFAULT\]/a \\"$option "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}"
|
||||
done
|
||||
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Error in updating, aborted."
|
||||
log "Recovering original files..."
|
||||
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder"
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Recovering /cinder failed! Please install manually."
|
||||
fi
|
||||
cp "${_BACKUP_DIR}/etc/cinder/${_CINDER_CONF_FILE}" "${_CINDER_CONF_DIR}" && rm -r "${_BACKUP_DIR}/etc"
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Recovering config failed! Please install manually."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "restarting cinder proxy..."
|
||||
service openstack-cinder-volume restart
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "There was an error in restarting the service, please restart cinder proxy manually."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "Cinder proxy Completed."
|
||||
log "See README to get started."
|
||||
|
||||
exit 0
|
129
cinderproxy/installation/uninstall.sh
Normal file
129
cinderproxy/installation/uninstall.sh
Normal file
@ -0,0 +1,129 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# Copyright (c) 2014 Huawei Technologies.
|
||||
|
||||
_CINDER_CONF_DIR="/etc/cinder"
|
||||
_CINDER_CONF_FILE="cinder.conf"
|
||||
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
|
||||
_CINDER_CONF_OPTION=("volume_manager volume_sync_interval periodic_interval cinder_tenant_name cinder_username cinder_password keystone_auth_url glance_cascading_flag cascading_glance_url cascaded_glance_url cascaded_cinder_url cascaded_region_name cascaded_available_zone")
|
||||
|
||||
# if you did not make changes to the installation files,
|
||||
# please do not edit the following directories.
|
||||
_CODE_DIR="../cinder"
|
||||
_BACKUP_DIR="${_CINDER_DIR}/cinder-proxy-installation-backup"
|
||||
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"
|
||||
|
||||
#_SCRIPT_NAME="${0##*/}"
|
||||
#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log"
|
||||
|
||||
function log()
|
||||
{
|
||||
if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then
|
||||
mkdir -p `dirname ${_CINDER_INSTALL_LOG}`
|
||||
touch $_CINDER_INSTALL_LOG
|
||||
chmod 777 $_CINDER_INSTALL_LOG
|
||||
fi
|
||||
echo "$@"
|
||||
echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG
|
||||
}
|
||||
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
log "Please run as root."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd `dirname $0`
|
||||
|
||||
log "checking installation directories..."
|
||||
if [ ! -d "${_CINDER_DIR}" ] ; then
|
||||
log "Could not find the cinder installation. Please check the variables in the beginning of the script."
|
||||
log "aborted."
|
||||
exit 1
|
||||
fi
|
||||
if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then
|
||||
log "Could not find cinder config file. Please check the variables in the beginning of the script."
|
||||
log "aborted."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "checking backup..."
|
||||
if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then
|
||||
log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled."
|
||||
log "If this is not the case, then please uninstall manually."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "backing up current files that might be overwritten..."
|
||||
if [ -d "${_BACKUP_DIR}/uninstall" ] ; then
|
||||
rm -r "${_BACKUP_DIR}/uninstall"
|
||||
fi
|
||||
mkdir -p "${_BACKUP_DIR}/uninstall/cinder"
|
||||
mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder"
|
||||
cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/uninstall/cinder/"
|
||||
if [ $? -ne 0 ] ; then
|
||||
rm -r "${_BACKUP_DIR}/uninstall/cinder"
|
||||
log "Error in code backup, aborted."
|
||||
exit 1
|
||||
fi
|
||||
cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/"
|
||||
if [ $? -ne 0 ] ; then
|
||||
rm -r "${_BACKUP_DIR}/uninstall/cinder"
|
||||
rm -r "${_BACKUP_DIR}/uninstall/etc"
|
||||
log "Error in config backup, aborted."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "restoring code to the status before installing cinder-proxy..."
|
||||
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}`
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Error in copying, aborted."
|
||||
log "Recovering current files..."
|
||||
cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}`
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Recovering failed! Please uninstall manually."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "updating config file..."
|
||||
for option in $_CINDER_CONF_OPTION
|
||||
do
|
||||
sed -i.uninstall.backup -e "/"$option "*=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}"
|
||||
done
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Error in updating, aborted."
|
||||
log "Recovering current files..."
|
||||
cp "${_BACKUP_DIR}/uninstall/etc/cinder/${_CINDER_CONF_FILE}" "${_CINDER_CONF_DIR}"
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "Recovering failed! Please uninstall manually."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "cleaning up backup files..."
|
||||
rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc"
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "There was an error when cleaning up the backup files."
|
||||
fi
|
||||
|
||||
log "restarting cinder volume..."
|
||||
service openstack-cinder-volume restart
|
||||
if [ $? -ne 0 ] ; then
|
||||
log "There was an error in restarting the service, please restart cinder volume manually."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "Completed."
|
||||
|
||||
exit 0
|
140
glancesync/README.md
Normal file
140
glancesync/README.md
Normal file
@ -0,0 +1,140 @@
|
||||
Glance Sync Manager
|
||||
===============================
|
||||
|
||||
This is a submodule of Tricircle Project, in which a sync function is added to support the glance images' sync between cascading and cascadeds.
|
||||
When launching an instance, nova will look for an image in the same region as the instance to download from; this can speed up the overall launch time of the instance.
|
||||
|
||||
Key modules
|
||||
-----------
|
||||
|
||||
* Primarily, there is only new module in glance cascading: Sync, which is in the glance/sync package.
|
||||
|
||||
glance/sync/__init__.py : Adds a ImageRepoProxy class, like store, policy .etc , to augment a sync mechanism layer on top of the api request handling chain.
|
||||
glance/sync/base.py : Contains SyncManager object, execute the sync operations.
|
||||
glance/sync/utils.py : Some help functions.
|
||||
glance/sync/api/ : Support a Web Server of sync.
|
||||
glance/sync/client/: Support a client to visit the Web Server , ImageRepoProxy use this client to call the sync requests.
|
||||
glance/sync/task/: Each Sync operation is transformed into a task, we using queue to store the task an eventlet to handle the task simultaneously.
|
||||
glance/sync/store/: We implements the independent-glance-store, separating the handles of image_data from image_metadata.
|
||||
glance/cmd/sync.py: For the Sync Server starting launch (refer this in /usr/bin/glance-sync).
|
||||
|
||||
|
||||
|
||||
* **Note:**
|
||||
At present, the glance cascading only support v2 version of glance-api;
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
* pexpect>=2.3
|
||||
|
||||
Installation
|
||||
------------
|
||||
* **Note:**
|
||||
- The Installation and configuration guidelines written below is just for the cascading layer of glance. For the cascaded layer, the glance is installed as normal.
|
||||
|
||||
* **Prerequisites**
|
||||
- Please install the python package pexpect>=2.3 (we use pxssh for logging in, and there is a bug in pxssh -- see https://mail.python.org/pipermail/python-list/2008-February/510054.html -- which you should fix before launching the service).
|
||||
|
||||
* **Manual Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
* **Manual Installation**
|
||||
|
||||
1. Under cascading Openstack, copy these files from glance-patch directory and glancesync directory to suitable place:
|
||||
|
||||
| DIR | FROM | TO |
|
||||
| ------------- |:-----------------|:-------------------------------------------|
|
||||
| glancesync | glance/ | ${python_install_dir}/glance |
|
||||
| glancesync | etc/glance/ | /etc/glance/ |
|
||||
| glancesync | glance-sync | /usr/bin/ |
|
||||
|${glance-patch}| glance/ | ${python_install_dir}/glance |
|
||||
|${glance-patch}|glance.egg-info/entry_points.txt | ${glance_install_egg.info}/ |
|
||||
|
||||
${glance-patch} = `icehouse-patches/glance/glance_location_patch` ${python_install_dir} is where the openstack installed, e.g. `/usr/lib64/python2.6/site-packages` .
|
||||
2. Add/modify the config options
|
||||
|
||||
| CONFIG_FILE | OPTION | ADD or MODIFY |
|
||||
| ----------------|:---------------------------------------------------|:--------------:|
|
||||
|glance-api.conf | show_multiple_locations=True | M |
|
||||
|glance-api.conf | sync_server_host=${sync_mgr_host} | A |
|
||||
|glance-api.conf | sync_server_port=9595 | A |
|
||||
|glance-api.conf | sync_enabled=True | A |
|
||||
|glance-sync.conf | cascading_endpoint_url=${glance_api_endpoint_url} | M |
|
||||
|glance-sync.conf | sync_strategy=ALL | M |
|
||||
|glance-sync.conf | auth_host=${keystone_host} | M |
|
||||
3. Re-launch services on cacading openstack, like:
|
||||
|
||||
`service openstack-glance-api restart `
|
||||
`service openstack-glance-registry restart `
|
||||
`python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & `
|
||||
|
||||
* **Automatic Installation**
|
||||
|
||||
1. Enter the glance-patch installation dir: `cd ./tricircle/icehouse-patches/glance/glance_location_patch/installation` .
|
||||
2. Optional, modify the shell script variable: `_PYTHON_INSTALL_DIR` .
|
||||
3. Run the install script: `sh install.sh`
|
||||
4. Enter the glancesync installation dir: `cd ./tricircle/glancesync/installation` .
|
||||
5. Modify the cascading&cascaded glances' store scheme configuration, which is in the file: `./tricircle/glancesync/etc/glance/glance_store.yaml` .
|
||||
6. Optional, modify the config options in shell script: `sync_enabled=True`, `sync_server_port=9595`, `sync_server_host=127.0.0.1` with the proper values.
|
||||
7. Run the install script: `sh install.sh`
|
||||
|
||||
Configurations
|
||||
--------------
|
||||
|
||||
Besides glance-api.conf file, we add some new config files. They are described separately.
|
||||
|
||||
- In glance-api.conf, three options added:
|
||||
|
||||
[DEFAULT]
|
||||
|
||||
# Indicate whether use the image sync, default value is False.
|
||||
#If configuring on cascading layer, this value should be True.
|
||||
sync_enabled = True
|
||||
|
||||
#The sync server 's port number, default is 9595.
|
||||
sync_server_port = 9595
|
||||
|
||||
#The sync server's host name (or ip address)
|
||||
sync_server_host = 127.0.0.1
|
||||
|
||||
*Besides, the option show_multiple_locations should be set to true.
|
||||
- In glance-sync.conf which newly increased, the options is similar with glance-registry.conf except:
|
||||
|
||||
[DEFAULT]
|
||||
|
||||
#How to sync the image, the value can be ["None", "ALL", "USER"]
|
||||
#When "ALL" is chosen, it means syncing to all the cascaded glances;
|
||||
#When "USER" is chosen, it means syncing according to the user's role, project, etc.
|
||||
sync_strategy = ALL
|
||||
|
||||
#What the cascading glance endpoint url is .(Note that this value should be consistent with what in keystone).
|
||||
cascading_endpoint_url = http://127.0.0.1:9292/
|
||||
|
||||
#when snapshot sync, set the timeout time(second) of snapshot 's status
|
||||
#changing into 'active'.
|
||||
snapshot_timeout = 300
|
||||
|
||||
#when snapshot sync, set the polling interval time(second) to check the
|
||||
#snapshot's status.
|
||||
snapshot_sleep_interval = 10
|
||||
|
||||
#When sync task fails, set the retry times.
|
||||
task_retry_times = 0
|
||||
|
||||
#When copying image data using 'scp' between filesystems, set the timeout
|
||||
#time of the copy.
|
||||
scp_copy_timeout = 3600
|
||||
|
||||
#When snapshot, one can set the specific regions in which the snapshot
|
||||
#will sync to. (e.g. physicalOpenstack001, physicalOpenstack002)
|
||||
snapshot_region_names =
|
||||
|
||||
- Last but also important, we add a yaml file for config the store backend's copy : glance_store.yaml in cascading glance.
|
||||
these config correspond to various store scheme (at present, only filesystem is supported), the values
|
||||
are based on your environment, so you have to config it before installation or restart the glance-sync
|
||||
when modify it.
|
||||
|
||||
|
||||
|
||||
|
10
glancesync/etc/glance-sync
Normal file
10
glancesync/etc/glance-sync
Normal file
@ -0,0 +1,10 @@
|
||||
#!/usr/bin/python
|
||||
# PBR Generated from 'console_scripts'
|
||||
|
||||
import sys
|
||||
|
||||
from glance.cmd.sync import main
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
35
glancesync/etc/glance/glance-sync-paste.ini
Normal file
35
glancesync/etc/glance/glance-sync-paste.ini
Normal file
@ -0,0 +1,35 @@
|
||||
# Use this pipeline for no auth - DEFAULT
|
||||
[pipeline:glance-sync]
|
||||
pipeline = versionnegotiation unauthenticated-context rootapp
|
||||
|
||||
[filter:unauthenticated-context]
|
||||
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
|
||||
|
||||
# Use this pipeline for keystone auth
|
||||
[pipeline:glance-sync-keystone]
|
||||
pipeline = versionnegotiation authtoken context rootapp
|
||||
|
||||
# Use this pipeline for authZ only. This means that the registry will treat a
|
||||
# user as authenticated without making requests to keystone to reauthenticate
|
||||
# the user.
|
||||
[pipeline:glance-sync-trusted-auth]
|
||||
pipeline = versionnegotiation context rootapp
|
||||
|
||||
[composite:rootapp]
|
||||
paste.composite_factory = glance.sync.api:root_app_factory
|
||||
/v1: syncv1app
|
||||
|
||||
[app:syncv1app]
|
||||
paste.app_factory = glance.sync.api.v1:API.factory
|
||||
|
||||
[filter:context]
|
||||
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
|
||||
|
||||
[filter:versionnegotiation]
|
||||
paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
|
||||
|
||||
[filter:unauthenticated-context]
|
||||
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
57
glancesync/etc/glance/glance-sync.conf
Normal file
57
glancesync/etc/glance/glance-sync.conf
Normal file
@ -0,0 +1,57 @@
|
||||
[DEFAULT]
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
debug = True
|
||||
|
||||
# Address to bind the API server
|
||||
bind_host = 0.0.0.0
|
||||
|
||||
# Port the bind the API server to
|
||||
bind_port = 9595
|
||||
|
||||
# Log to this file. Make sure you do not set the same log file for both the API
|
||||
# and registry servers!
|
||||
#
|
||||
# If `log_file` is omitted and `use_syslog` is false, then log messages are
|
||||
# sent to stdout as a fallback.
|
||||
log_file = /var/log/glance/sync.log
|
||||
|
||||
# Backlog requests when creating socket
|
||||
backlog = 4096
|
||||
|
||||
#How to sync the image, the value can be ["None", "ALL", "USER"]
|
||||
#When "ALL" choosen, means to sync to all the cascaded glances;
|
||||
#When "USER" choosen, means according to user's role, project, etc.
|
||||
sync_strategy = None
|
||||
|
||||
#What the cascading glance endpoint is .
|
||||
cascading_endpoint_url = http://127.0.0.1:9292/
|
||||
|
||||
#when snapshot sync, set the timeout time(second) of snapshot 's status
|
||||
#changing into 'active'.
|
||||
snapshot_timeout = 300
|
||||
|
||||
#when snapshot sync, set the polling interval time(second) to check the
|
||||
#snapshot's status.
|
||||
snapshot_sleep_interval = 10
|
||||
|
||||
#When sync task fails, set the retry times.
|
||||
task_retry_times = 0
|
||||
|
||||
#When copy image data using 'scp' between filesystmes, set the timeout
|
||||
#time of the copy.
|
||||
scp_copy_timeout = 3600
|
||||
|
||||
#When snapshot, one can set the specific regions in which the snapshot
|
||||
#will sync to.
|
||||
snapshot_region_names = physicalOpenstack001, physicalOpenstack002
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
admin_tenant_name = admin
|
||||
admin_user = glance
|
||||
admin_password = glance
|
||||
[paste_deploy]
|
||||
config_file = /etc/glance/glance-sync-paste.ini
|
||||
flavor=keystone
|
29
glancesync/etc/glance/glance_store.yaml
Normal file
29
glancesync/etc/glance/glance_store.yaml
Normal file
@ -0,0 +1,29 @@
|
||||
---
|
||||
glances:
|
||||
- name: master
|
||||
service_ip: "127.0.0.1"
|
||||
schemes:
|
||||
- name: http
|
||||
parameters:
|
||||
netloc: '127.0.0.1:8800'
|
||||
path: '/'
|
||||
image_name: 'test.img'
|
||||
- name: filesystem
|
||||
parameters:
|
||||
host: '127.0.0.1'
|
||||
datadir: '/var/lib/glance/images/'
|
||||
login_user: 'glance'
|
||||
login_password: 'glance'
|
||||
- name: slave1
|
||||
service_ip: "0.0.0.0"
|
||||
schemes:
|
||||
- name: http
|
||||
parameters:
|
||||
netloc: '0.0.0.0:8800'
|
||||
path: '/'
|
||||
- name: filesystem
|
||||
parameters:
|
||||
host: '0.0.0.0'
|
||||
datadir: '/var/lib/glance/images/'
|
||||
login_user: 'glance'
|
||||
login_password: 'glance'
|
59
glancesync/glance/cmd/sync.py
Normal file
59
glancesync/glance/cmd/sync.py
Normal file
@ -0,0 +1,59 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
"""
|
||||
Reference implementation server for Glance Sync
|
||||
"""
|
||||
|
||||
import eventlet
|
||||
import os
|
||||
import sys
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
# Monkey patch socket and time
|
||||
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
|
||||
|
||||
# If ../glance/__init__.py exists, add ../ to Python search path, so that
|
||||
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
os.pardir,
|
||||
os.pardir))
|
||||
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
from glance.common import config
|
||||
from glance.common import exception
|
||||
from glance.common import wsgi
|
||||
from glance.openstack.common import log
|
||||
import glance.sync
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
config.parse_args(default_config_files='glance-sync.conf')
|
||||
log.setup('glance')
|
||||
|
||||
server = wsgi.Server()
|
||||
server.start(config.load_paste_app('glance-sync'), default_port=9595)
|
||||
server.wait()
|
||||
except RuntimeError as e:
|
||||
sys.exit("ERROR: %s" % e)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
257
glancesync/glance/sync/__init__.py
Normal file
257
glancesync/glance/sync/__init__.py
Normal file
@ -0,0 +1,257 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
import glance.context
|
||||
import glance.domain.proxy
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.sync.clients import Clients as clients
|
||||
from glance.sync import utils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)

# Image base attributes copied verbatim when creating the cascaded
# counterpart of a cascading image (see get_adding_image_properties).
_V2_IMAGE_CREATE_PROPERTIES = ['container_format', 'disk_format', 'min_disk',
                               'min_ram', 'name', 'virtual_size', 'visibility',
                               'protected']

# Image base attributes whose changes are propagated on a metadata save
# (see ImageRepoProxy._sync_saving_metadata).
_V2_IMAGE_UPDATE_PROPERTIES = ['container_format', 'disk_format', 'min_disk',
                               'min_ram', 'name']
|
||||
|
||||
|
||||
def _check_trigger_sync(pre_image, image):
    """Decide whether this save is an upload / first location patch.

    True-ish when the image was previously 'saving' or 'queued', now has
    data, and carries at least one non-glance (direct backend) location.
    """
    if pre_image.status not in ('saving', 'queued'):
        return False
    if not image.size:
        return image.size
    return [loc for loc in image.locations
            if not utils.is_glance_location(loc['url'])]
|
||||
|
||||
|
||||
def _from_snapshot_request(pre_image, image):
|
||||
"""
|
||||
when patch location, check if it's snapshot-sync case.
|
||||
"""
|
||||
if pre_image.status == 'queued' and len(image.locations) == 1:
|
||||
loc_meta = image.locations[0]['metadata']
|
||||
return loc_meta and loc_meta.get('image_from', None) in ['snapshot',
|
||||
'volume']
|
||||
|
||||
|
||||
def get_adding_image_properties(image):
    """Collect the creation payload used to mirror *image* elsewhere.

    Copies the whitelisted base attributes (skipping empty values and the
    literal string 'None'), every extra property, and any tags into a
    ``{'body': {...}}`` dict suitable for an image-create call.
    """
    body = {}
    for prop in _V2_IMAGE_CREATE_PROPERTIES:
        try:
            value = getattr(image, prop, None)
            if value and value != 'None':
                body[prop] = value
        except KeyError:
            # NOTE(review): the original guarded against KeyError here,
            # presumably for dict-backed attribute proxies — kept as-is.
            pass

    extra_props = getattr(image, 'extra_properties') or None
    if extra_props:
        for prop_name in extra_props.keys():
            body[prop_name] = extra_props[prop_name]

    tags = list(image.tags) or []
    if tags:
        body['tags'] = tags
    return {'body': body}
|
||||
|
||||
|
||||
def get_existing_image_locations(image):
    """Wrap the image's current locations as kwargs for a remove call."""
    return dict(locations=image.locations)
|
||||
|
||||
|
||||
class ImageRepoProxy(glance.domain.proxy.Repo):
    """Repo proxy that mirrors image changes to cascaded glances.

    Wraps the normal image repo; on save/remove it forwards metadata,
    location, and data changes to the sync service via ``sync_client``.
    """

    def __init__(self, image_repo, context, sync_api):
        # sync_api provides the client used for all cascaded-sync calls.
        self.image_repo = image_repo
        self.context = context
        self.sync_client = sync_api.get_sync_client(context)
        proxy_kwargs = {'context': context, 'sync_api': sync_api}
        super(ImageRepoProxy, self).__init__(image_repo,
                                             item_proxy_class=ImageProxy,
                                             item_proxy_kwargs=proxy_kwargs)

    def _sync_saving_metadata(self, pre_image, image):
        """Diff *pre_image* vs *image* and push metadata changes to sync.

        Builds three change sets — base properties, extra properties, and
        tags — and sends them in one update_image_matedata call. Does
        nothing when no difference is found.
        """
        kwargs = {}
        remove_keys = []
        changes = {}
        """
        image base properties
        """
        for key in _V2_IMAGE_UPDATE_PROPERTIES:
            pre_value = getattr(pre_image, key, None)
            my_value = getattr(image, key, None)

            # Skip when both are empty or the value is unchanged.
            if not my_value and not pre_value or my_value == pre_value:
                continue
            # A value that went from set to empty is a removal.
            if not my_value and pre_value:
                remove_keys.append(key)
            else:
                changes[key] = my_value

        """
        image extra_properties
        """
        pre_props = pre_image.extra_properties or {}
        _properties = image.extra_properties or {}
        # Partition keys into newly-added, removed, and possibly-changed.
        addset = set(_properties.keys()).difference(set(pre_props.keys()))
        removeset = set(pre_props.keys()).difference(set(_properties.keys()))
        mayrepset = set(pre_props.keys()).intersection(set(_properties.keys()))

        for key in addset:
            changes[key] = _properties[key]

        for key in removeset:
            remove_keys.append(key)

        for key in mayrepset:
            if _properties[key] == pre_props[key]:
                continue
            changes[key] = _properties[key]

        """
        image tags
        """
        # NOTE: tag_dict may legitimately carry BOTH 'add' and 'delete'.
        tag_dict = {}
        pre_tags = pre_image.tags
        new_tags = image.tags

        added_tags = set(new_tags) - set(pre_tags)
        removed_tags = set(pre_tags) - set(new_tags)
        if added_tags:
            tag_dict['add'] = added_tags
        if removed_tags:
            tag_dict['delete'] = removed_tags
        if tag_dict:
            kwargs['tags'] = tag_dict

        kwargs['changes'] = changes
        kwargs['removes'] = remove_keys
        if not changes and not remove_keys and not tag_dict:
            return
        LOG.debug(_('In image %s, some properties changed, sync...')
                  % (image.image_id))
        # NOTE(review): 'matedata' is a typo, but presumably matches the
        # sync client's method name — do not rename only on this side.
        self.sync_client.update_image_matedata(image.image_id, **kwargs)

    def _try_sync_locations(self, pre_image, image):
        """Propagate location changes (clear / insert / delete) to sync."""
        image_id = image.image_id
        """
        image locations
        """
        # NOTE(review): locations_dict is never used in this method.
        locations_dict = {}
        pre_locs = pre_image.locations
        _locs = image.locations

        """
        if all locations of cascading removed, the image status become 'queued'
        so the cascaded images should be 'queued' too. we replace all locations
        with '[]'
        """
        if pre_locs and not _locs:
            LOG.debug(_('The image %s all locations removed, sync...')
                      % (image_id))
            self.sync_client.sync_locations(image_id,
                                            action='CLEAR',
                                            locs=pre_locs)
            return

        # Compute the symmetric difference between old and new locations.
        added_locs = []
        removed_locs = []
        for _loc in pre_locs:
            if _loc in _locs:
                continue
            removed_locs.append(_loc)

        for _loc in _locs:
            if _loc in pre_locs:
                continue
            added_locs.append(_loc)

        if added_locs:
            # Snapshot-origin patches also need the full create payload.
            if _from_snapshot_request(pre_image, image):
                add_kwargs = get_adding_image_properties(image)
            else:
                add_kwargs = {}
            LOG.debug(_('The image %s add locations, sync...') % (image_id))
            self.sync_client.sync_locations(image_id,
                                            action='INSERT',
                                            locs=added_locs,
                                            **add_kwargs)
        elif removed_locs:
            LOG.debug(_('The image %s remove some locations, sync...')
                      % (image_id))
            self.sync_client.sync_locations(image_id,
                                            action='DELETE',
                                            locs=removed_locs)

    def save(self, image):
        """Save through to the real repo, then trigger the right sync.

        An upload-completion save triggers a full data sync; any other
        save triggers location sync and (for active images) metadata sync.
        """
        pre_image = self.get(image.image_id)
        result = super(ImageRepoProxy, self).save(image)

        image_id = image.image_id
        if _check_trigger_sync(pre_image, image):
            add_kwargs = get_adding_image_properties(image)
            self.sync_client.sync_data(image_id, **add_kwargs)
            LOG.debug(_('Sync data when image status changes ACTIVE, the '
                        'image id is %s.' % (image_id)))
        else:
            """
            In case of add/remove/replace locations property.
            """
            self._try_sync_locations(pre_image, image)
            """
            In case of sync the glance's properties
            """
            if image.status == 'active':
                self._sync_saving_metadata(pre_image, image)

        return result

    def remove(self, image):
        """Remove through to the real repo, then delete cascaded copies."""
        result = super(ImageRepoProxy, self).remove(image)
        LOG.debug(_('Image %s removed, sync...') % (image.image_id))
        delete_kwargs = get_existing_image_locations(image)
        self.sync_client.remove_image(image.image_id, **delete_kwargs)
        return result
|
||||
|
||||
|
||||
class ImageFactoryProxy(glance.domain.proxy.ImageFactory):
    """Factory proxy that wraps newly created images in ImageProxy."""

    def __init__(self, factory, context, sync_api):
        self.context = context
        self.sync_api = sync_api
        super(ImageFactoryProxy, self).__init__(
            factory,
            proxy_class=ImageProxy,
            proxy_kwargs={'context': context, 'sync_api': sync_api})

    def new_image(self, **kwargs):
        """Delegate image creation unchanged to the wrapped factory."""
        return super(ImageFactoryProxy, self).new_image(**kwargs)
|
||||
|
||||
|
||||
class ImageProxy(glance.domain.proxy.Image):
    """Per-image proxy carrying the request context and sync API handle."""

    def __init__(self, image, context, sync_api=None):
        self.image = image
        self.context = context
        self.sync_api = sync_api
        super(ImageProxy, self).__init__(image)
|
22
glancesync/glance/sync/api/__init__.py
Normal file
22
glancesync/glance/sync/api/__init__.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import paste.urlmap
|
||||
|
||||
|
||||
def root_app_factory(loader, global_conf, **local_conf):
    """Paste factory: build the root WSGI app from the urlmap composite."""
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
|
59
glancesync/glance/sync/api/v1/__init__.py
Normal file
59
glancesync/glance/sync/api/v1/__init__.py
Normal file
@ -0,0 +1,59 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
from glance.common import wsgi
|
||||
from glance.sync.api.v1 import images
|
||||
|
||||
|
||||
def init(mapper):
    """Register the sync API routes on *mapper*.

    All routes dispatch to the images resource; each tuple below is
    (path, controller action, HTTP method).
    """
    images_resource = images.create_resource()

    routes = [
        ('/cascaded-eps', 'endpoints', 'POST'),
        ('/images/{id}', 'update', 'PATCH'),
        ('/images/{id}', 'remove', 'DELETE'),
        ('/images/{id}', 'upload', 'PUT'),
        ('/images/{id}/location', 'sync_loc', 'PUT'),
    ]
    for path, action, method in routes:
        mapper.connect(path,
                       controller=images_resource,
                       action=action,
                       conditions={'method': [method]})
|
||||
|
||||
|
||||
class API(wsgi.Router):

    """WSGI entry point for all Registry requests."""

    def __init__(self, mapper):
        # Fall back to a fresh APIMapper when no mapper was supplied.
        if not mapper:
            mapper = wsgi.APIMapper()
        init(mapper)
        super(API, self).__init__(mapper)
|
95
glancesync/glance/sync/api/v1/images.py
Normal file
95
glancesync/glance/sync/api/v1/images.py
Normal file
@ -0,0 +1,95 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from glance.common import exception
|
||||
from glance.common import wsgi
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.sync.base import SyncManagerV2 as sync_manager
|
||||
from glance.sync import utils as utils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Controller(object):
    """WSGI controller for the sync API; delegates to SyncManagerV2."""

    def __init__(self):
        # Start the background sync manager as soon as the controller
        # is constructed (once per WSGI resource).
        self.sync_manager = sync_manager()
        self.sync_manager.start()

    def test(self, req):
        """Trivial liveness endpoint."""
        return {'body': 'for test'}

    def update(self, req, id, body):
        """Sync a metadata SAVE for image *id* to cascaded glances."""
        # NOTE(review): the two string fragments join without a space, so
        # the logged message reads "...forimage_id..." — cosmetic only.
        LOG.debug(_('sync client start run UPDATE metadata operation for'
                    'image_id: %s' % (id)))
        self.sync_manager.sync_image_metadata(id, req.context.auth_tok, 'SAVE',
                                              **body)
        return dict({'body': id})

    def remove(self, req, id, body):
        """Sync a DELETE of image *id* to cascaded glances."""
        LOG.debug(_('sync client start run DELETE operation for image_id: %s'
                    % (id)))
        self.sync_manager.sync_image_metadata(id, req.context.auth_tok,
                                              'DELETE', **body)
        return dict({'body': id})

    def upload(self, req, id, body):
        """Sync the uploaded data of image *id* to cascaded glances."""
        LOG.debug(_('sync client start run UPLOAD operation for image_id: %s'
                    % (id)))
        self.sync_manager.sync_image_data(id, req.context.auth_tok, **body)
        return dict({'body': id})

    def sync_loc(self, req, id, body):
        """Dispatch a location change (INSERT/DELETE/CLEAR) for image *id*.

        Unknown actions are silently ignored and only echo the id back.
        """
        action = body['action']
        locs = body['locations']
        LOG.debug(_('sync client start run SYNC-LOC operation for image_id: %s'
                    % (id)))
        if action == 'INSERT':
            self.sync_manager.adding_locations(id, req.context.auth_tok, locs,
                                               **body)
        elif action == 'DELETE':
            self.sync_manager.removing_locations(id,
                                                 req.context.auth_tok,
                                                 locs)
        elif action == 'CLEAR':
            self.sync_manager.clear_all_locations(id,
                                                  req.context.auth_tok,
                                                  locs)

        return dict({'body': id})

    def endpoints(self, req, body):
        """Return the cascaded glance endpoints visible to the caller.

        Regions may arrive as a query param or in the body; a single
        region string is normalized to a one-element list.
        """
        regions = req.params.get('regions', [])
        if not regions:
            regions = body.pop('regions', [])
        if not isinstance(regions, list):
            regions = [regions]
        LOG.debug(_('get cacaded endpoints of user/tenant: %s'
                    % (req.context.user or req.context.tenant or 'NONE')))
        return dict(eps=utils.get_endpoints(req.context.auth_tok,
                                            req.context.tenant,
                                            region_names=regions) or [])
|
||||
|
||||
|
||||
def create_resource():
    """Build the WSGI resource wrapping the sync Controller with JSON I/O."""
    return wsgi.Resource(Controller(),
                         wsgi.JSONRequestDeserializer(),
                         wsgi.JSONResponseSerializer())
|
606
glancesync/glance/sync/base.py
Normal file
606
glancesync/glance/sync/base.py
Normal file
@ -0,0 +1,606 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import copy
|
||||
import httplib
|
||||
import Queue
|
||||
import threading
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from oslo.config import cfg
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from glance.common import exception
|
||||
from glance.common import utils
|
||||
from glance.openstack.common import importutils
|
||||
from glance.openstack.common import jsonutils
|
||||
from glance.openstack.common import threadgroup
|
||||
from glance.openstack.common import timeutils
|
||||
import glance.openstack.common.log as logging
|
||||
|
||||
from glance.sync import utils as s_utils
|
||||
from glance.sync.clients import Clients as clients
|
||||
from glance.sync.store.driver import StoreFactory as s_factory
|
||||
from glance.sync.store.location import LocationFactory as l_factory
|
||||
import glance.sync.store.glance_store as glance_store
|
||||
from glance.sync.task import TaskObject
|
||||
from glance.sync.task import PeriodicTask
|
||||
|
||||
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# Pull the sync-related options (declared in glance.common.config) into
# CONF so they are registered before this module reads them.
CONF.import_opt('sync_strategy', 'glance.common.config', group='sync')
CONF.import_opt('task_retry_times', 'glance.common.config', group='sync')
CONF.import_opt('snapshot_timeout', 'glance.common.config', group='sync')
CONF.import_opt('snapshot_sleep_interval', 'glance.common.config',
                group='sync')
|
||||
|
||||
|
||||
def get_image_servcie():
    """Return the ImageService class used for raw REST calls.

    NOTE: the name is misspelled ('servcie'), but it is part of the
    public interface of this module — callers reference it as-is.
    """
    return ImageService
|
||||
|
||||
|
||||
def create_glance_client(auth_token, url):
    """Create a python-glanceclient instance bound to *url*."""
    return clients(auth_token).glance(url=url)
|
||||
|
||||
|
||||
def create_self_glance_client(auth_token):
    """Create a glance client pointing at the cascading (top) glance."""
    cascading_url = s_utils.get_cascading_endpoint_url()
    return create_glance_client(auth_token, cascading_url)
|
||||
|
||||
|
||||
def create_restful_client(auth_token, url):
    """Create a raw REST ImageService client for the glance at *url*."""
    netloc = urlparse.urlparse(url).netloc
    return _create_restful_client(auth_token, netloc)
|
||||
|
||||
|
||||
def create_self_restful_client(auth_token):
    """Create a raw REST client for the cascading glance endpoint."""
    cascading_url = s_utils.get_cascading_endpoint_url()
    return create_restful_client(auth_token, cascading_url)
|
||||
|
||||
|
||||
def _create_restful_client(auth_token, url):
    """Build an ImageService over an HTTP connection to 'host:port' *url*."""
    host, port = url.split(':')
    connection = httplib.HTTPConnection(host.encode(), port.encode())
    service_cls = get_image_servcie()
    return service_cls(connection, auth_token)
|
||||
|
||||
|
||||
def get_mappings_from_image(auth_token, image_id):
    """Return {cascaded endpoint url: cascaded image id} for *image_id*.

    Looks the image up on the cascading glance; an image with no
    locations yields an empty mapping.
    """
    glance_client = create_self_glance_client(auth_token)
    locations = glance_client.images.get(image_id).locations
    if not locations:
        return {}
    return get_mappings_from_locations(locations)
|
||||
|
||||
|
||||
def get_mappings_from_locations(locations):
    """Map each glance-style location to its cascaded image id.

    Non-glance locations and glance locations whose metadata lacks an
    'image_id' entry are skipped.
    """
    mappings = {}
    for location in locations:
        if not s_utils.is_glance_location(location['url']):
            continue
        cascaded_id = location['metadata'].get('image_id')
        if not cascaded_id:
            continue
        endpoint_url = s_utils.create_ep_by_loc(location)
        mappings[endpoint_url] = cascaded_id
    return mappings
|
||||
|
||||
|
||||
# Exceptions raised by ImageService._http_request to map HTTP error
# responses onto sync-level failures.

class AuthenticationException(Exception):
    """Raised for HTTP 401/403 responses from a glance server."""
    pass


class ImageAlreadyPresentException(Exception):
    """Raised for HTTP 409 (conflict) responses from a glance server."""
    pass


class ServerErrorException(Exception):
    """Raised for HTTP 400/500 responses from a glance server."""
    pass


class UploadException(Exception):
    """Upload failure marker; no raiser is visible in this module chunk."""
    pass
|
||||
|
||||
|
||||
class ImageService(object):
    """Minimal raw-HTTP client for a glance server.

    Speaks the v2 JSON-patch API over a single pipelined httplib
    connection, translating error status codes into the module's
    exception types.
    """

    def __init__(self, conn, auth_token):
        """Initialize the ImageService.

        conn: a httplib.HTTPConnection to the glance server
        auth_token: authentication token to pass in the x-auth-token header
        """
        self.auth_token = auth_token
        self.conn = conn

    def _http_request(self, method, url, headers, body,
                      ignore_result_body=False):
        """Perform an HTTP request against the server.

        method: the HTTP method to use
        url: the URL to request (not including server portion)
        headers: headers for the request
        body: body to send with the request
        ignore_result_body: the body of the result will be ignored

        Returns: a httplib response object

        Raises ServerErrorException (400/500), AuthenticationException
        (401/403) or ImageAlreadyPresentException (409) on error codes.
        """
        if self.auth_token:
            headers.setdefault('x-auth-token', self.auth_token)

        LOG.debug(_('Request: %(method)s http://%(server)s:%(port)s'
                    '%(url)s with headers %(headers)s')
                  % {'method': method,
                     'server': self.conn.host,
                     'port': self.conn.port,
                     'url': url,
                     'headers': repr(headers)})
        self.conn.request(method, url, body, headers)

        response = self.conn.getresponse()
        headers = self._header_list_to_dict(response.getheaders())
        code = response.status
        code_description = httplib.responses[code]
        LOG.debug(_('Response: %(code)s %(status)s %(headers)s')
                  % {'code': code,
                     'status': code_description,
                     'headers': repr(headers)})

        if code in [400, 500]:
            raise ServerErrorException(response.read())

        if code in [401, 403]:
            raise AuthenticationException(response.read())

        if code == 409:
            raise ImageAlreadyPresentException(response.read())

        if ignore_result_body:
            # NOTE: because we are pipelining requests through a single HTTP
            # connection, httplib requires that we read the response body
            # before we can make another request. If the caller knows they
            # don't care about the body, they can ask us to do that for them.
            response.read()
        return response

    @staticmethod
    def _header_list_to_dict(headers):
        """Expand a list of headers into a dictionary.

        headers: a list of [(key, value), (key, value), (key, value)]

        Returns: a dictionary representation of the list, with the
        'x-image-meta-' prefix stripped and property headers collected
        under a nested 'properties' dict.
        """
        d = {}
        for (header, value) in headers:
            if header.startswith('x-image-meta-property-'):
                prop = header.replace('x-image-meta-property-', '')
                d.setdefault('properties', {})
                d['properties'][prop] = value
            else:
                d[header.replace('x-image-meta-', '')] = value
        return d

    @staticmethod
    def _dict_to_headers(d):
        """Convert a dictionary into one suitable for a HTTP request.

        d: a dictionary

        Returns: the same dictionary, with x-image-meta added to every key
        (properties become x-image-meta-property-*; None becomes '').
        """
        h = {}
        for key in d:
            if key == 'properties':
                for subkey in d[key]:
                    if d[key][subkey] is None:
                        h['x-image-meta-property-%s' % subkey] = ''
                    else:
                        h['x-image-meta-property-%s' % subkey] = d[key][subkey]

            else:
                h['x-image-meta-%s' % key] = d[key]
        return h

    def add_location(self, image_uuid, path_val, metadata=None):
        """
        add an actual location

        Sends a v2 JSON-patch appending {url: path_val, metadata} to the
        image's locations list; returns the httplib response.
        """
        LOG.debug(_('call restful api to add location: url is %s' % path_val))
        metadata = metadata or {}
        url = '/v2/images/%s' % image_uuid
        hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
        body = []
        value = {'url': path_val, 'metadata': metadata}
        body.append({'op': 'add', 'path': '/locations/-', 'value': value})
        return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body))

    def clear_locations(self, image_uuid):
        """
        clear all the location infos, make the image status be 'queued'.

        Sends a v2 JSON-patch replacing the locations list with [];
        returns the httplib response.
        """
        LOG.debug(_('call restful api to clear image location: image id is %s'
                    % image_uuid))
        url = '/v2/images/%s' % image_uuid
        hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
        body = []
        body.append({'op': 'replace', 'path': '/locations', 'value': []})
        return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body))
|
||||
|
||||
|
||||
class MetadataHelper(object):
    """Replays image metadata CRUD operations on a cascaded glance.

    Used by the sync manager to apply create/save/delete requests on a
    cascaded endpoint through a python-glanceclient instance.
    """

    def execute(self, auth_token, endpoint, action_name='CREATE',
                image_id=None, **kwargs):
        """Dispatch *action_name* ('CREATE'/'SAVE'/'DELETE') to its handler.

        Returns the handler's result, or None for an unknown action.
        """
        glance_client = create_glance_client(auth_token, endpoint)
        action = action_name.upper()
        if action == 'CREATE':
            return self._do_create_action(glance_client, **kwargs)
        if action == 'SAVE':
            return self._do_save_action(glance_client, image_id, **kwargs)
        if action == 'DELETE':
            return self._do_delete_action(glance_client, image_id, **kwargs)

        return None

    @staticmethod
    def _fetch_params(keys, **kwargs):
        """Return a tuple of kwargs values for *keys* (None when absent)."""
        return tuple([kwargs.get(key, None) for key in keys])

    def _do_create_action(self, glance_client, **kwargs):
        """Create a new image from kwargs['body'] and return its id."""
        body = kwargs['body']
        new_image = glance_client.images.create(**body)
        return new_image.id

    def _do_save_action(self, glance_client, image_id, **kwargs):
        """Apply property changes/removals and tag updates to *image_id*.

        Returns the refreshed image after the update.
        """
        keys = ['changes', 'removes', 'tags']
        changes, removes, tags = self._fetch_params(keys, **kwargs)
        if changes or removes:
            # Guard against a removals-only payload where 'changes' is None.
            glance_client.images.update(image_id,
                                        remove_props=removes,
                                        **(changes or {}))
        if tags:
            # BUGFIX: additions and deletions were previously if/elif, so a
            # payload carrying both (as _sync_saving_metadata can build)
            # silently dropped the deletions. Apply both independently.
            for tag in tags.get('add', None) or []:
                glance_client.image_tags.update(image_id, tag)
            for tag in tags.get('delete', None) or []:
                glance_client.image_tags.delete(image_id, tag)
        return glance_client.images.get(image_id)

    def _do_delete_action(self, glance_client, image_id, **kwargs):
        """Delete *image_id* on the cascaded glance."""
        return glance_client.images.delete(image_id)
|
||||
|
||||
|
||||
# Process-wide bounded queue of pending sync tasks, shared by all
# SyncManagerV2 instances; put_nowait raises Queue.Full past maxsize.
_task_queue = Queue.Queue(maxsize=150)
|
||||
|
||||
|
||||
class SyncManagerV2():
|
||||
|
||||
MAX_TASK_RETRY_TIMES = 1
|
||||
|
||||
def __init__(self):
|
||||
global _task_queue
|
||||
self.mete_helper = MetadataHelper()
|
||||
self.location_factory = l_factory()
|
||||
self.store_factory = s_factory()
|
||||
self.task_queue = _task_queue
|
||||
self.task_handler = None
|
||||
self.unhandle_task_list = []
|
||||
self.periodic_add_id_list = []
|
||||
self.periodic_add_done = True
|
||||
self._load_glance_store_cfg()
|
||||
self.ks_client = clients().keystone()
|
||||
self.create_new_periodic_task = False
|
||||
|
||||
def _load_glance_store_cfg(self):
|
||||
glance_store.setup_glance_stores()
|
||||
|
||||
def sync_image_metadata(self, image_id, auth_token, action, **kwargs):
|
||||
if not action or CONF.sync.sync_strategy == 'None':
|
||||
return
|
||||
kwargs['image_id'] = image_id
|
||||
if action == 'SAVE':
|
||||
self.task_queue.put_nowait(TaskObject.get_instance('meta_update',
|
||||
kwargs))
|
||||
elif action == 'DELETE':
|
||||
self.task_queue.put_nowait(TaskObject.get_instance('meta_remove',
|
||||
kwargs))
|
||||
|
||||
def sync_image_data(self, image_id, auth_token, eps=None, **kwargs):
|
||||
if CONF.sync.sync_strategy == 'None':
|
||||
return
|
||||
|
||||
kwargs['image_id'] = image_id
|
||||
cascading_ep = s_utils.get_cascading_endpoint_url()
|
||||
kwargs['cascading_ep'] = cascading_ep
|
||||
self.task_queue.put_nowait(TaskObject.get_instance('sync', kwargs))
|
||||
|
||||
def adding_locations(self, image_id, auth_token, locs, **kwargs):
|
||||
if CONF.sync.sync_strategy == 'None':
|
||||
return
|
||||
for loc in locs:
|
||||
if s_utils.is_glance_location(loc['url']):
|
||||
if s_utils.is_snapshot_location(loc):
|
||||
snapshot_ep = s_utils.create_ep_by_loc(loc)
|
||||
snapshot_id = s_utils.get_id_from_glance_loc(loc)
|
||||
snapshot_client = create_glance_client(auth_token,
|
||||
snapshot_ep)
|
||||
snapshot_image = snapshot_client.images.get(snapshot_id)
|
||||
_pre_check_time = timeutils.utcnow()
|
||||
_timout = CONF.sync.snapshot_timeout
|
||||
while not timeutils.is_older_than(_pre_check_time,
|
||||
_timout):
|
||||
if snapshot_image.status == 'active':
|
||||
break
|
||||
LOG.debug(_('Check snapshot not active, wait for %i'
|
||||
'second.'
|
||||
% CONF.sync.snapshot_sleep_interval))
|
||||
time.sleep(CONF.sync.snapshot_sleep_interval)
|
||||
snapshot_image = snapshot_client.images.get(
|
||||
snapshot_id)
|
||||
|
||||
if snapshot_image.status != 'active':
|
||||
LOG.error(_('Snapshot status to active Timeout'))
|
||||
return
|
||||
kwargs['image_id'] = image_id
|
||||
kwargs['snapshot_ep'] = snapshot_ep
|
||||
kwargs['snapshot_id'] = snapshot_id
|
||||
snapshot_task = TaskObject.get_instance('snapshot', kwargs)
|
||||
self.task_queue.put_nowait(snapshot_task)
|
||||
else:
|
||||
LOG.debug(_('patch a normal location %s to image %s'
|
||||
% (loc['url'], image_id)))
|
||||
input = {'image_id': image_id, 'location': loc}
|
||||
self.task_queue.put_nowait(TaskObject.get_instance('patch',
|
||||
input))
|
||||
|
||||
def removing_locations(self, image_id, auth_token, locs):
|
||||
if CONF.sync.sync_strategy == 'None':
|
||||
return
|
||||
locs = filter(lambda loc: s_utils.is_glance_location(loc['url']), locs)
|
||||
if not locs:
|
||||
return
|
||||
input = {'image_id': image_id, 'locations': locs}
|
||||
remove_locs_task = TaskObject.get_instance('locs_remove', input)
|
||||
self.task_queue.put_nowait(remove_locs_task)
|
||||
|
||||
def clear_all_locations(self, image_id, auth_token, locs):
|
||||
locs = filter(lambda loc: not s_utils.is_snapshot_location(loc), locs)
|
||||
self.removing_locations(image_id, auth_token, locs)
|
||||
|
||||
def create_new_cascaded_task(self, last_run_time=None):
|
||||
LOG.debug(_('new_cascaded periodic task has been created.'))
|
||||
glance_client = create_self_glance_client(self.ks_client.auth_token)
|
||||
filters = {'status': 'active'}
|
||||
image_list = glance_client.images.list(filters=filters)
|
||||
input = {}
|
||||
run_images = {}
|
||||
cascading_ep = s_utils.get_cascading_endpoint_url()
|
||||
input['cascading_ep'] = cascading_ep
|
||||
input['image_id'] = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
|
||||
all_ep_urls = s_utils.get_endpoints()
|
||||
for image in image_list:
|
||||
glance_urls = [loc['url'] for loc in image.locations
|
||||
if s_utils.is_glance_location(loc['url'])]
|
||||
lack_ep_urls = s_utils.calculate_lack_endpoints(all_ep_urls,
|
||||
glance_urls)
|
||||
if lack_ep_urls:
|
||||
image_core_props = s_utils.get_core_properties(image)
|
||||
run_images[image.id] = {'body': image_core_props,
|
||||
'locations': lack_ep_urls}
|
||||
if not run_images:
|
||||
LOG.debug(_('No images need to sync to new cascaded glances.'))
|
||||
input['images'] = run_images
|
||||
return TaskObject.get_instance('periodic_add', input,
|
||||
last_run_time=last_run_time)
|
||||
|
||||
@staticmethod
|
||||
def _fetch_params(keys, **kwargs):
|
||||
return tuple([kwargs.get(key, None) for key in keys])
|
||||
|
||||
def _get_candidate_path(self, auth_token, from_ep, image_id,
|
||||
scheme='file'):
|
||||
g_client = create_glance_client(auth_token, from_ep)
|
||||
image = g_client.images.get(image_id)
|
||||
locs = image.locations or []
|
||||
for loc in locs:
|
||||
if s_utils.is_glance_location(loc['url']):
|
||||
continue
|
||||
if loc['url'].startswith(scheme):
|
||||
if scheme == 'file':
|
||||
return loc['url'][len('file://'):]
|
||||
return loc['url']
|
||||
return None
|
||||
|
||||
def _do_image_data_copy(self, s_ep, d_ep, from_image_id, to_image_id,
|
||||
candidate_path=None):
|
||||
from_scheme, to_scheme = glance_store.choose_best_store_schemes(s_ep,
|
||||
d_ep)
|
||||
store_driver = self.store_factory.get_instance(from_scheme['name'],
|
||||
to_scheme['name'])
|
||||
from_params = from_scheme['parameters']
|
||||
from_params['image_id'] = from_image_id
|
||||
to_params = to_scheme['parameters']
|
||||
to_params['image_id'] = to_image_id
|
||||
from_location = self.location_factory.get_instance(from_scheme['name'],
|
||||
**from_params)
|
||||
to_location = self.location_factory.get_instance(to_scheme['name'],
|
||||
**to_params)
|
||||
return store_driver.copy_to(from_location, to_location,
|
||||
candidate_path=candidate_path)
|
||||
|
||||
def _patch_cascaded_location(self, auth_token, image_id,
                             cascaded_ep, cascaded_id, action=None):
    """Record the cascaded image's glance location on the cascading image."""
    client = create_self_restful_client(auth_token)
    location = s_utils.generate_glance_location(cascaded_ep, cascaded_id)
    # Append the token so the URL is reachable; without it a 404 is returned.
    location = location + '?auth_token=' + auth_token
    metadata = {'image_id': cascaded_id}
    if action:
        metadata['action'] = action
    client.add_location(image_id, location, metadata)
|
||||
|
||||
def meta_update(self, auth_token, cascaded_ep, image_id, **kwargs):
    """Propagate a metadata change ('SAVE' op) to the cascaded glance."""
    return self.mete_helper.execute(
        auth_token, cascaded_ep, 'SAVE', image_id, **kwargs)
|
||||
|
||||
def meta_delete(self, auth_token, cascaded_ep, image_id):
    """Propagate an image deletion ('DELETE' op) to the cascaded glance."""
    return self.mete_helper.execute(
        auth_token, cascaded_ep, 'DELETE', image_id)
|
||||
|
||||
def sync_image(self, auth_token, copy_ep, cascaded_ep, copy_image_id,
               cascading_image_id, **kwargs):
    """Replicate an image onto a cascaded glance.

    Steps: create the image record on the cascaded glance (kwargs carry
    the cascading image's properties), copy the image data from
    *copy_ep*, attach the copied data's location to the new cascaded
    image, and finally record the cascaded glance location on the
    cascading image. Returns the cascaded image id.

    Raises SyncServiceOperationError when the store copy fails.
    """
    # Firstly, create an image object with the cascading image's properties.
    cascaded_id = self.mete_helper.execute(auth_token, cascaded_ep,
                                           **kwargs)
    try:
        # Direct (non-glance) path of the source data, if one exists.
        c_path = self._get_candidate_path(auth_token, copy_ep,
                                          copy_image_id)
        # Execute the copy operation to move the image data across.
        copy_image_loc = self._do_image_data_copy(copy_ep,
                                                  cascaded_ep,
                                                  copy_image_id,
                                                  cascaded_id,
                                                  candidate_path=c_path)
        # Attach the copied image data location to the cascaded image.
        glance_client = create_restful_client(auth_token, cascaded_ep)
        glance_client.add_location(cascaded_id, copy_image_loc)
        # Record the cascaded glance location on the cascading image.

        msg = _("patch glance location to cascading image, with cascaded "
                "endpoint : %s, cascaded id: %s, cascading image id: %s." %
                (cascaded_ep, cascaded_id, cascading_image_id))
        LOG.debug(msg)
        self._patch_cascaded_location(auth_token,
                                      cascading_image_id,
                                      cascaded_ep,
                                      cascaded_id,
                                      action='upload')
        return cascaded_id
    except exception.SyncStoreCopyError as e:
        LOG.error(_("Exception occurs when syncing store copy."))
        raise exception.SyncServiceOperationError(reason=e.msg)
|
||||
|
||||
def do_snapshot(self, auth_token, snapshot_ep, cascaded_ep,
                snapshot_image_id, cascading_image_id, **kwargs):
    """Sync a snapshot image: delegate to sync_image with the snapshot
    endpoint acting as the copy source."""
    return self.sync_image(auth_token, snapshot_ep, cascaded_ep,
                           snapshot_image_id, cascading_image_id,
                           **kwargs)
|
||||
|
||||
def patch_location(self, image_id, cascaded_id, auth_token, cascaded_ep,
                   location):
    """Best-effort: add *location* to the cascaded image and, when that
    image is still 'queued', mirror the location on the cascading image.

    Failures are logged and swallowed so the sync flow continues.
    (Previously a bare ``except: pass`` silently discarded every
    exception, including SystemExit and KeyboardInterrupt.)
    """
    g_client = create_glance_client(auth_token, cascaded_ep)
    cascaded_image = g_client.images.get(cascaded_id)
    glance_client = create_restful_client(auth_token, cascaded_ep)
    try:
        glance_client.add_location(cascaded_id, location['url'])
        if cascaded_image.status == 'queued':
            self._patch_cascaded_location(auth_token,
                                          image_id,
                                          cascaded_ep,
                                          cascaded_id,
                                          action='patch')
    except Exception:
        # Keep the deliberate best-effort behavior, but record why the
        # patch failed instead of hiding the error entirely.
        LOG.exception(_('patch_location failed for image %s') % image_id)
|
||||
|
||||
def remove_loc(self, cascaded_id, auth_token, cascaded_ep):
    """Delete the cascaded image from the cascaded glance service."""
    client = create_glance_client(auth_token, cascaded_ep)
    client.images.delete(cascaded_id)
|
||||
|
||||
def start(self):
    """Launch a daemon thread that consumes the task queue.

    The thread runs tasks_handle(), which blocks on the queue forever;
    marking it a daemon lets the process exit without joining it.
    """
    worker = threading.Thread(target=self.tasks_handle)
    # Set the attribute directly; Thread.setDaemon() is deprecated.
    worker.daemon = True
    worker.start()
|
||||
|
||||
def tasks_handle(self):
    """Consume tasks from the queue forever, spawning each via eventlet.

    A deep copy of each accepted task is kept in unhandle_task_list so
    handle_tasks() can later match the finished task by image_id and
    start_time.
    """
    while True:
        _task = self.task_queue.get()
        if not isinstance(_task, TaskObject):
            # Fixed log text: it previously read 'task type valid.' on
            # this (invalid) path, which was misleading.
            LOG.error(_('task type not valid.'))
            continue
        LOG.debug(_('Task start to runs, task id is %s' % _task.id))
        _task.start_time = timeutils.strtime()
        self.unhandle_task_list.append(copy.deepcopy(_task))

        eventlet.spawn(_task.execute, self, self.ks_client.auth_token)
|
||||
|
||||
def handle_tasks(self, task_result):
    """Finish bookkeeping for a completed task.

    *task_result* is a dict with keys image_id/type/start_time/status.
    The matching pending task is located in unhandle_task_list by the
    (image_id, start_time) pair, removed, and the outcome logged at a
    severity matching its status.
    """
    t_image_id = task_result.get('image_id')
    t_type = task_result.get('type')
    t_start_time = task_result.get('start_time')
    t_status = task_result.get('status')

    # A task is identified by (image_id, start_time); anything other
    # than exactly one match means we cannot safely conclude it.
    # NOTE(review): len() on the filter() result relies on Python 2
    # filter returning a list — confirm before porting to Python 3.
    handling_tasks = filter(lambda t: t.image_id == t_image_id and
                            t.start_time == t_start_time,
                            self.unhandle_task_list)
    if not handling_tasks or len(handling_tasks) > 1:
        LOG.error(_('The task not exist or duplicate, can not go handle. '
                    'Info is image: %(id)s, op_type: %(type)s, run time: '
                    '%(time)s'
                    % {'id': t_image_id,
                       'type': t_type,
                       'time': t_start_time}
                    ))
        return

    task = handling_tasks[0]
    self.unhandle_task_list.remove(task)

    # Periodic tasks only report completion; one-shot tasks report a
    # terminal / param_error / error status.
    if isinstance(task, PeriodicTask):
        LOG.debug(_('The periodic task executed done, with op %(type)s '
                    'runs at time: %(start_time)s, the status is '
                    '%(status)s.' %
                    {'type': t_type,
                     'start_time': t_start_time,
                     'status': t_status
                     }))

    else:
        if t_status == 'terminal':
            LOG.debug(_('The task executed successful for image:'
                        '%(image_id)s with op %(type)s, which runs '
                        'at time: %(start_time)s' %
                        {'image_id': t_image_id,
                         'type': t_type,
                         'start_time': t_start_time
                         }))
        elif t_status == 'param_error':
            LOG.error(_('The task executed failed for params error. Image:'
                        '%(image_id)s with op %(type)s, which runs '
                        'at time: %(start_time)s' %
                        {'image_id': t_image_id,
                         'type': t_type,
                         'start_time': t_start_time
                         }))
        elif t_status == 'error':
            LOG.error(_('The task failed to execute. Detail info is: '
                        '%(image_id)s with op %(op_type)s run_time:'
                        '%(start_time)s' %
                        {'image_id': t_image_id,
                         'op_type': t_type,
                         'start_time': t_start_time
                         }))
|
46
glancesync/glance/sync/client/__init__.py
Normal file
46
glancesync/glance/sync/client/__init__.py
Normal file
@ -0,0 +1,46 @@
|
||||
from oslo.config import cfg

# Connection options for talking to the glance sync server (protocol,
# SSL material, timeout).
# NOTE(review): _() is not imported here; presumably glance installs the
# gettext builtin at startup — confirm against glance.openstack.common.
sync_client_opts = [
    cfg.StrOpt('sync_client_protocol', default='http',
               help=_('The protocol to use for communication with the '
                      'sync server. Either http or https.')),
    cfg.StrOpt('sync_client_key_file',
               help=_('The path to the key file to use in SSL connections '
                      'to the sync server.')),
    cfg.StrOpt('sync_client_cert_file',
               help=_('The path to the cert file to use in SSL connections '
                      'to the sync server.')),
    cfg.StrOpt('sync_client_ca_file',
               help=_('The path to the certifying authority cert file to '
                      'use in SSL connections to the sync server.')),
    cfg.BoolOpt('sync_client_insecure', default=False,
                help=_('When using SSL in connections to the sync server, '
                       'do not require validation via a certifying '
                       'authority.')),
    cfg.IntOpt('sync_client_timeout', default=600,
               help=_('The period of time, in seconds, that the API server '
                      'will wait for a sync request to complete. A '
                      'value of 0 implies no timeout.')),
]

# Authentication options: either pass through the user's token or use
# dedicated admin credentials against keystone.
sync_client_ctx_opts = [
    cfg.BoolOpt('sync_use_user_token', default=True,
                help=_('Whether to pass through the user token when '
                       'making requests to the sync.')),
    cfg.StrOpt('sync_admin_user', secret=True,
               help=_('The administrators user name.')),
    cfg.StrOpt('sync_admin_password', secret=True,
               help=_('The administrators password.')),
    cfg.StrOpt('sync_admin_tenant_name', secret=True,
               help=_('The tenant name of the administrative user.')),
    cfg.StrOpt('sync_auth_url',
               help=_('The URL to the keystone service.')),
    cfg.StrOpt('sync_auth_strategy', default='noauth',
               help=_('The strategy to use for authentication.')),
    cfg.StrOpt('sync_auth_region',
               help=_('The region for the authentication service.')),
]

CONF = cfg.CONF
CONF.register_opts(sync_client_opts)
CONF.register_opts(sync_client_ctx_opts)
|
0
glancesync/glance/sync/client/v1/__init__.py
Normal file
0
glancesync/glance/sync/client/v1/__init__.py
Normal file
124
glancesync/glance/sync/client/v1/api.py
Normal file
124
glancesync/glance/sync/client/v1/api.py
Normal file
@ -0,0 +1,124 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import os
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from glance.common import exception
|
||||
from glance.openstack.common import jsonutils
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.sync.client.v1 import client
|
||||
|
||||
CONF = cfg.CONF
# Sync server address, declared in glance.common.config.
CONF.import_opt('sync_server_host', 'glance.common.config')
CONF.import_opt('sync_server_port', 'glance.common.config')

# Option controlling explicit identity-header forwarding to the sync
# service (an alternative to the keystone auth_token middleware).
sync_client_ctx_opts = [
    cfg.BoolOpt('sync_send_identity_headers', default=False,
                help=_("Whether to pass through headers containing user "
                       "and tenant information when making requests to "
                       "the sync. This allows the sync to use the "
                       "context middleware without the keystoneclients' "
                       "auth_token middleware, removing calls to the keystone "
                       "auth service. It is recommended that when using this "
                       "option, secure communication between glance api and "
                       "glance sync is ensured by means other than "
                       "auth_token middleware.")),
]
CONF.register_opts(sync_client_ctx_opts)

# Pull in the sync client options declared in glance.sync.client.
_sync_client = 'glance.sync.client'
CONF.import_opt('sync_client_protocol', _sync_client)
CONF.import_opt('sync_client_key_file', _sync_client)
CONF.import_opt('sync_client_cert_file', _sync_client)
CONF.import_opt('sync_client_ca_file', _sync_client)
CONF.import_opt('sync_client_insecure', _sync_client)
CONF.import_opt('sync_client_timeout', _sync_client)
CONF.import_opt('sync_use_user_token', _sync_client)
CONF.import_opt('sync_admin_user', _sync_client)
CONF.import_opt('sync_admin_password', _sync_client)
CONF.import_opt('sync_admin_tenant_name', _sync_client)
CONF.import_opt('sync_auth_url', _sync_client)
CONF.import_opt('sync_auth_strategy', _sync_client)
CONF.import_opt('sync_auth_region', _sync_client)
CONF.import_opt('metadata_encryption_key', 'glance.common.config')

# Module-level cache populated by configure_sync_client() /
# configure_sync_admin_creds() and consumed by get_sync_client().
_CLIENT_CREDS = None
_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
|
||||
|
||||
|
||||
def get_sync_client(cxt):
    """Build a SyncClient for the request context *cxt*, honoring the
    configured token-passthrough / admin-credential / identity-header
    options. configure_sync_client() must have run first."""
    global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
    params = dict(_CLIENT_KWARGS)
    if CONF.sync_use_user_token:
        params['auth_tok'] = cxt.auth_tok
    if _CLIENT_CREDS:
        params['creds'] = _CLIENT_CREDS

    if CONF.sync_send_identity_headers:
        # Forward the caller's identity explicitly instead of relying on
        # auth_token middleware on the sync side.
        params['identity_headers'] = {
            'X-User-Id': cxt.user,
            'X-Tenant-Id': cxt.tenant,
            'X-Roles': ','.join(cxt.roles),
            'X-Identity-Status': 'Confirmed',
            'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog),
        }
    return client.SyncClient(_CLIENT_HOST, _CLIENT_PORT, **params)
|
||||
|
||||
|
||||
def configure_sync_client():
    """Cache the sync server host/port and HTTP(S) client kwargs from CONF.

    Must be called once before get_sync_client(). When user tokens are
    not passed through, admin credentials are configured as well.
    """
    global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
    _CLIENT_HOST = CONF.sync_server_host
    _CLIENT_PORT = CONF.sync_server_port
    # NOTE: the original also read CONF.metadata_encryption_key into an
    # unused local; that dead assignment has been removed.
    _CLIENT_KWARGS = {
        'use_ssl': CONF.sync_client_protocol.lower() == 'https',
        'key_file': CONF.sync_client_key_file,
        'cert_file': CONF.sync_client_cert_file,
        'ca_file': CONF.sync_client_ca_file,
        'insecure': CONF.sync_client_insecure,
        'timeout': CONF.sync_client_timeout,
    }

    if not CONF.sync_use_user_token:
        configure_sync_admin_creds()
|
||||
|
||||
|
||||
def configure_sync_admin_creds():
    """Populate the module-level admin credential dict from CONF / env."""
    global _CLIENT_CREDS

    # Any configured auth URL (option or OS_AUTH_URL env) forces keystone.
    has_auth_url = CONF.sync_auth_url or os.getenv('OS_AUTH_URL')
    strategy = 'keystone' if has_auth_url else CONF.sync_auth_strategy

    _CLIENT_CREDS = dict(user=CONF.sync_admin_user,
                         password=CONF.sync_admin_password,
                         username=CONF.sync_admin_user,
                         tenant=CONF.sync_admin_tenant_name,
                         auth_url=CONF.sync_auth_url,
                         strategy=strategy,
                         region=CONF.sync_auth_region)
|
106
glancesync/glance/sync/client/v1/client.py
Normal file
106
glancesync/glance/sync/client/v1/client.py
Normal file
@ -0,0 +1,106 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
from glance.common.client import BaseClient
|
||||
from glance.openstack.common import jsonutils
|
||||
import glance.openstack.common.log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SyncClient(BaseClient):
    """REST client for the glance sync service (v1 API)."""

    # Default sync-server port used when none is supplied.
    DEFAULT_PORT = 9595

    def __init__(self, host=None, port=DEFAULT_PORT, identity_headers=None,
                 **kwargs):
        # identity_headers lets callers forward X-User-Id / X-Tenant-Id /
        # etc. explicitly instead of relying on auth_token middleware.
        self.identity_headers = identity_headers
        BaseClient.__init__(self, host, port, configure_via_auth=False,
                            **kwargs)

    def do_request(self, method, action, **kwargs):
        """Issue a request and log the outcome; exceptions are logged
        (at info) and re-raised unchanged."""
        try:
            kwargs['headers'] = kwargs.get('headers', {})
            res = super(SyncClient, self).do_request(method, action, **kwargs)
            status = res.status
            request_id = res.getheader('x-openstack-request-id')
            msg = (_("Sync request %(method)s %(action)s HTTP %(status)s"
                     " request id %(request_id)s") %
                   {'method': method, 'action': action,
                    'status': status, 'request_id': request_id})
            LOG.debug(msg)

        except Exception as exc:
            exc_name = exc.__class__.__name__
            LOG.info(_("Sync client request %(method)s %(action)s "
                       "raised %(exc_name)s"),
                     {'method': method, 'action': action,
                      'exc_name': exc_name})
            raise
        return res

    def _add_common_params(self, id, kwargs):
        # Intentionally a no-op: the sync API adds no common parameters.
        pass

    def update_image_matedata(self, image_id, **kwargs):
        """PATCH the image's metadata on the sync server.

        NOTE(review): 'matedata' is a typo in the method name, kept
        because callers use it as-is.
        """
        headers = {
            'Content-Type': 'application/json',
        }
        body = jsonutils.dumps(kwargs)
        res = self.do_request("PATCH", "/v1/images/%s" % (image_id), body=body,
                              headers=headers)
        return res

    def remove_image(self, image_id, **kwargs):
        """DELETE the image on the sync server."""
        headers = {
            'Content-Type': 'application/json',
        }
        body = jsonutils.dumps(kwargs)
        res = self.do_request("DELETE", "/v1/images/%s" %
                              (image_id), body=body, headers=headers)
        return res

    def sync_data(self, image_id, **kwargs):
        """PUT a data-sync request for the image."""
        headers = {
            'Content-Type': 'application/json',
        }
        body = jsonutils.dumps(kwargs)
        res = self.do_request("PUT", "/v1/images/%s" % (image_id), body=body,
                              headers=headers)
        return res

    def sync_locations(self, image_id, action=None, locs=None, **kwargs):
        """PUT a location-sync request (add/remove per *action*) for the
        image, carrying the location list in the body."""
        headers = {
            'Content-Type': 'application/json',
        }
        kwargs['action'] = action
        kwargs['locations'] = locs
        body = jsonutils.dumps(kwargs)
        res = self.do_request("PUT", "/v1/images/%s/location" % (image_id),
                              body=body, headers=headers)
        return res

    def get_cascaded_endpoints(self, regions=[]):
        """POST the region filter and return the list of cascaded glance
        endpoints ('eps') from the response body.

        NOTE(review): mutable default argument; safe only because
        *regions* is never mutated here.
        """
        headers = {
            'Content-Type': 'application/json',
        }

        body = jsonutils.dumps({'regions': regions})
        res = self.do_request('POST', '/v1/cascaded-eps', body=body,
                              headers=headers)
        return jsonutils.loads(res.read())['eps']
|
89
glancesync/glance/sync/clients.py
Normal file
89
glancesync/glance/sync/clients.py
Normal file
@ -0,0 +1,89 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from keystoneclient.v2_0 import client as ksclient
|
||||
import glance.openstack.common.log as logging
|
||||
from glanceclient.v2 import client as gclient2
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class Clients(object):
    """Lazy factory for keystone and glance clients, authenticating with
    either the request's token/tenant or the configured admin creds."""

    def __init__(self, auth_token=None, tenant_id=None):
        # Cached client instances, created on first use.
        self._keystone = None
        self._glance = None
        self._cxt_token = auth_token
        self._tenant_id = tenant_id
        self._ks_conf = cfg.CONF.keystone_authtoken

    @property
    def auth_token(self, token=None):
        # NOTE(review): a property cannot receive an argument, so *token*
        # is always None here and the keystone token is always returned.
        return token or self.keystone().auth_token

    @property
    def ks_url(self):
        # Keystone admin endpoint assembled from keystone_authtoken conf,
        # with localhost:35357 defaults.
        protocol = self._ks_conf.auth_protocol or 'http'
        auth_host = self._ks_conf.auth_host or '127.0.0.1'
        auth_port = self._ks_conf.auth_port or '35357'
        return protocol + '://' + auth_host + ':' + str(auth_port) + '/v2.0/'

    def url_for(self, **kwargs):
        """Look up one endpoint URL in the keystone service catalog."""
        return self.keystone().service_catalog.url_for(**kwargs)

    def get_urls(self, **kwargs):
        """Look up all matching endpoint URLs in the service catalog."""
        return self.keystone().service_catalog.get_urls(**kwargs)

    def keystone(self):
        """Return (and cache) a keystone v2 client.

        NOTE(review): returns None when client creation fails; callers
        such as url_for() would then raise AttributeError — confirm
        that this is handled upstream.
        """
        if self._keystone:
            return self._keystone

        # Prefer the request's token/tenant; fall back to admin creds.
        if self._cxt_token and self._tenant_id:
            creds = {'token': self._cxt_token,
                     'auth_url': self.ks_url,
                     'project_id': self._tenant_id
                     }
        else:
            creds = {'username': self._ks_conf.admin_user,
                     'password': self._ks_conf.admin_password,
                     'auth_url': self.ks_url,
                     'project_name': self._ks_conf.admin_tenant_name}
        try:
            self._keystone = ksclient.Client(**creds)
        except Exception as e:
            LOG.error(_('create keystone client error: reason: %s') % (e))
            return None

        return self._keystone

    def glance(self, auth_token=None, url=None):
        """Return (and cache) a glance v2 client.

        NOTE(review): once cached, subsequent calls ignore the
        auth_token/url arguments — confirm this is intended.
        """
        gclient = gclient2
        if gclient is None:
            return None
        if self._glance:
            return self._glance
        args = {
            'token': auth_token or self.auth_token,
            'endpoint': url or self.url_for(service_type='image')
        }
        self._glance = gclient.Client(**args)

        return self._glance
|
33
glancesync/glance/sync/pool.py
Normal file
33
glancesync/glance/sync/pool.py
Normal file
@ -0,0 +1,33 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
import glance.openstack.common.log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ThreadPool(object):
    """A fixed 128-worker executor with logged, fire-and-forget submission."""

    def __init__(self):
        # One shared fixed-size executor for all submitted jobs.
        self.pool = ThreadPoolExecutor(128)

    def execute(self, func, *args, **kwargs):
        """Run ``func(*args, **kwargs)`` on the pool; the future is dropped,
        so results and exceptions are not observed by the caller."""
        LOG.info(_('execute %s in a thread pool') % (func.__name__))
        self.pool.submit(func, *args, **kwargs)
|
0
glancesync/glance/sync/store/__init__.py
Normal file
0
glancesync/glance/sync/store/__init__.py
Normal file
0
glancesync/glance/sync/store/_drivers/__init__.py
Normal file
0
glancesync/glance/sync/store/_drivers/__init__.py
Normal file
171
glancesync/glance/sync/store/_drivers/filesystem.py
Normal file
171
glancesync/glance/sync/store/_drivers/filesystem.py
Normal file
@ -0,0 +1,171 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
"""
|
||||
A simple filesystem-backed store
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
from oslo.config import cfg
|
||||
import pxssh
|
||||
import pexpect
|
||||
|
||||
from glance.common import exception
|
||||
import glance.sync.store.driver
|
||||
import glance.sync.store.location
|
||||
from glance.sync.store.location import Location
|
||||
from glance.sync import utils as s_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('scp_copy_timeout', 'glance.common.config', group='sync')
|
||||
|
||||
|
||||
def _login_ssh(host, passwd):
    """Interactively ssh to *host* (a 'user@host' string), answering the
    host-key confirmation and password prompts.

    Returns the pexpect child on success, or None when the expect loop
    times out before a password prompt is seen.
    """
    child_ssh = pexpect.spawn('ssh -p 22 %s' % (host))
    # Echo the session to stdout for debugging.
    child_ssh.logfile = sys.stdout
    login_flag = True
    while True:
        # Match either the first-connection yes/no prompt, the password
        # prompt, or time out.
        ssh_index = child_ssh.expect(['.yes/no.', '.assword:.',
                                      pexpect.TIMEOUT])
        if ssh_index == 0:
            # Accept the unknown host key and keep waiting for the
            # password prompt.
            child_ssh.sendline('yes')
        elif ssh_index == 1:
            child_ssh.sendline(passwd)
            break
        else:
            login_flag = False
            break
    if not login_flag:
        return None

    return child_ssh
|
||||
|
||||
|
||||
def _get_ssh(hostname, username, password):
    """Open an interactive pxssh session (echoed to stdout) and return it."""
    session = pxssh.pxssh()
    session.login(hostname, username, password, original_prompt='[#$>]')
    session.logfile = sys.stdout
    return session
|
||||
|
||||
|
||||
class LocationCreator(glance.sync.store.location.LocationCreator):
    """Factory for 'file' scheme sync Locations."""

    def __init__(self):
        self.scheme = 'file'

    def create(self, **kwargs):
        """Build a Location from image/host/credential kwargs.

        The data path is <datadir>/<image_name or image_id>.
        """
        image_id = kwargs.get('image_id')
        file_name = kwargs.get('image_name', None) or image_id
        specs = {
            'scheme': self.scheme,
            'path': os.path.join(kwargs.get('datadir'), str(file_name)),
            'host': kwargs.get('host'),
            'login_user': kwargs.get('login_user'),
            'login_password': kwargs.get('login_password'),
        }
        return Location(self.scheme, StoreLocation, image_id=image_id,
                        store_specs=specs)
|
||||
|
||||
|
||||
class StoreLocation(glance.sync.store.location.StoreLocation):
    """file-scheme store location: a path on a host plus login credentials."""

    def process_specs(self):
        """Unpack self.specs into scheme/path/host/credential attributes."""
        specs = self.specs
        self.scheme = specs.get('scheme', 'file')
        self.path = specs.get('path')
        self.host = specs.get('host')
        self.login_user = specs.get('login_user')
        self.login_password = specs.get('login_password')
|
||||
|
||||
|
||||
class Store(glance.sync.store.driver.Store):
    """Filesystem sync store driver: copies image files host-to-host
    over an interactive ssh/scp session."""

    def copy_to(self, from_location, to_location, candidate_path=None):
        """Copy the source file to the destination host via scp.

        Falls back to *candidate_path* when the source path is missing.
        Returns the resulting 'file://<path>' URL on the destination.
        Raises SyncStoreCopyError on login, lookup or scp failure.
        """

        from_store_loc = from_location.store_location
        to_store_loc = to_location.store_location

        # Same host and same path: the data is already in place.
        if from_store_loc.host == to_store_loc.host and \
                from_store_loc.path == to_store_loc.path:

            LOG.info(_('The from_loc is same to to_loc, no need to copy. the '
                       'host:path is %s:%s') % (from_store_loc.host,
                                                from_store_loc.path))
            return 'file://%s' % to_store_loc.path

        from_host = r"""{username}@{host}""".format(
            username=from_store_loc.login_user,
            host=from_store_loc.host)

        to_host = r"""{username}@{host}""".format(
            username=to_store_loc.login_user,
            host=to_store_loc.host)

        to_path = r"""{to_host}:{path}""".format(to_host=to_host,
                                                 path=to_store_loc.path)

        copy_path = from_store_loc.path

        try:
            from_ssh = _get_ssh(from_store_loc.host,
                                from_store_loc.login_user,
                                from_store_loc.login_password)
        except Exception:
            raise exception.SyncStoreCopyError(reason="ssh login failed.")

        # Verify the source file exists; 'ls' output is inspected for the
        # shell's error text.
        from_ssh.sendline('ls %s' % copy_path)
        from_ssh.prompt()
        if 'cannot access' in from_ssh.before or \
                'No such file' in from_ssh.before:
            if candidate_path:
                # Primary path missing: try the candidate path instead.
                from_ssh.sendline('ls %s' % candidate_path)
                from_ssh.prompt()
                if 'cannot access' not in from_ssh.before and \
                        'No such file' not in from_ssh.before:
                    copy_path = candidate_path
            else:
                msg = _("the image path for copy to is not exists, file copy"
                        "failed: path is %s" % (copy_path))
                raise exception.SyncStoreCopyError(reason=msg)

        # Drive the interactive scp: accept an unknown host key if asked,
        # then supply the destination password and wait for completion.
        from_ssh.sendline('scp -P 22 %s %s' % (copy_path, to_path))
        while True:
            scp_index = from_ssh.expect(['.yes/no.', '.assword:.',
                                         pexpect.TIMEOUT])
            if scp_index == 0:
                from_ssh.sendline('yes')
                from_ssh.prompt()
            elif scp_index == 1:
                from_ssh.sendline(to_store_loc.login_password)
                # The copy itself may take long; use the configured timeout.
                from_ssh.prompt(timeout=CONF.sync.scp_copy_timeout)
                break
            else:
                msg = _("scp commond execute failed, with copy_path %s and "
                        "to_path %s" % (copy_path, to_path))
                raise exception.SyncStoreCopyError(reason=msg)
                break

        if from_ssh:
            from_ssh.logout()

        return 'file://%s' % to_store_loc.path
|
63
glancesync/glance/sync/store/driver.py
Normal file
63
glancesync/glance/sync/store/driver.py
Normal file
@ -0,0 +1,63 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
"""Base class for all storage backends"""
|
||||
|
||||
from oslo.config import cfg
|
||||
from stevedore import extension
|
||||
|
||||
from glance.common import exception
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.openstack.common.gettextutils import _
|
||||
from glance.openstack.common import importutils
|
||||
from glance.openstack.common import strutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StoreFactory(object):
    """Discovers sync store drivers via stevedore and hands out instances."""

    SYNC_STORE_NAMESPACE = "glance.sync.store.driver"

    def __init__(self):
        self._stores = {}
        self._load_store_drivers()

    def _load_store_drivers(self):
        """Load every driver registered in the namespace; first one wins
        on duplicate names."""
        manager = extension.ExtensionManager(
            namespace=self.SYNC_STORE_NAMESPACE,
            invoke_on_load=True,
        )
        for ext in manager:
            if ext.name not in self._stores:
                ext.obj.name = ext.name
                self._stores[ext.name] = ext.obj

    def get_instance(self, from_scheme='filesystem', to_scheme=None):
        """Return the driver registered for *from_scheme*.

        For a cross-scheme copy, the driver must expose a
        copy_to_<to_scheme> method; otherwise None is returned.
        """
        driver = self._stores.get(from_scheme)
        if driver and to_scheme and to_scheme != from_scheme:
            if getattr(driver, 'copy_to_%s' % to_scheme, None) is None:
                return None
        return driver
|
||||
|
||||
|
||||
class Store(object):
    """Abstract base for sync store drivers."""

    def copy_to(self, source_location, dest_location, candidate_path=None):
        """Copy image bits between locations; concrete drivers override
        this. The base implementation is a no-op."""
        pass
|
111
glancesync/glance/sync/store/glance_store.py
Normal file
111
glancesync/glance/sync/store/glance_store.py
Normal file
@ -0,0 +1,111 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import fnmatch
|
||||
import operator
|
||||
import os
|
||||
|
||||
from oslo.config import cfg
|
||||
import yaml
|
||||
|
||||
from glance.sync import utils as s_utils
|
||||
|
||||
|
||||
# Location of the YAML file describing each glance's store schemes.
OPTS = [
    cfg.StrOpt('glance_store_cfg_file',
               default="glance_store.yaml",
               help="Configuration file for glance's store location "
                    "definition."
               ),
]

# Scheme preference order consulted by choose_best_store_schemes().
# NOTE: the 'SOTRE' typo is in the public name and is kept as-is.
PRIOR_SOTRE_SCHEMES = ['filesystem', 'http', 'swift']

cfg.CONF.register_opts(OPTS)
|
||||
|
||||
|
||||
def choose_best_store_schemes(source_endpoint, dest_endpoint):
    """Pick the best (source_scheme, dest_scheme) pair for an image copy.

    Prefers a scheme both glances share, ranked by PRIOR_SOTRE_SCHEMES;
    falls back to each side's first scheme when nothing is shared.
    """
    global GLANCE_STORES
    src_store = GLANCE_STORES.get_glance_store(
        s_utils.get_host_from_ep(source_endpoint))
    dst_store = GLANCE_STORES.get_glance_store(
        s_utils.get_host_from_ep(dest_endpoint))

    # Collect every scheme name both stores support.
    common = {}
    for src_scheme in src_store.schemes:
        for dst_scheme in dst_store.schemes:
            if src_scheme['name'] == dst_scheme['name']:
                common[src_scheme['name']] = (src_scheme, dst_scheme)

    if common:
        best = min(common, key=PRIOR_SOTRE_SCHEMES.index)
        return common[best]

    return (src_store.schemes[0], dst_store.schemes[0])
|
||||
|
||||
|
||||
class GlanceStore(object):
    """Static description of one glance service and its storage schemes."""

    def __init__(self, service_ip, name, schemes):
        # Host/IP of the glance API endpoint.
        self.service_ip = service_ip
        # Human-readable name from the YAML config.
        self.name = name
        # List of scheme dicts; each entry carries at least a 'name' key.
        self.schemes = schemes
|
||||
|
||||
|
||||
class ImageObject(object):
    """An image id paired with the glance store that hosts it."""

    def __init__(self, image_id, glance_store):
        self.image_id = image_id
        self.glance_store = glance_store
|
||||
|
||||
|
||||
class GlanceStoreManager(object):
    """Registry of glance store definitions parsed from the YAML config."""

    def __init__(self, cfg):
        # `cfg` is the parsed YAML mapping; expects a top-level 'glances' list
        # of entries carrying 'service_ip', 'name' and 'schemes'.
        self.cfg = cfg
        self.g_stores = [GlanceStore(entry['service_ip'],
                                     entry['name'],
                                     entry['schemes'])
                         for entry in cfg['glances']]

    def get_glance_store(self, service_ip):
        """Return the GlanceStore registered for service_ip, or None."""
        for store in self.g_stores:
            if store.service_ip == service_ip:
                return store
        return None

    def generate_Image_obj(self, image_id, endpoint):
        """Build an ImageObject bound to the store serving `endpoint`."""
        host = s_utils.get_host_from_ep(endpoint)
        return ImageObject(image_id, self.get_glance_store(host))
|
||||
|
||||
|
||||
# Module-level GlanceStoreManager singleton; populated by setup_glance_stores().
GLANCE_STORES = None
|
||||
|
||||
|
||||
def setup_glance_stores():
    """Load the glance store YAML config and build the global manager.

    Falls back to oslo.config's search path when the configured file name
    is not an existing path.
    """
    global GLANCE_STORES
    path = cfg.CONF.glance_store_cfg_file
    if not os.path.exists(path):
        path = cfg.CONF.find_file(path)
    with open(path) as fh:
        raw = fh.read()

    GLANCE_STORES = GlanceStoreManager(yaml.safe_load(raw))
|
95
glancesync/glance/sync/store/location.py
Normal file
95
glancesync/glance/sync/store/location.py
Normal file
@ -0,0 +1,95 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import logging
|
||||
import urlparse
|
||||
|
||||
from stevedore import extension
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LocationCreator(object):
    """Base class for scheme-specific location plugins."""

    def __init__(self):
        # Concrete plugins set this to their storage scheme name.
        self.scheme = None

    def creator(self, **kwargs):
        """Build a store location; overridden by concrete plugins."""
        pass
|
||||
|
||||
|
||||
class Location(object):

    """
    Class describing the location of an image that Glance knows about
    """

    def __init__(self, store_name, store_location_class,
                 uri=None, image_id=None, store_specs=None):
        """
        Create a new Location object.

        :param store_name: The string identifier/scheme of the storage backend
        :param store_location_class: The store location class to use
                                     for this location instance.
        :param image_id: The identifier of the image in whatever storage
                         backend is used.
        :param uri: Optional URI to construct location from
        :param store_specs: Dictionary of information about the location
                            of the image that is dependent on the backend
                            store
        """
        self.image_id = image_id
        self.store_name = store_name
        self.store_specs = {} if not store_specs else store_specs
        # Delegate backend-specific interpretation of the specs.
        self.store_location = store_location_class(self.store_specs)
|
||||
|
||||
|
||||
class StoreLocation(object):

    """
    Base class that must be implemented by each store
    """

    def __init__(self, store_specs):
        self.specs = store_specs
        # Let the concrete subclass unpack its backend-specific fields;
        # skipped when no specs were supplied.
        if store_specs:
            self.process_specs()
|
||||
|
||||
|
||||
class LocationFactory(object):
    """Loads and hands out location plugins registered via stevedore."""

    # Entry-point namespace the location plugins register under.
    SYNC_LOCATION_NAMESPACE = "glance.sync.store.location"

    def __init__(self):
        self._locations = {}
        self._load_locations()

    def _load_locations(self):
        # Discover and instantiate every plugin in the namespace; first
        # registration for a given name wins.
        extension_manager = extension.ExtensionManager(
            namespace=self.SYNC_LOCATION_NAMESPACE,
            invoke_on_load=True,
        )
        for ext in extension_manager:
            if ext.name in self._locations:
                continue
            ext.obj.name = ext.name
            self._locations[ext.name] = ext.obj

    def get_instance(self, scheme, **kwargs):
        # NOTE(review): an unknown scheme makes loc_creator None and the
        # next line raises AttributeError — confirm callers always pass a
        # registered scheme.
        loc_creator = self._locations.get(scheme, None)
        # NOTE(review): calls `create`, while LocationCreator defines
        # `creator` — presumably concrete plugins implement `create`;
        # verify against the plugin implementations.
        return loc_creator.create(**kwargs)
|
349
glancesync/glance/sync/task/__init__.py
Normal file
349
glancesync/glance/sync/task/__init__.py
Normal file
@ -0,0 +1,349 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import threading
|
||||
import Queue
|
||||
import uuid
|
||||
|
||||
import eventlet
|
||||
from oslo.config import cfg
|
||||
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.openstack.common import timeutils
|
||||
from glance.sync import utils as s_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Option controlling which regions receive snapshot images during sync.
snapshot_opt = [
    cfg.ListOpt('snapshot_region_names',
                default=[],
                help=_("for what regions the snapshot sync to"),
                deprecated_opts=[cfg.DeprecatedOpt('snapshot_region_names',
                                                   group='DEFAULT')]),
]

CONF = cfg.CONF
CONF.register_opts(snapshot_opt)
|
||||
|
||||
|
||||
class TaskObject(object):
    """Base unit of work for the glance sync daemon.

    Subclasses implement do_checkInput() and create_green_threads();
    execute() validates the input, runs the green threads and reports the
    outcome back to the sync manager.
    """

    def __init__(self, type, input, retry_times=0):
        # NOTE: `type` and `input` shadow builtins; kept for interface
        # compatibility with existing callers.
        self.id = str(uuid.uuid4())
        self.type = type
        self.input = input
        self.image_id = self.input.get('image_id')
        # Lifecycle: new -> running -> terminal | error | param_error.
        self.status = 'new'
        self.retry_times = retry_times
        self.start_time = None

    @classmethod
    def get_instance(cls, type, input, **kwargs):
        """Factory mapping a task-type string to its concrete subclass."""
        _type_cls_dict = {'meta_update': MetaUpdateTask,
                          'meta_remove': MetaDeleteTask,
                          'sync': ImageActiveTask,
                          'snapshot': PatchSnapshotLocationTask,
                          'patch': PatchLocationTask,
                          'locs_remove': RemoveLocationsTask,
                          'periodic_add': ChkNewCascadedsPeriodicTask}

        if _type_cls_dict.get(type):
            return _type_cls_dict[type](input, **kwargs)

        return None

    def _handle_result(self, sync_manager):
        # Report final status back so the manager can track/retry tasks.
        return sync_manager.handle_tasks({'image_id': self.image_id,
                                          'type': self.type,
                                          'start_time': self.start_time,
                                          'status': self.status
                                          })

    def execute(self, sync_manager, auth_token):
        """Validate input, spawn this task's green threads, wait for them."""
        if not self.checkInput():
            self.status = 'param_error'
            LOG.error(_('the input content not valid: %s.' % (self.input)))
            return self._handle_result(sync_manager)

        try:
            self.status = 'running'
            green_threads = self.create_green_threads(sync_manager, auth_token)
            for gt in green_threads:
                gt.wait()
        except Exception as e:
            msg = _("Unable to execute task of image %(image_id)s: %(e)s") % \
                {'image_id': self.image_id, 'e': unicode(e)}
            LOG.exception(msg)
            self.status = 'error'
        else:
            self.status = 'terminal'

        return self._handle_result(sync_manager)

    def checkInput(self):
        # NOTE(review): pop() removes 'image_id' from self.input so it is
        # not forwarded again via **self.input in the subclasses —
        # presumably intentional; confirm.
        if not self.input.pop('image_id', None):
            LOG.warn(_('No cascading image_id specified.'))
            return False
        return self.do_checkInput()
|
||||
|
||||
|
||||
class MetaUpdateTask(TaskObject):
    """Propagate metadata changes of a cascading image to cascaded glances."""

    def __init__(self, input):
        super(MetaUpdateTask, self).__init__('meta_update', input)

    def do_checkInput(self):
        params = self.input
        changes = params.get('changes')
        removes = params.get('removes')
        tags = params.get('tags')
        if not changes and not removes and not tags:
            LOG.warn(_('No changes and removes and tags with the glance.'))
        # An empty update is still accepted; it just results in no-op work.
        return True

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn one meta_update green thread per mapped cascaded endpoint."""
        green_threads = []
        cascaded_mapping = s_utils.get_mappings_from_image(auth_token,
                                                          self.image_id)
        for cascaded_ep in cascaded_mapping:
            cascaded_id = cascaded_mapping[cascaded_ep]
            green_threads.append(eventlet.spawn(sync_manager.meta_update,
                                                auth_token,
                                                cascaded_ep,
                                                image_id=cascaded_id,
                                                **self.input))

        return green_threads
|
||||
|
||||
|
||||
class MetaDeleteTask(TaskObject):
    """Delete the cascaded copies of an image on every mapped glance."""

    def __init__(self, input):
        super(MetaDeleteTask, self).__init__('meta_remove', input)

    def do_checkInput(self):
        # The cascaded targets are derived from the image's locations.
        self.locations = self.input.get('locations')
        return self.locations is not None

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn one meta_delete green thread per mapped cascaded endpoint."""
        green_threads = []
        cascaded_mapping = s_utils.get_mappings_from_locations(self.locations)
        for cascaded_ep in cascaded_mapping:
            cascaded_id = cascaded_mapping[cascaded_ep]
            green_threads.append(eventlet.spawn(sync_manager.meta_delete,
                                                auth_token,
                                                cascaded_ep,
                                                image_id=cascaded_id))

        return green_threads
|
||||
|
||||
|
||||
class ImageActiveTask(TaskObject):
    """Sync a newly-active cascading image to every cascaded glance."""

    def __init__(self, input):
        super(ImageActiveTask, self).__init__('sync', input)

    def do_checkInput(self):
        image_data = self.input.get('body')
        self.cascading_endpoint = self.input.get('cascading_ep')
        return image_data and self.cascading_endpoint

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn one sync_image green thread per cascaded endpoint."""
        green_threads = []
        cascaded_eps = s_utils.get_endpoints(auth_token)
        for cascaded_ep in cascaded_eps:
            green_threads.append(eventlet.spawn(sync_manager.sync_image,
                                                auth_token,
                                                self.cascading_endpoint,
                                                cascaded_ep,
                                                self.image_id,
                                                self.image_id,
                                                **self.input))

        return green_threads
|
||||
|
||||
|
||||
class PatchSnapshotLocationTask(TaskObject):
    """Distribute a snapshot image from its source glance to other regions."""

    def __init__(self, input):
        super(PatchSnapshotLocationTask, self).__init__('snapshot', input)

    def do_checkInput(self):
        image_metadata = self.input.get('body')
        # pop() so these are not forwarded again via **self.input below.
        self.snapshot_endpoint = self.input.pop('snapshot_ep', None)
        self.snapshot_id = self.input.pop('snapshot_id', None)
        return image_metadata and self.snapshot_endpoint and self.snapshot_id

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn do_snapshot threads for every target region except the source."""
        green_threads = []
        _region_names = CONF.snapshot_region_names
        cascaded_mapping = s_utils.get_endpoints(auth_token,
                                                 region_names=_region_names)
        # get_endpoints may return None; the membership test then raises
        # TypeError, which is deliberately swallowed here.
        try:
            if self.snapshot_endpoint in cascaded_mapping:
                cascaded_mapping.remove(self.snapshot_endpoint)
        except TypeError:
            pass
        for cascaded_ep in cascaded_mapping:
            green_threads.append(eventlet.spawn(sync_manager.do_snapshot,
                                                auth_token,
                                                self.snapshot_endpoint,
                                                cascaded_ep,
                                                self.snapshot_id,
                                                self.image_id,
                                                **self.input))

        return green_threads
|
||||
|
||||
|
||||
class PatchLocationTask(TaskObject):
    """Add a new location entry to the cascaded copies of an image."""

    def __init__(self, input):
        super(PatchLocationTask, self).__init__('patch', input)

    def do_checkInput(self):
        self.location = self.input.get('location')
        return self.location is not None

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn one patch_location green thread per mapped cascaded endpoint."""
        green_threads = []
        cascaded_mapping = s_utils.get_mappings_from_image(auth_token,
                                                          self.image_id)
        for cascaded_ep in cascaded_mapping:
            cascaded_id = cascaded_mapping[cascaded_ep]
            green_threads.append(eventlet.spawn(sync_manager.patch_location,
                                                self.image_id,
                                                cascaded_id,
                                                auth_token,
                                                cascaded_ep,
                                                self.location))
        return green_threads
|
||||
|
||||
|
||||
class RemoveLocationsTask(TaskObject):
    """Remove location entries from the cascaded copies of an image."""

    def __init__(self, input):
        super(RemoveLocationsTask, self).__init__('locs_remove', input)

    def do_checkInput(self):
        self.locations = self.input.get('locations')
        return self.locations is not None

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn one remove_loc green thread per mapped cascaded endpoint."""
        green_threads = []
        cascaded_mapping = s_utils.get_mappings_from_locations(self.locations)
        for cascaded_ep in cascaded_mapping:
            cascaded_id = cascaded_mapping[cascaded_ep]
            green_threads.append(eventlet.spawn(sync_manager.remove_loc,
                                                cascaded_id,
                                                auth_token,
                                                cascaded_ep))
        return green_threads
|
||||
|
||||
|
||||
class PeriodicTask(TaskObject):
    """Base class for tasks that run repeatedly on a fixed interval.

    execute() busy-waits (in bounded sleeps) until ready(), then runs the
    normal TaskObject.execute() path.
    """

    # Upper bound, in seconds, for a single wait between readiness checks.
    MAX_SLEEP_SECONDS = 15

    def __init__(self, type, input, interval, last_run_time, run_immediately):
        super(PeriodicTask, self).__init__(type, input)
        self.interval = interval
        self.last_run_time = last_run_time
        self.run_immediately = run_immediately

    def do_checkInput(self):
        # A missing, zero or negative interval is rejected.
        if not self.interval or self.interval < 0:
            LOG.error(_('The Periodic Task interval invalid.'))
            return False

        return True

    def ready(self):
        # first time to run
        if self.last_run_time is None:
            self.last_run_time = timeutils.strtime()
            return self.run_immediately
        return timeutils.is_older_than(self.last_run_time, self.interval)

    def execute(self, sync_manager, auth_token):
        """Wait until the interval has elapsed, then run the task."""
        while not self.ready():
            LOG.debug(_('the periodic task has not ready yet, sleep a while.'
                        'current_start_time is %s, last_run_time is %s, and '
                        'the interval is %i.' % (self.start_time,
                                                 self.last_run_time,
                                                 self.interval)))
            _max_sleep_time = self.MAX_SLEEP_SECONDS
            # Bug fix: use min() so MAX_SLEEP_SECONDS actually caps the wait;
            # the original max() forced every sleep to be at least 15s.
            eventlet.sleep(seconds=min(self.interval / 10, _max_sleep_time))

        super(PeriodicTask, self).execute(sync_manager, auth_token)
|
||||
|
||||
|
||||
class ChkNewCascadedsPeriodicTask(PeriodicTask):
    """Periodically push existing images to newly added cascaded glances."""

    def __init__(self, input, interval=60, last_run_time=None,
                 run_immediately=False):

        super(ChkNewCascadedsPeriodicTask, self).__init__('periodic_add',
                                                          input, interval,
                                                          last_run_time,
                                                          run_immediately)
        LOG.debug(_('create ChkNewCascadedsPeriodicTask.'))

    def do_checkInput(self):
        # `images` maps image_id -> {'locations': [...], 'body': {...}}
        # (see create_green_threads below).
        self.images = self.input.get('images')
        self.cascading_endpoint = self.input.get('cascading_ep')
        if self.images is None or not self.cascading_endpoint:
            return False
        return super(ChkNewCascadedsPeriodicTask, self).do_checkInput()

    def _stil_need_synced(self, cascaded_ep, image_id, auth_token):
        # Re-check against the cascading glance: skip images that were
        # deleted, are not active, or already list this cascaded endpoint.
        g_client = s_utils.create_self_glance_client(auth_token)
        try:
            image = g_client.images.get(image_id)
        except Exception:
            LOG.warn(_('The add cascaded periodic task checks that the image '
                       'has deleted, no need to sync. id is %s' % image_id))
            return False
        else:
            if image.status != 'active':
                LOG.warn(_('The add cascaded period task checks image status '
                           'not active, no need to sync.'
                           'image id is %s.' % image_id))
                return False
            ep_list = [loc['url'] for loc in image.locations
                       if s_utils.is_glance_location(loc['url'])]
            return not s_utils.is_ep_contains(cascaded_ep, ep_list)

    def create_green_threads(self, sync_manager, auth_token):
        """Spawn sync_image threads for images still missing on a cascaded."""
        green_threads = []
        for image_id in self.images:
            cascaded_eps = self.images[image_id].get('locations')
            kwargs = {'body': self.images[image_id].get('body')}
            for cascaded_ep in cascaded_eps:
                if not self._stil_need_synced(cascaded_ep,
                                              image_id, auth_token):
                    continue
                green_threads.append(eventlet.spawn(sync_manager.sync_image,
                                                    auth_token,
                                                    self.cascading_endpoint,
                                                    cascaded_ep,
                                                    image_id,
                                                    image_id,
                                                    **kwargs))

        return green_threads
|
215
glancesync/glance/sync/utils.py
Normal file
215
glancesync/glance/sync/utils.py
Normal file
@ -0,0 +1,215 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Jia Dong, HuaWei
|
||||
|
||||
import re
|
||||
|
||||
from oslo.config import cfg
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from glance.sync.clients import Clients as clients
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('cascading_endpoint_url', 'glance.common.config', group='sync')
|
||||
CONF.import_opt('sync_strategy', 'glance.common.config', group='sync')
|
||||
|
||||
|
||||
def create_glance_client(auth_token, url):
    """Build a glance client bound to the given endpoint url."""
    openstack_clients = clients(auth_token)
    return openstack_clients.glance(url=url)
|
||||
|
||||
|
||||
def create_self_glance_client(auth_token):
    # Client pointed at the cascading (top-level) glance endpoint.
    return create_glance_client(auth_token, get_cascading_endpoint_url())
|
||||
|
||||
|
||||
def get_mappings_from_image(auth_token, image_id):
    """Return the cascaded endpoint -> image-id map for a cascading image.

    Reads the image's patched glance locations from the cascading glance.
    """
    client = create_self_glance_client(auth_token)
    locations = client.images.get(image_id).locations
    if not locations:
        return {}
    return get_mappings_from_locations(locations)
|
||||
|
||||
|
||||
def get_mappings_from_locations(locations):
    """Map cascaded glance endpoint URL -> cascaded image id.

    Only glance-style locations carrying an 'image_id' in their metadata
    contribute to the mapping.
    """
    mappings = {}
    for loc in locations:
        if not is_glance_location(loc['url']):
            continue
        image_id = loc['metadata'].get('image_id')
        if not image_id:
            continue
        mappings[create_ep_by_loc(loc)] = image_id
    return mappings
|
||||
|
||||
|
||||
def get_cascading_endpoint_url():
    # The configured endpoint of the cascading (top-level) glance service.
    return CONF.sync.cascading_endpoint_url
|
||||
|
||||
|
||||
def get_host_from_ep(ep_url):
    """Return the host portion (no port) of an endpoint URL, or None."""
    if not ep_url:
        return None
    netloc = urlparse.urlparse(ep_url).netloc
    return netloc.partition(':')[0]
|
||||
|
||||
# Matches a glance v2 image URL, e.g. http://host:9292/v2/images/<id>.
pattern = re.compile(r'^https?://\S+/v2/images/\S+$')
|
||||
|
||||
|
||||
def get_default_location(locations):
    """Return the first location flagged as default, or None."""
    return next((loc for loc in locations if is_default_location(loc)), None)
|
||||
|
||||
|
||||
def is_glance_location(loc_url):
    # Returns the re match object (truthy) when loc_url is a glance v2
    # image URL, else None; callers rely on truthiness only.
    return pattern.match(loc_url)
|
||||
|
||||
|
||||
def is_snapshot_location(location):
    """True when the location metadata marks the image as snapshot/volume."""
    meta = location['metadata']
    if not meta:
        # Preserve original semantics: falsy metadata is returned as-is.
        return meta
    return meta.get('image_from', None) in ['snapshot', 'volume']
|
||||
|
||||
|
||||
def get_id_from_glance_loc(location):
    """Extract the cascaded image id stored in a glance location's metadata."""
    if not is_glance_location(location['url']):
        return None
    meta = location['metadata']
    if not meta:
        return None
    return meta.get('image_id', None)
|
||||
|
||||
|
||||
def is_default_location(location):
    """True for a non-glance location explicitly flagged as default.

    Missing keys or malformed locations count as "not default" rather
    than raising.
    """
    try:
        return not is_glance_location(location['url']) \
            and location['metadata']['is_default'] == 'true'
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        return False
|
||||
|
||||
|
||||
def get_snapshot_glance_loc(locations):
    """Return the first location that came from a snapshot/volume, or None."""
    return next((loc for loc in locations if is_snapshot_location(loc)), None)
|
||||
|
||||
|
||||
def create_ep_by_loc(location):
    """Derive the glance endpoint (scheme://netloc/) from a glance location."""
    loc_url = location['url']
    if not is_glance_location(loc_url):
        return None
    parsed = urlparse.urlparse(loc_url)
    return '%s://%s/' % (parsed.scheme, parsed.netloc)
|
||||
|
||||
|
||||
def generate_glance_location(ep, image_id, port=None):
    """Build a glance v2 image URL for `image_id` on the endpoint's host.

    The endpoint's original port is replaced by `port` (default 9292).
    """
    parsed = urlparse.urlparse(ep)
    host = parsed.netloc.split(':')[0]
    return '%s://%s:%s/v2/images/%s' % (parsed.scheme, host,
                                        port or '9292', image_id)
|
||||
|
||||
|
||||
def get_endpoints(auth_token=None, tenant_id=None, **kwargs):
    """
    find which glance should be sync by strategy config
    """
    strategy = CONF.sync.sync_strategy
    # Any strategy other than 'All'/'User' disables syncing entirely.
    if strategy not in ['All', 'User']:
        return None

    openstack_clients = clients(auth_token, tenant_id)
    ksclient = openstack_clients.keystone()

    '''
    suppose that the cascading glance is 'public' endpoint type, and the
    cascaded glacne endpoints are 'internal'
    '''
    regions = kwargs.pop('region_names', [])
    if strategy == 'All' and not regions:
        # Every image endpoint in the catalog, minus the cascading glance
        # itself.
        urls = ksclient.service_catalog.get_urls(service_type='image',
                                                 endpoint_type='publicURL')
        if urls:
            result = [u for u in urls if u != get_cascading_endpoint_url()]
        else:
            result = []
        return result
    else:
        # Restrict to the requested regions; de-duplicate and exclude the
        # cascading glance.
        user_urls = []
        for region_name in regions:
            urls = ksclient.service_catalog.get_urls(service_type='image',
                                                     endpoint_type='publicURL',
                                                     region_name=region_name)
            if urls:
                user_urls.extend(urls)
        result = [u for u in set(user_urls) if u !=
                  get_cascading_endpoint_url()]
        return result
|
||||
|
||||
|
||||
# Core v2 image attributes copied onto the synced image at creation time.
_V2_IMAGE_CREATE_PROPERTIES = ['container_format',
                               'disk_format', 'min_disk', 'min_ram', 'name',
                               'virtual_size', 'visibility', 'protected']


def get_core_properties(image):
    """
    when sync, create image object, get the sync info
    """
    props = {}
    for attr in _V2_IMAGE_CREATE_PROPERTIES:
        try:
            val = getattr(image, attr, None)
        except KeyError:
            # Some image models raise KeyError on attribute access —
            # treat the attribute as absent.
            continue
        # Skip unset values and the literal string 'None'.
        if val and val != 'None':
            props[attr] = val
    tags = list(image.tags) or []
    if tags:
        props['tags'] = tags
    return props
|
||||
|
||||
|
||||
def calculate_lack_endpoints(all_ep_urls, glance_urls):
    """
    calculate endpoints which exists in all_eps but not in glance_eps
    """
    if not glance_urls:
        return all_ep_urls

    # Compare by host:port (netloc), not by full URL.
    known_hosts = set(urlparse.urlparse(u).netloc for u in glance_urls)

    def _is_lacking(ep):
        return urlparse.urlparse(ep).netloc not in known_hosts

    return filter(_is_lacking, all_ep_urls)
|
||||
|
||||
|
||||
def is_ep_contains(ep_url, glance_urls):
    """True if ep_url's host:port matches any endpoint in glance_urls.

    Bug fix: the original compared the whole ParseResult object against the
    netloc strings, so the function always returned False.
    """
    _hosts = [urlparse.urlparse(_ep).netloc for _ep in glance_urls]
    return urlparse.urlparse(ep_url).netloc in _hosts
|
152
glancesync/installation/install.sh
Normal file
152
glancesync/installation/install.sh
Normal file
@ -0,0 +1,152 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# Copyright (c) 2014 Huawei Technologies.
|
||||
|
||||
# Directory containing this script.
CURPATH=$(cd "$(dirname "$0")"; pwd)
# Target glance installation paths.
_GLANCE_CONF_DIR="/etc/glance"
_GLANCE_API_CONF_FILE="glance-api.conf"
_GLANCE_SYNC_CMD_FILE="glance-sync"
# NOTE(review): hard-coded python2.6 / lib64 site-packages path — adjust
# for the target distribution before running.
_PYTHON_INSTALL_DIR="/usr/lib64/python2.6/site-packages"
_GLANCE_DIR="${_PYTHON_INSTALL_DIR}/glance"

# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="${CURPATH}/../glance"
_CONF_DIR="${CURPATH}/../etc"
_BACKUP_DIR="${_GLANCE_DIR}/glance-sync-backup"

_SCRIPT_LOGFILE="/var/log/glance/installation/install.log"

# Options appended to glance-api.conf's [DEFAULT] to enable the sync server.
api_config_option_list="sync_enabled=True sync_server_port=9595 sync_server_host=127.0.0.1"

# Show line/function in xtrace output when debugging with `bash -x`.
export PS4='+{$LINENO:${FUNCNAME[0]}}'
|
||||
|
||||
# Invoked via `trap ... ERR`: report the failing line and exit status.
ERRTRAP()
{
    echo "[LINE:$1] Error: Command or function exited with status $?"
}
|
||||
|
||||
# Echo a message to stdout and append it, timestamped (UTC), to the log file.
function log()
{
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE
}
|
||||
|
||||
|
||||
# Kill (SIGKILL) every process whose command line matches $1.
function process_stop
{
    PID=`ps -efw|grep "$1"|grep -v grep|awk '{print $2}'`
    echo "PID is: $PID">>$_SCRIPT_LOGFILE
    if [ "x${PID}" != "x" ]; then
        for kill_id in $PID
        do
            kill -9 ${kill_id}
            if [ $? -ne 0 ]; then
                echo "[[stop glance-sync]]$1 stop failed.">>$_SCRIPT_LOGFILE
                exit 1
            fi
        done
        echo "[[stop glance-sync]]$1 stop ok.">>$_SCRIPT_LOGFILE
    fi
}
|
||||
|
||||
|
||||
trap 'ERRTRAP $LINENO' ERR

# The installer writes under /etc and /usr/bin and restarts services.
if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

if [ ! -d "/var/log/glance/installation" ]; then
    mkdir -p /var/log/glance/installation
    # Bug fix: the original ran `touch _SCRIPT_LOGFILE` (missing `$`),
    # creating a file literally named "_SCRIPT_LOGFILE" instead of the log.
    touch "$_SCRIPT_LOGFILE"
fi

cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_GLANCE_DIR}" ] ; then
    log "Could not find the glance installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi
if [ ! -f "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" ] ; then
    log "Could not find glance-api config file. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi
if [ ! -f "${_CONF_DIR}/${_GLANCE_SYNC_CMD_FILE}" ]; then
    log "Could not find the glance-sync file. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

log "checking previous installation..."
if [ -d "${_BACKUP_DIR}/glance" ] ; then
    log "It seems glance cascading has already been installed!"
    log "Please check README for solution if this is not true."
    exit 1
fi

log "backing up current files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}/glance"
mkdir -p "${_BACKUP_DIR}/etc"
mkdir -p "${_BACKUP_DIR}/etc/glance"
cp -rf "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" "${_BACKUP_DIR}/etc/glance/"

if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/glance"
    rm -r "${_BACKUP_DIR}/etc"
    log "Error in config backup, aborted."
    exit 1
fi

log "copying in new files..."
cp -r "${_CODE_DIR}" `dirname ${_GLANCE_DIR}`
cp -r "${_CONF_DIR}/glance" "/etc"
cp "${_CONF_DIR}/${_GLANCE_SYNC_CMD_FILE}" "/usr/bin/"
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering original files..."
    cp -r "${_BACKUP_DIR}/glance" `dirname ${_GLANCE_DIR}` && rm -r "${_BACKUP_DIR}/glance"
    # Bug fix: the glob must be outside the quotes; a quoted "*.conf" is
    # passed literally to cp and the restore silently failed.
    cp "${_BACKUP_DIR}"/etc/glance/*.conf `dirname ${_GLANCE_CONF_DIR}` && rm -r "${_BACKUP_DIR}/etc"
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please install manually."
    fi
    exit 1
fi

log "updating config file..."
# Remove any stale copy of each option, then re-insert it under [DEFAULT].
for option in $api_config_option_list
do
    sed -i -e "/$option/d" "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}"
    sed -i -e "/DEFAULT/a $option" "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}"
done


log "restarting glance ..."
service openstack-glance-api restart
service openstack-glance-registry restart
process_stop "glance-sync"
python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf &
if [ $? -ne 0 ] ; then
    log "There was an error in restarting the service, please restart glance manually."
    exit 1
fi

log "Completed."
log "See README to get started."

exit 0
|
43
icehouse-patches/cinder/README.md
Normal file
43
icehouse-patches/cinder/README.md
Normal file
@ -0,0 +1,43 @@
|
||||
Cinder create volume from image bug
|
||||
===============================
|
||||
OpenStack cascading is currently developed based on the Icehouse version. In
the Icehouse version there is a bug in creating a volume from an image and uploading a volume to an image.
|
||||
Please refer to https://bugs.launchpad.net/cinder/+bug/1308058 for details.
|
||||
This bug is recommended to fix in cascaded cinder.
|
||||
|
||||
Key modules
|
||||
-----------
|
||||
|
||||
* When creating a volume from an image, or uploading a volume to an image, Cinder calls a function in glance.py
to check the image metadata, but not all of the metadata is included in the Glance image information.
As a result, in the function `_extract_attributes` in the file below, not all elements (such as "checksum")
are validated:
|
||||
|
||||
cinder/image/glance.py
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
* openstack icehouse has been installed
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
We suggest a way to fix the cinder-image-metadata bug. In this section,
|
||||
we will guide you through fix this image metadata bug.
|
||||
|
||||
* **Note:**
|
||||
|
||||
- Make sure you have an existing installation of **Openstack Icehouse**.
|
||||
- We recommend that you Do backup at least the following files before installation,
|
||||
because they are to be overwritten or modified.
|
||||
|
||||
* **Manual Installation as the OpenStack Community suggest**
|
||||
|
||||
modify "output[attr] = getattr(image, attr)" to "output[attr] = getattr(image, attr, None)"
in `_extract_attributes` in cinder/image/glance.py, around line 434
|
||||
|
||||
|
||||
|
||||
|
||||
|
54
icehouse-patches/cinder/timestamp-query-patch/README.md
Normal file
54
icehouse-patches/cinder/timestamp-query-patch/README.md
Normal file
@ -0,0 +1,54 @@
|
||||
Cinder timestamp-query-patch
|
||||
===============================
|
||||
it will be patched in cascaded level's control node
|
||||
|
||||
cinder icehouse version database has update_at attribute for change_since
|
||||
query filter function, however cinder db api this version don't support
|
||||
timestamp query function. So it is needed to make this patch in cascaded level
|
||||
while synchronizing state between the cascading and cascaded OpenStack levels
|
||||
|
||||
Key modules
|
||||
-----------
|
||||
|
||||
* adding timestamp query function while list volumes:
|
||||
|
||||
cinder\db\sqlalchemy\api.py
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
* openstack icehouse has been installed
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
We provide two ways to install the timestamp query patch code. In this section, we will guide you through installing the timestamp query patch.
|
||||
|
||||
* **Note:**
|
||||
|
||||
- Make sure you have an existing installation of **Openstack Icehouse**.
|
||||
- We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified:
|
||||
|
||||
* **Manual Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
|
||||
- Navigate to the local repository and copy the contents in 'cinder' sub-directory to the corresponding places in existing cinder, e.g.
|
||||
```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR```
|
||||
(replace the $... with actual directory name.)
|
||||
|
||||
- restart cinder api service
|
||||
|
||||
- Done. The cinder proxy should be working with a demo configuration.
|
||||
|
||||
* **Automatic Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
|
||||
- Navigate to the installation directory and run installation script.
|
||||
```
|
||||
cd $LOCAL_REPOSITORY_DIR/installation
|
||||
sudo bash ./install.sh
|
||||
```
|
||||
(replace the $... with actual directory name.)
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,87 @@
|
||||
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

# Installs the cinder timestamp-query patch: backs up the current
# cinder/db tree, copies the patched files into the cinder installation
# and restarts the cinder API service.

_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../cinder"
_BACKUP_DIR="${_CINDER_DIR}/cinder_timestamp_query_patch-installation-backup"
_SCRIPT_LOGFILE="/var/log/cinder/cinder_timestamp_query_patch/installation/install.log"

# log MESSAGE...
# Echo to stdout and append a UTC-timestamped copy to the install log,
# creating the log directory and file on first use.
function log()
{
    log_path=`dirname ${_SCRIPT_LOGFILE}`
    if [ ! -d $log_path ] ; then
        mkdir -p $log_path
    fi
    if [ ! -f $_SCRIPT_LOGFILE ] ; then
        # BUGFIX: create the file before chmod; the original chmod'ed a
        # path that did not exist yet, so the chmod silently failed.
        touch $_SCRIPT_LOGFILE
        chmod 777 $_SCRIPT_LOGFILE
    fi
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE
}

if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

# Run relative to the script's own directory so _CODE_DIR resolves.
cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_CINDER_DIR}" ] ; then
    log "Could not find the cinder installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

log "checking previous installation..."
if [ -d "${_BACKUP_DIR}/cinder" ] ; then
    log "It seems cinder timestamp query has already been installed!"
    log "Please check README for solution if this is not true."
    exit 1
fi

log "backing up current files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}/cinder"
# NOTE: the (empty) etc/cinder backup dir is expected by uninstall.sh's
# cleanup step, so keep creating it even though nothing is copied there.
mkdir -p "${_BACKUP_DIR}/etc/cinder"
cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/cinder"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/cinder"
    # BUGFIX: use log instead of bare echo so the failure is recorded
    # in the install log like every other message.
    log "Error in code backup, aborted."
    exit 1
fi

log "copying in new files..."
cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}`
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering original files..."
    cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder"
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please install manually."
    fi
    exit 1
fi

service openstack-cinder-api restart

if [ $? -ne 0 ] ; then
    log "There was an error in restarting the service, please restart cinder api manually."
    exit 1
fi

log "Completed."
log "See README to get started."

exit 0
|
@ -0,0 +1,115 @@
|
||||
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

# Uninstalls the cinder timestamp-query patch: restores the files saved by
# install.sh, keeps a backup of the current (patched) files for recovery,
# and restarts the cinder API service.

_CINDER_CONF_DIR="/etc/cinder"
_CINDER_CONF_FILE="cinder.conf"
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"


# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../cinder"
_BACKUP_DIR="${_CINDER_DIR}/cinder_timestamp_query_patch-installation-backup"

#_SCRIPT_NAME="${0##*/}"
#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log"

# log MESSAGE...
# Echo to stdout and append a UTC-timestamped copy to the install log,
# creating the log directory and file on first use.
function log()
{
    if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then
        mkdir -p `dirname ${_CINDER_INSTALL_LOG}`
        touch $_CINDER_INSTALL_LOG
        chmod 777 $_CINDER_INSTALL_LOG
    fi
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG
}

if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_CINDER_DIR}" ] ; then
    log "Could not find the cinder installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi
if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then
    log "Could not find cinder config file. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

log "checking backup..."
if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then
    log "Could not find backup files. It is possible that the timestamp query patch has been uninstalled."
    log "If this is not the case, then please uninstall manually."
    exit 1
fi

log "backing up current files that might be overwritten..."
if [ -d "${_BACKUP_DIR}/uninstall" ] ; then
    rm -r "${_BACKUP_DIR}/uninstall"
fi
mkdir -p "${_BACKUP_DIR}/uninstall/cinder"
mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder"
# BUGFIX: this patch modifies cinder/db (see install.sh), not cinder/volume;
# backing up "volume" was a copy-paste leftover from the cinder-proxy scripts
# and left the patched code unprotected if the restore below failed.
cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/uninstall/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    log "Error in code backup, aborted."
    exit 1
fi
cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    rm -r "${_BACKUP_DIR}/uninstall/etc"
    log "Error in config backup, aborted."
    exit 1
fi

log "restoring code to the status before installing the timestamp query patch..."
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}`
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering current files..."
    cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}`
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please uninstall manually."
    fi
    exit 1
fi


log "cleaning up backup files..."
rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc"
if [ $? -ne 0 ] ; then
    log "There was an error when cleaning up the backup files."
fi

log "restarting cinder api..."
service openstack-cinder-api restart
if [ $? -ne 0 ] ; then
    # BUGFIX: this script restarts the API service, so tell the operator to
    # restart cinder api (the old message said "cinder volume").
    log "There was an error in restarting the service, please restart cinder api manually."
    exit 1
fi

log "Completed."

exit 0
|
65
icehouse-patches/cinder/uuid-mapping-patch/README.md
Normal file
65
icehouse-patches/cinder/uuid-mapping-patch/README.md
Normal file
@ -0,0 +1,65 @@
|
||||
Cinder uuid-mapping-patch
|
||||
===============================
|
||||
|
||||
it will be patched in cascading level's control node
|
||||
|
||||
Cascading level node can manage volume/snapshot/backup/ in cascaded level node,
|
||||
because the mapping_uuid stored at the cascading level represents the relationship to the
volume/snapshot/backup at the cascaded level.
|
||||
|
||||
Key modules
|
||||
-----------
|
||||
|
||||
* adding mapping_uuid column in cinder volume /cinder snapshot /cinder backup table,
|
||||
when cinder synchronizes db:
|
||||
|
||||
cinder\db\sqlalchemy\migrate_repo\versions\023_add_mapping_uuid.py
|
||||
cinder\db\sqlalchemy\migrate_repo\versions\024_snapshots_add_mapping_uuid.py
|
||||
cinder\db\sqlalchemy\migrate_repo\versions\025_backup_add_mapping_uuid.py
|
||||
cinder\db\sqlalchemy\models.py
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
* openstack icehouse has been installed
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
We provide two ways to install the mapping-uuid-patch code. In this section, we will guide you through installing the instance_mapping_uuid patch.
|
||||
|
||||
* **Note:**
|
||||
|
||||
- Make sure you have an existing installation of **Openstack Icehouse**.
|
||||
- We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified:
|
||||
|
||||
* **Manual Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
|
||||
- Navigate to the local repository and copy the contents in 'cinder' sub-directory to the corresponding places in existing cinder, e.g.
|
||||
```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR```
|
||||
(replace the $... with actual directory name.)
|
||||
|
||||
- synchronize the cinder db.
|
||||
```
|
||||
mysql -u root -p$MYSQL_PASS -e "DROP DATABASE if exists cinder;
|
||||
CREATE DATABASE cinder;
|
||||
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY $MYSQL_PASS;
|
||||
GRANT ALL PRIVILEGES ON *.* TO 'cinder'@'%'IDENTIFIED BY $MYSQL_PASS;
|
||||
cinder-manage db sync
|
||||
```
|
||||
|
||||
- Done. The cinder proxy should be working with a demo configuration.
|
||||
|
||||
* **Automatic Installation**
|
||||
|
||||
- Make sure you have performed backups properly.
|
||||
|
||||
- Navigate to the installation directory and run installation script.
|
||||
```
|
||||
cd $LOCAL_REPOSITORY_DIR/installation
|
||||
sudo bash ./install.sh
|
||||
```
|
||||
(replace the $... with actual directory name.)
|
||||
|
@ -0,0 +1,36 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from sqlalchemy import String, Column, MetaData, Table
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Add the Tricircle mapping_uuid column to the volumes table."""
    meta = MetaData(bind=migrate_engine)

    volumes = Table('volumes', meta, autoload=True)
    volumes.create_column(Column('mapping_uuid', String(36)))


def downgrade(migrate_engine):
    """Drop the mapping_uuid column from the volumes table."""
    meta = MetaData(bind=migrate_engine)

    volumes = Table('volumes', meta, autoload=True)
    volumes.drop_column(volumes.columns.mapping_uuid)
@ -0,0 +1,34 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from sqlalchemy import Column
|
||||
from sqlalchemy import MetaData, String, Table
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Add the Tricircle mapping_uuid column to the snapshots table."""
    meta = MetaData(bind=migrate_engine)

    snapshots = Table('snapshots', meta, autoload=True)
    snapshots.create_column(Column('mapping_uuid', String(36)))
    # Explicitly initialise the new column to NULL for existing rows.
    snapshots.update().values(mapping_uuid=None).execute()


def downgrade(migrate_engine):
    """Drop the mapping_uuid column from the snapshots table."""
    meta = MetaData(bind=migrate_engine)

    snapshots = Table('snapshots', meta, autoload=True)
    snapshots.drop_column(snapshots.columns.mapping_uuid)
|
@ -0,0 +1,34 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from sqlalchemy import Column
|
||||
from sqlalchemy import MetaData, String, Table
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Add the Tricircle mapping_uuid column to the backups table."""
    meta = MetaData(bind=migrate_engine)

    backups = Table('backups', meta, autoload=True)
    backups.create_column(Column('mapping_uuid', String(36)))
    # Explicitly initialise the new column to NULL for existing rows.
    backups.update().values(mapping_uuid=None).execute()


def downgrade(migrate_engine):
    """Drop the mapping_uuid column from the backups table."""
    meta = MetaData(bind=migrate_engine)

    backups = Table('backups', meta, autoload=True)
    backups.drop_column(backups.columns.mapping_uuid)
|
@ -0,0 +1,515 @@
|
||||
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Piston Cloud Computing, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
SQLAlchemy models for cinder data.
|
||||
"""
|
||||
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, schema
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy import ForeignKey, DateTime, Boolean
|
||||
from sqlalchemy.orm import relationship, backref
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from cinder.openstack.common.db.sqlalchemy import models
|
||||
from cinder.openstack.common import timeutils
|
||||
|
||||
|
||||
# Global config object; provides the volume/snapshot/backup name templates
# used by the model name properties below.
CONF = cfg.CONF
# Declarative base shared by every model class in this module.
BASE = declarative_base()
|
||||
|
||||
|
||||
class CinderBase(models.TimestampMixin,
                 models.ModelBase):

    """Base class for Cinder Models.

    Adds soft-delete bookkeeping (``deleted`` / ``deleted_at``) on top of
    the common oslo model mixins.
    """

    __table_args__ = {'mysql_engine': 'InnoDB'}

    # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage
    # of implementing of BP db-cleanup
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, default=False)
    # NOTE(review): presumably set to None to keep the attribute free of
    # SQLAlchemy's declarative MetaData -- confirm.
    metadata = None

    def delete(self, session=None):
        """Soft-delete this object: mark it deleted and persist the change."""
        self.deleted = True
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
|
||||
|
||||
|
||||
class Service(BASE, CinderBase):

    """Represents a running service on a host."""

    __tablename__ = 'services'
    id = Column(Integer, primary_key=True)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    binary = Column(String(255))
    topic = Column(String(255))
    # NOTE(review): looks like a liveness/heartbeat counter -- confirm.
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    availability_zone = Column(String(255), default='cinder')
    # Operator-supplied explanation for why the service was disabled.
    disabled_reason = Column(String(255))
|
||||
|
||||
|
||||
class Volume(BASE, CinderBase):

    """Represents a block storage device that can be attached to a vm."""
    __tablename__ = 'volumes'
    id = Column(String(36), primary_key=True)
    _name_id = Column(String(36))  # Don't access/modify this directly!

    @property
    def name_id(self):
        # Fall back to the primary key id when no separate name id is set.
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        # Display name derived from the configured volume_name_template.
        return CONF.volume_name_template % self.name_id

    ec2_id = Column(Integer)
    user_id = Column(String(255))
    project_id = Column(String(255))

    snapshot_id = Column(String(36))

    host = Column(String(255))  # , ForeignKey('hosts.id'))
    size = Column(Integer)
    availability_zone = Column(String(255))  # TODO(vish): foreign key?
    instance_uuid = Column(String(36))
    attached_host = Column(String(255))
    mountpoint = Column(String(255))
    attach_time = Column(String(255))  # TODO(vish): datetime
    status = Column(String(255))  # TODO(vish): enum?
    attach_status = Column(String(255))  # TODO(vish): enum
    migration_status = Column(String(255))

    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    display_name = Column(String(255))
    display_description = Column(String(255))

    # Backend-specific location/credential/geometry strings.
    provider_location = Column(String(255))
    provider_auth = Column(String(255))
    provider_geometry = Column(String(255))

    volume_type_id = Column(String(36))
    source_volid = Column(String(36))
    encryption_key_id = Column(String(36))

    # NOTE: re-declares CinderBase.deleted with the same default.
    deleted = Column(Boolean, default=False)
    bootable = Column(Boolean, default=False)

    # Tricircle uuid-mapping patch: id of the corresponding volume in the
    # cascaded OpenStack (added by migration 023_add_mapping_uuid).
    mapping_uuid = Column(String(36))
|
||||
|
||||
|
||||
class VolumeMetadata(BASE, CinderBase):

    """Represents a metadata key/value pair for a volume."""
    __tablename__ = 'volume_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
    # The join condition filters out soft-deleted metadata rows.
    volume = relationship(Volume, backref="volume_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeMetadata.volume_id == Volume.id,'
                          'VolumeMetadata.deleted == False)')
|
||||
|
||||
|
||||
class VolumeAdminMetadata(BASE, CinderBase):

    """Represents a administrator metadata key/value pair for a volume."""
    __tablename__ = 'volume_admin_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
    # The join condition filters out soft-deleted metadata rows.
    volume = relationship(Volume, backref="volume_admin_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeAdminMetadata.volume_id == Volume.id,'
                          'VolumeAdminMetadata.deleted == False)')
|
||||
|
||||
|
||||
class VolumeTypes(BASE, CinderBase):

    """Represent possible volume_types of volumes offered."""
    __tablename__ = "volume_types"
    id = Column(String(36), primary_key=True)
    name = Column(String(255))
    # A reference to qos_specs entity
    qos_specs_id = Column(String(36),
                          ForeignKey('quality_of_service_specs.id'))
    # One-to-many: all non-deleted volumes of this type.
    volumes = relationship(Volume,
                           backref=backref('volume_type', uselist=False),
                           foreign_keys=id,
                           primaryjoin='and_('
                           'Volume.volume_type_id == VolumeTypes.id, '
                           'VolumeTypes.deleted == False)')
|
||||
|
||||
|
||||
class VolumeTypeExtraSpecs(BASE, CinderBase):

    """Represents additional specs as key/value pairs for a volume_type."""
    __tablename__ = 'volume_type_extra_specs'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    volume_type_id = Column(String(36),
                            ForeignKey('volume_types.id'),
                            nullable=False)
    # The join condition filters out soft-deleted spec rows.
    volume_type = relationship(
        VolumeTypes,
        backref="extra_specs",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
        'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
        'VolumeTypeExtraSpecs.deleted == False)'
    )
|
||||
|
||||
|
||||
class QualityOfServiceSpecs(BASE, CinderBase):

    """Represents QoS specs as key/value pairs.

    QoS specs is standalone entity that can be associated/disassociated
    with volume types (one to many relation). Adjacency list relationship
    pattern is used in this model in order to represent following hierarchical
    data with in flat table, e.g, following structure

    qos-specs-1  'Rate-Limit'
         |
         +------>  consumer = 'front-end'
         +------>  total_bytes_sec = 1048576
         +------>  total_iops_sec = 500

    qos-specs-2  'QoS_Level1'
         |
         +------>  consumer = 'back-end'
         +------>  max-iops = 1000
         +------>  min-iops = 200

    is represented by:

      id        specs_id    key               value
    ------      --------    ---------------   ----------
    UUID-1      NULL        QoSSpec_Name      Rate-Limit
    UUID-2      UUID-1      consumer          front-end
    UUID-3      UUID-1      total_bytes_sec   1048576
    UUID-4      UUID-1      total_iops_sec    500
    UUID-5      NULL        QoSSpec_Name      QoS_Level1
    UUID-6      UUID-5      consumer          back-end
    UUID-7      UUID-5      max-iops          1000
    UUID-8      UUID-5      min-iops          200
    """
    __tablename__ = 'quality_of_service_specs'
    id = Column(String(36), primary_key=True)
    # Self-referential FK: child spec rows point at their root spec row;
    # NULL marks a root ("QoSSpec_Name") row.
    specs_id = Column(String(36), ForeignKey(id))
    key = Column(String(255))
    value = Column(String(255))

    # Child rows of a root spec; deleting a root cascades to its children.
    specs = relationship(
        "QualityOfServiceSpecs",
        cascade="all, delete-orphan",
        backref=backref("qos_spec", remote_side=id),
    )

    # Volume types associated with this spec (matches either the root row
    # id or a child's specs_id), excluding soft-deleted specs.
    vol_types = relationship(
        VolumeTypes,
        backref=backref('qos_specs'),
        foreign_keys=id,
        primaryjoin='and_('
                    'or_(VolumeTypes.qos_specs_id == '
                    'QualityOfServiceSpecs.id,'
                    'VolumeTypes.qos_specs_id == '
                    'QualityOfServiceSpecs.specs_id),'
                    'QualityOfServiceSpecs.deleted == False)')
|
||||
|
||||
|
||||
class VolumeGlanceMetadata(BASE, CinderBase):

    """Glance metadata for a bootable volume."""
    __tablename__ = 'volume_glance_metadata'
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), ForeignKey('volumes.id'))
    snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
    key = Column(String(255))
    # Text (not String) because image metadata values can be long.
    value = Column(Text)
    # The join condition filters out soft-deleted metadata rows.
    volume = relationship(Volume, backref="volume_glance_metadata",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeGlanceMetadata.volume_id == Volume.id,'
                          'VolumeGlanceMetadata.deleted == False)')
|
||||
|
||||
|
||||
class Quota(BASE, CinderBase):

    """Represents a single quota override for a project.

    If there is no row for a given project id and resource, then the
    default for the quota class is used. If there is no row for a
    given quota class and resource, then the default for the
    deployment is used. If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """

    __tablename__ = 'quotas'
    id = Column(Integer, primary_key=True)

    project_id = Column(String(255), index=True)

    # Name of the quota'd resource (e.g. a resource key, not a row FK).
    resource = Column(String(255))
    # NULL means unlimited (see class docstring).
    hard_limit = Column(Integer, nullable=True)
|
||||
|
||||
|
||||
class QuotaClass(BASE, CinderBase):

    """Represents a single quota override for a quota class.

    If there is no row for a given quota class and resource, then the
    default for the deployment is used. If the row is present but the
    hard limit is Null, then the resource is unlimited.
    """

    __tablename__ = 'quota_classes'
    id = Column(Integer, primary_key=True)

    class_name = Column(String(255), index=True)

    resource = Column(String(255))
    # NULL means unlimited (see class docstring).
    hard_limit = Column(Integer, nullable=True)
|
||||
|
||||
|
||||
class QuotaUsage(BASE, CinderBase):

    """Represents the current usage for a given resource."""

    __tablename__ = 'quota_usages'
    id = Column(Integer, primary_key=True)

    project_id = Column(String(255), index=True)
    resource = Column(String(255))

    in_use = Column(Integer)
    reserved = Column(Integer)

    @property
    def total(self):
        """Total consumption: committed usage plus outstanding reservations."""
        return self.in_use + self.reserved

    # NOTE(review): presumably a countdown until usage is recalculated
    # from the source tables -- confirm against the quota driver.
    until_refresh = Column(Integer, nullable=True)
|
||||
|
||||
|
||||
class Reservation(BASE, CinderBase):

    """Represents a resource reservation for quotas."""

    __tablename__ = 'reservations'
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36), nullable=False)

    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)

    project_id = Column(String(255), index=True)
    resource = Column(String(255))

    # Signed quantity reserved against the usage row.
    delta = Column(Integer)
    # When the reservation lapses if not committed or rolled back.
    expire = Column(DateTime, nullable=False)

    # The join condition filters out soft-deleted usage rows.
    usage = relationship(
        "QuotaUsage",
        foreign_keys=usage_id,
        primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
                    'QuotaUsage.deleted == 0)')
|
||||
|
||||
|
||||
class Snapshot(BASE, CinderBase):

    """Represents a snapshot of volume."""
    __tablename__ = 'snapshots'
    id = Column(String(36), primary_key=True)

    @property
    def name(self):
        # Display name derived from the configured snapshot_name_template.
        return CONF.snapshot_name_template % self.id

    @property
    def volume_name(self):
        # Delegates to the parent volume's name property.
        return self.volume.name  # pylint: disable=E1101

    user_id = Column(String(255))
    project_id = Column(String(255))

    volume_id = Column(String(36))
    status = Column(String(255))
    progress = Column(String(255))
    volume_size = Column(Integer)

    display_name = Column(String(255))
    display_description = Column(String(255))

    encryption_key_id = Column(String(36))
    volume_type_id = Column(String(36))

    # Backend-specific location string.
    provider_location = Column(String(255))

    # NOTE: unlike the metadata joins, this one does not filter on deleted.
    volume = relationship(Volume, backref="snapshots",
                          foreign_keys=volume_id,
                          primaryjoin='Snapshot.volume_id == Volume.id')

    # Tricircle uuid-mapping patch: id of the corresponding snapshot in the
    # cascaded OpenStack (added by migration 024_snapshots_add_mapping_uuid).
    mapping_uuid = Column(String(36))
|
||||
|
||||
|
||||
class SnapshotMetadata(BASE, CinderBase):

    """Represents a metadata key/value pair for a snapshot."""
    __tablename__ = 'snapshot_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    snapshot_id = Column(String(36),
                         ForeignKey('snapshots.id'),
                         nullable=False)
    # The join condition filters out soft-deleted metadata rows.
    snapshot = relationship(Snapshot, backref="snapshot_metadata",
                            foreign_keys=snapshot_id,
                            primaryjoin='and_('
                            'SnapshotMetadata.snapshot_id == Snapshot.id,'
                            'SnapshotMetadata.deleted == False)')
|
||||
|
||||
|
||||
class IscsiTarget(BASE, CinderBase):

    """Represents an iscsi target for a given host."""
    __tablename__ = 'iscsi_targets'
    # A given target number may be used only once per host.
    __table_args__ = (schema.UniqueConstraint("target_num", "host"),
                      {'mysql_engine': 'InnoDB'})
    id = Column(Integer, primary_key=True)
    target_num = Column(Integer)
    host = Column(String(255))
    # Nullable: a target row can exist before a volume is assigned to it.
    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
    volume = relationship(Volume,
                          backref=backref('iscsi_target', uselist=False),
                          foreign_keys=volume_id,
                          primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
                          'IscsiTarget.deleted==False)')
|
||||
|
||||
|
||||
class Backup(BASE, CinderBase):

    """Represents a backup of a volume to Swift."""
    __tablename__ = 'backups'
    id = Column(String(36), primary_key=True)

    @property
    def name(self):
        # Display name derived from the configured backup_name_template.
        return CONF.backup_name_template % self.id

    user_id = Column(String(255), nullable=False)
    project_id = Column(String(255), nullable=False)

    volume_id = Column(String(36), nullable=False)
    host = Column(String(255))
    availability_zone = Column(String(255))
    display_name = Column(String(255))
    display_description = Column(String(255))
    # Container (e.g. Swift container) holding the backup objects.
    container = Column(String(255))
    status = Column(String(255))
    fail_reason = Column(String(255))
    service_metadata = Column(String(255))
    service = Column(String(255))
    size = Column(Integer)
    object_count = Column(Integer)
    # Tricircle uuid-mapping patch: id of the corresponding backup in the
    # cascaded OpenStack (added by migration 025_backup_add_mapping_uuid).
    mapping_uuid = Column(String(36))
|
||||
|
||||
|
||||
class Encryption(BASE, CinderBase):
    """Represents encryption requirement for a volume type.

    Encryption here is a set of performance characteristics describing
    cipher, provider, and key_size for a certain volume type.
    """

    __tablename__ = 'encryption'
    cipher = Column(String(255))
    key_size = Column(Integer)
    provider = Column(String(255))
    control_location = Column(String(255))
    # One encryption spec per volume type: the FK itself is the primary key.
    volume_type_id = Column(String(36),
                            ForeignKey('volume_types.id'),
                            primary_key=True)
    # Link to the owning volume type; join excludes soft-deleted specs.
    volume_type = relationship(
        VolumeTypes,
        backref="encryption",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
        'Encryption.volume_type_id == VolumeTypes.id,'
        'Encryption.deleted == False)'
    )
|
||||
|
||||
|
||||
class Transfer(BASE, CinderBase):
    """Represents a volume transfer request."""
    __tablename__ = 'transfers'
    id = Column(String(36), primary_key=True)
    volume_id = Column(String(36), ForeignKey('volumes.id'))
    display_name = Column(String(255))
    # salt + crypt_hash protect the transfer auth key; expires_at bounds
    # how long the transfer offer remains valid.
    salt = Column(String(255))
    crypt_hash = Column(String(255))
    expires_at = Column(DateTime)
    # Link to the volume being transferred; join excludes soft-deleted rows.
    volume = relationship(Volume, backref="transfer",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'Transfer.volume_id == Volume.id,'
                          'Transfer.deleted == False)')
|
||||
|
||||
|
||||
def register_models():
    """Register Models and create metadata.

    Called from cinder.db.sqlalchemy.__init__ as part of loading the driver,
    it will never need to be called explicitly elsewhere unless the
    connection is lost and needs to be reestablished.
    """
    from sqlalchemy import create_engine
    # NOTE(review): all models share BASE.metadata, so the first
    # create_all() call already creates every table registered on that
    # metadata; the per-model loop is redundant but kept for parity with
    # the upstream cinder code.
    models = (Backup,
              Service,
              Volume,
              VolumeMetadata,
              VolumeAdminMetadata,
              SnapshotMetadata,
              Transfer,
              VolumeTypeExtraSpecs,
              VolumeTypes,
              VolumeGlanceMetadata,
              )
    engine = create_engine(CONF.database.connection, echo=False)
    for model in models:
        model.metadata.create_all(engine)
|
@ -0,0 +1,92 @@
|
||||
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

# Installs the cinder mapping-uuid patch: backs up the stock cinder
# package, copies the patched sources in, then recreates and re-syncs
# the cinder database.

_MYSQL_PASS="1234"
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../cinder"
_BACKUP_DIR="${_CINDER_DIR}/cinder_mapping_uuid_patch-installation-backup"

_SCRIPT_LOGFILE="/var/log/cinder/cinder_mapping_uuid_patch/installation/install.log"

# log MESSAGE... -- echo to stdout and append a UTC-timestamped copy to
# the logfile, creating the log directory on first use.
function log()
{
    log_path=`dirname ${_SCRIPT_LOGFILE}`
    if [ ! -d $log_path ] ; then
        mkdir -p $log_path
        chmod 777 $log_path
    fi
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE
}

if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

# Relative paths (_CODE_DIR) are resolved from the script's own directory.
cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_CINDER_DIR}" ] ; then
    log "Could not find the cinder installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

# An existing backup directory means a previous install already ran.
log "checking previous installation..."
if [ -d "${_BACKUP_DIR}/cinder" ] ; then
    log "It seems cinder mapping-uuid-patch has already been installed!"
    log "Please check README for solution if this is not true."
    exit 1
fi

log "backing up current files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}/cinder"
mkdir -p "${_BACKUP_DIR}/etc/cinder"
cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/cinder"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/cinder"
    # FIX: use log (not bare echo) so the failure also reaches the logfile.
    log "Error in code backup, aborted."
    exit 1
fi

log "copying in new files..."
cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}`
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering original files..."
    cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder"
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please install manually."
    fi
    exit 1
fi

# FIX: message typo "syc" -> "syncing"; this step drops and recreates the
# cinder database before re-syncing the schema. DESTRUCTIVE: all existing
# cinder data is lost.
log "syncing cinder db..."
mysql -u root -p$_MYSQL_PASS -e "DROP DATABASE if exists cinder;CREATE DATABASE cinder;"

cinder-manage db sync

if [ $? -ne 0 ] ; then
    # FIX: the failing step is the database sync, not a service restart.
    log "There was an error in syncing the cinder database, please run 'cinder-manage db sync' manually."
    exit 1
fi

log "Completed."
log "See README to get started."
exit 0
|
@ -0,0 +1,118 @@
|
||||
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

# Uninstalls the cinder mapping-uuid patch: restores the backed-up stock
# sources, recreates the cinder database, and restarts cinder-api.

_MYSQL_PASS="1234"
_CINDER_CONF_DIR="/etc/cinder"
_CINDER_CONF_FILE="cinder.conf"
_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
_CINDER_INSTALL_LOG="/var/log/cinder/cinder_mapping_uuid_patch/installation/install.log"


# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../cinder"
_BACKUP_DIR="${_CINDER_DIR}/cinder_mapping_uuid_patch-installation-backup"

#_SCRIPT_NAME="${0##*/}"
#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log"

# log MESSAGE... -- echo to stdout and append a UTC-timestamped copy to
# the shared install logfile, creating it on first use.
function log()
{
    if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then
        mkdir -p `dirname ${_CINDER_INSTALL_LOG}`
        touch $_CINDER_INSTALL_LOG
        chmod 777 $_CINDER_INSTALL_LOG
    fi
    echo "$@"
    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG
}

if [[ ${EUID} -ne 0 ]]; then
    log "Please run as root."
    exit 1
fi

# Relative paths are resolved from the script's own directory.
cd `dirname $0`

log "checking installation directories..."
if [ ! -d "${_CINDER_DIR}" ] ; then
    log "Could not find the cinder installation. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi
if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then
    log "Could not find cinder config file. Please check the variables in the beginning of the script."
    log "aborted."
    exit 1
fi

# The install-time backup must exist; without it there is nothing to restore.
log "checking backup..."
if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then
    log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled."
    log "If this is not the case, then please uninstall manually."
    exit 1
fi

# Snapshot the currently-installed (patched) files so a failed restore
# can be rolled back.
log "backing up current files that might be overwritten..."
if [ -d "${_BACKUP_DIR}/uninstall" ] ; then
    rm -r "${_BACKUP_DIR}/uninstall"
fi
mkdir -p "${_BACKUP_DIR}/uninstall/cinder"
mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder"
cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/uninstall/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    log "Error in code backup, aborted."
    exit 1
fi
cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/uninstall/cinder"
    rm -r "${_BACKUP_DIR}/uninstall/etc"
    log "Error in config backup, aborted."
    exit 1
fi

log "restoring code to the status before installing cinder-proxy..."
cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}`
if [ $? -ne 0 ] ; then
    log "Error in copying, aborted."
    log "Recovering current files..."
    cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}`
    if [ $? -ne 0 ] ; then
        log "Recovering failed! Please uninstall manually."
    fi
    exit 1
fi


log "cleaning up backup files..."
rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc"
if [ $? -ne 0 ] ; then
    log "There was an error when cleaning up the backup files."
fi

# DESTRUCTIVE: drops and recreates the cinder database, then re-syncs
# the (restored) schema and restarts the API service.
log "restarting cinder api..."
mysql -u root -p$_MYSQL_PASS -e "DROP DATABASE if exists cinder;CREATE DATABASE cinder;"
cinder-manage db sync
service openstack-cinder-api restart
if [ $? -ne 0 ] ; then
    # FIX: the service restarted here is cinder-api, not cinder-volume.
    log "There was an error in restarting the service, please restart cinder api manually."
    exit 1
fi

log "Completed."

exit 0
|
23
icehouse-patches/glance/glance_location_patch/README.md
Normal file
23
icehouse-patches/glance/glance_location_patch/README.md
Normal file
@ -0,0 +1,23 @@
|
||||
Glance-Cascading Patch
|
||||
================
|
||||
|
||||
|
||||
Introduction
|
||||
-----------------------------
|
||||
|
||||
*For glance cascading, we have to create a relationship between one cascading glance and several cascaded glances. To achieve this goal, we use glance's multi-location feature: the relationship can be stored as a location with a special format. Besides, we modify the image status changing rule: the image's status toggles to 'active' only after the cascaded glances have been synced. Because of these two reasons, a few existing source files were modified to adapt to cascading:
|
||||
|
||||
glance/store/http.py
|
||||
glance/store/__init__.py
|
||||
glance/api/v2/image.py
|
||||
glance/gateway.py
|
||||
glance/common/utils.py
|
||||
glance/common/config.py
|
||||
glance/common/exception.py
|
||||
|
||||
|
||||
Install
|
||||
------------------------------
|
||||
|
||||
|
||||
*To apply this patch, simply replace the original files with these files, or run the install.sh script in the glancesync/installation/ directory.
|
@ -0,0 +1,21 @@
|
||||
[console_scripts]
|
||||
glance-api = glance.cmd.api:main
|
||||
glance-cache-cleaner = glance.cmd.cache_cleaner:main
|
||||
glance-cache-manage = glance.cmd.cache_manage:main
|
||||
glance-cache-prefetcher = glance.cmd.cache_prefetcher:main
|
||||
glance-cache-pruner = glance.cmd.cache_pruner:main
|
||||
glance-control = glance.cmd.control:main
|
||||
glance-manage = glance.cmd.manage:main
|
||||
glance-registry = glance.cmd.registry:main
|
||||
glance-replicator = glance.cmd.replicator:main
|
||||
glance-scrubber = glance.cmd.scrubber:main
|
||||
|
||||
[glance.common.image_location_strategy.modules]
|
||||
location_order_strategy = glance.common.location_strategy.location_order
|
||||
store_type_strategy = glance.common.location_strategy.store_type
|
||||
|
||||
[glance.sync.store.location]
|
||||
filesystem = glance.sync.store._drivers.filesystem:LocationCreator
|
||||
|
||||
[glance.sync.store.driver]
|
||||
filesystem = glance.sync.store._drivers.filesystem:Store
|
@ -0,0 +1,822 @@
|
||||
# Copyright 2012 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
from oslo.config import cfg
|
||||
import six.moves.urllib.parse as urlparse
|
||||
import webob.exc
|
||||
|
||||
from glance.api import policy
|
||||
from glance.common import exception
|
||||
from glance.common import location_strategy
|
||||
from glance.common import utils
|
||||
from glance.common import wsgi
|
||||
import glance.db
|
||||
import glance.gateway
|
||||
import glance.notifier
|
||||
from glance.openstack.common import jsonutils as json
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.openstack.common import timeutils
|
||||
import glance.schema
|
||||
import glance.store
|
||||
import glance.sync.client.v1.api as sync_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('disk_formats', 'glance.common.config', group='image_format')
|
||||
CONF.import_opt('container_formats', 'glance.common.config',
|
||||
group='image_format')
|
||||
CONF.import_opt('sync_enabled', 'glance.common.config')
|
||||
|
||||
|
||||
class ImagesController(object):
    """Glance v2 images API controller, extended for glance cascading.

    In addition to the stock controller, it wires a sync-service client
    (``sync_api``) into the gateway and, in :meth:`show`, reports whether an
    image has been synced to the cascaded endpoints.
    """

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        # Each collaborator falls back to its module-level default so the
        # controller can be constructed with no arguments in production.
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance.store
        # Cascading addition: configure the sync client and pass it to the
        # gateway alongside the standard collaborators.
        self.sync_api = sync_api
        self.sync_api.configure_sync_client()
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy,
                                              self.sync_api)

    @utils.mutating
    def create(self, req, image, extra_properties, tags):
        """Create a new image and return it.

        Domain exceptions are mapped onto webob HTTP errors:
        DuplicateLocation/InvalidParameterValue -> 400, Forbidden -> 403,
        LimitExceeded -> 413.
        """
        image_factory = self.gateway.get_image_factory(req.context)
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_factory.new_image(extra_properties=extra_properties,
                                            tags=tags, **image)
            image_repo.add(image)
        except exception.DuplicateLocation as dup:
            raise webob.exc.HTTPBadRequest(explanation=dup.msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.InvalidParameterValue as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except exception.LimitExceeded as e:
            LOG.info(unicode(e))
            raise webob.exc.HTTPRequestEntityTooLarge(
                explanation=e.msg, request=req, content_type='text/plain')

        return image

    def index(self, req, marker=None, limit=None, sort_key='created_at',
              sort_dir='desc', filters=None, member_status='accepted'):
        """List images, returning {'images': [...]} plus an optional
        'next_marker' when a full page was returned."""
        result = {}
        if filters is None:
            filters = {}
        # Soft-deleted images are never listed.
        filters['deleted'] = False

        if limit is None:
            limit = CONF.limit_param_default
        # Clamp the requested page size to the configured maximum.
        limit = min(CONF.api_limit_max, limit)

        image_repo = self.gateway.get_repo(req.context)
        try:
            images = image_repo.list(marker=marker, limit=limit,
                                     sort_key=sort_key, sort_dir=sort_dir,
                                     filters=filters,
                                     member_status=member_status)
            # A full page implies there may be more results; expose the last
            # id so the client can continue paging.
            if len(images) != 0 and len(images) == limit:
                result['next_marker'] = images[-1].image_id
        except (exception.NotFound, exception.InvalidSortKey,
                exception.InvalidFilterRangeValue) as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        result['images'] = images
        return result

    def show(self, req, image_id):
        """Return a single image; 403/404 on Forbidden/NotFound.

        Cascading addition: when sync is enabled, annotate the image with
        its sync status against the cascaded endpoints via
        utils.check_synced().
        """
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            if CONF.sync_enabled:
                sync_client = sync_api.get_sync_client(req.context)
                eps = sync_client.get_cascaded_endpoints()
                utils.check_synced(image, eps)
            return image
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

    @utils.mutating
    def update(self, req, image_id, changes):
        """Apply a list of JSON-patch-style changes to an image.

        Each change dict is dispatched to the matching _do_<op> handler;
        the image is saved once at the end if any change was supplied.
        """
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)

            for change in changes:
                # Dispatch by operation name: _do_replace / _do_add /
                # _do_remove. The assert guards against ops that slipped
                # past deserializer validation.
                change_method_name = '_do_%s' % change['op']
                assert hasattr(self, change_method_name)
                change_method = getattr(self, change_method_name)
                change_method(req, image, change)

            if changes:
                image_repo.save(image)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.InvalidParameterValue as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except exception.StorageQuotaFull as e:
            msg = (_("Denying attempt to upload image because it exceeds the ."
                     "quota: %s") % e)
            LOG.info(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(
                explanation=msg, request=req, content_type='text/plain')
        except exception.LimitExceeded as e:
            LOG.info(unicode(e))
            raise webob.exc.HTTPRequestEntityTooLarge(
                explanation=e.msg, request=req, content_type='text/plain')

        return image

    def _do_replace(self, req, image, change):
        # Replace either the locations list (special-cased) or a single
        # top-level attribute / extra property.
        path = change['path']
        path_root = path[0]
        value = change['value']
        if path_root == 'locations':
            self._do_replace_locations(image, value)
        else:
            if hasattr(image, path_root):
                setattr(image, path_root, value)
            elif path_root in image.extra_properties:
                image.extra_properties[path_root] = value
            else:
                msg = _("Property %s does not exist.")
                raise webob.exc.HTTPConflict(msg % path_root)

    def _do_add(self, req, image, change):
        # Add a location at path[1], or create a new extra property;
        # adding an already-present property is a 409.
        path = change['path']
        path_root = path[0]
        value = change['value']
        if path_root == 'locations':
            self._do_add_locations(image, path[1], value)
        else:
            if (hasattr(image, path_root) or
                    path_root in image.extra_properties):
                msg = _("Property %s already present.")
                raise webob.exc.HTTPConflict(msg % path_root)
            image.extra_properties[path_root] = value

    def _do_remove(self, req, image, change):
        # Remove a location at path[1], or delete an extra property.
        # Real image attributes may never be removed (403).
        path = change['path']
        path_root = path[0]
        if path_root == 'locations':
            self._do_remove_locations(image, path[1])
        else:
            if hasattr(image, path_root):
                msg = _("Property %s may not be removed.")
                raise webob.exc.HTTPForbidden(msg % path_root)
            elif path_root in image.extra_properties:
                del image.extra_properties[path_root]
            else:
                msg = _("Property %s does not exist.")
                raise webob.exc.HTTPConflict(msg % path_root)

    @utils.mutating
    def delete(self, req, image_id):
        """Delete an image; 403 on Forbidden, 404 when not found."""
        image_repo = self.gateway.get_repo(image_id=None) if False else \
            self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            image.delete()
            image_repo.remove(image)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            msg = (_("Failed to find image %(image_id)s to delete") %
                   {'image_id': image_id})
            LOG.info(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)

    def _get_locations_op_pos(self, path_pos, max_pos, allow_max):
        # Translate a JSON-pointer position ('-' means "end", otherwise a
        # digit string) into a list index, or None when invalid.
        # allow_max permits index == max_pos (append); otherwise the index
        # must address an existing element.
        if path_pos is None or max_pos is None:
            return None
        pos = max_pos if allow_max else max_pos - 1
        if path_pos.isdigit():
            pos = int(path_pos)
        elif path_pos != '-':
            return None
        if (not allow_max) and (pos not in range(max_pos)):
            return None
        return pos

    def _do_replace_locations(self, image, value):
        # Only empty->non-empty or non-empty->empty transitions are allowed;
        # the image status flips between 'active' and 'queued' accordingly.
        if len(image.locations) > 0 and len(value) > 0:
            msg = _("Cannot replace locations from a non-empty "
                    "list to a non-empty list.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if len(value) == 0:
            # NOTE(zhiyan): this actually deletes the location
            # from the backend store.
            del image.locations[:]
            if image.status == 'active':
                image.status = 'queued'
        else:  # NOTE(zhiyan): len(image.locations) == 0
            try:
                image.locations = value
                if image.status == 'queued':
                    image.status = 'active'
            except (exception.BadStoreUri, exception.DuplicateLocation) as bse:
                raise webob.exc.HTTPBadRequest(explanation=bse.msg)
            except ValueError as ve:  # update image status failed.
                raise webob.exc.HTTPBadRequest(explanation=unicode(ve))

    def _do_add_locations(self, image, path_pos, value):
        # Insert a location; appending past the end is allowed
        # (allow_max=True). Gaining a first location activates the image.
        pos = self._get_locations_op_pos(path_pos,
                                         len(image.locations), True)
        if pos is None:
            msg = _("Invalid position for adding a location.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            image.locations.insert(pos, value)
            if image.status == 'queued':
                image.status = 'active'
        except (exception.BadStoreUri, exception.DuplicateLocation) as bse:
            raise webob.exc.HTTPBadRequest(explanation=bse.msg)
        except ValueError as ve:  # update image status failed.
            raise webob.exc.HTTPBadRequest(explanation=unicode(ve))

    def _do_remove_locations(self, image, path_pos):
        # Remove an existing location (allow_max=False); losing the last
        # location demotes an active image back to 'queued'.
        pos = self._get_locations_op_pos(path_pos,
                                         len(image.locations), False)
        if pos is None:
            msg = _("Invalid position for removing a location.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            # NOTE(zhiyan): this actually deletes the location
            # from the backend store.
            image.locations.pop(pos)
        except Exception as e:
            raise webob.exc.HTTPInternalServerError(explanation=unicode(e))
        if (len(image.locations) == 0) and (image.status == 'active'):
            image.status = 'queued'
|
||||
|
||||
|
||||
class RequestDeserializer(wsgi.JSONRequestDeserializer):
|
||||
|
||||
_disallowed_properties = ['direct_url', 'self', 'file', 'schema']
|
||||
_readonly_properties = ['created_at', 'updated_at', 'status', 'checksum',
|
||||
'size', 'virtual_size', 'direct_url', 'self',
|
||||
'file', 'schema']
|
||||
_reserved_properties = ['owner', 'is_public', 'location', 'deleted',
|
||||
'deleted_at']
|
||||
_base_properties = ['checksum', 'created_at', 'container_format',
|
||||
'disk_format', 'id', 'min_disk', 'min_ram', 'name',
|
||||
'size', 'virtual_size', 'status', 'tags',
|
||||
'updated_at', 'visibility', 'protected']
|
||||
_path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}}
|
||||
|
||||
def __init__(self, schema=None):
|
||||
super(RequestDeserializer, self).__init__()
|
||||
self.schema = schema or get_schema()
|
||||
|
||||
def _get_request_body(self, request):
|
||||
output = super(RequestDeserializer, self).default(request)
|
||||
if 'body' not in output:
|
||||
msg = _('Body expected in request.')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
return output['body']
|
||||
|
||||
@classmethod
|
||||
def _check_allowed(cls, image):
|
||||
for key in cls._disallowed_properties:
|
||||
if key in image:
|
||||
msg = _("Attribute '%s' is read-only.") % key
|
||||
raise webob.exc.HTTPForbidden(explanation=unicode(msg))
|
||||
|
||||
def create(self, request):
|
||||
body = self._get_request_body(request)
|
||||
self._check_allowed(body)
|
||||
try:
|
||||
self.schema.validate(body)
|
||||
except exception.InvalidObject as e:
|
||||
raise webob.exc.HTTPBadRequest(explanation=e.msg)
|
||||
image = {}
|
||||
properties = body
|
||||
tags = properties.pop('tags', None)
|
||||
for key in self._base_properties:
|
||||
try:
|
||||
image[key] = properties.pop(key)
|
||||
except KeyError:
|
||||
pass
|
||||
return dict(image=image, extra_properties=properties, tags=tags)
|
||||
|
||||
def _get_change_operation_d10(self, raw_change):
|
||||
try:
|
||||
return raw_change['op']
|
||||
except KeyError:
|
||||
msg = _("Unable to find '%s' in JSON Schema change") % 'op'
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
def _get_change_operation_d4(self, raw_change):
|
||||
op = None
|
||||
for key in ['replace', 'add', 'remove']:
|
||||
if key in raw_change:
|
||||
if op is not None:
|
||||
msg = _('Operation objects must contain only one member'
|
||||
' named "add", "remove", or "replace".')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
op = key
|
||||
if op is None:
|
||||
msg = _('Operation objects must contain exactly one member'
|
||||
' named "add", "remove", or "replace".')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
return op
|
||||
|
||||
def _get_change_path_d10(self, raw_change):
|
||||
try:
|
||||
return raw_change['path']
|
||||
except KeyError:
|
||||
msg = _("Unable to find '%s' in JSON Schema change") % 'path'
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
def _get_change_path_d4(self, raw_change, op):
|
||||
return raw_change[op]
|
||||
|
||||
def _decode_json_pointer(self, pointer):
|
||||
"""Parse a json pointer.
|
||||
|
||||
Json Pointers are defined in
|
||||
http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
|
||||
The pointers use '/' for separation between object attributes, such
|
||||
that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character
|
||||
in an attribute name is encoded as "~1" and a '~' character is encoded
|
||||
as "~0".
|
||||
"""
|
||||
self._validate_json_pointer(pointer)
|
||||
ret = []
|
||||
for part in pointer.lstrip('/').split('/'):
|
||||
ret.append(part.replace('~1', '/').replace('~0', '~').strip())
|
||||
return ret
|
||||
|
||||
def _validate_json_pointer(self, pointer):
|
||||
"""Validate a json pointer.
|
||||
|
||||
We only accept a limited form of json pointers.
|
||||
"""
|
||||
if not pointer.startswith('/'):
|
||||
msg = _('Pointer `%s` does not start with "/".') % pointer
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
if re.search('/\s*?/', pointer[1:]):
|
||||
msg = _('Pointer `%s` contains adjacent "/".') % pointer
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
if len(pointer) > 1 and pointer.endswith('/'):
|
||||
msg = _('Pointer `%s` end with "/".') % pointer
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
if pointer[1:].strip() == '/':
|
||||
msg = _('Pointer `%s` does not contains valid token.') % pointer
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
if re.search('~[^01]', pointer) or pointer.endswith('~'):
|
||||
msg = _('Pointer `%s` contains "~" not part of'
|
||||
' a recognized escape sequence.') % pointer
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
def _get_change_value(self, raw_change, op):
|
||||
if 'value' not in raw_change:
|
||||
msg = _('Operation "%s" requires a member named "value".')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg % op)
|
||||
return raw_change['value']
|
||||
|
||||
def _validate_change(self, change):
|
||||
path_root = change['path'][0]
|
||||
if path_root in self._readonly_properties:
|
||||
msg = _("Attribute '%s' is read-only.") % path_root
|
||||
raise webob.exc.HTTPForbidden(explanation=unicode(msg))
|
||||
if path_root in self._reserved_properties:
|
||||
msg = _("Attribute '%s' is reserved.") % path_root
|
||||
raise webob.exc.HTTPForbidden(explanation=unicode(msg))
|
||||
|
||||
if change['op'] == 'delete':
|
||||
return
|
||||
|
||||
partial_image = None
|
||||
if len(change['path']) == 1:
|
||||
partial_image = {path_root: change['value']}
|
||||
elif ((path_root in _get_base_properties().keys()) and
|
||||
(_get_base_properties()[path_root].get('type', '') == 'array')):
|
||||
# NOTE(zhiyan): cient can use PATCH API to adding element to
|
||||
# the image's existing set property directly.
|
||||
# Such as: 1. using '/locations/N' path to adding a location
|
||||
# to the image's 'locations' list at N position.
|
||||
# (implemented)
|
||||
# 2. using '/tags/-' path to appending a tag to the
|
||||
# image's 'tags' list at last. (Not implemented)
|
||||
partial_image = {path_root: [change['value']]}
|
||||
|
||||
if partial_image:
|
||||
try:
|
||||
self.schema.validate(partial_image)
|
||||
except exception.InvalidObject as e:
|
||||
raise webob.exc.HTTPBadRequest(explanation=e.msg)
|
||||
|
||||
def _validate_path(self, op, path):
|
||||
path_root = path[0]
|
||||
limits = self._path_depth_limits.get(path_root, {})
|
||||
if len(path) != limits.get(op, 1):
|
||||
msg = _("Invalid JSON pointer for this resource: "
|
||||
"'/%s'") % '/'.join(path)
|
||||
raise webob.exc.HTTPBadRequest(explanation=unicode(msg))
|
||||
|
||||
def _parse_json_schema_change(self, raw_change, draft_version):
|
||||
if draft_version == 10:
|
||||
op = self._get_change_operation_d10(raw_change)
|
||||
path = self._get_change_path_d10(raw_change)
|
||||
elif draft_version == 4:
|
||||
op = self._get_change_operation_d4(raw_change)
|
||||
path = self._get_change_path_d4(raw_change, op)
|
||||
else:
|
||||
msg = _('Unrecognized JSON Schema draft version')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
path_list = self._decode_json_pointer(path)
|
||||
return op, path_list
|
||||
|
||||
def update(self, request):
|
||||
changes = []
|
||||
content_types = {
|
||||
'application/openstack-images-v2.0-json-patch': 4,
|
||||
'application/openstack-images-v2.1-json-patch': 10,
|
||||
}
|
||||
if request.content_type not in content_types:
|
||||
headers = {'Accept-Patch': ', '.join(content_types.keys())}
|
||||
raise webob.exc.HTTPUnsupportedMediaType(headers=headers)
|
||||
|
||||
json_schema_version = content_types[request.content_type]
|
||||
|
||||
body = self._get_request_body(request)
|
||||
|
||||
if not isinstance(body, list):
|
||||
msg = _('Request body must be a JSON array of operation objects.')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
for raw_change in body:
|
||||
if not isinstance(raw_change, dict):
|
||||
msg = _('Operations must be JSON objects.')
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
(op, path) = self._parse_json_schema_change(raw_change,
|
||||
json_schema_version)
|
||||
|
||||
# NOTE(zhiyan): the 'path' is a list.
|
||||
self._validate_path(op, path)
|
||||
change = {'op': op, 'path': path}
|
||||
|
||||
if not op == 'remove':
|
||||
change['value'] = self._get_change_value(raw_change, op)
|
||||
self._validate_change(change)
|
||||
|
||||
changes.append(change)
|
||||
|
||||
return {'changes': changes}
|
||||
|
||||
def _validate_limit(self, limit):
|
||||
try:
|
||||
limit = int(limit)
|
||||
except ValueError:
|
||||
msg = _("limit param must be an integer")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
if limit < 0:
|
||||
msg = _("limit param must be positive")
|
||||
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
return limit
|
||||
|
||||
def _validate_sort_dir(self, sort_dir):
    """Return *sort_dir* if it is 'asc' or 'desc', else raise 400."""
    if sort_dir in ('asc', 'desc'):
        return sort_dir
    msg = _('Invalid sort direction: %s') % sort_dir
    raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
def _validate_member_status(self, member_status):
    """Return *member_status* if it is a recognized value, else raise 400."""
    if member_status in ('pending', 'accepted', 'rejected', 'all'):
        return member_status
    msg = _('Invalid status: %s') % member_status
    raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||
|
||||
def _get_filters(self, filters):
    """Validate the optional 'visibility' filter and return the filters.

    :raises webob.exc.HTTPBadRequest: visibility outside
        public/private/shared
    """
    visibility = filters.get('visibility')
    if visibility and visibility not in ('public', 'private', 'shared'):
        msg = _('Invalid visibility value: %s') % visibility
        raise webob.exc.HTTPBadRequest(explanation=msg)
    return filters
|
||||
|
||||
def index(self, request):
    """Deserialize an image-list request into validated query params.

    Returns a dict with 'sort_key', 'sort_dir', 'filters' and
    'member_status', plus 'marker'/'limit' when supplied; repeated
    ?tag= parameters are collected into filters['tags'].
    """
    raw = request.params.copy()
    limit = raw.pop('limit', None)
    marker = raw.pop('marker', None)
    sort_dir = raw.pop('sort_dir', 'desc')
    member_status = raw.pop('member_status', 'accepted')

    # NOTE (flwang) To avoid using comma or any predefined chars to split
    # multiple tags, now we allow user specify multiple 'tag' parameters
    # in URL, such as v2/images?tag=x86&tag=64bit.
    tags = []
    while 'tag' in raw:
        tags.append(raw.pop('tag').strip())

    query_params = {
        'sort_key': raw.pop('sort_key', 'created_at'),
        'sort_dir': self._validate_sort_dir(sort_dir),
        'filters': self._get_filters(raw),
        'member_status': self._validate_member_status(member_status),
    }

    if marker is not None:
        query_params['marker'] = marker
    if limit is not None:
        query_params['limit'] = self._validate_limit(limit)
    if tags:
        query_params['filters']['tags'] = tags

    return query_params
|
||||
|
||||
|
||||
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Render image domain objects as v2 Images API JSON responses."""

    def __init__(self, schema=None):
        # Fall back to the default image schema when none is supplied.
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def _get_image_href(self, image, subcollection=''):
        # Canonical v2 URL for an image, optionally extended with a
        # subcollection segment such as 'file'.
        base_href = '/v2/images/%s' % image.image_id
        if subcollection:
            base_href = '%s/%s' % (base_href, subcollection)
        return base_href

    def _format_image(self, image):
        # Build the JSON-serializable view of an image: custom properties
        # first, then the fixed attributes, then computed/link fields.
        # Raises HTTPForbidden if reading a field is not permitted.
        image_view = dict()
        try:
            image_view = dict(image.extra_properties)
            attributes = ['name', 'disk_format', 'container_format',
                          'visibility', 'size', 'virtual_size', 'status',
                          'checksum', 'protected', 'min_ram', 'min_disk',
                          'owner']
            for key in attributes:
                image_view[key] = getattr(image, key)
            image_view['id'] = image.image_id
            image_view['created_at'] = timeutils.isotime(image.created_at)
            image_view['updated_at'] = timeutils.isotime(image.updated_at)

            if CONF.show_multiple_locations:
                if image.locations:
                    image_view['locations'] = list(image.locations)
                else:
                    # NOTE (flwang): We will still show "locations": [] if
                    # image.locations is None to indicate it's allowed to show
                    # locations but it's just non-existent.
                    image_view['locations'] = []

            if CONF.show_image_direct_url and image.locations:
                # Choose best location configured strategy
                best_location = (
                    location_strategy.choose_best_location(image.locations))
                image_view['direct_url'] = best_location['url']

            image_view['tags'] = list(image.tags)
            image_view['self'] = self._get_image_href(image)
            image_view['file'] = self._get_image_href(image, 'file')
            image_view['schema'] = '/v2/schemas/image'
            # Drop any keys the schema does not allow to be exposed.
            image_view = self.schema.filter(image_view)  # domain
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        return image_view

    def create(self, response, image):
        # 201 Created with a Location header pointing at the new image.
        response.status_int = 201
        self.show(response, image)
        response.location = self._get_image_href(image)

    def show(self, response, image):
        # NOTE: unicode() is the Python 2 builtin; this module predates py3.
        image_view = self._format_image(image)
        body = json.dumps(image_view, ensure_ascii=False)
        response.unicode_body = unicode(body)
        response.content_type = 'application/json'

    def update(self, response, image):
        # Same body shape as show(); status defaults to 200.
        image_view = self._format_image(image)
        body = json.dumps(image_view, ensure_ascii=False)
        response.unicode_body = unicode(body)
        response.content_type = 'application/json'

    def index(self, response, result):
        # Paginated listing: 'first' reproduces the request query (minus
        # marker) and 'next' is emitted when more results exist.
        # NOTE(review): dict(params) collapses repeated query keys (e.g.
        # multiple ?tag=) to a single value in the echoed links — confirm.
        params = dict(response.request.params)
        params.pop('marker', None)
        query = urlparse.urlencode(params)
        body = {
            'images': [self._format_image(i) for i in result['images']],
            'first': '/v2/images',
            'schema': '/v2/schemas/images',
        }
        if query:
            body['first'] = '%s?%s' % (body['first'], query)
        if 'next_marker' in result:
            params['marker'] = result['next_marker']
            next_query = urlparse.urlencode(params)
            body['next'] = '/v2/images?%s' % next_query
        response.unicode_body = unicode(json.dumps(body, ensure_ascii=False))
        response.content_type = 'application/json'

    def delete(self, response, result):
        # 204 No Content; the body (if any) is ignored by clients.
        response.status_int = 204
|
||||
|
||||
|
||||
def _get_base_properties():
    """Return the JSON-schema property definitions common to every image.

    Keys marked READ-ONLY in their descriptions are set by the service,
    never by the client.
    """
    return {
        'id': {
            'type': 'string',
            'description': _('An identifier for the image'),
            # UUID in canonical 8-4-4-4-12 hex form.
            'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                        '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
        },
        'name': {
            'type': 'string',
            'description': _('Descriptive name for the image'),
            'maxLength': 255,
        },
        'status': {
            'type': 'string',
            'description': _('Status of the image (READ-ONLY)'),
            'enum': ['queued', 'saving', 'active', 'killed',
                     'deleted', 'pending_delete'],
        },
        'visibility': {
            'type': 'string',
            'description': _('Scope of image accessibility'),
            'enum': ['public', 'private'],
        },
        'protected': {
            'type': 'boolean',
            'description': _('If true, image will not be deletable.'),
        },
        'checksum': {
            'type': 'string',
            'description': _('md5 hash of image contents. (READ-ONLY)'),
            'maxLength': 32,
        },
        'owner': {
            'type': 'string',
            'description': _('Owner of the image'),
            'maxLength': 255,
        },
        'size': {
            'type': 'integer',
            'description': _('Size of image file in bytes (READ-ONLY)'),
        },
        'virtual_size': {
            'type': 'integer',
            'description': _('Virtual size of image in bytes (READ-ONLY)'),
        },
        'container_format': {
            'type': 'string',
            'description': _('Format of the container'),
            # Allowed values come from the image_format config group.
            'enum': CONF.image_format.container_formats,
        },
        'disk_format': {
            'type': 'string',
            'description': _('Format of the disk'),
            'enum': CONF.image_format.disk_formats,
        },
        'created_at': {
            'type': 'string',
            'description': _('Date and time of image registration'
                             ' (READ-ONLY)'),
            # TODO(bcwaldon): our jsonschema library doesn't seem to like the
            # format attribute, figure out why!
            #'format': 'date-time',
        },
        'updated_at': {
            'type': 'string',
            'description': _('Date and time of the last image modification'
                             ' (READ-ONLY)'),
            #'format': 'date-time',
        },
        'tags': {
            'type': 'array',
            'description': _('List of strings related to the image'),
            'items': {
                'type': 'string',
                'maxLength': 255,
            },
        },
        'direct_url': {
            'type': 'string',
            'description': _('URL to access the image file kept in external '
                             'store (READ-ONLY)'),
        },
        'min_ram': {
            'type': 'integer',
            'description': _('Amount of ram (in MB) required to boot image.'),
        },
        'min_disk': {
            'type': 'integer',
            'description': _('Amount of disk space (in GB) required to boot '
                             'image.'),
        },
        'self': {
            'type': 'string',
            'description': '(READ-ONLY)'
        },
        'file': {
            'type': 'string',
            'description': '(READ-ONLY)'
        },
        'schema': {
            'type': 'string',
            'description': '(READ-ONLY)'
        },
        'locations': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'url': {
                        'type': 'string',
                        'maxLength': 255,
                    },
                    'metadata': {
                        'type': 'object',
                    },
                },
                'required': ['url', 'metadata'],
            },
            'description': _('A set of URLs to access the image file kept in '
                             'external store'),
        },
    }
|
||||
|
||||
|
||||
def _get_base_links():
    """Return the hyperlink templates attached to every image schema."""
    rels = (('self', '{self}'),
            ('enclosure', '{file}'),
            ('describedby', '{schema}'))
    return [{'rel': rel, 'href': href} for rel, href in rels]
|
||||
|
||||
|
||||
def get_schema(custom_properties=None):
    """Build the image schema, optionally merged with custom properties.

    When allow_additional_image_properties is set, a permissive schema
    (accepting arbitrary extra properties) is produced instead of a
    strict one.
    """
    props = _get_base_properties()
    if CONF.allow_additional_image_properties:
        schema = glance.schema.PermissiveSchema('image', props,
                                                _get_base_links())
    else:
        schema = glance.schema.Schema('image', props)
    schema.merge_properties(custom_properties or {})
    return schema
|
||||
|
||||
|
||||
def get_collection_schema(custom_properties=None):
    """Return the schema for a collection ('images') of image objects."""
    return glance.schema.CollectionSchema('images',
                                          get_schema(custom_properties))
|
||||
|
||||
|
||||
def load_custom_properties():
    """Find the schema properties files and load them into a dict.

    :returns: parsed contents of schema-image.json, or {} (with a
        warning logged) when the file cannot be located
    """
    filename = 'schema-image.json'
    match = CONF.find_file(filename)
    if match:
        # BUG FIX: the original opened the file without ever closing it;
        # 'with' guarantees the handle is released.
        with open(match) as schema_file:
            schema_data = schema_file.read()
        return json.loads(schema_data)
    else:
        msg = _('Could not find schema properties file %s. Continuing '
                'without custom properties')
        LOG.warn(msg % filename)
        return {}
|
||||
|
||||
|
||||
def create_resource(custom_properties=None):
    """Images resource factory method."""
    schema = get_schema(custom_properties)
    return wsgi.Resource(ImagesController(),
                         RequestDeserializer(schema),
                         ResponseSerializer(schema))
|
@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Routines for configuring Glance
|
||||
"""
|
||||
|
||||
import logging
|
||||
import logging.config
|
||||
import logging.handlers
|
||||
import os
|
||||
|
||||
from oslo.config import cfg
|
||||
from paste import deploy
|
||||
|
||||
from glance.version import version_info as version
|
||||
|
||||
# Option definitions for glance-api/registry; registered on CONF below.

paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste configuration '
                      'file with the service name removed. For example, if '
                      'your paste section name is '
                      '[pipeline:glance-api-keystone] use the value '
                      '"keystone"')),
    cfg.StrOpt('config_file',
               help=_('Name of the paste configuration file.')),
]
image_format_opts = [
    cfg.ListOpt('container_formats',
                default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'],
                help=_("Supported values for the 'container_format' "
                       "image attribute"),
                deprecated_opts=[cfg.DeprecatedOpt('container_formats',
                                                   group='DEFAULT')]),
    cfg.ListOpt('disk_formats',
                default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2',
                         'vdi', 'iso'],
                help=_("Supported values for the 'disk_format' "
                       "image attribute"),
                deprecated_opts=[cfg.DeprecatedOpt('disk_formats',
                                                   group='DEFAULT')]),
]
task_opts = [
    cfg.IntOpt('task_time_to_live',
               default=48,
               help=_("Time in hours for which a task lives after, either "
                      "succeeding or failing"),
               deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live',
                                                  group='DEFAULT')]),
]
common_opts = [
    cfg.BoolOpt('allow_additional_image_properties', default=True,
                help=_('Whether to allow users to specify image properties '
                       'beyond what the image schema provides')),
    cfg.IntOpt('image_member_quota', default=128,
               help=_('Maximum number of image members per image. '
                      'Negative values evaluate to unlimited.')),
    cfg.IntOpt('image_property_quota', default=128,
               help=_('Maximum number of properties allowed on an image. '
                      'Negative values evaluate to unlimited.')),
    cfg.IntOpt('image_tag_quota', default=128,
               help=_('Maximum number of tags allowed on an image. '
                      'Negative values evaluate to unlimited.')),
    cfg.IntOpt('image_location_quota', default=10,
               help=_('Maximum number of locations allowed on an image. '
                      'Negative values evaluate to unlimited.')),
    cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api',
               help=_('Python module path of data access API')),
    cfg.IntOpt('limit_param_default', default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max', default=1000,
               help=_('Maximum permissible number of items that could be '
                      'returned by a request')),
    cfg.BoolOpt('show_image_direct_url', default=False,
                help=_('Whether to include the backend image storage location '
                       'in image properties. Revealing storage location can '
                       'be a security risk, so use this setting with '
                       'caution!')),
    # BUG FIX: help text read "The overrides show_image_direct_url."
    cfg.BoolOpt('show_multiple_locations', default=False,
                help=_('Whether to include the backend image locations '
                       'in image properties. Revealing storage location can '
                       'be a security risk, so use this setting with '
                       'caution! This overrides show_image_direct_url.')),
    cfg.IntOpt('image_size_cap', default=1099511627776,
               help=_("Maximum size of image a user can upload in bytes. "
                      "Defaults to 1099511627776 bytes (1 TB).")),
    cfg.IntOpt('user_storage_quota', default=0,
               help=_("Set a system wide quota for every user. This value is "
                      "the total number of bytes that a user can use across "
                      "all storage systems. A value of 0 means unlimited.")),
    cfg.BoolOpt('enable_v1_api', default=True,
                help=_("Deploy the v1 OpenStack Images API.")),
    cfg.BoolOpt('enable_v2_api', default=True,
                help=_("Deploy the v2 OpenStack Images API.")),
    cfg.BoolOpt('enable_v1_registry', default=True,
                help=_("Deploy the v1 OpenStack Registry API.")),
    cfg.BoolOpt('enable_v2_registry', default=True,
                help=_("Deploy the v2 OpenStack Registry API.")),
    cfg.StrOpt('pydev_worker_debug_host', default=None,
               help=_('The hostname/IP of the pydev process listening for '
                      'debug connections')),
    cfg.IntOpt('pydev_worker_debug_port', default=5678,
               help=_('The port on which a pydev process is listening for '
                      'connections.')),
    cfg.StrOpt('metadata_encryption_key', secret=True,
               help=_('Key used for encrypting sensitive metadata while '
                      'talking to the registry or database.')),
    # Tricircle cascading additions: image sync service settings.
    cfg.BoolOpt('sync_enabled', default=False,
                help=_("Whether to launch the Sync function.")),
    cfg.StrOpt('sync_server_host', default='127.0.0.1',
               help=_('Host IP where sync_web_server runs.')),
    cfg.IntOpt('sync_server_port', default=9595,
               help=_('Host port where sync_web_server listens.')),
]
sync_opts = [
    cfg.StrOpt('cascading_endpoint_url', default='http://127.0.0.1:9292/',
               help=_('Endpoint URL of the cascading glance service.'),
               deprecated_opts=[cfg.DeprecatedOpt('cascading_endpoint_url',
                                                  group='DEFAULT')]),
    cfg.StrOpt('sync_strategy', default='None',
               help=_("Define the sync strategy, value can be All/User/None."),
               deprecated_opts=[cfg.DeprecatedOpt('sync_strategy',
                                                  group='DEFAULT')]),
    cfg.IntOpt('snapshot_timeout', default=300,
               help=_('when snapshot, max wait (second)time for snapshot '
                      'status become active.'),
               deprecated_opts=[cfg.DeprecatedOpt('snapshot_timeout',
                                                  group='DEFAULT')]),
    cfg.IntOpt('snapshot_sleep_interval', default=10,
               help=_('when snapshot, sleep interval for waiting snapshot '
                      'status become active.'),
               deprecated_opts=[cfg.DeprecatedOpt('snapshot_sleep_interval',
                                                  group='DEFAULT')]),
    cfg.IntOpt('task_retry_times', default=0,
               help=_('sync task fail retry times.'),
               deprecated_opts=[cfg.DeprecatedOpt('task_retry_times',
                                                  group='DEFAULT')]),
    # BUG FIX: help text was copy-pasted from snapshot_timeout.
    cfg.IntOpt('scp_copy_timeout', default=3600,
               help=_('max wait (second) time for an scp image copy '
                      'to complete.'),
               deprecated_opts=[cfg.DeprecatedOpt('scp_copy_timeout',
                                                  group='DEFAULT')]),
]

CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(image_format_opts, group='image_format')
CONF.register_opts(task_opts, group='task')
CONF.register_opts(sync_opts, group='sync')
CONF.register_opts(common_opts)
|
||||
|
||||
|
||||
def parse_args(args=None, usage=None, default_config_files=None):
    """Parse CLI arguments and configuration files into CONF.

    :param args: argument list (defaults to sys.argv inside oslo.config)
    :param usage: usage string shown in --help output
    :param default_config_files: config files to read when none are given
    """
    CONF(args=args,
         project='glance',
         version=version.cached_version_string(),
         usage=usage,
         default_config_files=default_config_files)
|
||||
|
||||
|
||||
def parse_cache_args(args=None):
    """Parse configuration for the glance-cache program."""
    cache_config_files = cfg.find_config_files(project='glance',
                                               prog='glance-cache')
    parse_args(args=args, default_config_files=cache_config_files)
|
||||
|
||||
|
||||
def _get_deployment_flavor(flavor=None):
    """
    Retrieve the paste_deploy.flavor config item, formatted appropriately
    for appending to the application name.

    :param flavor: if specified, use this setting rather than the
                   paste_deploy.flavor configuration setting
    """
    chosen = flavor or CONF.paste_deploy.flavor
    return ('-' + chosen) if chosen else ''
|
||||
|
||||
|
||||
def _get_paste_config_path():
    """Locate the paste .ini file derived from the main config file name."""
    paste_suffix = '-paste.ini'
    if CONF.config_file:
        # Assume paste config is in a paste.ini file corresponding
        # to the last config file
        candidate = CONF.config_file[-1].replace('.conf', paste_suffix)
    else:
        candidate = CONF.prog + paste_suffix
    return CONF.find_file(os.path.basename(candidate))
|
||||
|
||||
|
||||
def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname.
    """
    path = CONF.paste_deploy.config_file or _get_paste_config_path()
    if not path:
        msg = _("Unable to locate paste config file for %s.") % CONF.prog
        raise RuntimeError(msg)
    return os.path.abspath(path)
|
||||
|
||||
|
||||
def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file

    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    # BUG FIX: the logger was created inside the try block but used in the
    # except handler; bind it up front so the handler can never hit a
    # NameError.
    logger = logging.getLogger(__name__)
    try:
        logger.debug(_("Loading %(app_name)s from %(conf_file)s"),
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
|
@ -0,0 +1,362 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Glance exception subclasses"""
|
||||
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
_FATAL_EXCEPTION_FORMAT_ERRORS = False
|
||||
|
||||
|
||||
class RedirectException(Exception):
    """Raised to signal an HTTP redirect; stores the parsed target URL."""

    def __init__(self, url):
        # Keep the split result so callers can inspect scheme/netloc/path.
        self.url = urlparse.urlparse(url)
|
||||
|
||||
|
||||
class GlanceException(Exception):

    """
    Base Glance Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred")

    def __init__(self, message=None, *args, **kwargs):
        message = message or self.message
        if kwargs:
            # Best-effort interpolation: a malformed format string must not
            # mask the original error unless fatal formatting is requested.
            try:
                message = message % kwargs
            except Exception:
                if _FATAL_EXCEPTION_FORMAT_ERRORS:
                    raise
                # at least get the core message out if something happened
        self.msg = message
        super(GlanceException, self).__init__(message)

    def __unicode__(self):
        # NOTE(flwang): By default, self.msg is an instance of Message, which
        # can't be converted by str(). Based on the definition of
        # __unicode__, it should return unicode always.
        return six.text_type(self.msg)
|
||||
|
||||
|
||||
class MissingCredentialError(GlanceException):
|
||||
message = _("Missing required credential: %(required)s")
|
||||
|
||||
|
||||
class BadAuthStrategy(GlanceException):
|
||||
message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
|
||||
"received \"%(received)s\"")
|
||||
|
||||
|
||||
class NotFound(GlanceException):
|
||||
message = _("An object with the specified identifier was not found.")
|
||||
|
||||
|
||||
class UnknownScheme(GlanceException):
|
||||
message = _("Unknown scheme '%(scheme)s' found in URI")
|
||||
|
||||
|
||||
class BadStoreUri(GlanceException):
|
||||
message = _("The Store URI was malformed.")
|
||||
|
||||
|
||||
class Duplicate(GlanceException):
|
||||
message = _("An object with the same identifier already exists.")
|
||||
|
||||
|
||||
class Conflict(GlanceException):
|
||||
message = _("An object with the same identifier is currently being "
|
||||
"operated on.")
|
||||
|
||||
|
||||
class StorageFull(GlanceException):
|
||||
message = _("There is not enough disk space on the image storage media.")
|
||||
|
||||
|
||||
class StorageQuotaFull(GlanceException):
|
||||
message = _("The size of the data %(image_size)s will exceed the limit. "
|
||||
"%(remaining)s bytes remaining.")
|
||||
|
||||
|
||||
class StorageWriteDenied(GlanceException):
|
||||
message = _("Permission to write image storage media denied.")
|
||||
|
||||
|
||||
class AuthBadRequest(GlanceException):
|
||||
message = _("Connect error/bad request to Auth service at URL %(url)s.")
|
||||
|
||||
|
||||
class AuthUrlNotFound(GlanceException):
|
||||
message = _("Auth service at URL %(url)s not found.")
|
||||
|
||||
|
||||
class AuthorizationFailure(GlanceException):
|
||||
message = _("Authorization failed.")
|
||||
|
||||
|
||||
class NotAuthenticated(GlanceException):
|
||||
message = _("You are not authenticated.")
|
||||
|
||||
|
||||
class Forbidden(GlanceException):
|
||||
message = _("You are not authorized to complete this action.")
|
||||
|
||||
|
||||
class ForbiddenPublicImage(Forbidden):
|
||||
message = _("You are not authorized to complete this action.")
|
||||
|
||||
|
||||
class ProtectedImageDelete(Forbidden):
|
||||
message = _("Image %(image_id)s is protected and cannot be deleted.")
|
||||
|
||||
|
||||
class Invalid(GlanceException):
|
||||
message = _("Data supplied was not valid.")
|
||||
|
||||
|
||||
class InvalidSortKey(Invalid):
|
||||
message = _("Sort key supplied was not valid.")
|
||||
|
||||
|
||||
class InvalidPropertyProtectionConfiguration(Invalid):
|
||||
message = _("Invalid configuration in property protection file.")
|
||||
|
||||
|
||||
class InvalidFilterRangeValue(Invalid):
|
||||
message = _("Unable to filter using the specified range.")
|
||||
|
||||
|
||||
class ReadonlyProperty(Forbidden):
|
||||
message = _("Attribute '%(property)s' is read-only.")
|
||||
|
||||
|
||||
class ReservedProperty(Forbidden):
|
||||
message = _("Attribute '%(property)s' is reserved.")
|
||||
|
||||
|
||||
class AuthorizationRedirect(GlanceException):
|
||||
message = _("Redirecting to %(uri)s for authorization.")
|
||||
|
||||
|
||||
class ClientConnectionError(GlanceException):
|
||||
message = _("There was an error connecting to a server")
|
||||
|
||||
|
||||
class ClientConfigurationError(GlanceException):
|
||||
message = _("There was an error configuring the client.")
|
||||
|
||||
|
||||
class MultipleChoices(GlanceException):
|
||||
message = _("The request returned a 302 Multiple Choices. This generally "
|
||||
"means that you have not included a version indicator in a "
|
||||
"request URI.\n\nThe body of response returned:\n%(body)s")
|
||||
|
||||
|
||||
class LimitExceeded(GlanceException):
|
||||
message = _("The request returned a 413 Request Entity Too Large. This "
|
||||
"generally means that rate limiting or a quota threshold was "
|
||||
"breached.\n\nThe response body:\n%(body)s")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
|
||||
else None)
|
||||
super(LimitExceeded, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class ServiceUnavailable(GlanceException):
|
||||
message = _("The request returned 503 Service Unavilable. This "
|
||||
"generally occurs on service overload or other transient "
|
||||
"outage.")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
|
||||
else None)
|
||||
super(ServiceUnavailable, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class ServerError(GlanceException):
|
||||
message = _("The request returned 500 Internal Server Error.")
|
||||
|
||||
|
||||
class UnexpectedStatus(GlanceException):
|
||||
message = _("The request returned an unexpected status: %(status)s."
|
||||
"\n\nThe response body:\n%(body)s")
|
||||
|
||||
|
||||
class InvalidContentType(GlanceException):
|
||||
message = _("Invalid content type %(content_type)s")
|
||||
|
||||
|
||||
class BadRegistryConnectionConfiguration(GlanceException):
|
||||
message = _("Registry was not configured correctly on API server. "
|
||||
"Reason: %(reason)s")
|
||||
|
||||
|
||||
class BadStoreConfiguration(GlanceException):
|
||||
message = _("Store %(store_name)s could not be configured correctly. "
|
||||
"Reason: %(reason)s")
|
||||
|
||||
|
||||
class BadDriverConfiguration(GlanceException):
|
||||
message = _("Driver %(driver_name)s could not be configured correctly. "
|
||||
"Reason: %(reason)s")
|
||||
|
||||
|
||||
class StoreDeleteNotSupported(GlanceException):
|
||||
message = _("Deleting images from this store is not supported.")
|
||||
|
||||
|
||||
class StoreGetNotSupported(GlanceException):
|
||||
message = _("Getting images from this store is not supported.")
|
||||
|
||||
|
||||
class StoreAddNotSupported(GlanceException):
|
||||
message = _("Adding images to this store is not supported.")
|
||||
|
||||
|
||||
class StoreAddDisabled(GlanceException):
|
||||
message = _("Configuration for store failed. Adding images to this "
|
||||
"store is disabled.")
|
||||
|
||||
|
||||
class MaxRedirectsExceeded(GlanceException):
|
||||
message = _("Maximum redirects (%(redirects)s) was exceeded.")
|
||||
|
||||
|
||||
class InvalidRedirect(GlanceException):
|
||||
message = _("Received invalid HTTP redirect.")
|
||||
|
||||
|
||||
class NoServiceEndpoint(GlanceException):
|
||||
message = _("Response from Keystone does not contain a Glance endpoint.")
|
||||
|
||||
|
||||
class RegionAmbiguity(GlanceException):
|
||||
message = _("Multiple 'image' service matches for region %(region)s. This "
|
||||
"generally means that a region is required and you have not "
|
||||
"supplied one.")
|
||||
|
||||
|
||||
class WorkerCreationFailure(GlanceException):
|
||||
message = _("Server worker creation failed: %(reason)s.")
|
||||
|
||||
|
||||
class SchemaLoadError(GlanceException):
|
||||
message = _("Unable to load schema: %(reason)s")
|
||||
|
||||
|
||||
class InvalidObject(GlanceException):
|
||||
message = _("Provided object does not match schema "
|
||||
"'%(schema)s': %(reason)s")
|
||||
|
||||
|
||||
class UnsupportedHeaderFeature(GlanceException):
|
||||
message = _("Provided header feature is unsupported: %(feature)s")
|
||||
|
||||
|
||||
class InUseByStore(GlanceException):
|
||||
message = _("The image cannot be deleted because it is in use through "
|
||||
"the backend store outside of Glance.")
|
||||
|
||||
|
||||
class ImageSizeLimitExceeded(GlanceException):
|
||||
message = _("The provided image is too large.")
|
||||
|
||||
|
||||
class ImageMemberLimitExceeded(LimitExceeded):
    """Raised when adding a member would exceed the per-image member quota."""
    message = _("The limit has been exceeded on the number of allowed image "
                "members for this image. Attempted: %(attempted)s, "
                "Maximum: %(maximum)s")


class ImagePropertyLimitExceeded(LimitExceeded):
    """Raised when adding a property would exceed the image property quota."""
    message = _("The limit has been exceeded on the number of allowed image "
                "properties. Attempted: %(attempted)s, Maximum: %(maximum)s")


class ImageTagLimitExceeded(LimitExceeded):
    """Raised when adding a tag would exceed the image tag quota."""
    message = _("The limit has been exceeded on the number of allowed image "
                "tags. Attempted: %(attempted)s, Maximum: %(maximum)s")


class ImageLocationLimitExceeded(LimitExceeded):
    """Raised when adding a location would exceed the image location quota."""
    message = _("The limit has been exceeded on the number of allowed image "
                "locations. Attempted: %(attempted)s, Maximum: %(maximum)s")


class RPCError(GlanceException):
    """Wraps an exception raised on the remote side of an RPC call."""
    message = _("%(cls)s exception was raised in the last rpc call: %(val)s")
|
||||
|
||||
|
||||
class TaskException(GlanceException):
    """Base class for all task-related errors."""
    message = _("An unknown task exception occurred")


class TaskNotFound(TaskException, NotFound):
    """Raised when no task exists for the given id."""
    message = _("Task with the given id %(task_id)s was not found")


class InvalidTaskStatus(TaskException, Invalid):
    """Raised when a task is given an unrecognized status value."""
    message = _("Provided status of task is unsupported: %(status)s")


class InvalidTaskType(TaskException, Invalid):
    """Raised when a task is given an unrecognized type value."""
    message = _("Provided type of task is unsupported: %(type)s")


class InvalidTaskStatusTransition(TaskException, Invalid):
    """Raised when a task status change is not a legal transition."""
    message = _("Status transition from %(cur_status)s to"
                " %(new_status)s is not allowed")


class DuplicateLocation(Duplicate):
    """Raised when the same location URL is added to an image twice."""
    message = _("The location %(location)s already exists")
|
||||
|
||||
|
||||
class ImageDataNotFound(NotFound):
    """Raised when an image record has no retrievable data."""
    message = _("No image data could be found")


class InvalidParameterValue(Invalid):
    """Raised when a request parameter carries an unacceptable value."""
    message = _("Invalid value '%(value)s' for parameter '%(param)s': "
                "%(extra_msg)s")


class InvalidImageStatusTransition(Invalid):
    """Raised when an image status change is not a legal transition."""
    message = _("Image status transition from %(cur_status)s to"
                " %(new_status)s is not allowed")


class FunctionNameNotFound(GlanceException):
    """Raised when a named function cannot be resolved on a module/object."""
    message = _("Can not found the function name: %(func_name)s "
                "in object/module: %(owner_name)s")


class SyncServiceOperationError(GlanceException):
    """Raised when the cascading image-sync service fails an operation."""
    message = _("Image sync service execute failed with reason: %(reason)s")


class SyncStoreCopyError(GlanceException):
    """Raised when copying image data between stores fails during sync."""
    message = _("Image sync store failed with reason: %(reason)s")
|
@ -0,0 +1,602 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
System-level utilities and helper functions.
|
||||
"""
|
||||
|
||||
import errno
|
||||
|
||||
try:
|
||||
from eventlet import sleep
|
||||
except ImportError:
|
||||
from time import sleep
|
||||
from eventlet.green import socket
|
||||
|
||||
import functools
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import urlparse
|
||||
import uuid
|
||||
|
||||
from OpenSSL import crypto
|
||||
from oslo.config import cfg
|
||||
from webob import exc
|
||||
|
||||
from glance.common import exception
|
||||
import glance.openstack.common.log as logging
|
||||
from glance.openstack.common import strutils
|
||||
|
||||
CONF = cfg.CONF

LOG = logging.getLogger(__name__)

# Feature headers that must never be toggled through the features map;
# enforced by add_features_to_http_headers() below.
FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size']

# Whitelist of v1 API headers of form x-image-meta-xxx
# (get_image_meta_from_headers() rejects any other x-image-meta-* key).
IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
                      'x-image-meta-is_public', 'x-image-meta-disk_format',
                      'x-image-meta-container_format', 'x-image-meta-name',
                      'x-image-meta-status', 'x-image-meta-copy_from',
                      'x-image-meta-uri', 'x-image-meta-checksum',
                      'x-image-meta-created_at', 'x-image-meta-updated_at',
                      'x-image-meta-deleted_at', 'x-image-meta-min_ram',
                      'x-image-meta-min_disk', 'x-image-meta-owner',
                      'x-image-meta-store', 'x-image-meta-id',
                      'x-image-meta-protected', 'x-image-meta-deleted']

# Environment variable naming a listening-socket fd handed over by the
# test suite; consumed by get_test_suite_socket().
GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD'
|
||||
|
||||
|
||||
def chunkreadable(iter, chunk_size=65536):
    """
    Wrap a readable iterator with a reader yielding chunks of
    a preferred size, otherwise leave iterator unchanged.

    :param iter: an iter which may also be readable
    :param chunk_size: maximum size of chunk
    """
    if hasattr(iter, 'read'):
        return chunkiter(iter, chunk_size)
    return iter
|
||||
|
||||
|
||||
def chunkiter(fp, chunk_size=65536):
    """
    Return an iterator to a file-like obj which yields fixed size chunks

    :param fp: a file-like object
    :param chunk_size: maximum size of chunk
    """
    # Read ahead once, then keep going until the source is exhausted.
    chunk = fp.read(chunk_size)
    while chunk:
        yield chunk
        chunk = fp.read(chunk_size)
|
||||
|
||||
|
||||
def cooperative_iter(iter):
    """
    Return an iterator which schedules after each
    iteration. This can prevent eventlet thread starvation.

    :param iter: an iterator to wrap
    """
    try:
        for chunk in iter:
            # Yield the hub between chunks so other greenthreads run.
            sleep(0)
            yield chunk
    except Exception as err:
        LOG.error(_("Error: cooperative_iter exception %s") % err)
        raise
|
||||
|
||||
|
||||
def cooperative_read(fd):
    """
    Wrap a file descriptor's read with a partial function which schedules
    after each read. This can prevent eventlet thread starvation.

    :param fd: a file descriptor to wrap
    """
    def readfn(*args):
        chunk = fd.read(*args)
        # Give other greenthreads a chance to run between reads.
        sleep(0)
        return chunk
    return readfn
|
||||
|
||||
|
||||
class CooperativeReader(object):

    """
    An eventlet thread friendly class for reading in image data.

    When accessing data either through the iterator or the read method
    we perform a sleep to allow a co-operative yield. When there is more than
    one image being uploaded/downloaded this prevents eventlet thread
    starvation, ie allows all threads to be scheduled periodically rather than
    having the same thread be continuously active.
    """

    def __init__(self, fd):
        """
        :param fd: Underlying image file object
        """
        self.fd = fd
        self.iterator = None
        # NOTE(markwash): if the underlying supports read(), overwrite the
        # default iterator-based implementation with cooperative_read which
        # is more straightforward
        if hasattr(fd, 'read'):
            self.read = cooperative_read(fd)

    def read(self, length=None):
        """Return the next chunk of the underlying iterator.

        This is replaced with cooperative_read in __init__ if the underlying
        fd already supports read().

        :param length: ignored in this fallback path; chunk size is
            whatever the underlying iterator yields
        :returns: next chunk, or '' once the iterator is exhausted
        """
        if self.iterator is None:
            self.iterator = self.__iter__()
        try:
            # Use the next() builtin instead of iterator.next(): identical
            # behavior on Python 2.6+, and also works on Python 3 where
            # the .next() method no longer exists.
            return next(self.iterator)
        except StopIteration:
            return ''

    def __iter__(self):
        # Delegate to the underlying fd's iterator, wrapped so each chunk
        # yields the hub (see cooperative_iter).
        return cooperative_iter(self.fd.__iter__())
|
||||
|
||||
|
||||
class LimitingReader(object):

    """
    Reader designed to fail when reading image data past the configured
    allowable amount.
    """

    def __init__(self, data, limit):
        """
        :param data: Underlying image data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read <= self.limit:
                yield chunk
            else:
                # Crossing the cap mid-stream aborts the transfer.
                raise exception.ImageSizeLimitExceeded()

    def read(self, i):
        chunk = self.data.read(i)
        self.bytes_read += len(chunk)
        if self.bytes_read > self.limit:
            raise exception.ImageSizeLimitExceeded()
        return chunk
|
||||
|
||||
|
||||
def image_meta_to_http_headers(image_meta):
    """
    Returns a set of image metadata into a dict
    of HTTP headers that can be fed to either a Webob
    Request object or an httplib.HTTP(S)Connection object

    :param image_meta: Mapping of image metadata
    """
    headers = {}
    for key, value in image_meta.items():
        if value is None:
            # None values are simply omitted from the header set.
            continue
        if key == 'properties':
            # Custom properties get their own header namespace.
            for prop_name, prop_value in value.items():
                if prop_value is not None:
                    header = "x-image-meta-property-%s" % prop_name.lower()
                    headers[header] = unicode(prop_value)
        else:
            headers["x-image-meta-%s" % key.lower()] = unicode(value)
    return headers
|
||||
|
||||
|
||||
def add_features_to_http_headers(features, headers):
    """
    Adds additional headers representing glance features to be enabled.

    :param headers: Base set of headers
    :param features: Map of enabled features
    :raises exception.UnsupportedHeaderFeature: for blacklisted feature names
    """
    if not features:
        return
    for name, value in features.items():
        lowered = name.lower()
        if lowered in FEATURE_BLACKLIST:
            raise exception.UnsupportedHeaderFeature(feature=name)
        if value is not None:
            headers[lowered] = unicode(value)
|
||||
|
||||
|
||||
def get_image_meta_from_headers(response):
    """
    Processes HTTP headers from a supplied response that
    match the x-image-meta and x-image-meta-property and
    returns a mapping of image metadata and properties

    :param response: Response to process
    :returns: dict of image metadata; custom properties nested under the
        'properties' key
    :raises webob.exc.HTTPBadRequest: for a non-whitelisted
        x-image-meta-* header
    :raises exception.InvalidParameterValue: when size/min_disk/min_ram
        is not a non-negative integer
    """
    result = {}
    properties = {}

    if hasattr(response, 'getheaders'):  # httplib.HTTPResponse
        headers = response.getheaders()
    else:  # webob.Response
        headers = response.headers.items()

    for key, value in headers:
        key = str(key.lower())
        if key.startswith('x-image-meta-property-'):
            # Custom property: strip the prefix, normalize '-' to '_'.
            field_name = key[len('x-image-meta-property-'):].replace('-', '_')
            properties[field_name] = value or None
        elif key.startswith('x-image-meta-'):
            field_name = key[len('x-image-meta-'):].replace('-', '_')
            # Only whitelisted meta headers are accepted; anything else
            # is a client error.
            if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS:
                msg = _("Bad header: %(header_name)s") % {'header_name': key}
                raise exc.HTTPBadRequest(msg, content_type="text/plain")
            result[field_name] = value or None
    result['properties'] = properties

    # Numeric fields must parse as non-negative integers.
    for key in ('size', 'min_disk', 'min_ram'):
        if key in result:
            try:
                result[key] = int(result[key])
            except ValueError:
                extra = (_("Cannot convert image %(key)s '%(value)s' "
                           "to an integer.")
                         % {'key': key, 'value': result[key]})
                raise exception.InvalidParameterValue(value=result[key],
                                                      param=key,
                                                      extra_msg=extra)
            if result[key] < 0:
                extra = (_("Image %(key)s must be >= 0 "
                           "('%(value)s' specified).")
                         % {'key': key, 'value': result[key]})
                raise exception.InvalidParameterValue(value=result[key],
                                                      param=key,
                                                      extra_msg=extra)

    # Boolean fields accept any strutils-recognized truthy/falsey string.
    for key in ('is_public', 'deleted', 'protected'):
        if key in result:
            result[key] = strutils.bool_from_string(result[key])
    return result
|
||||
|
||||
|
||||
def safe_mkdirs(path):
    """Create *path* (and any missing parents), tolerating its existence.

    :param path: directory path to create
    :raises OSError: for any failure other than 'already exists'
    """
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return
        raise
|
||||
|
||||
|
||||
def safe_remove(path):
    """Delete the file at *path*, tolerating its absence.

    :param path: file path to remove
    :raises OSError: for any failure other than 'no such file'
    """
    try:
        os.remove(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        raise
|
||||
|
||||
|
||||
class PrettyTable(object):

    """Creates an ASCII art table for use in bin/glance

    Example:

        ID  Name              Size         Hits
        --- ----------------- ------------ -----
        122 image                       22     0
    """

    def __init__(self):
        self.columns = []

    def add_column(self, width, label="", just='l'):
        """Add a column to the table

        :param width: number of characters wide the column should be
        :param label: column heading
        :param just: justification for the column, 'l' for left,
                     'r' for right
        """
        self.columns.append((width, label, just))

    def make_header(self):
        """Render the heading line plus the dashed rule beneath it."""
        # NOTE(sirp): headers are always left justified
        labels = ' '.join(self._clip_and_justify(label, width, 'l')
                          for width, label, _just in self.columns)
        rules = ' '.join('-' * width
                         for width, _label, _just in self.columns)
        return '\n'.join([labels, rules])

    def make_row(self, *args):
        """Render one data row; one positional arg per column."""
        cells = [self._clip_and_justify(value, width, just)
                 for value, (width, _label, just) in zip(args, self.columns)]
        return ' '.join(cells)

    @staticmethod
    def _clip_and_justify(data, width, just):
        # Clip field to column width, then pad back out to it.
        text = str(data)[:width]
        if just == 'r':
            return text.rjust(width)
        return text.ljust(width)
|
||||
|
||||
|
||||
def get_terminal_size():
    """Return the controlling terminal's size as (height, width).

    Probes with a platform-specific strategy (ioctl/stty on POSIX, the
    Win32 console API on Windows) and validates the result.

    :returns: (height, width) tuple of positive ints
    :raises exception.Invalid: when the size cannot be determined or is
        not a pair of positive integers
    :raises NotImplementedError: on an unrecognized platform
    """

    def _get_terminal_size_posix():
        import fcntl
        import struct
        import termios

        height_width = None

        try:
            # TIOCGWINSZ fills in (rows, cols) as two shorts.
            height_width = struct.unpack(
                'hh',
                fcntl.ioctl(
                    sys.stderr.fileno(),
                    termios.TIOCGWINSZ,
                    struct.pack(
                        'HH',
                        0,
                        0)))
        except Exception:
            pass

        if not height_width:
            # Fall back to asking the stty utility.
            try:
                p = subprocess.Popen(['stty', 'size'],
                                     shell=False,
                                     stdout=subprocess.PIPE,
                                     stderr=open(os.devnull, 'w'))
                result = p.communicate()
                if p.returncode == 0:
                    return tuple(int(x) for x in result[0].split())
            except Exception:
                pass

        return height_width

    def _get_terminal_size_win32():
        try:
            from ctypes import create_string_buffer
            from ctypes import windll
            # -12 is STD_ERROR_HANDLE.
            handle = windll.kernel32.GetStdHandle(-12)
            csbi = create_string_buffer(22)
            res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        except Exception:
            return None
        if res:
            import struct
            unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom, maxx, maxy) = unpack_tmp
            # The visible window is defined by the screen-buffer rect.
            height = bottom - top + 1
            width = right - left + 1
            return (height, width)
        else:
            return None

    def _get_terminal_size_unknownOS():
        raise NotImplementedError

    func = {'posix': _get_terminal_size_posix,
            'win32': _get_terminal_size_win32}

    height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()

    if height_width is None:
        raise exception.Invalid()

    for i in height_width:
        if not isinstance(i, int) or i <= 0:
            raise exception.Invalid()

    return height_width[0], height_width[1]
|
||||
|
||||
|
||||
def mutating(func):
    """Decorator to enforce read-only logic"""
    @functools.wraps(func)
    def wrapped(self, req, *args, **kwargs):
        # Writable contexts pass straight through to the handler.
        if not req.context.read_only:
            return func(self, req, *args, **kwargs)
        msg = _("Read-only access")
        LOG.debug(msg)
        raise exc.HTTPForbidden(msg, request=req,
                                content_type="text/plain")
    return wrapped
|
||||
|
||||
|
||||
def setup_remote_pydev_debug(host, port):
    """Attach this process to a remote pydev debug server.

    :param host: hostname or IP where the pydev server listens
    :param port: port of the pydev server
    :returns: True on successful attach
    :raises Exception: re-raises any failure after logging it
    """
    error_msg = ('Error setting up the debug environment. Verify that the'
                 ' option pydev_worker_debug_port is pointing to a valid '
                 'hostname or IP on which a pydev server is listening on'
                 ' the port indicated by pydev_worker_debug_port.')

    try:
        try:
            # Some pydev distributions nest pydevd in a 'pydev' package.
            from pydev import pydevd
        except ImportError:
            import pydevd

        pydevd.settrace(host,
                        port=port,
                        stdoutToServer=True,
                        stderrToServer=True)
        return True
    except Exception:
        LOG.exception(error_msg)
        raise
|
||||
|
||||
|
||||
class LazyPluggable(object):

    """A pluggable backend loaded lazily based on some value."""

    def __init__(self, pivot, config_group=None, **backends):
        # :param pivot: config option whose value selects the backend
        # :param config_group: optional config group holding the pivot
        # :param backends: name -> module path (or (path, fromlist) tuple)
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None
        self.__config_group = config_group

    def __get_backend(self):
        if not self.__backend:
            group = self.__config_group
            if group is None:
                backend_name = CONF[self.__pivot]
            else:
                backend_name = CONF[group][self.__pivot]
            if backend_name not in self.__backends:
                msg = _('Invalid backend: %s') % backend_name
                raise exception.GlanceException(msg)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = fromlist = backend

            # Import only on first use; cache the module afterwards.
            self.__backend = __import__(name, None, None, fromlist)
        return self.__backend

    def __getattr__(self, key):
        # Called only for attributes not found on the instance itself;
        # proxies everything to the lazily imported backend module.
        return getattr(self.__get_backend(), key)
|
||||
|
||||
|
||||
def validate_key_cert(key_file, cert_file):
    """Validate that a PEM private key and certificate form a usable pair.

    :param key_file: path to a PEM-encoded private key file
    :param cert_file: path to a PEM-encoded certificate file
    :raises RuntimeError: when either file is unreadable or unparsable,
        or when the key and certificate do not belong together
    """
    try:
        error_key_name = "private key"
        error_filename = key_file
        # Read via context managers so the file handles are closed
        # promptly (the previous code leaked both open files).
        with open(key_file, "r") as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)

        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, "r") as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it."
                             " Error: %(ioe)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ioe': ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it. OpenSSL"
                             " error: %(ce)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ce': ce})

    try:
        data = str(uuid.uuid4())
        digest = "sha1"

        # Sign arbitrary data with the key and verify the signature with
        # the certificate; success proves they belong to the same pair.
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair. "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together. OpenSSL "
                             "error %(ce)s") % {'cert_file': cert_file,
                                                'key_file': key_file,
                                                'ce': ce})
|
||||
|
||||
|
||||
def get_test_suite_socket():
    """Adopt a listening socket inherited from the test suite, if any.

    When the GLANCE_TEST_SOCKET_FD environment variable is set, the fd it
    names is wrapped into a socket object, put in listening state, and
    returned; the environment variable is removed and the original fd
    closed.  Returns None when the variable is absent.
    """
    global GLANCE_TEST_SOCKET_FD_STR
    if GLANCE_TEST_SOCKET_FD_STR in os.environ:
        fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR])
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        # Re-wrap into a full socket object (Python 2 idiom; fromfd
        # returns a low-level _socket.socket).
        sock = socket.SocketType(_sock=sock)
        sock.listen(CONF.backlog)
        del os.environ[GLANCE_TEST_SOCKET_FD_STR]
        # fromfd() duplicated the descriptor, so close the original.
        os.close(fd)
        return sock
    return None
|
||||
|
||||
|
||||
def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    try:
        parsed = uuid.UUID(val)
    except (TypeError, ValueError, AttributeError):
        return False
    # Round-trip to string: only canonical-form input compares equal.
    return str(parsed) == val
|
||||
|
||||
|
||||
# Matches image locations served from a glance v2 endpoint, e.g.
# http://<host>:<port>/v2/images/<image_id>
pattern = re.compile(r'^https?://\S+/v2/images/\S+$')


def is_glance_location(loc_url):
    """Tell whether *loc_url* looks like a glance v2 image URL.

    :returns: the (truthy) regex match object on a hit, else None
    """
    return pattern.match(loc_url)
|
||||
|
||||
|
||||
def check_synced(image, ep_url_list):
    """Requeue *image* unless it is synced to every cascaded endpoint.

    Only 'active' images are checked.  An image counts as synced when the
    set of hosts in *ep_url_list* equals the set of hosts appearing in its
    glance-style locations; otherwise its status is reset to 'queued' and
    its sizes cleared (mutates *image* in place).

    :param image: image object with status/locations/size attributes
    :param ep_url_list: list of glance endpoint URLs to compare against
    """
    if image.status != 'active':
        return

    if not ep_url_list:
        is_synced = False
    else:
        expected_hosts = set(urlparse.urlparse(url).netloc
                             for url in ep_url_list)
        synced_hosts = set(urlparse.urlparse(loc['url']).netloc
                           for loc in image.locations
                           if is_glance_location(loc['url']))
        is_synced = expected_hosts == synced_hosts

    if not is_synced:
        image.status = 'queued'
        image.size = None
        image.virtual_size = None
|
125
icehouse-patches/glance/glance_location_patch/glance/gateway.py
Normal file
125
icehouse-patches/glance/glance_location_patch/glance/gateway.py
Normal file
@ -0,0 +1,125 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# Copyright 2013 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from glance.api import authorization
|
||||
from glance.api import policy
|
||||
from glance.api import property_protections
|
||||
from glance.common import property_utils
|
||||
import glance.db
|
||||
import glance.domain
|
||||
import glance.notifier
|
||||
import glance.quota
|
||||
import glance.store
|
||||
from glance.sync.client.v1 import api as syncapi
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.import_opt('sync_enabled', 'glance.common.config')
|
||||
|
||||
|
||||
class Gateway(object):
|
||||
|
||||
def __init__(self, db_api=None, store_api=None, notifier=None,
|
||||
policy_enforcer=None, sync_api=None):
|
||||
self.db_api = db_api or glance.db.get_api()
|
||||
self.store_api = store_api or glance.store
|
||||
self.notifier = notifier or glance.notifier.Notifier()
|
||||
self.policy = policy_enforcer or policy.Enforcer()
|
||||
self.sync_api = sync_api or syncapi
|
||||
|
||||
def get_im |