Initial checkin

parent 24be39395d
commit 5e9ada9fc6

115 HACKING Normal file
@@ -0,0 +1,115 @@
Cinder Style Commandments
=========================

Step 1: Read http://www.python.org/dev/peps/pep-0008/
Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
Step 3: Read on


Imports
-------
- thou shalt not import objects, only modules
- thou shalt not import more than one module per line
- thou shalt not make relative imports
- thou shalt organize your imports according to the following template

::
  # vim: tabstop=4 shiftwidth=4 softtabstop=4
  {{stdlib imports in human alphabetical order}}
  \n
  {{cinder imports in human alphabetical order}}
  \n
  \n
  {{begin your code}}


General
-------
- thou shalt put two newlines twixt toplevel code (funcs, classes, etc)
- thou shalt put one newline twixt methods in classes and anywhere else
- thou shalt not write "except:", use "except Exception:" at the very least
- thou shalt include your name with TODOs as in "TODO(termie)"
- thou shalt not name anything the same name as a builtin or reserved word
- thou shalt not violate causality in our time cone, or else

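A short illustrative sketch (not part of the original commandments) of how the
spacing, exception and TODO rules above look in practice::

  import logging


  LOG = logging.getLogger(__name__)


  class Widget(object):

      def load(self):
          try:
              return self._read()
          except Exception:
              # TODO(yourname): narrow this to the expected exception
              LOG.exception("load failed")
              raise

      def _read(self):
          raise NotImplementedError
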
Human Alphabetical Order Examples
---------------------------------
::
  import httplib
  import logging
  import random
  import StringIO
  import time
  import unittest

  from cinder import flags
  from cinder import test
  from cinder.auth import users
  from cinder.endpoint import api
  from cinder.endpoint import cloud

Docstrings
----------
  """A one line docstring looks like this and ends in a period."""


  """A multiline docstring has a one-line summary, less than 80 characters.

  Then a new paragraph after a newline that explains in more detail any
  general information about the function, class or method. Example usages
  are also great to have here if it is a complex class or function. After
  you have finished your descriptions add an extra newline and close the
  quotations.

  When writing the docstring for a class, an extra line should be placed
  after the closing quotations. For more in-depth explanations for these
  decisions see http://www.python.org/dev/peps/pep-0257/

  If you are going to describe parameters and return values, use Sphinx; the
  appropriate syntax is as follows.

  :param foo: the foo parameter
  :param bar: the bar parameter
  :returns: description of the return value

  """

Text encoding
-------------
- All text within python code should be of type 'unicode'.

    WRONG:

    >>> s = 'foo'
    >>> s
    'foo'
    >>> type(s)
    <type 'str'>

    RIGHT:

    >>> u = u'foo'
    >>> u
    u'foo'
    >>> type(u)
    <type 'unicode'>

- Transitions between internal unicode and external strings should always
  be immediately and explicitly encoded or decoded.

- All external text that is not explicitly encoded (database storage,
  commandline arguments, etc.) should be presumed to be encoded as utf-8.

    WRONG:

    mystring = infile.readline()
    myreturnstring = do_some_magic_with(mystring)
    outfile.write(myreturnstring)

    RIGHT:

    mystring = infile.readline()
    mytext = mystring.decode('utf-8')
    returntext = do_some_magic_with(mytext)
    returnstring = returntext.encode('utf-8')
    outfile.write(returnstring)
208 LICENSE Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
Copyright (c) 2009 Jacob Kaplan-Moss - initial codebase (< v2.1)
|
||||||
|
Copyright (c) 2011 Rackspace - OpenStack extensions (>= v2.1)
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
--- License for python-cinderclient versions prior to 2.1 ---
|
||||||
|
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
3. Neither the name of this project nor the names of its contributors may
|
||||||
|
be used to endorse or promote products derived from this software without
|
||||||
|
specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
4 MANIFEST.in Normal file
@@ -0,0 +1,4 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview
150 README.rst Normal file
@@ -0,0 +1,150 @@
Python bindings to the OpenStack Cinder API
===========================================

This is a client for the OpenStack Cinder API. There's a Python API (the
``cinderclient`` module), and a command-line script (``cinder``). Each
implements 100% of the OpenStack Cinder API.

See the `OpenStack CLI guide`_ for information on how to use the ``cinder``
command-line tool. You may also want to look at the
`OpenStack API documentation`_.

.. _OpenStack CLI Guide: http://docs.openstack.org/cli/quick-start/content/
.. _OpenStack API documentation: http://docs.openstack.org/api/

The project is hosted on `Launchpad`_, where bugs can be filed. The code is
hosted on `Github`_. Patches must be submitted using `Gerrit`_, *not* Github
pull requests.

.. _Github: https://github.com/openstack/python-cinderclient
.. _Launchpad: https://launchpad.net/python-cinderclient
.. _Gerrit: http://wiki.openstack.org/GerritWorkflow

This code is a fork of `Jacobian's python-cloudservers`__. If you need API
support solely for the Rackspace API, or the BSD license, you should use that
repository. python-cinderclient is licensed under the Apache License, like the
rest of OpenStack.

__ http://github.com/jacobian/python-cloudservers

.. contents:: Contents:
   :local:

Command-line API
----------------

Installing this package gets you a shell command, ``cinder``, that you
can use to interact with any Rackspace compatible API (including OpenStack).

You'll need to provide your OpenStack username and password. You can do this
with the ``--os-username``, ``--os-password`` and ``--os-tenant-name``
params, but it's easier to just set them as environment variables::

    export OS_USERNAME=openstack
    export OS_PASSWORD=yadayada
    export OS_TENANT_NAME=myproject

You will also need to define the authentication url with ``--os-auth-url``
and the version of the API with ``--version``, or set them as environment
variables as well::

    export OS_AUTH_URL=http://example.com:8774/v1.1/
    export OS_VOLUME_API_VERSION=1

If you are using Keystone, you need to set ``OS_AUTH_URL`` to the Keystone
endpoint::

    export OS_AUTH_URL=http://example.com:5000/v2.0/

Since Keystone can return multiple regions in the Service Catalog, you
can specify the one you want with ``--os-region-name`` (or
``export OS_REGION_NAME``). It defaults to the first in the list returned.
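For example (``RegionOne`` below is just a placeholder region name, not part of
the original text)::

    export OS_REGION_NAME=RegionOne
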
You'll find complete documentation on the shell by running
``cinder help``::

    usage: cinder [--debug] [--os-username <auth-user-name>]
                  [--os-password <auth-password>]
                  [--os-tenant-name <auth-tenant-name>] [--os-auth-url <auth-url>]
                  [--os-region-name <region-name>] [--service-type <service-type>]
                  [--service-name <service-name>]
                  [--volume-service-name <volume-service-name>]
                  [--endpoint-type <endpoint-type>]
                  [--os-volume-api-version <compute-api-ver>]
                  [--os-cacert <ca-certificate>] [--retries <retries>]
                  <subcommand> ...

    Command-line interface to the OpenStack Cinder API.

    Positional arguments:
      <subcommand>
        absolute-limits     Print a list of absolute limits for a user
        create              Add a new volume.
        credentials         Show user credentials returned from auth
        delete              Remove a volume.
        endpoints           Discover endpoints that get returned from the
                            authenticate services
        extra-specs-list    Print a list of current 'volume types and extra specs'
                            (Admin Only).
        list                List all the volumes.
        quota-class-show    List the quotas for a quota class.
        quota-class-update  Update the quotas for a quota class.
        quota-defaults      List the default quotas for a tenant.
        quota-show          List the quotas for a tenant.
        quota-update        Update the quotas for a tenant.
        rate-limits         Print a list of rate limits for a user
        rename              Rename a volume.
        show                Show details about a volume.
        snapshot-create     Add a new snapshot.
        snapshot-delete     Remove a snapshot.
        snapshot-list       List all the snapshots.
        snapshot-rename     Rename a snapshot.
        snapshot-show       Show details about a snapshot.
        type-create         Create a new volume type.
        type-delete         Delete a specific volume type
        type-key            Set or unset extra_spec for a volume type.
        type-list           Print a list of available 'volume types'.
        bash-completion     Prints all of the commands and options to stdout so
                            that the
        help                Display help about this program or one of its
                            subcommands.
        list-extensions     List all the os-api extensions that are available.

    Optional arguments:
      --debug               Print debugging output
      --os-username <auth-user-name>
                            Defaults to env[OS_USERNAME].
      --os-password <auth-password>
                            Defaults to env[OS_PASSWORD].
      --os-tenant-name <auth-tenant-name>
                            Defaults to env[OS_TENANT_NAME].
      --os-auth-url <auth-url>
                            Defaults to env[OS_AUTH_URL].
      --os-region-name <region-name>
                            Defaults to env[OS_REGION_NAME].
      --service-type <service-type>
                            Defaults to compute for most actions
      --service-name <service-name>
                            Defaults to env[CINDER_SERVICE_NAME]
      --volume-service-name <volume-service-name>
                            Defaults to env[CINDER_VOLUME_SERVICE_NAME]
      --endpoint-type <endpoint-type>
                            Defaults to env[CINDER_ENDPOINT_TYPE] or publicURL.
      --os-volume-api-version <compute-api-ver>
                            Accepts 1, defaults to env[OS_VOLUME_API_VERSION].
      --os-cacert <ca-certificate>
                            Specify a CA bundle file to use in verifying a TLS
                            (https) server certificate. Defaults to env[OS_CACERT]
      --retries <retries>   Number of retries.

Python API
----------

There's also a complete Python API, but it has not yet been documented.

Quick-start using keystone::

    # use v2.0 auth with http://example.com:5000/v2.0/
    >>> from cinderclient.v1 import client
    >>> nt = client.Client(USER, PASS, TENANT, AUTH_URL, service_type="volume")
    >>> nt.volumes.list()
    [...]
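For illustration only, a possible next step after the quick-start — the
one-gigabyte size and ``display_name`` below are made up, and assume the v1
volume manager exposes ``create`` and ``delete`` as in later releases::

    >>> vol = nt.volumes.create(1, display_name="test-vol")
    >>> nt.volumes.delete(vol)
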
25 cinderclient/__init__.py Normal file
@@ -0,0 +1,25 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient.openstack.common import version

version_info = version.VersionInfo('python-cinderclient')
# We have a circular import problem when we first run python setup.py sdist
# It's harmless, so deflect it.
try:
    __version__ = version_info.version_string()
except AttributeError:
    __version__ = None
293 cinderclient/base.py Normal file
@@ -0,0 +1,293 @@
|
|||||||
|
# Copyright 2010 Jacob Kaplan-Moss
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Base utilities to build API operation managers and objects on top of.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import contextlib
|
||||||
|
import hashlib
|
||||||
|
import os
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from cinderclient import utils
|
||||||
|
|
||||||
|
|
||||||
|
# Python 2.4 compat
|
||||||
|
try:
|
||||||
|
all
|
||||||
|
except NameError:
|
||||||
|
def all(iterable):
|
||||||
|
return True not in (not x for x in iterable)
|
||||||
|
|
||||||
|
|
||||||
|
def getid(obj):
|
||||||
|
"""
|
||||||
|
Abstracts the common pattern of allowing both an object or an object's ID
|
||||||
|
as a parameter when dealing with relationships.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
return obj.id
|
||||||
|
except AttributeError:
|
||||||
|
return obj
|
||||||
|
|
||||||
|
|
||||||
|
class Manager(utils.HookableMixin):
|
||||||
|
"""
|
||||||
|
Managers interact with a particular type of API (servers, flavors, images,
|
||||||
|
etc.) and provide CRUD operations for them.
|
||||||
|
"""
|
||||||
|
resource_class = None
|
||||||
|
|
||||||
|
def __init__(self, api):
|
||||||
|
self.api = api
|
||||||
|
|
||||||
|
def _list(self, url, response_key, obj_class=None, body=None):
|
||||||
|
resp = None
|
||||||
|
if body:
|
||||||
|
resp, body = self.api.client.post(url, body=body)
|
||||||
|
else:
|
||||||
|
resp, body = self.api.client.get(url)
|
||||||
|
|
||||||
|
if obj_class is None:
|
||||||
|
obj_class = self.resource_class
|
||||||
|
|
||||||
|
data = body[response_key]
|
||||||
|
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
|
||||||
|
# unlike other services which just return the list...
|
||||||
|
if isinstance(data, dict):
|
||||||
|
try:
|
||||||
|
data = data['values']
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
with self.completion_cache('human_id', obj_class, mode="w"):
|
||||||
|
with self.completion_cache('uuid', obj_class, mode="w"):
|
||||||
|
return [obj_class(self, res, loaded=True)
|
||||||
|
for res in data if res]
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def completion_cache(self, cache_type, obj_class, mode):
|
||||||
|
"""
|
||||||
|
The completion cache store items that can be used for bash
|
||||||
|
autocompletion, like UUIDs or human-friendly IDs.
|
||||||
|
|
||||||
|
A resource listing will clear and repopulate the cache.
|
||||||
|
|
||||||
|
A resource create will append to the cache.
|
||||||
|
|
||||||
|
Delete is not handled because listings are assumed to be performed
|
||||||
|
often enough to keep the cache reasonably up-to-date.
|
||||||
|
"""
|
||||||
|
base_dir = utils.env('CINDERCLIENT_UUID_CACHE_DIR',
|
||||||
|
default="~/.cinderclient")
|
||||||
|
|
||||||
|
# NOTE(sirp): Keep separate UUID caches for each username + endpoint
|
||||||
|
# pair
|
||||||
|
username = utils.env('OS_USERNAME', 'CINDER_USERNAME')
|
||||||
|
url = utils.env('OS_URL', 'CINDER_URL')
|
||||||
|
uniqifier = hashlib.md5(username + url).hexdigest()
|
||||||
|
|
||||||
|
cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier))
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.makedirs(cache_dir, 0755)
|
||||||
|
except OSError:
|
||||||
|
# NOTE(kiall): This is typicaly either permission denied while
|
||||||
|
# attempting to create the directory, or the directory
|
||||||
|
# already exists. Either way, don't fail.
|
||||||
|
pass
|
||||||
|
|
||||||
|
resource = obj_class.__name__.lower()
|
||||||
|
filename = "%s-%s-cache" % (resource, cache_type.replace('_', '-'))
|
||||||
|
path = os.path.join(cache_dir, filename)
|
||||||
|
|
||||||
|
cache_attr = "_%s_cache" % cache_type
|
||||||
|
|
||||||
|
try:
|
||||||
|
setattr(self, cache_attr, open(path, mode))
|
||||||
|
except IOError:
|
||||||
|
# NOTE(kiall): This is typicaly a permission denied while
|
||||||
|
# attempting to write the cache file.
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
yield
|
||||||
|
finally:
|
||||||
|
cache = getattr(self, cache_attr, None)
|
||||||
|
if cache:
|
||||||
|
cache.close()
|
||||||
|
delattr(self, cache_attr)
|
||||||
|
|
||||||
|
def write_to_completion_cache(self, cache_type, val):
|
||||||
|
cache = getattr(self, "_%s_cache" % cache_type, None)
|
||||||
|
if cache:
|
||||||
|
cache.write("%s\n" % val)
|
||||||
|
|
||||||
|
def _get(self, url, response_key=None):
|
||||||
|
resp, body = self.api.client.get(url)
|
||||||
|
if response_key:
|
||||||
|
return self.resource_class(self, body[response_key], loaded=True)
|
||||||
|
else:
|
||||||
|
return self.resource_class(self, body, loaded=True)
|
||||||
|
|
||||||
|
def _create(self, url, body, response_key, return_raw=False, **kwargs):
|
||||||
|
self.run_hooks('modify_body_for_create', body, **kwargs)
|
||||||
|
resp, body = self.api.client.post(url, body=body)
|
||||||
|
if return_raw:
|
||||||
|
return body[response_key]
|
||||||
|
|
||||||
|
with self.completion_cache('human_id', self.resource_class, mode="a"):
|
||||||
|
with self.completion_cache('uuid', self.resource_class, mode="a"):
|
||||||
|
return self.resource_class(self, body[response_key])
|
||||||
|
|
||||||
|
def _delete(self, url):
|
||||||
|
resp, body = self.api.client.delete(url)
|
||||||
|
|
||||||
|
def _update(self, url, body, **kwargs):
|
||||||
|
self.run_hooks('modify_body_for_update', body, **kwargs)
|
||||||
|
resp, body = self.api.client.put(url, body=body)
|
||||||
|
return body
|
||||||
|
|
||||||
|
|
||||||
|
class ManagerWithFind(Manager):
|
||||||
|
"""
|
||||||
|
Like a `Manager`, but with additional `find()`/`findall()` methods.
|
||||||
|
"""
|
||||||
|
def find(self, **kwargs):
|
||||||
|
"""
|
||||||
|
Find a single item with attributes matching ``**kwargs``.
|
||||||
|
|
||||||
|
This isn't very efficient: it loads the entire list then filters on
|
||||||
|
the Python side.
|
||||||
|
"""
|
||||||
|
matches = self.findall(**kwargs)
|
||||||
|
num_matches = len(matches)
|
||||||
|
if num_matches == 0:
|
||||||
|
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
|
||||||
|
raise exceptions.NotFound(404, msg)
|
||||||
|
elif num_matches > 1:
|
||||||
|
raise exceptions.NoUniqueMatch
|
||||||
|
else:
|
||||||
|
return matches[0]
|
||||||
|
|
||||||
|
def findall(self, **kwargs):
|
||||||
|
"""
|
||||||
|
Find all items with attributes matching ``**kwargs``.
|
||||||
|
|
||||||
|
This isn't very efficient: it loads the entire list then filters on
|
||||||
|
the Python side.
|
||||||
|
"""
|
||||||
|
found = []
|
||||||
|
searches = kwargs.items()
|
||||||
|
|
||||||
|
for obj in self.list():
|
||||||
|
try:
|
||||||
|
if all(getattr(obj, attr) == value
|
||||||
|
for (attr, value) in searches):
|
||||||
|
found.append(obj)
|
||||||
|
except AttributeError:
|
||||||
|
continue
|
||||||
|
|
||||||
|
return found
|
||||||
|
|
||||||
|
def list(self):
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
|
||||||
|
class Resource(object):
|
||||||
|
"""
|
||||||
|
A resource represents a particular instance of an object (server, flavor,
|
||||||
|
etc). This is pretty much just a bag for attributes.
|
||||||
|
|
||||||
|
:param manager: Manager object
|
||||||
|
:param info: dictionary representing resource attributes
|
||||||
|
:param loaded: prevent lazy-loading if set to True
|
||||||
|
"""
|
||||||
|
HUMAN_ID = False
|
||||||
|
|
||||||
|
def __init__(self, manager, info, loaded=False):
|
||||||
|
self.manager = manager
|
||||||
|
self._info = info
|
||||||
|
self._add_details(info)
|
||||||
|
self._loaded = loaded
|
||||||
|
|
||||||
|
# NOTE(sirp): ensure `id` is already present because if it isn't we'll
|
||||||
|
# enter an infinite loop of __getattr__ -> get -> __init__ ->
|
||||||
|
# __getattr__ -> ...
|
||||||
|
if 'id' in self.__dict__ and len(str(self.id)) == 36:
|
||||||
|
self.manager.write_to_completion_cache('uuid', self.id)
|
||||||
|
|
||||||
|
human_id = self.human_id
|
||||||
|
if human_id:
|
||||||
|
self.manager.write_to_completion_cache('human_id', human_id)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def human_id(self):
|
||||||
|
"""Subclasses may override this provide a pretty ID which can be used
|
||||||
|
for bash completion.
|
||||||
|
"""
|
||||||
|
if 'name' in self.__dict__ and self.HUMAN_ID:
|
||||||
|
return utils.slugify(self.name)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _add_details(self, info):
|
||||||
|
for (k, v) in info.iteritems():
|
||||||
|
try:
|
||||||
|
setattr(self, k, v)
|
||||||
|
except AttributeError:
|
||||||
|
# In this case we already defined the attribute on the class
|
||||||
|
pass
|
||||||
|
|
||||||
|
def __getattr__(self, k):
|
||||||
|
if k not in self.__dict__:
|
||||||
|
#NOTE(bcwaldon): disallow lazy-loading if already loaded once
|
||||||
|
if not self.is_loaded():
|
||||||
|
self.get()
|
||||||
|
return self.__getattr__(k)
|
||||||
|
|
||||||
|
raise AttributeError(k)
|
||||||
|
else:
|
||||||
|
return self.__dict__[k]
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
|
||||||
|
k != 'manager')
|
||||||
|
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
|
||||||
|
return "<%s %s>" % (self.__class__.__name__, info)
|
||||||
|
|
||||||
|
def get(self):
|
||||||
|
# set_loaded() first ... so if we have to bail, we know we tried.
|
||||||
|
self.set_loaded(True)
|
||||||
|
if not hasattr(self.manager, 'get'):
|
||||||
|
return
|
||||||
|
|
||||||
|
new = self.manager.get(self.id)
|
||||||
|
if new:
|
||||||
|
self._add_details(new._info)
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if not isinstance(other, self.__class__):
|
||||||
|
return False
|
||||||
|
if hasattr(self, 'id') and hasattr(other, 'id'):
|
||||||
|
return self.id == other.id
|
||||||
|
return self._info == other._info
|
||||||
|
|
||||||
|
def is_loaded(self):
|
||||||
|
return self._loaded
|
||||||
|
|
||||||
|
def set_loaded(self, val):
|
||||||
|
self._loaded = val
|
378 cinderclient/client.py Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
# Copyright 2010 Jacob Kaplan-Moss
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# Copyright 2011 Piston Cloud Computing, Inc.
|
||||||
|
|
||||||
|
# All Rights Reserved.
|
||||||
|
"""
|
||||||
|
OpenStack Client interface. Handles the REST calls and responses.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import urlparse
|
||||||
|
try:
|
||||||
|
from eventlet import sleep
|
||||||
|
except ImportError:
|
||||||
|
from time import sleep
|
||||||
|
|
||||||
|
try:
|
||||||
|
import json
|
||||||
|
except ImportError:
|
||||||
|
import simplejson as json
|
||||||
|
|
||||||
|
# Python 2.5 compat fix
|
||||||
|
if not hasattr(urlparse, 'parse_qsl'):
|
||||||
|
import cgi
|
||||||
|
urlparse.parse_qsl = cgi.parse_qsl
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from cinderclient import service_catalog
|
||||||
|
from cinderclient import utils
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPClient(object):
|
||||||
|
|
||||||
|
USER_AGENT = 'python-cinderclient'
|
||||||
|
|
||||||
|
def __init__(self, user, password, projectid, auth_url, insecure=False,
|
||||||
|
timeout=None, tenant_id=None, proxy_tenant_id=None,
|
||||||
|
proxy_token=None, region_name=None,
|
||||||
|
endpoint_type='publicURL', service_type=None,
|
||||||
|
service_name=None, volume_service_name=None, retries=None,
|
||||||
|
http_log_debug=False, cacert=None):
|
||||||
|
self.user = user
|
||||||
|
self.password = password
|
||||||
|
self.projectid = projectid
|
||||||
|
self.tenant_id = tenant_id
|
||||||
|
self.auth_url = auth_url.rstrip('/')
|
||||||
|
self.version = 'v1'
|
||||||
|
self.region_name = region_name
|
||||||
|
self.endpoint_type = endpoint_type
|
||||||
|
self.service_type = service_type
|
||||||
|
self.service_name = service_name
|
||||||
|
self.volume_service_name = volume_service_name
|
||||||
|
self.retries = int(retries or 0)
|
||||||
|
self.http_log_debug = http_log_debug
|
||||||
|
|
||||||
|
self.management_url = None
|
||||||
|
self.auth_token = None
|
||||||
|
self.proxy_token = proxy_token
|
||||||
|
self.proxy_tenant_id = proxy_tenant_id
|
||||||
|
|
||||||
|
if insecure:
|
||||||
|
self.verify_cert = False
|
||||||
|
else:
|
||||||
|
if cacert:
|
||||||
|
self.verify_cert = cacert
|
||||||
|
else:
|
||||||
|
self.verify_cert = True
|
||||||
|
|
||||||
|
self._logger = logging.getLogger(__name__)
|
||||||
|
if self.http_log_debug:
|
||||||
|
ch = logging.StreamHandler()
|
||||||
|
self._logger.setLevel(logging.DEBUG)
|
||||||
|
self._logger.addHandler(ch)
|
||||||
|
if hasattr(requests, 'logging'):
|
||||||
|
requests.logging.getLogger(requests.__name__).addHandler(ch)
|
||||||
|
|
||||||
|
def http_log_req(self, args, kwargs):
|
||||||
|
if not self.http_log_debug:
|
||||||
|
return
|
||||||
|
|
||||||
|
string_parts = ['curl -i']
|
||||||
|
for element in args:
|
||||||
|
if element in ('GET', 'POST', 'DELETE', 'PUT'):
|
||||||
|
string_parts.append(' -X %s' % element)
|
||||||
|
else:
|
||||||
|
string_parts.append(' %s' % element)
|
||||||
|
|
||||||
|
for element in kwargs['headers']:
|
||||||
|
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
|
||||||
|
string_parts.append(header)
|
||||||
|
|
||||||
|
if 'data' in kwargs:
|
||||||
|
string_parts.append(" -d '%s'" % (kwargs['data']))
|
||||||
|
self._logger.debug("\nREQ: %s\n" % "".join(string_parts))
|
||||||
|
|
||||||
|
def http_log_resp(self, resp):
|
||||||
|
if not self.http_log_debug:
|
||||||
|
return
|
||||||
|
self._logger.debug(
|
||||||
|
"RESP: [%s] %s\nRESP BODY: %s\n",
|
||||||
|
resp.status_code,
|
||||||
|
resp.headers,
|
||||||
|
resp.text)
|
||||||
|
|
||||||
|
def request(self, url, method, **kwargs):
|
||||||
|
kwargs.setdefault('headers', kwargs.get('headers', {}))
|
||||||
|
kwargs['headers']['User-Agent'] = self.USER_AGENT
|
||||||
|
kwargs['headers']['Accept'] = 'application/json'
|
||||||
|
if 'body' in kwargs:
|
||||||
|
kwargs['headers']['Content-Type'] = 'application/json'
|
||||||
|
kwargs['data'] = json.dumps(kwargs['body'])
|
||||||
|
del kwargs['body']
|
||||||
|
|
||||||
|
self.http_log_req((url, method,), kwargs)
|
||||||
|
resp = requests.request(
|
||||||
|
method,
|
||||||
|
url,
|
||||||
|
verify=self.verify_cert,
|
||||||
|
**kwargs)
|
||||||
|
self.http_log_resp(resp)
|
||||||
|
|
||||||
|
if resp.text:
|
||||||
|
try:
|
||||||
|
body = json.loads(resp.text)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
body = None
|
||||||
|
else:
|
||||||
|
body = None
|
||||||
|
|
||||||
|
if resp.status_code >= 400:
|
||||||
|
raise exceptions.from_response(resp, body)
|
||||||
|
|
||||||
|
return resp, body
|
||||||
|
|
||||||
|
def _cs_request(self, url, method, **kwargs):
|
||||||
|
auth_attempts = 0
|
||||||
|
attempts = 0
|
||||||
|
backoff = 1
|
||||||
|
while True:
|
||||||
|
attempts += 1
|
||||||
|
if not self.management_url or not self.auth_token:
|
||||||
|
self.authenticate()
|
||||||
|
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
|
||||||
|
if self.projectid:
|
||||||
|
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
|
||||||
|
try:
|
||||||
|
resp, body = self.request(self.management_url + url, method,
|
||||||
|
**kwargs)
|
||||||
|
return resp, body
|
||||||
|
except exceptions.BadRequest as e:
|
||||||
|
if attempts > self.retries:
|
||||||
|
raise
|
||||||
|
except exceptions.Unauthorized:
|
||||||
|
if auth_attempts > 0:
|
||||||
|
raise
|
||||||
|
self._logger.debug("Unauthorized, reauthenticating.")
|
||||||
|
self.management_url = self.auth_token = None
|
||||||
|
# First reauth. Discount this attempt.
|
||||||
|
attempts -= 1
|
||||||
|
auth_attempts += 1
|
||||||
|
continue
|
||||||
|
except exceptions.ClientException as e:
|
||||||
|
if attempts > self.retries:
|
||||||
|
raise
|
||||||
|
if 500 <= e.code <= 599:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
except requests.exceptions.ConnectionError as e:
|
||||||
|
# Catch a connection refused from requests.request
|
||||||
|
self._logger.debug("Connection refused: %s" % e)
|
||||||
|
raise
|
||||||
|
self._logger.debug(
|
||||||
|
"Failed attempt(%s of %s), retrying in %s seconds" %
|
||||||
|
(attempts, self.retries, backoff))
|
||||||
|
sleep(backoff)
|
||||||
|
backoff *= 2
|
||||||
|
|
||||||
|
def get(self, url, **kwargs):
|
||||||
|
return self._cs_request(url, 'GET', **kwargs)
|
||||||
|
|
||||||
|
def post(self, url, **kwargs):
|
||||||
|
return self._cs_request(url, 'POST', **kwargs)
|
||||||
|
|
||||||
|
def put(self, url, **kwargs):
|
||||||
|
return self._cs_request(url, 'PUT', **kwargs)
|
||||||
|
|
||||||
|
def delete(self, url, **kwargs):
|
||||||
|
return self._cs_request(url, 'DELETE', **kwargs)
|
||||||
|
|
||||||
|
def _extract_service_catalog(self, url, resp, body, extract_token=True):
|
||||||
|
"""See what the auth service told us and process the response.
|
||||||
|
We may get redirected to another site, fail or actually get
|
||||||
|
back a service catalog with a token and our endpoints."""
|
||||||
|
|
||||||
|
if resp.status_code == 200: # content must always present
|
||||||
|
try:
|
||||||
|
self.auth_url = url
|
||||||
|
self.service_catalog = \
|
||||||
|
service_catalog.ServiceCatalog(body)
|
||||||
|
|
||||||
|
if extract_token:
|
||||||
|
self.auth_token = self.service_catalog.get_token()
|
||||||
|
|
||||||
|
management_url = self.service_catalog.url_for(
|
||||||
|
attr='region',
|
||||||
|
filter_value=self.region_name,
|
||||||
|
endpoint_type=self.endpoint_type,
|
||||||
|
service_type=self.service_type,
|
||||||
|
service_name=self.service_name,
|
||||||
|
volume_service_name=self.volume_service_name)
|
||||||
|
self.management_url = management_url.rstrip('/')
|
||||||
|
return None
|
||||||
|
except exceptions.AmbiguousEndpoints:
|
||||||
|
print "Found more than one valid endpoint. Use a more " \
|
||||||
|
"restrictive filter"
|
||||||
|
raise
|
||||||
|
except KeyError:
|
||||||
|
raise exceptions.AuthorizationFailure()
|
||||||
|
except exceptions.EndpointNotFound:
|
||||||
|
print "Could not find any suitable endpoint. Correct region?"
|
||||||
|
raise
|
||||||
|
|
||||||
|
elif resp.status_code == 305:
|
||||||
|
return resp['location']
|
||||||
|
else:
|
||||||
|
raise exceptions.from_response(resp, body)
|
||||||
|
|
||||||
|
def _fetch_endpoints_from_auth(self, url):
|
||||||
|
"""We have a token, but don't know the final endpoint for
|
||||||
|
the region. We have to go back to the auth service and
|
||||||
|
ask again. This request requires an admin-level token
|
||||||
|
to work. The proxy token supplied could be from a low-level enduser.
|
||||||
|
|
||||||
|
We can't get this from the keystone service endpoint, we have to use
|
||||||
|
the admin endpoint.
|
||||||
|
|
||||||
|
This will overwrite our admin token with the user token.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# GET ...:5001/v2.0/tokens/#####/endpoints
|
||||||
|
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
|
||||||
|
% (self.proxy_token, self.proxy_tenant_id)])
|
||||||
|
self._logger.debug("Using Endpoint URL: %s" % url)
|
||||||
|
resp, body = self.request(url, "GET",
|
||||||
|
headers={'X-Auth-Token': self.auth_token})
|
||||||
|
return self._extract_service_catalog(url, resp, body,
|
||||||
|
extract_token=False)
|
||||||
|
|
||||||
|
def authenticate(self):
|
||||||
|
magic_tuple = urlparse.urlsplit(self.auth_url)
|
||||||
|
scheme, netloc, path, query, frag = magic_tuple
|
||||||
|
port = magic_tuple.port
|
||||||
|
if port is None:
|
||||||
|
port = 80
|
||||||
|
path_parts = path.split('/')
|
||||||
|
for part in path_parts:
|
||||||
|
if len(part) > 0 and part[0] == 'v':
|
||||||
|
self.version = part
|
||||||
|
break
|
||||||
|
|
||||||
|
# TODO(sandy): Assume admin endpoint is 35357 for now.
|
||||||
|
# Ideally this is going to have to be provided by the service catalog.
|
||||||
|
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
|
||||||
|
admin_url = urlparse.urlunsplit((scheme, new_netloc,
|
||||||
|
path, query, frag))
|
||||||
|
|
||||||
|
auth_url = self.auth_url
|
||||||
|
if self.version == "v2.0":
|
||||||
|
while auth_url:
|
||||||
|
if "CINDER_RAX_AUTH" in os.environ:
|
||||||
|
auth_url = self._rax_auth(auth_url)
|
||||||
|
else:
|
||||||
|
auth_url = self._v2_auth(auth_url)
|
||||||
|
|
||||||
|
# Are we acting on behalf of another user via an
|
||||||
|
# existing token? If so, our actual endpoints may
|
||||||
|
# be different than that of the admin token.
|
||||||
|
if self.proxy_token:
|
||||||
|
self._fetch_endpoints_from_auth(admin_url)
|
||||||
|
# Since keystone no longer returns the user token
|
||||||
|
# with the endpoints any more, we need to replace
|
||||||
|
# our service account token with the user token.
|
||||||
|
self.auth_token = self.proxy_token
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
while auth_url:
|
||||||
|
auth_url = self._v1_auth(auth_url)
|
||||||
|
# In some configurations cinder makes redirection to
|
||||||
|
# v2.0 keystone endpoint. Also, new location does not contain
|
||||||
|
# real endpoint, only hostname and port.
|
||||||
|
except exceptions.AuthorizationFailure:
|
||||||
|
if auth_url.find('v2.0') < 0:
|
||||||
|
auth_url = auth_url + '/v2.0'
|
||||||
|
self._v2_auth(auth_url)
|
||||||
|
|
||||||
|
def _v1_auth(self, url):
|
||||||
|
if self.proxy_token:
|
||||||
|
raise exceptions.NoTokenLookupException()
|
||||||
|
|
||||||
|
headers = {'X-Auth-User': self.user,
|
||||||
|
'X-Auth-Key': self.password}
|
||||||
|
if self.projectid:
|
||||||
|
headers['X-Auth-Project-Id'] = self.projectid
|
||||||
|
|
||||||
|
resp, body = self.request(url, 'GET', headers=headers)
|
||||||
|
if resp.status_code in (200, 204): # in some cases we get No Content
|
||||||
|
try:
|
||||||
|
mgmt_header = 'x-server-management-url'
|
||||||
|
self.management_url = resp.headers[mgmt_header].rstrip('/')
|
||||||
|
self.auth_token = resp.headers['x-auth-token']
|
||||||
|
self.auth_url = url
|
||||||
|
except (KeyError, TypeError):
|
||||||
|
raise exceptions.AuthorizationFailure()
|
||||||
|
elif resp.status_code == 305:
|
||||||
|
return resp.headers['location']
|
||||||
|
else:
|
||||||
|
raise exceptions.from_response(resp, body)
|
||||||
|
|
||||||
|
def _v2_auth(self, url):
|
||||||
|
"""Authenticate against a v2.0 auth service."""
|
||||||
|
body = {"auth": {
|
||||||
|
"passwordCredentials": {"username": self.user,
|
||||||
|
"password": self.password}}}
|
||||||
|
|
||||||
|
if self.projectid:
|
||||||
|
body['auth']['tenantName'] = self.projectid
|
||||||
|
elif self.tenant_id:
|
||||||
|
body['auth']['tenantId'] = self.tenant_id
|
||||||
|
|
||||||
|
self._authenticate(url, body)
|
||||||
|
|
||||||
|
def _rax_auth(self, url):
|
||||||
|
"""Authenticate against the Rackspace auth service."""
|
||||||
|
body = {"auth": {
|
||||||
|
"RAX-KSKEY:apiKeyCredentials": {
|
||||||
|
"username": self.user,
|
||||||
|
"apiKey": self.password,
|
||||||
|
"tenantName": self.projectid}}}
|
||||||
|
|
||||||
|
self._authenticate(url, body)
|
||||||
|
|
||||||
|
def _authenticate(self, url, body):
|
||||||
|
"""Authenticate and extract the service catalog."""
|
||||||
|
token_url = url + "/tokens"
|
||||||
|
|
||||||
|
# Make sure we follow redirects when trying to reach Keystone
|
||||||
|
resp, body = self.request(
|
||||||
|
token_url,
|
||||||
|
"POST",
|
||||||
|
body=body,
|
||||||
|
allow_redirects=True)
|
||||||
|
|
||||||
|
return self._extract_service_catalog(url, resp, body)
|
||||||
|
|
||||||
|
|
||||||
|
def get_client_class(version):
|
||||||
|
version_map = {
|
||||||
|
'1': 'cinderclient.v1.client.Client',
|
||||||
|
'2': 'cinderclient.v2.client.Client',
|
||||||
|
}
|
||||||
|
try:
|
||||||
|
client_path = version_map[str(version)]
|
||||||
|
except (KeyError, ValueError):
|
||||||
|
msg = "Invalid client version '%s'. must be one of: %s" % (
|
||||||
|
(version, ', '.join(version_map.keys())))
|
||||||
|
raise exceptions.UnsupportedVersion(msg)
|
||||||
|
|
||||||
|
return utils.import_class(client_path)
|
||||||
|
|
||||||
|
|
||||||
|
def Client(version, *args, **kwargs):
|
||||||
|
client_class = get_client_class(version)
|
||||||
|
return client_class(*args, **kwargs)
|
150 cinderclient/exceptions.py Normal file
@@ -0,0 +1,150 @@
# Copyright 2010 Jacob Kaplan-Moss
"""
Exception definitions.
"""


class UnsupportedVersion(Exception):
    """Indicates that the user is trying to use an unsupported
    version of the API"""
    pass


class CommandError(Exception):
    pass


class AuthorizationFailure(Exception):
    pass


class NoUniqueMatch(Exception):
    pass


class NoTokenLookupException(Exception):
    """This form of authentication does not support looking up
    endpoints from an existing token."""
    pass


class EndpointNotFound(Exception):
    """Could not find Service or Region in Service Catalog."""
    pass


class AmbiguousEndpoints(Exception):
    """Found more than one matching endpoint in Service Catalog."""
    def __init__(self, endpoints=None):
        self.endpoints = endpoints

    def __str__(self):
        return "AmbiguousEndpoints: %s" % repr(self.endpoints)


class ClientException(Exception):
    """
    The base exception class for all exceptions this library raises.
    """
    def __init__(self, code, message=None, details=None, request_id=None):
        self.code = code
        self.message = message or self.__class__.message
        self.details = details
        self.request_id = request_id

    def __str__(self):
        formatted_string = "%s (HTTP %s)" % (self.message, self.code)
        if self.request_id:
            formatted_string += " (Request-ID: %s)" % self.request_id

        return formatted_string


class BadRequest(ClientException):
    """
    HTTP 400 - Bad request: you sent some malformed data.
    """
    http_status = 400
    message = "Bad request"


class Unauthorized(ClientException):
    """
    HTTP 401 - Unauthorized: bad credentials.
    """
    http_status = 401
    message = "Unauthorized"


class Forbidden(ClientException):
    """
    HTTP 403 - Forbidden: your credentials don't give you access to this
    resource.
    """
    http_status = 403
    message = "Forbidden"


class NotFound(ClientException):
    """
    HTTP 404 - Not found
    """
    http_status = 404
    message = "Not found"


class OverLimit(ClientException):
    """
    HTTP 413 - Over limit: you're over the API limits for this time period.
    """
    http_status = 413
    message = "Over limit"


# NotImplemented is a python keyword.
class HTTPNotImplemented(ClientException):
    """
    HTTP 501 - Not Implemented: the server does not support this operation.
    """
    http_status = 501
    message = "Not Implemented"


# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can't do this:
#     _code_map = dict((c.http_status, c)
#                      for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict((c.http_status, c) for c in [BadRequest, Unauthorized,
                                              Forbidden, NotFound,
                                              OverLimit, HTTPNotImplemented])


def from_response(response, body):
    """
    Return an instance of a ClientException or subclass
    based on a requests response.

    Usage::

        resp, body = requests.request(...)
        if resp.status_code != 200:
            raise exceptions.from_response(resp, resp.text)
    """
    cls = _code_map.get(response.status_code, ClientException)
    if response.headers:
        request_id = response.headers.get('x-compute-request-id')
    else:
        request_id = None
    if body:
        message = "n/a"
        details = "n/a"
        if hasattr(body, 'keys'):
            error = body[body.keys()[0]]
            message = error.get('message', None)
            details = error.get('details', None)
        return cls(code=response.status_code, message=message, details=details,
                   request_id=request_id)
    else:
        return cls(code=response.status_code, request_id=request_id)
39
cinderclient/extension.py
Normal file
@ -0,0 +1,39 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import base
from cinderclient import utils


class Extension(utils.HookableMixin):
    """Extension descriptor."""

    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')

    def __init__(self, name, module):
        self.name = name
        self.module = module
        self._parse_extension_module()

    def _parse_extension_module(self):
        self.manager_class = None
        for attr_name, attr_value in self.module.__dict__.items():
            if attr_name in self.SUPPORTED_HOOKS:
                self.add_hook(attr_name, attr_value)
            elif utils.safe_issubclass(attr_value, base.Manager):
                self.manager_class = attr_value

    def __repr__(self):
        return "<Extension '%s'>" % self.name
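As a hedged sketch of what Extension expects to find, a hypothetical extension module might look like the following; the names foo_module and FooManager are invented for illustration and are not part of this commit::

    # contents of a hypothetical foo_module
    from cinderclient import base

    def __post_parse_args__(args):
        # picked up via SUPPORTED_HOOKS and registered with add_hook()
        pass

    class FooManager(base.Manager):
        """Detected by _parse_extension_module() as the manager_class."""

    # elsewhere: Extension('foo', foo_module).manager_class is FooManager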
0
cinderclient/openstack/__init__.py
Normal file
0
cinderclient/openstack/common/__init__.py
Normal file
367
cinderclient/openstack/common/setup.py
Normal file
@ -0,0 +1,367 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack Foundation.
|
||||||
|
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Utilities with minimum-depends for use in setup.py
|
||||||
|
"""
|
||||||
|
|
||||||
|
import email
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from setuptools.command import sdist
|
||||||
|
|
||||||
|
|
||||||
|
def parse_mailmap(mailmap='.mailmap'):
|
||||||
|
mapping = {}
|
||||||
|
if os.path.exists(mailmap):
|
||||||
|
with open(mailmap, 'r') as fp:
|
||||||
|
for l in fp:
|
||||||
|
try:
|
||||||
|
canonical_email, alias = re.match(
|
||||||
|
r'[^#]*?(<.+>).*(<.+>).*', l).groups()
|
||||||
|
except AttributeError:
|
||||||
|
continue
|
||||||
|
mapping[alias] = canonical_email
|
||||||
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
|
||||||
|
mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
|
||||||
|
return parse_mailmap(mailmap)
|
||||||
|
|
||||||
|
|
||||||
|
def canonicalize_emails(changelog, mapping):
|
||||||
|
"""Takes in a string and an email alias mapping and replaces all
|
||||||
|
instances of the aliases in the string with their real email.
|
||||||
|
"""
|
||||||
|
for alias, email_address in mapping.iteritems():
|
||||||
|
changelog = changelog.replace(alias, email_address)
|
||||||
|
return changelog
|
||||||
|
|
||||||
|
|
||||||
|
# Get requirements from the first file that exists
|
||||||
|
def get_reqs_from_files(requirements_files):
|
||||||
|
for requirements_file in requirements_files:
|
||||||
|
if os.path.exists(requirements_file):
|
||||||
|
with open(requirements_file, 'r') as fil:
|
||||||
|
return fil.read().split('\n')
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def parse_requirements(requirements_files=['requirements.txt',
|
||||||
|
'tools/pip-requires']):
|
||||||
|
requirements = []
|
||||||
|
for line in get_reqs_from_files(requirements_files):
|
||||||
|
# For the requirements list, we need to inject only the portion
|
||||||
|
# after egg= so that distutils knows the package it's looking for
|
||||||
|
# such as:
|
||||||
|
# -e git://github.com/openstack/nova/master#egg=nova
|
||||||
|
if re.match(r'\s*-e\s+', line):
|
||||||
|
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
|
||||||
|
line))
|
||||||
|
# such as:
|
||||||
|
# http://github.com/openstack/nova/zipball/master#egg=nova
|
||||||
|
elif re.match(r'\s*https?:', line):
|
||||||
|
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
|
||||||
|
line))
|
||||||
|
# -f lines are for index locations, and don't get used here
|
||||||
|
elif re.match(r'\s*-f\s+', line):
|
||||||
|
pass
|
||||||
|
# argparse is part of the standard library starting with 2.7
|
||||||
|
# adding it to the requirements list screws distro installs
|
||||||
|
elif line == 'argparse' and sys.version_info >= (2, 7):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
requirements.append(line)
|
||||||
|
|
||||||
|
return requirements
|
||||||
|
|
||||||
|
|
||||||
|
def parse_dependency_links(requirements_files=['requirements.txt',
|
||||||
|
'tools/pip-requires']):
|
||||||
|
dependency_links = []
|
||||||
|
# dependency_links inject alternate locations to find packages listed
|
||||||
|
# in requirements
|
||||||
|
for line in get_reqs_from_files(requirements_files):
|
||||||
|
# skip comments and blank lines
|
||||||
|
if re.match(r'(\s*#)|(\s*$)', line):
|
||||||
|
continue
|
||||||
|
# lines with -e or -f need the whole line, minus the flag
|
||||||
|
if re.match(r'\s*-[ef]\s+', line):
|
||||||
|
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
|
||||||
|
# lines that are only urls can go in unmolested
|
||||||
|
elif re.match(r'\s*https?:', line):
|
||||||
|
dependency_links.append(line)
|
||||||
|
return dependency_links
|
||||||
|
|
||||||
|
|
||||||
|
def _run_shell_command(cmd, throw_on_error=False):
|
||||||
|
if os.name == 'nt':
|
||||||
|
output = subprocess.Popen(["cmd.exe", "/C", cmd],
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE)
|
||||||
|
else:
|
||||||
|
output = subprocess.Popen(["/bin/sh", "-c", cmd],
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE)
|
||||||
|
out = output.communicate()
|
||||||
|
if output.returncode and throw_on_error:
|
||||||
|
raise Exception("%s returned %d" % (cmd, output.returncode))
|
||||||
|
if len(out) == 0:
|
||||||
|
return None
|
||||||
|
if len(out[0].strip()) == 0:
|
||||||
|
return None
|
||||||
|
return out[0].strip()
|
||||||
|
|
||||||
|
|
||||||
|
def _get_git_directory():
|
||||||
|
parent_dir = os.path.dirname(__file__)
|
||||||
|
while True:
|
||||||
|
git_dir = os.path.join(parent_dir, '.git')
|
||||||
|
if os.path.exists(git_dir):
|
||||||
|
return git_dir
|
||||||
|
parent_dir, child = os.path.split(parent_dir)
|
||||||
|
if not child:  # reached the root dir
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def write_git_changelog():
|
||||||
|
"""Write a changelog based on the git changelog."""
|
||||||
|
new_changelog = 'ChangeLog'
|
||||||
|
git_dir = _get_git_directory()
|
||||||
|
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
|
||||||
|
if git_dir:
|
||||||
|
git_log_cmd = 'git --git-dir=%s log' % git_dir
|
||||||
|
changelog = _run_shell_command(git_log_cmd)
|
||||||
|
mailmap = _parse_git_mailmap(git_dir)
|
||||||
|
with open(new_changelog, "w") as changelog_file:
|
||||||
|
changelog_file.write(canonicalize_emails(changelog, mailmap))
|
||||||
|
else:
|
||||||
|
open(new_changelog, 'w').close()
|
||||||
|
|
||||||
|
|
||||||
|
def generate_authors():
|
||||||
|
"""Create AUTHORS file using git commits."""
|
||||||
|
jenkins_email = 'jenkins@review.(openstack|stackforge).org'
|
||||||
|
old_authors = 'AUTHORS.in'
|
||||||
|
new_authors = 'AUTHORS'
|
||||||
|
git_dir = _get_git_directory()
|
||||||
|
if not os.getenv('SKIP_GENERATE_AUTHORS'):
|
||||||
|
if git_dir:
|
||||||
|
# don't include jenkins email address in AUTHORS file
|
||||||
|
git_log_cmd = ("git --git-dir=" + git_dir +
|
||||||
|
" log --format='%aN <%aE>' | sort -u | "
|
||||||
|
"egrep -v '" + jenkins_email + "'")
|
||||||
|
changelog = _run_shell_command(git_log_cmd)
|
||||||
|
signed_cmd = ("git --git-dir=" + git_dir +
|
||||||
|
" log | grep -i Co-authored-by: | sort -u")
|
||||||
|
signed_entries = _run_shell_command(signed_cmd)
|
||||||
|
if signed_entries:
|
||||||
|
new_entries = "\n".join(
|
||||||
|
[signed.split(":", 1)[1].strip()
|
||||||
|
for signed in signed_entries.split("\n") if signed])
|
||||||
|
changelog = "\n".join((changelog, new_entries))
|
||||||
|
mailmap = _parse_git_mailmap(git_dir)
|
||||||
|
with open(new_authors, 'w') as new_authors_fh:
|
||||||
|
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
|
||||||
|
if os.path.exists(old_authors):
|
||||||
|
with open(old_authors, "r") as old_authors_fh:
|
||||||
|
new_authors_fh.write('\n' + old_authors_fh.read())
|
||||||
|
else:
|
||||||
|
open(new_authors, 'w').close()
|
||||||
|
|
||||||
|
|
||||||
|
_rst_template = """%(heading)s
|
||||||
|
%(underline)s
|
||||||
|
|
||||||
|
.. automodule:: %(module)s
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
:show-inheritance:
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def get_cmdclass():
|
||||||
|
"""Return dict of commands to run from setup.py."""
|
||||||
|
|
||||||
|
cmdclass = dict()
|
||||||
|
|
||||||
|
def _find_modules(arg, dirname, files):
|
||||||
|
for filename in files:
|
||||||
|
if filename.endswith('.py') and filename != '__init__.py':
|
||||||
|
arg["%s.%s" % (dirname.replace('/', '.'),
|
||||||
|
filename[:-3])] = True
|
||||||
|
|
||||||
|
class LocalSDist(sdist.sdist):
|
||||||
|
"""Builds the ChangeLog and Authors files from VC first."""
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
write_git_changelog()
|
||||||
|
generate_authors()
|
||||||
|
# sdist.sdist is an old style class, can't use super()
|
||||||
|
sdist.sdist.run(self)
|
||||||
|
|
||||||
|
cmdclass['sdist'] = LocalSDist
|
||||||
|
|
||||||
|
# If Sphinx is installed on the box running setup.py,
|
||||||
|
# enable setup.py to build the documentation, otherwise,
|
||||||
|
# just ignore it
|
||||||
|
try:
|
||||||
|
from sphinx.setup_command import BuildDoc
|
||||||
|
|
||||||
|
class LocalBuildDoc(BuildDoc):
|
||||||
|
|
||||||
|
builders = ['html', 'man']
|
||||||
|
|
||||||
|
def generate_autoindex(self):
|
||||||
|
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
|
||||||
|
modules = {}
|
||||||
|
option_dict = self.distribution.get_option_dict('build_sphinx')
|
||||||
|
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
|
||||||
|
if not os.path.exists(source_dir):
|
||||||
|
os.makedirs(source_dir)
|
||||||
|
for pkg in self.distribution.packages:
|
||||||
|
if '.' not in pkg:
|
||||||
|
os.path.walk(pkg, _find_modules, modules)
|
||||||
|
module_list = modules.keys()
|
||||||
|
module_list.sort()
|
||||||
|
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
|
||||||
|
with open(autoindex_filename, 'w') as autoindex:
|
||||||
|
autoindex.write(""".. toctree::
|
||||||
|
:maxdepth: 1
|
||||||
|
|
||||||
|
""")
|
||||||
|
for module in module_list:
|
||||||
|
output_filename = os.path.join(source_dir,
|
||||||
|
"%s.rst" % module)
|
||||||
|
heading = "The :mod:`%s` Module" % module
|
||||||
|
underline = "=" * len(heading)
|
||||||
|
values = dict(module=module, heading=heading,
|
||||||
|
underline=underline)
|
||||||
|
|
||||||
|
print "Generating %s" % output_filename
|
||||||
|
with open(output_filename, 'w') as output_file:
|
||||||
|
output_file.write(_rst_template % values)
|
||||||
|
autoindex.write(" %s.rst\n" % module)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
if not os.getenv('SPHINX_DEBUG'):
|
||||||
|
self.generate_autoindex()
|
||||||
|
|
||||||
|
for builder in self.builders:
|
||||||
|
self.builder = builder
|
||||||
|
self.finalize_options()
|
||||||
|
self.project = self.distribution.get_name()
|
||||||
|
self.version = self.distribution.get_version()
|
||||||
|
self.release = self.distribution.get_version()
|
||||||
|
BuildDoc.run(self)
|
||||||
|
|
||||||
|
class LocalBuildLatex(LocalBuildDoc):
|
||||||
|
builders = ['latex']
|
||||||
|
|
||||||
|
cmdclass['build_sphinx'] = LocalBuildDoc
|
||||||
|
cmdclass['build_sphinx_latex'] = LocalBuildLatex
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return cmdclass
|
||||||
|
|
||||||
|
|
||||||
|
def _get_revno(git_dir):
|
||||||
|
"""Return the number of commits since the most recent tag.
|
||||||
|
|
||||||
|
We use git-describe to find this out, but if there are no
|
||||||
|
tags then we fall back to counting commits since the beginning
|
||||||
|
of time.
|
||||||
|
"""
|
||||||
|
describe = _run_shell_command(
|
||||||
|
"git --git-dir=%s describe --always" % git_dir)
|
||||||
|
if "-" in describe:
|
||||||
|
return describe.rsplit("-", 2)[-2]
|
||||||
|
|
||||||
|
# no tags found
|
||||||
|
revlist = _run_shell_command(
|
||||||
|
"git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
|
||||||
|
return len(revlist.splitlines())
|
||||||
|
|
||||||
|
|
||||||
|
def _get_version_from_git(pre_version):
|
||||||
|
"""Return a version which is equal to the tag that's on the current
|
||||||
|
revision if there is one, or tag plus number of additional revisions
|
||||||
|
if the current revision has no tag."""
|
||||||
|
|
||||||
|
git_dir = _get_git_directory()
|
||||||
|
if git_dir:
|
||||||
|
if pre_version:
|
||||||
|
try:
|
||||||
|
return _run_shell_command(
|
||||||
|
"git --git-dir=" + git_dir + " describe --exact-match",
|
||||||
|
throw_on_error=True).replace('-', '.')
|
||||||
|
except Exception:
|
||||||
|
sha = _run_shell_command(
|
||||||
|
"git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
|
||||||
|
return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
|
||||||
|
else:
|
||||||
|
return _run_shell_command(
|
||||||
|
"git --git-dir=" + git_dir + " describe --always").replace(
|
||||||
|
'-', '.')
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _get_version_from_pkg_info(package_name):
|
||||||
|
"""Get the version from PKG-INFO file if we can."""
|
||||||
|
try:
|
||||||
|
pkg_info_file = open('PKG-INFO', 'r')
|
||||||
|
except (IOError, OSError):
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
pkg_info = email.message_from_file(pkg_info_file)
|
||||||
|
except email.MessageError:
|
||||||
|
return None
|
||||||
|
# Check to make sure we're in our own dir
|
||||||
|
if pkg_info.get('Name', None) != package_name:
|
||||||
|
return None
|
||||||
|
return pkg_info.get('Version', None)
|
||||||
|
|
||||||
|
|
||||||
|
def get_version(package_name, pre_version=None):
|
||||||
|
"""Get the version of the project. First, try getting it from PKG-INFO, if
|
||||||
|
it exists. If it does, that means we're in a distribution tarball or that
|
||||||
|
install has happened. Otherwise, if there is no PKG-INFO file, pull the
|
||||||
|
version from git.
|
||||||
|
|
||||||
|
We do not support setup.py version sanity in git archive tarballs, nor do
|
||||||
|
we support packagers directly sucking our git repo into theirs. We expect
|
||||||
|
that a source tarball be made from our git repo - or that if someone wants
|
||||||
|
to make a source tarball from a fork of our repo with additional tags in it
|
||||||
|
that they understand and desire the results of doing that.
|
||||||
|
"""
|
||||||
|
version = os.environ.get("OSLO_PACKAGE_VERSION", None)
|
||||||
|
if version:
|
||||||
|
return version
|
||||||
|
version = _get_version_from_pkg_info(package_name)
|
||||||
|
if version:
|
||||||
|
return version
|
||||||
|
version = _get_version_from_git(pre_version)
|
||||||
|
if version:
|
||||||
|
return version
|
||||||
|
raise Exception("Versioning for this project requires either an sdist"
|
||||||
|
" tarball, or access to an upstream git repository.")
|
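A small, hedged example of what parse_requirements() produces; the temporary file name and requirement lines are made up, and the trailing empty string comes from splitting on the final newline::

    import tempfile

    from cinderclient.openstack.common import setup

    tmp = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
    tmp.write('-e git://github.com/openstack/nova.git#egg=nova\n'
              'prettytable>=0.6\n')
    tmp.close()

    print setup.parse_requirements([tmp.name])
    # ['nova', 'prettytable>=0.6', '']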
133
cinderclient/openstack/common/strutils.py
Normal file
@ -0,0 +1,133 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import logging
import sys

LOG = logging.getLogger(__name__)


def int_from_bool_as_string(subject):
    """
    Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    return bool_from_string(subject) and 1 or 0


def bool_from_string(subject):
    """
    Interpret a string as a boolean.

    Any string value in:

        ('True', 'true', 'On', 'on', 'Yes', 'yes', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    if isinstance(subject, bool):
        return subject
    if isinstance(subject, basestring):
        if subject.strip().lower() in ('true', 'on', 'yes', '1'):
            return True
    return False


def safe_decode(text, incoming=None, errors='strict'):
    """
    Decodes an incoming str using `incoming` if it is
    not already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
              representation of it.
    :raises TypeError: If text is not an instance of basestring
    """
    if not isinstance(text, basestring):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, unicode):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)


def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """
    Encodes an incoming str/unicode using `encoding`. If
    incoming is not specified, text is expected to
    be encoded with current python's default encoding.
    (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
              representation of it.
    :raises TypeError: If text is not an instance of basestring
    """
    if not isinstance(text, basestring):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, unicode):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)

    return text
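A few hedged examples of the intended behaviour (Python 2 semantics, matching the str/unicode handling above)::

    from cinderclient.openstack.common import strutils

    strutils.bool_from_string('Yes')         # True
    strutils.bool_from_string('0')           # False
    strutils.int_from_bool_as_string('on')   # 1

    u = strutils.safe_decode('caf\xc3\xa9', incoming='utf-8')   # u'caf\xe9'
    s = strutils.safe_encode(u, encoding='utf-8')               # 'caf\xc3\xa9'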
94
cinderclient/openstack/common/version.py
Normal file
@ -0,0 +1,94 @@

# Copyright 2012 OpenStack Foundation
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Utilities for consuming the version from pkg_resources.
"""

import pkg_resources


class VersionInfo(object):

    def __init__(self, package):
        """Object that understands versioning for a package

        :param package: name of the python package, such as glance, or
                        python-glanceclient
        """
        self.package = package
        self.release = None
        self.version = None
        self._cached_version = None

    def __str__(self):
        """Make the VersionInfo object behave like a string."""
        return self.version_string()

    def __repr__(self):
        """Include the name."""
        return "VersionInfo(%s:%s)" % (self.package, self.version_string())

    def _get_version_from_pkg_resources(self):
        """Get the version of the package from the pkg_resources record
        associated with the package."""
        try:
            requirement = pkg_resources.Requirement.parse(self.package)
            provider = pkg_resources.get_provider(requirement)
            return provider.version
        except pkg_resources.DistributionNotFound:
            # The most likely cause for this is running tests in a tree
            # produced from a tarball where the package itself has not been
            # installed into anything. Revert to setup-time logic.
            from cinderclient.openstack.common import setup
            return setup.get_version(self.package)

    def release_string(self):
        """Return the full version of the package including suffixes indicating
        VCS status.
        """
        if self.release is None:
            self.release = self._get_version_from_pkg_resources()

        return self.release

    def version_string(self):
        """Return the short version minus any alpha/beta tags."""
        if self.version is None:
            parts = []
            for part in self.release_string().split('.'):
                if part[0].isdigit():
                    parts.append(part)
                else:
                    break
            self.version = ".".join(parts)

        return self.version

    # Compatibility functions
    canonical_version_string = version_string
    version_string_with_vcs = release_string

    def cached_version_string(self, prefix=""):
        """Generate an object which will expand in a string context to
        the results of version_string(). We do this so that we don't
        call into pkg_resources every time we start up a program when
        passing version information into the CONF constructor, but
        rather only do the calculation when and if a version is requested.
        """
        if not self._cached_version:
            self._cached_version = "%s%s" % (prefix,
                                             self.version_string())
        return self._cached_version
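Roughly how VersionInfo is meant to be consumed; the version strings shown are examples only and depend on the installed distribution or git state::

    from cinderclient.openstack.common import version

    version_info = version.VersionInfo('python-cinderclient')
    version_info.release_string()   # e.g. '1.0.0.a12.gdeadbee'
    version_info.version_string()   # e.g. '1.0.0' (alpha/VCS suffixes stripped)
    "%s" % version_info             # __str__ delegates to version_string()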
77
cinderclient/service_catalog.py
Normal file
@ -0,0 +1,77 @@
# Copyright 2011 OpenStack LLC.
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import cinderclient.exceptions


class ServiceCatalog(object):
    """Helper methods for dealing with a Keystone Service Catalog."""

    def __init__(self, resource_dict):
        self.catalog = resource_dict

    def get_token(self):
        return self.catalog['access']['token']['id']

    def url_for(self, attr=None, filter_value=None,
                service_type=None, endpoint_type='publicURL',
                service_name=None, volume_service_name=None):
        """Fetch the public URL from the Compute service for
        a particular endpoint attribute. If none given, return
        the first. See tests for sample service catalog."""
        matching_endpoints = []
        if 'endpoints' in self.catalog:
            # We have a bastardized service catalog. Treat it special. :/
            for endpoint in self.catalog['endpoints']:
                if not filter_value or endpoint[attr] == filter_value:
                    matching_endpoints.append(endpoint)
            if not matching_endpoints:
                raise cinderclient.exceptions.EndpointNotFound()

        # We don't always get a service catalog back ...
        if 'serviceCatalog' not in self.catalog['access']:
            return None

        # Full catalog ...
        catalog = self.catalog['access']['serviceCatalog']

        for service in catalog:
            if service.get("type") != service_type:
                continue

            if (service_name and service_type == 'compute' and
                    service.get('name') != service_name):
                continue

            if (volume_service_name and service_type == 'volume' and
                    service.get('name') != volume_service_name):
                continue

            endpoints = service['endpoints']
            for endpoint in endpoints:
                if not filter_value or endpoint.get(attr) == filter_value:
                    endpoint["serviceName"] = service.get("name")
                    matching_endpoints.append(endpoint)

        if not matching_endpoints:
            raise cinderclient.exceptions.EndpointNotFound()
        elif len(matching_endpoints) > 1:
            raise cinderclient.exceptions.AmbiguousEndpoints(
                endpoints=matching_endpoints)
        else:
            return matching_endpoints[0][endpoint_type]
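A minimal sketch of url_for() against a hand-built catalog dict; the token and endpoint values are invented, since real catalogs come back from Keystone::

    from cinderclient import service_catalog

    resources = {'access': {
        'token': {'id': 'abc123'},
        'serviceCatalog': [{
            'type': 'volume',
            'name': 'cinder',
            'endpoints': [{'region': 'RegionOne',
                           'publicURL': 'http://volume.example.com:8776/v1/t'}],
        }],
    }}
    catalog = service_catalog.ServiceCatalog(resources)
    catalog.get_token()                        # 'abc123'
    catalog.url_for(attr='region', filter_value='RegionOne',
                    service_type='volume')     # returns the publicURL above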
515
cinderclient/shell.py
Normal file
@ -0,0 +1,515 @@
|
|||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Command-line interface to the OpenStack Cinder API.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import glob
|
||||||
|
import imp
|
||||||
|
import itertools
|
||||||
|
import os
|
||||||
|
import pkgutil
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from cinderclient import client
|
||||||
|
from cinderclient import exceptions as exc
|
||||||
|
import cinderclient.extension
|
||||||
|
from cinderclient.openstack.common import strutils
|
||||||
|
from cinderclient import utils
|
||||||
|
from cinderclient.v1 import shell as shell_v1
|
||||||
|
from cinderclient.v2 import shell as shell_v2
|
||||||
|
|
||||||
|
DEFAULT_OS_VOLUME_API_VERSION = "1"
|
||||||
|
DEFAULT_CINDER_ENDPOINT_TYPE = 'publicURL'
|
||||||
|
DEFAULT_CINDER_SERVICE_TYPE = 'compute'
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CinderClientArgumentParser(argparse.ArgumentParser):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super(CinderClientArgumentParser, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
def error(self, message):
|
||||||
|
"""error(message: string)
|
||||||
|
|
||||||
|
Prints a usage message incorporating the message to stderr and
|
||||||
|
exits.
|
||||||
|
"""
|
||||||
|
self.print_usage(sys.stderr)
|
||||||
|
#FIXME(lzyeval): if changes occur in argparse.ArgParser._check_value
|
||||||
|
choose_from = ' (choose from'
|
||||||
|
progparts = self.prog.partition(' ')
|
||||||
|
self.exit(2, "error: %(errmsg)s\nTry '%(mainp)s help %(subp)s'"
|
||||||
|
" for more information.\n" %
|
||||||
|
{'errmsg': message.split(choose_from)[0],
|
||||||
|
'mainp': progparts[0],
|
||||||
|
'subp': progparts[2]})
|
||||||
|
|
||||||
|
|
||||||
|
class OpenStackCinderShell(object):
|
||||||
|
|
||||||
|
def get_base_parser(self):
|
||||||
|
parser = CinderClientArgumentParser(
|
||||||
|
prog='cinder',
|
||||||
|
description=__doc__.strip(),
|
||||||
|
epilog='See "cinder help COMMAND" '
|
||||||
|
'for help on a specific command.',
|
||||||
|
add_help=False,
|
||||||
|
formatter_class=OpenStackHelpFormatter,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Global arguments
|
||||||
|
parser.add_argument('-h', '--help',
|
||||||
|
action='store_true',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--version',
|
||||||
|
action='version',
|
||||||
|
version=cinderclient.__version__)
|
||||||
|
|
||||||
|
parser.add_argument('--debug',
|
||||||
|
action='store_true',
|
||||||
|
default=utils.env('CINDERCLIENT_DEBUG',
|
||||||
|
default=False),
|
||||||
|
help="Print debugging output")
|
||||||
|
|
||||||
|
parser.add_argument('--os-username',
|
||||||
|
metavar='<auth-user-name>',
|
||||||
|
default=utils.env('OS_USERNAME',
|
||||||
|
'CINDER_USERNAME'),
|
||||||
|
help='Defaults to env[OS_USERNAME].')
|
||||||
|
parser.add_argument('--os_username',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-password',
|
||||||
|
metavar='<auth-password>',
|
||||||
|
default=utils.env('OS_PASSWORD',
|
||||||
|
'CINDER_PASSWORD'),
|
||||||
|
help='Defaults to env[OS_PASSWORD].')
|
||||||
|
parser.add_argument('--os_password',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-tenant-name',
|
||||||
|
metavar='<auth-tenant-name>',
|
||||||
|
default=utils.env('OS_TENANT_NAME',
|
||||||
|
'CINDER_PROJECT_ID'),
|
||||||
|
help='Defaults to env[OS_TENANT_NAME].')
|
||||||
|
parser.add_argument('--os_tenant_name',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-tenant-id',
|
||||||
|
metavar='<auth-tenant-id>',
|
||||||
|
default=utils.env('OS_TENANT_ID',
|
||||||
|
'CINDER_TENANT_ID'),
|
||||||
|
help='Defaults to env[OS_TENANT_ID].')
|
||||||
|
parser.add_argument('--os_tenant_id',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-auth-url',
|
||||||
|
metavar='<auth-url>',
|
||||||
|
default=utils.env('OS_AUTH_URL',
|
||||||
|
'CINDER_URL'),
|
||||||
|
help='Defaults to env[OS_AUTH_URL].')
|
||||||
|
parser.add_argument('--os_auth_url',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-region-name',
|
||||||
|
metavar='<region-name>',
|
||||||
|
default=utils.env('OS_REGION_NAME',
|
||||||
|
'CINDER_REGION_NAME'),
|
||||||
|
help='Defaults to env[OS_REGION_NAME].')
|
||||||
|
parser.add_argument('--os_region_name',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--service-type',
|
||||||
|
metavar='<service-type>',
|
||||||
|
help='Defaults to compute for most actions')
|
||||||
|
parser.add_argument('--service_type',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--service-name',
|
||||||
|
metavar='<service-name>',
|
||||||
|
default=utils.env('CINDER_SERVICE_NAME'),
|
||||||
|
help='Defaults to env[CINDER_SERVICE_NAME]')
|
||||||
|
parser.add_argument('--service_name',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--volume-service-name',
|
||||||
|
metavar='<volume-service-name>',
|
||||||
|
default=utils.env('CINDER_VOLUME_SERVICE_NAME'),
|
||||||
|
help='Defaults to env[CINDER_VOLUME_SERVICE_NAME]')
|
||||||
|
parser.add_argument('--volume_service_name',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--endpoint-type',
|
||||||
|
metavar='<endpoint-type>',
|
||||||
|
default=utils.env('CINDER_ENDPOINT_TYPE',
|
||||||
|
default=DEFAULT_CINDER_ENDPOINT_TYPE),
|
||||||
|
help='Defaults to env[CINDER_ENDPOINT_TYPE] or '
|
||||||
|
+ DEFAULT_CINDER_ENDPOINT_TYPE + '.')
|
||||||
|
parser.add_argument('--endpoint_type',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-volume-api-version',
|
||||||
|
metavar='<compute-api-ver>',
|
||||||
|
default=utils.env('OS_VOLUME_API_VERSION',
|
||||||
|
default=DEFAULT_OS_VOLUME_API_VERSION),
|
||||||
|
help='Accepts 1 or 2, defaults '
|
||||||
|
'to env[OS_VOLUME_API_VERSION].')
|
||||||
|
parser.add_argument('--os_volume_api_version',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--os-cacert',
|
||||||
|
metavar='<ca-certificate>',
|
||||||
|
default=utils.env('OS_CACERT', default=None),
|
||||||
|
help='Specify a CA bundle file to use in '
|
||||||
|
'verifying a TLS (https) server certificate. '
|
||||||
|
'Defaults to env[OS_CACERT]')
|
||||||
|
|
||||||
|
parser.add_argument('--insecure',
|
||||||
|
default=utils.env('CINDERCLIENT_INSECURE',
|
||||||
|
default=False),
|
||||||
|
action='store_true',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
parser.add_argument('--retries',
|
||||||
|
metavar='<retries>',
|
||||||
|
type=int,
|
||||||
|
default=0,
|
||||||
|
help='Number of retries.')
|
||||||
|
|
||||||
|
# FIXME(dtroyer): The args below are here for diablo compatibility,
|
||||||
|
# remove them in the Folsom cycle
|
||||||
|
|
||||||
|
# alias for --os-username, left in for backwards compatibility
|
||||||
|
parser.add_argument('--username',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
# alias for --os-region_name, left in for backwards compatibility
|
||||||
|
parser.add_argument('--region_name',
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
# alias for --os-password, left in for backwards compatibility
|
||||||
|
parser.add_argument('--apikey', '--password', dest='apikey',
|
||||||
|
default=utils.env('CINDER_API_KEY'),
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
# alias for --os-tenant-name, left in for backward compatibility
|
||||||
|
parser.add_argument('--projectid', '--tenant_name', dest='projectid',
|
||||||
|
default=utils.env('CINDER_PROJECT_ID'),
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
# alias for --os-auth-url, left in for backward compatibility
|
||||||
|
parser.add_argument('--url', '--auth_url', dest='url',
|
||||||
|
default=utils.env('CINDER_URL'),
|
||||||
|
help=argparse.SUPPRESS)
|
||||||
|
|
||||||
|
return parser
|
||||||
|
|
||||||
|
def get_subcommand_parser(self, version):
|
||||||
|
parser = self.get_base_parser()
|
||||||
|
|
||||||
|
self.subcommands = {}
|
||||||
|
subparsers = parser.add_subparsers(metavar='<subcommand>')
|
||||||
|
|
||||||
|
try:
|
||||||
|
actions_module = {
|
||||||
|
'1.1': shell_v1,
|
||||||
|
'2': shell_v2,
|
||||||
|
}[version]
|
||||||
|
except KeyError:
|
||||||
|
actions_module = shell_v1
|
||||||
|
|
||||||
|
self._find_actions(subparsers, actions_module)
|
||||||
|
self._find_actions(subparsers, self)
|
||||||
|
|
||||||
|
for extension in self.extensions:
|
||||||
|
self._find_actions(subparsers, extension.module)
|
||||||
|
|
||||||
|
self._add_bash_completion_subparser(subparsers)
|
||||||
|
|
||||||
|
return parser
|
||||||
|
|
||||||
|
def _discover_extensions(self, version):
|
||||||
|
extensions = []
|
||||||
|
for name, module in itertools.chain(
|
||||||
|
self._discover_via_python_path(version),
|
||||||
|
self._discover_via_contrib_path(version)):
|
||||||
|
|
||||||
|
extension = cinderclient.extension.Extension(name, module)
|
||||||
|
extensions.append(extension)
|
||||||
|
|
||||||
|
return extensions
|
||||||
|
|
||||||
|
def _discover_via_python_path(self, version):
|
||||||
|
for (module_loader, name, ispkg) in pkgutil.iter_modules():
|
||||||
|
if name.endswith('python_cinderclient_ext'):
|
||||||
|
if not hasattr(module_loader, 'load_module'):
|
||||||
|
# Python 2.6 compat: actually get an ImpImporter obj
|
||||||
|
module_loader = module_loader.find_module(name)
|
||||||
|
|
||||||
|
module = module_loader.load_module(name)
|
||||||
|
yield name, module
|
||||||
|
|
||||||
|
def _discover_via_contrib_path(self, version):
|
||||||
|
module_path = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
version_str = "v%s" % version.replace('.', '_')
|
||||||
|
ext_path = os.path.join(module_path, version_str, 'contrib')
|
||||||
|
ext_glob = os.path.join(ext_path, "*.py")
|
||||||
|
|
||||||
|
for ext_path in glob.iglob(ext_glob):
|
||||||
|
name = os.path.basename(ext_path)[:-3]
|
||||||
|
|
||||||
|
if name == "__init__":
|
||||||
|
continue
|
||||||
|
|
||||||
|
module = imp.load_source(name, ext_path)
|
||||||
|
yield name, module
|
||||||
|
|
||||||
|
def _add_bash_completion_subparser(self, subparsers):
|
||||||
|
subparser = subparsers.add_parser(
|
||||||
|
'bash_completion',
|
||||||
|
add_help=False,
|
||||||
|
formatter_class=OpenStackHelpFormatter)
|
||||||
|
|
||||||
|
self.subcommands['bash_completion'] = subparser
|
||||||
|
subparser.set_defaults(func=self.do_bash_completion)
|
||||||
|
|
||||||
|
def _find_actions(self, subparsers, actions_module):
|
||||||
|
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
|
||||||
|
# I prefer commands to be hyphen-separated rather than underscore-separated.
|
||||||
|
command = attr[3:].replace('_', '-')
|
||||||
|
callback = getattr(actions_module, attr)
|
||||||
|
desc = callback.__doc__ or ''
|
||||||
|
help = desc.strip().split('\n')[0]
|
||||||
|
arguments = getattr(callback, 'arguments', [])
|
||||||
|
|
||||||
|
subparser = subparsers.add_parser(
|
||||||
|
command,
|
||||||
|
help=help,
|
||||||
|
description=desc,
|
||||||
|
add_help=False,
|
||||||
|
formatter_class=OpenStackHelpFormatter)
|
||||||
|
|
||||||
|
subparser.add_argument('-h', '--help',
|
||||||
|
action='help',
|
||||||
|
help=argparse.SUPPRESS,)
|
||||||
|
|
||||||
|
self.subcommands[command] = subparser
|
||||||
|
for (args, kwargs) in arguments:
|
||||||
|
subparser.add_argument(*args, **kwargs)
|
||||||
|
subparser.set_defaults(func=callback)
|
||||||
|
|
||||||
|
def setup_debugging(self, debug):
|
||||||
|
if not debug:
|
||||||
|
return
|
||||||
|
|
||||||
|
streamhandler = logging.StreamHandler()
|
||||||
|
streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
|
||||||
|
streamhandler.setFormatter(logging.Formatter(streamformat))
|
||||||
|
logger.setLevel(logging.DEBUG)
|
||||||
|
logger.addHandler(streamhandler)
|
||||||
|
|
||||||
|
def main(self, argv):
|
||||||
|
# Parse args once to find version and debug settings
|
||||||
|
parser = self.get_base_parser()
|
||||||
|
(options, args) = parser.parse_known_args(argv)
|
||||||
|
self.setup_debugging(options.debug)
|
||||||
|
|
||||||
|
# build available subcommands based on version
|
||||||
|
self.extensions = self._discover_extensions(
|
||||||
|
options.os_volume_api_version)
|
||||||
|
self._run_extension_hooks('__pre_parse_args__')
|
||||||
|
|
||||||
|
subcommand_parser = self.get_subcommand_parser(
|
||||||
|
options.os_volume_api_version)
|
||||||
|
self.parser = subcommand_parser
|
||||||
|
|
||||||
|
if options.help or not argv:
|
||||||
|
subcommand_parser.print_help()
|
||||||
|
return 0
|
||||||
|
|
||||||
|
args = subcommand_parser.parse_args(argv)
|
||||||
|
self._run_extension_hooks('__post_parse_args__', args)
|
||||||
|
|
||||||
|
# Short-circuit and deal with help right away.
|
||||||
|
if args.func == self.do_help:
|
||||||
|
self.do_help(args)
|
||||||
|
return 0
|
||||||
|
elif args.func == self.do_bash_completion:
|
||||||
|
self.do_bash_completion(args)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
(os_username, os_password, os_tenant_name, os_auth_url,
|
||||||
|
os_region_name, os_tenant_id, endpoint_type, insecure,
|
||||||
|
service_type, service_name, volume_service_name,
|
||||||
|
username, apikey, projectid, url, region_name, cacert) = (
|
||||||
|
args.os_username, args.os_password,
|
||||||
|
args.os_tenant_name, args.os_auth_url,
|
||||||
|
args.os_region_name, args.os_tenant_id,
|
||||||
|
args.endpoint_type, args.insecure,
|
||||||
|
args.service_type, args.service_name,
|
||||||
|
args.volume_service_name, args.username,
|
||||||
|
args.apikey, args.projectid,
|
||||||
|
args.url, args.region_name, args.os_cacert)
|
||||||
|
|
||||||
|
if not endpoint_type:
|
||||||
|
endpoint_type = DEFAULT_CINDER_ENDPOINT_TYPE
|
||||||
|
|
||||||
|
if not service_type:
|
||||||
|
service_type = DEFAULT_CINDER_SERVICE_TYPE
|
||||||
|
service_type = utils.get_service_type(args.func) or service_type
|
||||||
|
|
||||||
|
#FIXME(usrleon): Here should be restrict for project id same as
|
||||||
|
# for os_username or os_password but for compatibility it is not.
|
||||||
|
|
||||||
|
if not utils.isunauthenticated(args.func):
|
||||||
|
if not os_username:
|
||||||
|
if not username:
|
||||||
|
raise exc.CommandError(
|
||||||
|
"You must provide a username "
|
||||||
|
"via either --os-username or env[OS_USERNAME]")
|
||||||
|
else:
|
||||||
|
os_username = username
|
||||||
|
|
||||||
|
if not os_password:
|
||||||
|
if not apikey:
|
||||||
|
raise exc.CommandError("You must provide a password "
|
||||||
|
"via either --os-password or via "
|
||||||
|
"env[OS_PASSWORD]")
|
||||||
|
else:
|
||||||
|
os_password = apikey
|
||||||
|
|
||||||
|
if not (os_tenant_name or os_tenant_id):
|
||||||
|
if not projectid:
|
||||||
|
raise exc.CommandError("You must provide a tenant_id "
|
||||||
|
"via either --os-tenant-id or "
|
||||||
|
"env[OS_TENANT_ID]")
|
||||||
|
else:
|
||||||
|
os_tenant_name = projectid
|
||||||
|
|
||||||
|
if not os_auth_url:
|
||||||
|
if not url:
|
||||||
|
raise exc.CommandError(
|
||||||
|
"You must provide an auth url "
|
||||||
|
"via either --os-auth-url or env[OS_AUTH_URL]")
|
||||||
|
else:
|
||||||
|
os_auth_url = url
|
||||||
|
|
||||||
|
if not os_region_name and region_name:
|
||||||
|
os_region_name = region_name
|
||||||
|
|
||||||
|
if not (os_tenant_name or os_tenant_id):
|
||||||
|
raise exc.CommandError(
|
||||||
|
"You must provide a tenant_id "
|
||||||
|
"via either --os-tenant-id or env[OS_TENANT_ID]")
|
||||||
|
|
||||||
|
if not os_auth_url:
|
||||||
|
raise exc.CommandError(
|
||||||
|
"You must provide an auth url "
|
||||||
|
"via either --os-auth-url or env[OS_AUTH_URL]")
|
||||||
|
|
||||||
|
self.cs = client.Client(options.os_volume_api_version, os_username,
|
||||||
|
os_password, os_tenant_name, os_auth_url,
|
||||||
|
insecure, region_name=os_region_name,
|
||||||
|
tenant_id=os_tenant_id,
|
||||||
|
endpoint_type=endpoint_type,
|
||||||
|
extensions=self.extensions,
|
||||||
|
service_type=service_type,
|
||||||
|
service_name=service_name,
|
||||||
|
volume_service_name=volume_service_name,
|
||||||
|
retries=options.retries,
|
||||||
|
http_log_debug=args.debug,
|
||||||
|
cacert=cacert)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if not utils.isunauthenticated(args.func):
|
||||||
|
self.cs.authenticate()
|
||||||
|
except exc.Unauthorized:
|
||||||
|
raise exc.CommandError("Invalid OpenStack Cinder credentials.")
|
||||||
|
except exc.AuthorizationFailure:
|
||||||
|
raise exc.CommandError("Unable to authorize user")
|
||||||
|
|
||||||
|
args.func(self.cs, args)
|
||||||
|
|
||||||
|
def _run_extension_hooks(self, hook_type, *args, **kwargs):
|
||||||
|
"""Run hooks for all registered extensions."""
|
||||||
|
for extension in self.extensions:
|
||||||
|
extension.run_hooks(hook_type, *args, **kwargs)
|
||||||
|
|
||||||
|
def do_bash_completion(self, args):
|
||||||
|
"""Print arguments for bash_completion.
|
||||||
|
|
||||||
|
Prints all of the commands and options to stdout so that the
|
||||||
|
cinder.bash_completion script doesn't have to hard code them.
|
||||||
|
"""
|
||||||
|
commands = set()
|
||||||
|
options = set()
|
||||||
|
for sc_str, sc in self.subcommands.items():
|
||||||
|
commands.add(sc_str)
|
||||||
|
for option in sc._optionals._option_string_actions.keys():
|
||||||
|
options.add(option)
|
||||||
|
|
||||||
|
commands.remove('bash-completion')
|
||||||
|
commands.remove('bash_completion')
|
||||||
|
print ' '.join(commands | options)
|
||||||
|
|
||||||
|
@utils.arg('command', metavar='<subcommand>', nargs='?',
|
||||||
|
help='Display help for <subcommand>')
|
||||||
|
def do_help(self, args):
|
||||||
|
"""
|
||||||
|
Display help about this program or one of its subcommands.
|
||||||
|
"""
|
||||||
|
if args.command:
|
||||||
|
if args.command in self.subcommands:
|
||||||
|
self.subcommands[args.command].print_help()
|
||||||
|
else:
|
||||||
|
raise exc.CommandError("'%s' is not a valid subcommand" %
|
||||||
|
args.command)
|
||||||
|
else:
|
||||||
|
self.parser.print_help()
|
||||||
|
|
||||||
|
|
||||||
|
# I'm picky about my shell help.
|
||||||
|
class OpenStackHelpFormatter(argparse.HelpFormatter):
|
||||||
|
def start_section(self, heading):
|
||||||
|
# Title-case the headings
|
||||||
|
heading = '%s%s' % (heading[0].upper(), heading[1:])
|
||||||
|
super(OpenStackHelpFormatter, self).start_section(heading)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
try:
|
||||||
|
OpenStackCinderShell().main(map(strutils.safe_decode, sys.argv[1:]))
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print >> sys.stderr, "... terminating cinder client"
|
||||||
|
sys.exit(130)
|
||||||
|
except Exception, e:
|
||||||
|
logger.debug(e, exc_info=1)
|
||||||
|
message = e.message
|
||||||
|
if not isinstance(message, basestring):
|
||||||
|
message = str(message)
|
||||||
|
print >> sys.stderr, "ERROR: %s" % strutils.safe_encode(message)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
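The shell can also be driven programmatically, which is convenient in tests; a hedged sketch, with placeholder credentials and an auth URL that must point at a real Keystone for anything that authenticates::

    from cinderclient import shell

    # help needs no credentials
    shell.OpenStackCinderShell().main(['help'])

    # equivalent to: cinder --os-username demo ... list
    shell.OpenStackCinderShell().main([
        '--os-username', 'demo',
        '--os-password', 'secret',
        '--os-tenant-name', 'demo',
        '--os-auth-url', 'http://keystone.example.com:5000/v2.0',
        'list'])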
266
cinderclient/utils.py
Normal file
@ -0,0 +1,266 @@
|
|||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
import prettytable
|
||||||
|
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from cinderclient.openstack.common import strutils
|
||||||
|
|
||||||
|
|
||||||
|
def arg(*args, **kwargs):
|
||||||
|
"""Decorator for CLI args."""
|
||||||
|
def _decorator(func):
|
||||||
|
add_arg(func, *args, **kwargs)
|
||||||
|
return func
|
||||||
|
return _decorator
|
||||||
|
|
||||||
|
|
||||||
|
def env(*vars, **kwargs):
|
||||||
|
"""
|
||||||
|
Returns the first environment variable that is set;
if none are non-empty, defaults to '' or the keyword arg default.
|
||||||
|
"""
|
||||||
|
for v in vars:
|
||||||
|
value = os.environ.get(v, None)
|
||||||
|
if value:
|
||||||
|
return value
|
||||||
|
return kwargs.get('default', '')
|
||||||
|
|
||||||
|
|
||||||
|
def add_arg(f, *args, **kwargs):
|
||||||
|
"""Bind CLI arguments to a shell.py `do_foo` function."""
|
||||||
|
|
||||||
|
if not hasattr(f, 'arguments'):
|
||||||
|
f.arguments = []
|
||||||
|
|
||||||
|
# NOTE(sirp): avoid dups that can occur when the module is shared across
|
||||||
|
# tests.
|
||||||
|
if (args, kwargs) not in f.arguments:
|
||||||
|
# Because of the semantics of decorator composition, if we just append
|
||||||
|
# to the options list positional options will appear to be backwards.
|
||||||
|
f.arguments.insert(0, (args, kwargs))
|
||||||
|
|
||||||
|
|
||||||
|
def add_resource_manager_extra_kwargs_hook(f, hook):
|
||||||
|
"""Adds hook to bind CLI arguments to ResourceManager calls.
|
||||||
|
|
||||||
|
The `do_foo` calls in shell.py will receive CLI args and then in turn pass
|
||||||
|
them through to the ResourceManager. Before passing through the args, the
|
||||||
|
hooks registered here will be called, giving us a chance to add extra
|
||||||
|
kwargs (taken from the command-line) to what's passed to the
|
||||||
|
ResourceManager.
|
||||||
|
"""
|
||||||
|
if not hasattr(f, 'resource_manager_kwargs_hooks'):
|
||||||
|
f.resource_manager_kwargs_hooks = []
|
||||||
|
|
||||||
|
names = [h.__name__ for h in f.resource_manager_kwargs_hooks]
|
||||||
|
if hook.__name__ not in names:
|
||||||
|
f.resource_manager_kwargs_hooks.append(hook)
|
||||||
|
|
||||||
|
|
||||||
|
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
|
||||||
|
"""Return extra_kwargs by calling resource manager kwargs hooks."""
|
||||||
|
hooks = getattr(f, "resource_manager_kwargs_hooks", [])
|
||||||
|
extra_kwargs = {}
|
||||||
|
for hook in hooks:
|
||||||
|
hook_name = hook.__name__
|
||||||
|
hook_kwargs = hook(args)
|
||||||
|
|
||||||
|
conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys())
|
||||||
|
if conflicting_keys and not allow_conflicts:
|
||||||
|
raise Exception("Hook '%(hook_name)s' is attempting to redefine"
|
||||||
|
" attributes '%(conflicting_keys)s'" % locals())
|
||||||
|
|
||||||
|
extra_kwargs.update(hook_kwargs)
|
||||||
|
|
||||||
|
return extra_kwargs
|
||||||
|
|
||||||
|
|
||||||
|
def unauthenticated(f):
|
||||||
|
"""
|
||||||
|
Adds 'unauthenticated' attribute to decorated function.
|
||||||
|
Usage:
|
||||||
|
@unauthenticated
|
||||||
|
def mymethod(f):
|
||||||
|
...
|
||||||
|
"""
|
||||||
|
f.unauthenticated = True
|
||||||
|
return f
|
||||||
|
|
||||||
|
|
||||||
|
def isunauthenticated(f):
|
||||||
|
"""
|
||||||
|
Checks to see if the function is marked as not requiring authentication
|
||||||
|
with the @unauthenticated decorator. Returns True if decorator is
|
||||||
|
set to True, False otherwise.
|
||||||
|
"""
|
||||||
|
return getattr(f, 'unauthenticated', False)
|
||||||
|
|
||||||
|
|
||||||
|
def service_type(stype):
|
||||||
|
"""
|
||||||
|
Adds 'service_type' attribute to decorated function.
|
||||||
|
Usage:
|
||||||
|
@service_type('volume')
|
||||||
|
def mymethod(f):
|
||||||
|
...
|
||||||
|
"""
|
||||||
|
def inner(f):
|
||||||
|
f.service_type = stype
|
||||||
|
return f
|
||||||
|
return inner
|
||||||
|
|
||||||
|
|
||||||
|
def get_service_type(f):
|
||||||
|
"""
|
||||||
|
Retrieves service type from function
|
||||||
|
"""
|
||||||
|
return getattr(f, 'service_type', None)
|
||||||
|
|
||||||
|
|
||||||
|
def pretty_choice_list(l):
|
||||||
|
return ', '.join("'%s'" % i for i in l)
|
||||||
|
|
||||||
|
|
||||||
|
def print_list(objs, fields, formatters={}):
|
||||||
|
mixed_case_fields = ['serverId']
|
||||||
|
pt = prettytable.PrettyTable([f for f in fields], caching=False)
|
||||||
|
pt.aligns = ['l' for f in fields]
|
||||||
|
|
||||||
|
for o in objs:
|
||||||
|
row = []
|
||||||
|
for field in fields:
|
||||||
|
if field in formatters:
|
||||||
|
row.append(formatters[field](o))
|
||||||
|
else:
|
||||||
|
if field in mixed_case_fields:
|
||||||
|
field_name = field.replace(' ', '_')
|
||||||
|
else:
|
||||||
|
field_name = field.lower().replace(' ', '_')
|
||||||
|
data = getattr(o, field_name, '')
|
||||||
|
row.append(data)
|
||||||
|
pt.add_row(row)
|
||||||
|
|
||||||
|
if len(objs) > 0:
|
||||||
|
print strutils.safe_encode(pt.get_string(sortby=fields[0]))
|
||||||
|
|
||||||
|
|
||||||
|
def print_dict(d, property="Property"):
|
||||||
|
pt = prettytable.PrettyTable([property, 'Value'], caching=False)
|
||||||
|
pt.aligns = ['l', 'l']
|
||||||
|
[pt.add_row(list(r)) for r in d.iteritems()]
|
||||||
|
print strutils.safe_encode(pt.get_string(sortby=property))
|
||||||
|
|
||||||
|
|
||||||
|
def find_resource(manager, name_or_id):
|
||||||
|
"""Helper for the _find_* methods."""
|
||||||
|
# first try to get entity as integer id
|
||||||
|
try:
|
||||||
|
if isinstance(name_or_id, int) or name_or_id.isdigit():
|
||||||
|
return manager.get(int(name_or_id))
|
||||||
|
except exceptions.NotFound:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# now try to get entity as uuid
|
||||||
|
try:
|
||||||
|
uuid.UUID(strutils.safe_decode(name_or_id))
|
||||||
|
return manager.get(name_or_id)
|
||||||
|
except (ValueError, exceptions.NotFound):
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
try:
|
||||||
|
return manager.find(human_id=name_or_id)
|
||||||
|
except exceptions.NotFound:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# finally try to find entity by name
|
||||||
|
try:
|
||||||
|
return manager.find(name=name_or_id)
|
||||||
|
except exceptions.NotFound:
|
||||||
|
try:
|
||||||
|
return manager.find(display_name=name_or_id)
|
||||||
|
except (UnicodeDecodeError, exceptions.NotFound):
|
||||||
|
try:
|
||||||
|
# Volumes do not have a name attribute, only display_name
|
||||||
|
return manager.find(display_name=name_or_id)
|
||||||
|
except exceptions.NotFound:
|
||||||
|
msg = "No %s with a name or ID of '%s' exists." % \
|
||||||
|
(manager.resource_class.__name__.lower(), name_or_id)
|
||||||
|
raise exceptions.CommandError(msg)
|
||||||
|
except exceptions.NoUniqueMatch:
|
||||||
|
msg = ("Multiple %s matches found for '%s', use an ID to be more"
|
||||||
|
" specific." % (manager.resource_class.__name__.lower(),
|
||||||
|
name_or_id))
|
||||||
|
raise exceptions.CommandError(msg)
|
||||||
|
def _format_servers_list_networks(server):
    output = []
    for (network, addresses) in server.networks.items():
        if len(addresses) == 0:
            continue
        addresses_csv = ', '.join(addresses)
        group = "%s=%s" % (network, addresses_csv)
        output.append(group)

    return '; '.join(output)


class HookableMixin(object):
    """Mixin so classes can register and run hooks."""
    _hooks_map = {}

    @classmethod
    def add_hook(cls, hook_type, hook_func):
        if hook_type not in cls._hooks_map:
            cls._hooks_map[hook_type] = []

        cls._hooks_map[hook_type].append(hook_func)

    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        hook_funcs = cls._hooks_map.get(hook_type) or []
        for hook_func in hook_funcs:
            hook_func(*args, **kwargs)


def safe_issubclass(*args):
    """Like issubclass, but will just return False if not a class."""
    try:
        if issubclass(*args):
            return True
    except TypeError:
        pass

    return False


def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    return getattr(sys.modules[mod_str], class_str)


_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')


# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.

    From Django's "django/template/defaultfilters.py".
    """
    import unicodedata
    if not isinstance(value, unicode):
        value = unicode(value)
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = unicode(_slugify_strip_re.sub('', value).strip().lower())
    return _slugify_hyphenate_re.sub('-', value)
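HookableMixin keeps its registry in a single class-level dict, so hooks
registered through any subclass are visible to every class that inherits the
mixin. A small sketch of registering and running a hook; the hook type below
is the one ShareManager._action fires, while the callback itself is made up
for illustration::

    from cinderclient import utils

    def log_body(body, **kwargs):
        # Receives whatever positional/keyword args run_hooks() passes on.
        print(body)

    class MyManager(utils.HookableMixin):
        pass

    MyManager.add_hook('modify_body_for_action', log_body)
    MyManager.run_hooks('modify_body_for_action', {'os-allow_access': None})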
17
cinderclient/v1/__init__.py
Normal file
@@ -0,0 +1,17 @@
# Copyright (c) 2012 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient.v1.client import Client
89
cinderclient/v1/client.py
Normal file
@@ -0,0 +1,89 @@
from cinderclient import client
from cinderclient.v1 import limits
from cinderclient.v1 import quota_classes
from cinderclient.v1 import quotas
from cinderclient.v1 import shares
from cinderclient.v1 import share_snapshots
from cinderclient.v1 import volumes
from cinderclient.v1 import volume_snapshots
from cinderclient.v1 import volume_types
from cinderclient.v1 import volume_backups
from cinderclient.v1 import volume_backups_restore


class Client(object):
    """
    Top-level object to access the OpenStack Volume API.

    Create an instance with your creds::

        >>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)

    Then call methods on its managers::

        >>> client.volumes.list()
        ...

    """

    def __init__(self, username, api_key, project_id=None, auth_url='',
                 insecure=False, timeout=None, tenant_id=None,
                 proxy_tenant_id=None, proxy_token=None, region_name=None,
                 endpoint_type='publicURL', extensions=None,
                 service_type='volume', service_name=None,
                 volume_service_name=None, retries=None,
                 http_log_debug=False,
                 cacert=None):
        # FIXME(comstud): Rename the api_key argument above when we
        # know it's not being used as keyword argument
        password = api_key
        self.limits = limits.LimitsManager(self)

        # extensions
        self.volumes = volumes.VolumeManager(self)
        self.volume_snapshots = volume_snapshots.SnapshotManager(self)
        self.volume_types = volume_types.VolumeTypeManager(self)
        self.quota_classes = quota_classes.QuotaClassSetManager(self)
        self.quotas = quotas.QuotaSetManager(self)
        self.backups = volume_backups.VolumeBackupManager(self)
        self.restores = volume_backups_restore.VolumeBackupRestoreManager(self)
        self.shares = shares.ShareManager(self)
        self.share_snapshots = share_snapshots.ShareSnapshotManager(self)

        # Add in any extensions...
        if extensions:
            for extension in extensions:
                if extension.manager_class:
                    setattr(self, extension.name,
                            extension.manager_class(self))

        self.client = client.HTTPClient(
            username,
            password,
            project_id,
            auth_url,
            insecure=insecure,
            timeout=timeout,
            tenant_id=tenant_id,
            proxy_token=proxy_token,
            proxy_tenant_id=proxy_tenant_id,
            region_name=region_name,
            endpoint_type=endpoint_type,
            service_type=service_type,
            service_name=service_name,
            volume_service_name=volume_service_name,
            retries=retries,
            http_log_debug=http_log_debug,
            cacert=cacert)

    def authenticate(self):
        """
        Authenticate against the server.

        Normally this is called automatically when you first access the API,
        but you can call this method to force authentication right now.

        Returns on success; raises :exc:`exceptions.Unauthorized` if the
        credentials are wrong.
        """
        self.client.authenticate()
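Authentication is performed lazily the first time the API is accessed, but it
can be forced up front with authenticate(). A short sketch, with placeholder
credentials and endpoint::

    from cinderclient.v1 import client

    cinder = client.Client('demo', 'secret', 'demo_project',
                           'http://keystone.example.com:5000/v2.0/')
    cinder.authenticate()    # raises exceptions.Unauthorized on bad creds
    for vol in cinder.volumes.list():
        print(vol.id)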
0
cinderclient/v1/contrib/__init__.py
Normal file
47
cinderclient/v1/contrib/list_extensions.py
Normal file
@@ -0,0 +1,47 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import base
from cinderclient import utils


class ListExtResource(base.Resource):
    @property
    def summary(self):
        descr = self.description.strip()
        if not descr:
            return '??'
        lines = descr.split("\n")
        if len(lines) == 1:
            return lines[0]
        else:
            return lines[0] + "..."


class ListExtManager(base.Manager):
    resource_class = ListExtResource

    def show_all(self):
        return self._list("/extensions", 'extensions')


@utils.service_type('volume')
def do_list_extensions(client, _args):
    """List all the os-api extensions that are available."""
    extensions = client.list_extensions.show_all()
    fields = ["Name", "Summary", "Alias", "Updated"]
    utils.print_list(extensions, fields)
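do_list_extensions is intended to be picked up by the shell as a subcommand;
the manager can also be used directly once the module has been attached to a
client as an extension. A rough sketch, assuming it has been attached under
the name list_extensions::

    for ext in cinder.list_extensions.show_all():
        print("%s: %s" % (ext.name, ext.summary))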
79
cinderclient/v1/limits.py
Normal file
@@ -0,0 +1,79 @@
# Copyright 2011 OpenStack LLC.

from cinderclient import base


class Limits(base.Resource):
    """A collection of RateLimit and AbsoluteLimit objects."""

    def __repr__(self):
        return "<Limits>"

    @property
    def absolute(self):
        for (name, value) in self._info['absolute'].items():
            yield AbsoluteLimit(name, value)

    @property
    def rate(self):
        for group in self._info['rate']:
            uri = group['uri']
            regex = group['regex']
            for rate in group['limit']:
                yield RateLimit(rate['verb'], uri, regex, rate['value'],
                                rate['remaining'], rate['unit'],
                                rate['next-available'])


class RateLimit(object):
    """Data model that represents a flattened view of a single rate limit."""

    def __init__(self, verb, uri, regex, value, remain,
                 unit, next_available):
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = value
        self.remain = remain
        self.unit = unit
        self.next_available = next_available

    def __eq__(self, other):
        return self.uri == other.uri \
            and self.regex == other.regex \
            and self.value == other.value \
            and self.verb == other.verb \
            and self.remain == other.remain \
            and self.unit == other.unit \
            and self.next_available == other.next_available

    def __repr__(self):
        return "<RateLimit: method=%s uri=%s>" % (self.verb, self.uri)


class AbsoluteLimit(object):
    """Data model that represents a single absolute limit."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __eq__(self, other):
        return self.value == other.value and self.name == other.name

    def __repr__(self):
        return "<AbsoluteLimit: name=%s>" % (self.name)


class LimitsManager(base.Manager):
    """Manager object used to interact with limits resource."""

    resource_class = Limits

    def get(self):
        """
        Get the current set of limits.

        :rtype: :class:`Limits`
        """
        return self._get("/limits", "limits")
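Limits.absolute and Limits.rate are generators that flatten the nested
/limits payload into AbsoluteLimit and RateLimit objects. A sketch of walking
both, assuming an authenticated v1 client named cinder::

    limits = cinder.limits.get()
    for absolute in limits.absolute:
        print("%s = %s" % (absolute.name, absolute.value))
    for rate in limits.rate:
        print("%s %s: %s of %s left per %s" % (rate.verb, rate.uri,
                                               rate.remain, rate.value,
                                               rate.unit))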
52
cinderclient/v1/quota_classes.py
Normal file
@@ -0,0 +1,52 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import base


class QuotaClassSet(base.Resource):

    @property
    def id(self):
        """QuotaClassSet does not have an 'id' attribute, but base.Resource
        needs one to self-refresh, and QuotaClassSet is indexed by
        class_name."""
        return self.class_name

    def update(self, *args, **kwargs):
        self.manager.update(self.class_name, *args, **kwargs)


class QuotaClassSetManager(base.ManagerWithFind):
    resource_class = QuotaClassSet

    def get(self, class_name):
        return self._get("/os-quota-class-sets/%s" % (class_name),
                         "quota_class_set")

    def update(self,
               class_name,
               volumes=None,
               gigabytes=None):

        body = {'quota_class_set': {
                'class_name': class_name,
                'volumes': volumes,
                'gigabytes': gigabytes}}

        for key in body['quota_class_set'].keys():
            if body['quota_class_set'][key] is None:
                body['quota_class_set'].pop(key)

        self._update('/os-quota-class-sets/%s' % (class_name), body)
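QuotaClassSetManager.update builds the full quota_class_set body and then
drops every key whose value is None, so only the quotas that were actually
passed reach the API. A short sketch, with an illustrative class name::

    # Only 'gigabytes' ends up in the PUT body; 'volumes' is left alone.
    cinder.quota_classes.update('default', gigabytes=2000)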
55
cinderclient/v1/quotas.py
Normal file
@@ -0,0 +1,55 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import base


class QuotaSet(base.Resource):

    @property
    def id(self):
        """QuotaSet does not have an 'id' attribute, but base.Resource needs
        one to self-refresh, and QuotaSet is indexed by tenant_id."""
        return self.tenant_id

    def update(self, *args, **kwargs):
        self.manager.update(self.tenant_id, *args, **kwargs)


class QuotaSetManager(base.ManagerWithFind):
    resource_class = QuotaSet

    def get(self, tenant_id):
        if hasattr(tenant_id, 'tenant_id'):
            tenant_id = tenant_id.tenant_id
        return self._get("/os-quota-sets/%s" % (tenant_id), "quota_set")

    def update(self, tenant_id, volumes=None, snapshots=None, gigabytes=None):

        body = {'quota_set': {
                'tenant_id': tenant_id,
                'volumes': volumes,
                'snapshots': snapshots,
                'gigabytes': gigabytes}}

        for key in body['quota_set'].keys():
            if body['quota_set'][key] is None:
                body['quota_set'].pop(key)

        self._update('/os-quota-sets/%s' % (tenant_id), body)

    def defaults(self, tenant_id):
        return self._get('/os-quota-sets/%s/defaults' % tenant_id,
                         'quota_set')
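QuotaSetManager.get accepts either a tenant id string or any object with a
tenant_id attribute, and update() strips None values the same way the quota
class manager does. Sketch, with a placeholder tenant id::

    tenant_id = 'tenant-uuid'    # placeholder
    current = cinder.quotas.get(tenant_id)
    defaults = cinder.quotas.defaults(tenant_id)
    cinder.quotas.update(tenant_id, volumes=20, gigabytes=1500)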
91
cinderclient/v1/share_snapshots.py
Normal file
@@ -0,0 +1,91 @@
# Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface for the share snapshots extension."""

import os
import urllib

from cinderclient import base
from cinderclient import utils


class ShareSnapshot(base.Resource):
    """Represent a snapshot of a share."""

    def __repr__(self):
        return "<ShareSnapshot: %s>" % self.id

    def delete(self):
        """Delete this snapshot."""
        self.manager.delete(self)


class ShareSnapshotManager(base.ManagerWithFind):
    """Manage :class:`ShareSnapshot` resources."""
    resource_class = ShareSnapshot

    def create(self, share_id, force=False, name=None, description=None):
        """Create a snapshot of the given share.

        :param share_id: The ID of the share to snapshot.
        :param force: If force is True, create a snapshot even if the
                      share is busy. Default is False.
        :param name: Name of the snapshot
        :param description: Description of the snapshot
        :rtype: :class:`ShareSnapshot`
        """
        body = {'share-snapshot': {'share_id': share_id,
                                   'force': force,
                                   'name': name,
                                   'description': description}}
        return self._create('/share-snapshots', body, 'share-snapshot')

    def get(self, snapshot_id):
        """Get a snapshot.

        :param snapshot_id: The ID of the snapshot to get.
        :rtype: :class:`ShareSnapshot`
        """
        return self._get('/share-snapshots/%s' % snapshot_id,
                         'share-snapshot')

    def list(self, detailed=True, search_opts=None):
        """Get a list of all snapshots of shares.

        :rtype: list of :class:`ShareSnapshot`
        """
        if search_opts:
            query_string = urllib.urlencode([(key, value)
                                             for (key, value)
                                             in search_opts.items()
                                             if value])
            if query_string:
                query_string = "?%s" % (query_string,)
        else:
            query_string = ''

        if detailed:
            path = "/share-snapshots/detail%s" % (query_string,)
        else:
            path = "/share-snapshots%s" % (query_string,)

        return self._list(path, 'share-snapshots')

    def delete(self, snapshot):
        """Delete a snapshot of a share.

        :param snapshot: The :class:`ShareSnapshot` to delete.
        """
        self._delete("/share-snapshots/%s" % base.getid(snapshot))
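ShareSnapshotManager.list turns any truthy search_opts entries into a query
string and picks either the detail or the plain listing path, while create(),
get() and delete() map straight onto the /share-snapshots resource. Sketch,
with placeholder IDs::

    snap = cinder.share_snapshots.create('share-uuid', force=False,
                                         name='nightly',
                                         description='nightly snapshot')
    creating = cinder.share_snapshots.list(search_opts={'status': 'creating'})
    cinder.share_snapshots.delete(snap)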
182
cinderclient/v1/shares.py
Normal file
@@ -0,0 +1,182 @@
# Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface for the shares extension."""

import collections
import os
import re
import urllib

from cinderclient import base
from cinderclient import exceptions
from cinderclient import utils


class Share(base.Resource):
    """A share is extra block-level storage for OpenStack instances."""
    def __repr__(self):
        return "<Share: %s>" % self.id

    def delete(self):
        """Delete this share."""
        self.manager.delete(self)

    def allow(self, access_type, access):
        """Allow access to a share."""
        self._validate_access(access_type, access)
        return self.manager.allow(self, access_type, access)

    def deny(self, id):
        """Deny access from IP to a share."""
        return self.manager.deny(self, id)

    def access_list(self):
        """Get the access list for a share."""
        return self.manager.access_list(self)

    def _validate_access(self, access_type, access):
        if access_type == 'ip':
            self._validate_ip_range(access)
        elif access_type == 'passwd':
            self._validate_username(access)
        else:
            raise exceptions.CommandError(
                'Only ip and passwd type are supported')

    @staticmethod
    def _validate_username(access):
        valid_username_re = '\w{4,32}'
        username = access
        if not re.match(valid_username_re, username):
            exc_str = 'Invalid user name. Must be alphanum 4-32 chars long'
            raise exceptions.CommandError(exc_str)

    @staticmethod
    def _validate_ip_range(ip_range):
        ip_range = ip_range.split('/')
        exc_str = ('Supported ip format examples:\n'
                   '\t10.0.0.2, 10.0.0.*, 10.0.0.0/24')
        if len(ip_range) > 2:
            raise exceptions.CommandError(exc_str)
        allow_asterisk = (len(ip_range) == 1)
        ip_range = ip_range[0].split('.')
        if len(ip_range) != 4:
            raise exceptions.CommandError(exc_str)
        for item in ip_range:
            try:
                if 0 <= int(item) <= 255:
                    continue
                raise ValueError()
            except ValueError:
                if not (allow_asterisk and item == '*'):
                    raise exceptions.CommandError(exc_str)


class ShareManager(base.ManagerWithFind):
    """Manage :class:`Share` resources."""
    resource_class = Share

    def create(self, share_proto, size, snapshot_id=None, name=None,
               description=None):
        """Create NAS.

        :param size: Size of NAS in GB
        :param snapshot_id: ID of the snapshot
        :param name: Name of the NAS
        :param description: Short description of a share
        :param share_proto: Type of NAS (NFS or CIFS)
        :rtype: :class:`Share`
        """
        body = {'share': {'size': size,
                          'snapshot_id': snapshot_id,
                          'name': name,
                          'description': description,
                          'share_proto': share_proto}}
        return self._create('/shares', body, 'share')

    def get(self, share_id):
        """Get a share.

        :param share_id: The ID of the share to get.
        :rtype: :class:`Share`
        """
        return self._get("/shares/%s" % share_id, "share")

    def list(self, detailed=True, search_opts=None):
        """Get a list of all shares.

        :rtype: list of :class:`Share`
        """
        if search_opts:
            query_string = urllib.urlencode([(key, value)
                                             for (key, value)
                                             in search_opts.items()
                                             if value])
            if query_string:
                query_string = "?%s" % (query_string,)
        else:
            query_string = ''

        if detailed:
            path = "/shares/detail%s" % (query_string,)
        else:
            path = "/shares%s" % (query_string,)

        return self._list(path, 'shares')

    def delete(self, share):
        """Delete a share.

        :param share: The :class:`Share` to delete.
        """
        self._delete("/shares/%s" % base.getid(share))

    def allow(self, share, access_type, access):
        """Allow access from IP to a share.

        :param share: The :class:`Share` to allow access to.
        :param access_type: string that represents access type ('ip','domain')
        :param access: string that represents access ('127.0.0.1')
        """
        return self._action('os-allow_access', share,
                            {'access_type': access_type,
                             'access_to': access})

    def deny(self, share, id):
        """Deny access from IP to a share.

        :param share: The :class:`Share` to deny access to.
        :param id: string that represents the id of the access rule
        """
        return self._action('os-deny_access', share, {'access_id': id})

    def access_list(self, share):
        """Get access list to the share."""
        access_list = self._action("os-access_list", share)[1]["access_list"]
        if access_list:
            t = collections.namedtuple('Access', access_list[0].keys())
            return [t(*value.values()) for value in access_list]
        else:
            return []

    def _action(self, action, share, info=None, **kwargs):
        """Perform a share 'action'."""
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/shares/%s/action' % base.getid(share)
        return self.api.client.post(url, body=body)


#########################
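Share.allow validates the rule on the client before posting the
os-allow_access action: 'ip' rules must look like a dotted address, an
address with a trailing '*', or an address with a prefix length, and 'passwd'
rules must be a 4-32 character word. A sketch of the allow/deny round trip,
with placeholder values; the per-rule attribute names mirror whatever fields
the API returns::

    share = cinder.shares.create('NFS', 1, name='myshare')
    share.allow('ip', '10.0.0.0/24')
    rules = share.access_list()    # list of namedtuples, one per rule
    share.deny(rules[0].id)        # assumes the API reports an 'id' field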
974
cinderclient/v1/shell.py
Normal file
@@ -0,0 +1,974 @@
# Copyright 2010 Jacob Kaplan-Moss

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import os
import sys
import time

from cinderclient import exceptions
from cinderclient import utils


def _poll_for_status(poll_fn, obj_id, action, final_ok_states,
                     poll_period=5, show_progress=True):
    """Block while an action is being performed, periodically printing
    progress.
    """
    def print_progress(progress):
        if show_progress:
            msg = ('\rInstance %(action)s... %(progress)s%% complete'
                   % dict(action=action, progress=progress))
        else:
            msg = '\rInstance %(action)s...' % dict(action=action)

        sys.stdout.write(msg)
        sys.stdout.flush()

    print
    while True:
        obj = poll_fn(obj_id)
        status = obj.status.lower()
        progress = getattr(obj, 'progress', None) or 0
        if status in final_ok_states:
            print_progress(100)
            print "\nFinished"
            break
        elif status == "error":
            print "\nError %(action)s instance" % locals()
            break
        else:
            print_progress(progress)
            time.sleep(poll_period)
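_poll_for_status loops on a getter until the resource reaches one of
final_ok_states (or hits 'error'), printing a simple progress line each pass.
A hedged sketch of how a caller might drive it; the volume object and states
here are illustrative::

    _poll_for_status(cs.volumes.get, volume.id, 'creating',
                     final_ok_states=['available'], poll_period=5)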
def _find_volume(cs, volume):
    """Get a volume by ID."""
    return utils.find_resource(cs.volumes, volume)


def _find_volume_snapshot(cs, snapshot):
    """Get a volume snapshot by ID."""
    return utils.find_resource(cs.volume_snapshots, snapshot)


def _find_backup(cs, backup):
    """Get a backup by ID."""
    return utils.find_resource(cs.backups, backup)


def _print_volume(volume):
    utils.print_dict(volume._info)


def _print_volume_snapshot(snapshot):
    utils.print_dict(snapshot._info)


def _find_share(cs, share):
    """Get a share by ID."""
    return utils.find_resource(cs.shares, share)


def _print_share(cs, share):
    info = share._info.copy()
    info.pop('links', None)
    utils.print_dict(info)


def _find_share_snapshot(cs, snapshot):
    """Get a snapshot by ID."""
    return utils.find_resource(cs.share_snapshots, snapshot)


def _print_share_snapshot(cs, snapshot):
    info = snapshot._info.copy()
    info.pop('links', None)
    utils.print_dict(info)


def _translate_keys(collection, convert):
    for item in collection:
        keys = item.__dict__.keys()
        for from_key, to_key in convert:
            if from_key in keys and to_key not in keys:
                setattr(item, to_key, item._info[from_key])


def _translate_volume_keys(collection):
    convert = [('displayName', 'display_name'), ('volumeType', 'volume_type')]
    _translate_keys(collection, convert)


def _translate_volume_snapshot_keys(collection):
    convert = [('displayName', 'display_name'), ('volumeId', 'volume_id')]
    _translate_keys(collection, convert)


def _extract_metadata(args):
    metadata = {}
    for metadatum in args.metadata:
        # unset doesn't require a val, so we have the if/else
        if '=' in metadatum:
            (key, value) = metadatum.split('=', 1)
        else:
            key = metadatum
            value = None

        metadata[key] = value
    return metadata
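_extract_metadata accepts both key=value pairs and bare keys (bare keys are
what `unset` passes), mapping a bare key to None. A quick sketch of the
parsing, with a hand-built argparse Namespace standing in for real CLI args::

    import argparse

    args = argparse.Namespace(metadata=['tier=gold', 'scratch'])
    _extract_metadata(args)
    # -> {'tier': 'gold', 'scratch': None}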
@utils.arg(
    '--all-tenants',
    dest='all_tenants',
    metavar='<0|1>',
    nargs='?',
    type=int,
    const=1,
    default=0,
    help='Display information from all tenants (Admin only).')
@utils.arg(
    '--all_tenants',
    nargs='?',
    type=int,
    const=1,
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Filter results by display-name')
@utils.arg(
    '--status',
    metavar='<status>',
    default=None,
    help='Filter results by status')
@utils.service_type('volume')
def do_list(cs, args):
    """List all the volumes."""
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.display_name,
        'status': args.status,
    }
    volumes = cs.volumes.list(search_opts=search_opts)
    _translate_volume_keys(volumes)

    # Create a list of servers to which the volume is attached
    for vol in volumes:
        servers = [s.get('server_id') for s in vol.attachments]
        setattr(vol, 'attached_to', ','.join(map(str, servers)))
    utils.print_list(volumes, ['ID', 'Status', 'Display Name',
                     'Size', 'Volume Type', 'Bootable', 'Attached to'])


@utils.arg('volume', metavar='<volume>', help='ID of the volume.')
@utils.service_type('volume')
def do_show(cs, args):
    """Show details about a volume."""
    volume = _find_volume(cs, args.volume)
    _print_volume(volume)


@utils.arg('size',
           metavar='<size>',
           type=int,
           help='Size of volume in GB')
@utils.arg(
    '--snapshot-id',
    metavar='<snapshot-id>',
    default=None,
    help='Create volume from snapshot id (Optional, Default=None)')
@utils.arg(
    '--snapshot_id',
    help=argparse.SUPPRESS)
@utils.arg(
    '--source-volid',
    metavar='<source-volid>',
    default=None,
    help='Create volume from volume id (Optional, Default=None)')
@utils.arg(
    '--source_volid',
    help=argparse.SUPPRESS)
@utils.arg(
    '--image-id',
    metavar='<image-id>',
    default=None,
    help='Create volume from image id (Optional, Default=None)')
@utils.arg(
    '--image_id',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Volume name (Optional, Default=None)')
@utils.arg(
    '--display_name',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-description',
    metavar='<display-description>',
    default=None,
    help='Volume description (Optional, Default=None)')
@utils.arg(
    '--display_description',
    help=argparse.SUPPRESS)
@utils.arg(
    '--volume-type',
    metavar='<volume-type>',
    default=None,
    help='Volume type (Optional, Default=None)')
@utils.arg(
    '--volume_type',
    help=argparse.SUPPRESS)
@utils.arg(
    '--availability-zone',
    metavar='<availability-zone>',
    default=None,
    help='Availability zone for volume (Optional, Default=None)')
@utils.arg(
    '--availability_zone',
    help=argparse.SUPPRESS)
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Metadata key=value pairs (Optional, Default=None)',
           default=None)
@utils.service_type('volume')
def do_create(cs, args):
    """Add a new volume."""
    volume_metadata = None
    if args.metadata is not None:
        volume_metadata = _extract_metadata(args)

    volume = cs.volumes.create(args.size,
                               args.snapshot_id,
                               args.source_volid,
                               args.display_name,
                               args.display_description,
                               args.volume_type,
                               availability_zone=args.availability_zone,
                               imageRef=args.image_id,
                               metadata=volume_metadata)
    _print_volume(volume)


@utils.arg('volume', metavar='<volume>', help='ID of the volume to delete.')
@utils.service_type('volume')
def do_delete(cs, args):
    """Remove a volume."""
    volume = _find_volume(cs, args.volume)
    volume.delete()


@utils.arg('volume', metavar='<volume>', help='ID of the volume to delete.')
@utils.service_type('volume')
def do_force_delete(cs, args):
    """Attempt forced removal of a volume, regardless of its state."""
    volume = _find_volume(cs, args.volume)
    volume.force_delete()


@utils.arg('volume', metavar='<volume>', help='ID of the volume to rename.')
@utils.arg('display_name', nargs='?', metavar='<display-name>',
           help='New display-name for the volume.')
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional volume description. (Default=None)',
           default=None)
@utils.service_type('volume')
def do_rename(cs, args):
    """Rename a volume."""
    kwargs = {}
    if args.display_name is not None:
        kwargs['display_name'] = args.display_name
    if args.display_description is not None:
        kwargs['display_description'] = args.display_description
    _find_volume(cs, args.volume).update(**kwargs)


@utils.arg('volume',
           metavar='<volume>',
           help='ID of the volume to update metadata on.')
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="Actions: 'set' or 'unset'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='+',
           default=[],
           help='Metadata to set/unset (only key is necessary on unset)')
@utils.service_type('volume')
def do_metadata(cs, args):
    """Set or Delete metadata on a volume."""
    volume = _find_volume(cs, args.volume)
    metadata = _extract_metadata(args)

    if args.action == 'set':
        cs.volumes.set_metadata(volume, metadata)
    elif args.action == 'unset':
        cs.volumes.delete_metadata(volume, metadata.keys())


@utils.arg(
    '--all-tenants',
    dest='all_tenants',
    metavar='<0|1>',
    nargs='?',
    type=int,
    const=1,
    default=0,
    help='Display information from all tenants (Admin only).')
@utils.arg(
    '--all_tenants',
    nargs='?',
    type=int,
    const=1,
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Filter results by display-name')
@utils.arg(
    '--status',
    metavar='<status>',
    default=None,
    help='Filter results by status')
@utils.arg(
    '--volume-id',
    metavar='<volume-id>',
    default=None,
    help='Filter results by volume-id')
@utils.service_type('volume')
def do_snapshot_list(cs, args):
    """List all the snapshots."""
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.display_name,
        'status': args.status,
        'volume_id': args.volume_id,
    }

    snapshots = cs.volume_snapshots.list(search_opts=search_opts)
    _translate_volume_snapshot_keys(snapshots)
    utils.print_list(snapshots,
                     ['ID', 'Volume ID', 'Status', 'Display Name', 'Size',
                      'Source Type'])


@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.service_type('volume')
def do_snapshot_show(cs, args):
    """Show details about a snapshot."""
    snapshot = _find_volume_snapshot(cs, args.snapshot)
    _print_volume_snapshot(snapshot)


@utils.arg('volume_id',
           metavar='<volume-id>',
           help='ID of the volume to snapshot')
@utils.arg('--force',
           metavar='<True|False>',
           help='Optional flag to indicate whether '
           'to snapshot a volume even if it\'s '
           'attached to an instance. (Default=False)',
           default=False)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Optional snapshot name. (Default=None)')
@utils.arg(
    '--display_name',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-description',
    metavar='<display-description>',
    default=None,
    help='Optional snapshot description. (Default=None)')
@utils.arg(
    '--display_description',
    help=argparse.SUPPRESS)
@utils.service_type('volume')
def do_snapshot_create(cs, args):
    """Add a new snapshot."""
    snapshot = cs.volume_snapshots.create(args.volume_id,
                                          args.force,
                                          args.display_name,
                                          args.display_description)
    _print_volume_snapshot(snapshot)


@utils.arg('snapshot_id',
           metavar='<snapshot-id>',
           help='ID of the snapshot to delete.')
@utils.service_type('volume')
def do_snapshot_delete(cs, args):
    """Remove a snapshot."""
    snapshot = _find_volume_snapshot(cs, args.snapshot_id)
    snapshot.delete()


@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.arg('display_name', nargs='?', metavar='<display-name>',
           help='New display-name for the snapshot.')
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional snapshot description. (Default=None)',
           default=None)
@utils.service_type('volume')
def do_snapshot_rename(cs, args):
    """Rename a snapshot."""
    kwargs = {}
    if args.display_name is not None:
        kwargs['display_name'] = args.display_name
    if args.display_description is not None:
        kwargs['display_description'] = args.display_description
    _find_volume_snapshot(cs, args.snapshot).update(**kwargs)


def _print_volume_type_list(vtypes):
    utils.print_list(vtypes, ['ID', 'Name'])


def _print_type_and_extra_specs_list(vtypes):
    formatters = {'extra_specs': _print_type_extra_specs}
    utils.print_list(vtypes, ['ID', 'Name', 'extra_specs'], formatters)


@utils.service_type('volume')
def do_type_list(cs, args):
    """Print a list of available 'volume types'."""
    vtypes = cs.volume_types.list()
    _print_volume_type_list(vtypes)


@utils.service_type('volume')
def do_extra_specs_list(cs, args):
    """Print a list of current 'volume types and extra specs' (Admin Only)."""
    vtypes = cs.volume_types.list()
    _print_type_and_extra_specs_list(vtypes)


@utils.arg('name',
           metavar='<name>',
           help="Name of the new volume type")
@utils.service_type('volume')
def do_type_create(cs, args):
    """Create a new volume type."""
    vtype = cs.volume_types.create(args.name)
    _print_volume_type_list([vtype])


@utils.arg('id',
           metavar='<id>',
           help="Unique ID of the volume type to delete")
@utils.service_type('volume')
def do_type_delete(cs, args):
    """Delete a specific volume type."""
    cs.volume_types.delete(args.id)


@utils.arg('vtype',
           metavar='<vtype>',
           help="Name or ID of the volume type")
@utils.arg('action',
           metavar='<action>',
           choices=['set', 'unset'],
           help="Actions: 'set' or 'unset'")
@utils.arg('metadata',
           metavar='<key=value>',
           nargs='*',
           default=None,
           help='Extra_specs to set/unset (only key is necessary on unset)')
@utils.service_type('volume')
def do_type_key(cs, args):
    """Set or unset extra_spec for a volume type."""
    vtype = _find_volume_type(cs, args.vtype)

    if args.metadata is not None:
        keypair = _extract_metadata(args)

        if args.action == 'set':
            vtype.set_keys(keypair)
        elif args.action == 'unset':
            vtype.unset_keys(keypair.keys())


def do_endpoints(cs, args):
    """Discover the endpoints returned by the authentication service."""
    catalog = cs.client.service_catalog.catalog
    for e in catalog['access']['serviceCatalog']:
        utils.print_dict(e['endpoints'][0], e['name'])


def do_credentials(cs, args):
    """Show user credentials returned from auth."""
    catalog = cs.client.service_catalog.catalog
    utils.print_dict(catalog['access']['user'], "User Credentials")
    utils.print_dict(catalog['access']['token'], "Token")


_quota_resources = ['volumes', 'snapshots', 'gigabytes']


def _quota_show(quotas):
    quota_dict = {}
    for resource in _quota_resources:
        quota_dict[resource] = getattr(quotas, resource, None)
    utils.print_dict(quota_dict)


def _quota_update(manager, identifier, args):
    updates = {}
    for resource in _quota_resources:
        val = getattr(args, resource, None)
        if val is not None:
            updates[resource] = val

    if updates:
        manager.update(identifier, **updates)


@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to list the quotas for.')
@utils.service_type('volume')
def do_quota_show(cs, args):
    """List the quotas for a tenant."""

    _quota_show(cs.quotas.get(args.tenant))


@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to list the default quotas for.')
@utils.service_type('volume')
def do_quota_defaults(cs, args):
    """List the default quotas for a tenant."""

    _quota_show(cs.quotas.defaults(args.tenant))


@utils.arg('tenant', metavar='<tenant_id>',
           help='UUID of tenant to set the quotas for.')
@utils.arg('--volumes',
           metavar='<volumes>',
           type=int, default=None,
           help='New value for the "volumes" quota.')
@utils.arg('--snapshots',
           metavar='<snapshots>',
           type=int, default=None,
           help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes',
           metavar='<gigabytes>',
           type=int, default=None,
           help='New value for the "gigabytes" quota.')
@utils.service_type('volume')
def do_quota_update(cs, args):
    """Update the quotas for a tenant."""

    _quota_update(cs.quotas, args.tenant, args)


@utils.arg('class_name', metavar='<class>',
           help='Name of quota class to list the quotas for.')
@utils.service_type('volume')
def do_quota_class_show(cs, args):
    """List the quotas for a quota class."""

    _quota_show(cs.quota_classes.get(args.class_name))


@utils.arg('class_name', metavar='<class>',
           help='Name of quota class to set the quotas for.')
@utils.arg('--volumes',
           metavar='<volumes>',
           type=int, default=None,
           help='New value for the "volumes" quota.')
@utils.arg('--snapshots',
           metavar='<snapshots>',
           type=int, default=None,
           help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes',
           metavar='<gigabytes>',
           type=int, default=None,
           help='New value for the "gigabytes" quota.')
@utils.service_type('volume')
def do_quota_class_update(cs, args):
    """Update the quotas for a quota class."""

    _quota_update(cs.quota_classes, args.class_name, args)


@utils.service_type('volume')
def do_absolute_limits(cs, args):
    """Print a list of absolute limits for a user."""
    limits = cs.limits.get().absolute
    columns = ['Name', 'Value']
    utils.print_list(limits, columns)


@utils.service_type('volume')
def do_rate_limits(cs, args):
    """Print a list of rate limits for a user."""
    limits = cs.limits.get().rate
    columns = ['Verb', 'URI', 'Value', 'Remain', 'Unit', 'Next_Available']
    utils.print_list(limits, columns)


def _print_type_extra_specs(vol_type):
    try:
        return vol_type.get_keys()
    except exceptions.NotFound:
        return "N/A"


def _find_volume_type(cs, vtype):
    """Get a volume type by name or ID."""
    return utils.find_resource(cs.volume_types, vtype)


@utils.arg('volume_id',
           metavar='<volume-id>',
           help='ID of the volume to upload to an image')
@utils.arg('--force',
           metavar='<True|False>',
           help='Optional flag to indicate whether '
           'to upload a volume even if it\'s '
           'attached to an instance. (Default=False)',
           default=False)
@utils.arg('--container-format',
           metavar='<container-format>',
           help='Optional type for container format '
           '(Default=bare)',
           default='bare')
@utils.arg('--disk-format',
           metavar='<disk-format>',
           help='Optional type for disk format '
           '(Default=raw)',
           default='raw')
@utils.arg('image_name',
           metavar='<image-name>',
           help='Name for created image')
@utils.service_type('volume')
def do_upload_to_image(cs, args):
    """Upload volume to image service as image."""
    volume = _find_volume(cs, args.volume_id)
    volume.upload_to_image(args.force,
                           args.image_name,
                           args.container_format,
                           args.disk_format)


@utils.arg('volume', metavar='<volume>',
           help='ID of the volume to backup.')
@utils.arg('--container', metavar='<container>',
           help='Optional Backup container name. (Default=None)',
           default=None)
@utils.arg('--display-name', metavar='<display-name>',
           help='Optional backup name. (Default=None)',
           default=None)
@utils.arg('--display-description', metavar='<display-description>',
           help='Optional backup description. (Default=None)',
           default=None)
@utils.service_type('volume')
def do_backup_create(cs, args):
    """Creates a backup."""
    cs.backups.create(args.volume,
                      args.container,
                      args.display_name,
                      args.display_description)


@utils.arg('backup', metavar='<backup>', help='ID of the backup.')
@utils.service_type('volume')
def do_backup_show(cs, args):
    """Show details about a backup."""
    backup = _find_backup(cs, args.backup)
    info = dict()
    info.update(backup._info)

    if 'links' in info:
        info.pop('links')

    utils.print_dict(info)


@utils.service_type('volume')
def do_backup_list(cs, args):
    """List all the backups."""
    backups = cs.backups.list()
    columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'Object Count',
               'Container']
    utils.print_list(backups, columns)


@utils.arg('backup', metavar='<backup>',
           help='ID of the backup to delete.')
@utils.service_type('volume')
def do_backup_delete(cs, args):
    """Remove a backup."""
    backup = _find_backup(cs, args.backup)
    backup.delete()


@utils.arg('backup', metavar='<backup>',
           help='ID of the backup to restore.')
@utils.arg('--volume-id', metavar='<volume-id>',
           help='Optional ID of the volume to restore to.',
           default=None)
@utils.service_type('volume')
def do_backup_restore(cs, args):
    """Restore a backup."""
    cs.restores.restore(args.backup,
                        args.volume_id)


@utils.arg(
    'share_protocol',
    metavar='<share_protocol>',
    type=str,
    help='Share type (NFS or CIFS)')
@utils.arg(
    'size',
    metavar='<size>',
    type=int,
    help='Share size in GB')
@utils.arg(
    '--snapshot-id',
    metavar='<snapshot-id>',
    help='Optional snapshot id to create the share from. (Default=None)',
    default=None)
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    help='Optional share name. (Default=None)',
    default=None)
@utils.arg(
    '--display_name',
    help=argparse.SUPPRESS)
@utils.arg(
    '--display-description',
    metavar='<display-description>',
    help='Optional share description. (Default=None)',
    default=None)
@utils.arg(
    '--display_description',
    help=argparse.SUPPRESS)
@utils.service_type('volume')
def do_share_create(cs, args):
    """Creates new NAS storage (NFS or CIFS)."""
    share = cs.shares.create(args.share_protocol, args.size, args.snapshot_id,
                             args.display_name, args.display_description)
    _print_share(cs, share)


@utils.arg(
    'share',
    metavar='<share>',
    help='ID of the NAS to delete.')
@utils.service_type('volume')
def do_share_delete(cs, args):
    """Deletes NAS storage."""
    cs.shares.delete(args.share)


@utils.arg(
    'share',
    metavar='<share>',
    help='ID of the NAS share.')
@utils.service_type('volume')
def do_share_show(cs, args):
    """Show details about a NAS share."""
    share = _find_share(cs, args.share)
    _print_share(cs, share)


@utils.arg(
    'share',
    metavar='<share>',
    help='ID of the NAS share to modify.')
@utils.arg(
    'access_type',
    metavar='<access_type>',
    help='access rule type (only "ip" is supported).')
@utils.arg(
    'access_to',
    metavar='<access_to>',
    help='Value that defines access')
@utils.service_type('volume')
def do_share_allow(cs, args):
    """Allow access to the share."""
    share = _find_share(cs, args.share)
    share.allow(args.access_type, args.access_to)


@utils.arg(
    'share',
    metavar='<share>',
    help='ID of the NAS share to modify.')
@utils.arg(
    'id',
    metavar='<id>',
    help='id of the access rule to be deleted.')
@utils.service_type('volume')
def do_share_deny(cs, args):
    """Deny access to a share."""
    share = _find_share(cs, args.share)
    share.deny(args.id)


@utils.arg(
    'share',
    metavar='<share>',
    help='ID of the share.')
@utils.service_type('volume')
def do_share_access_list(cs, args):
    """Show access list for share."""
    share = _find_share(cs, args.share)
    access_list = share.access_list()
    utils.print_list(access_list, ['id', 'access type', 'access to', 'state'])


@utils.arg(
    '--all-tenants',
    dest='all_tenants',
    metavar='<0|1>',
    nargs='?',
    type=int,
    const=1,
    default=0,
    help='Display information from all tenants (Admin only).')
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Filter results by name')
@utils.arg(
    '--status',
    metavar='<status>',
    default=None,
    help='Filter results by status')
@utils.service_type('volume')
def do_share_list(cs, args):
    """List all NAS shares."""
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.display_name,
        'status': args.status,
    }
    shares = cs.shares.list(search_opts=search_opts)
    utils.print_list(shares,
                     ['ID', 'Display Name', 'Size', 'Share Proto', 'Status',
                      'Export location'])


@utils.arg(
    '--all-tenants',
    dest='all_tenants',
    metavar='<0|1>',
    nargs='?',
    type=int,
    const=1,
    default=0,
    help='Display information from all tenants (Admin only).')
@utils.arg(
    '--display-name',
    metavar='<display-name>',
    default=None,
    help='Filter results by name')
@utils.arg(
    '--status',
    metavar='<status>',
    default=None,
    help='Filter results by status')
@utils.arg(
    '--share-id',
    metavar='<share-id>',
    default=None,
    help='Filter results by share-id')
@utils.service_type('volume')
def do_share_snapshot_list(cs, args):
    """List all the snapshots."""
    all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
    search_opts = {
        'all_tenants': all_tenants,
        'display_name': args.display_name,
        'status': args.status,
        'share_id': args.share_id,
    }
    snapshots = cs.share_snapshots.list(search_opts=search_opts)
    utils.print_list(snapshots,
                     ['ID', 'Share ID', 'Status', 'Display Name',
                      'Share Size'])


@utils.arg(
    'snapshot',
    metavar='<snapshot>',
    help='ID of the snapshot.')
@utils.service_type('volume')
def do_share_snapshot_show(cs, args):
    """Show details about a snapshot."""
    snapshot = _find_share_snapshot(cs, args.snapshot)
    _print_share_snapshot(cs, snapshot)


@utils.arg(
    'share_id',
    metavar='<share-id>',
    help='ID of the share to snapshot')
@utils.arg(
    '--force',
    metavar='<True|False>',
    help='Optional flag to indicate whether '
    'to snapshot a share even if it\'s busy.'
    ' (Default=False)',
    default=False)
@utils.arg(
    '--display-name',
|
||||||
|
metavar='<display-name>',
|
||||||
|
default=None,
|
||||||
|
help='Optional snapshot name. (Default=None)')
|
||||||
|
@utils.arg(
|
||||||
|
'--display-description',
|
||||||
|
metavar='<display-description>',
|
||||||
|
default=None,
|
||||||
|
help='Optional snapshot description. (Default=None)')
|
||||||
|
@utils.service_type('volume')
|
||||||
|
def do_share_snapshot_create(cs, args):
|
||||||
|
"""Add a new snapshot."""
|
||||||
|
snapshot = cs.share_snapshots.create(args.share_id,
|
||||||
|
args.force,
|
||||||
|
args.display_name,
|
||||||
|
args.display_description)
|
||||||
|
_print_share_snapshot(cs, snapshot)
|
||||||
|
|
||||||
|
|
||||||
|
@utils.arg(
|
||||||
|
'snapshot_id',
|
||||||
|
metavar='<snapshot-id>',
|
||||||
|
help='ID of the snapshot to delete.')
|
||||||
|
@utils.service_type('volume')
|
||||||
|
def do_share_snapshot_delete(cs, args):
|
||||||
|
"""Remove a snapshot."""
|
||||||
|
snapshot = _find_share_snapshot(cs, args.snapshot_id)
|
||||||
|
snapshot.delete()
|
76
cinderclient/v1/volume_backups.py
Normal file
76
cinderclient/v1/volume_backups.py
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Volume Backups interface (1.1 extension).
|
||||||
|
"""
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeBackup(base.Resource):
|
||||||
|
"""A volume backup is a block level backup of a volume."""
|
||||||
|
def __repr__(self):
|
||||||
|
return "<VolumeBackup: %s>" % self.id
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
"""Delete this volume backup."""
|
||||||
|
return self.manager.delete(self)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeBackupManager(base.ManagerWithFind):
|
||||||
|
"""Manage :class:`VolumeBackup` resources."""
|
||||||
|
resource_class = VolumeBackup
|
||||||
|
|
||||||
|
def create(self, volume_id, container=None,
|
||||||
|
name=None, description=None):
|
||||||
|
"""Create a volume backup.
|
||||||
|
|
||||||
|
:param volume_id: The ID of the volume to backup.
|
||||||
|
:param container: The name of the backup service container.
|
||||||
|
:param name: The name of the backup.
|
||||||
|
:param description: The description of the backup.
|
||||||
|
:rtype: :class:`VolumeBackup`
|
||||||
|
"""
|
||||||
|
body = {'backup': {'volume_id': volume_id,
|
||||||
|
'container': container,
|
||||||
|
'name': name,
|
||||||
|
'description': description}}
|
||||||
|
return self._create('/backups', body, 'backup')
|
||||||
|
|
||||||
|
def get(self, backup_id):
|
||||||
|
"""Show details of a volume backup.
|
||||||
|
|
||||||
|
:param backup_id: The ID of the backup to display.
|
||||||
|
:rtype: :class:`VolumeBackup`
|
||||||
|
"""
|
||||||
|
return self._get("/backups/%s" % backup_id, "backup")
|
||||||
|
|
||||||
|
def list(self, detailed=True):
|
||||||
|
"""Get a list of all volume backups.
|
||||||
|
|
||||||
|
:rtype: list of :class:`VolumeBackup`
|
||||||
|
"""
|
||||||
|
if detailed is True:
|
||||||
|
return self._list("/backups/detail", "backups")
|
||||||
|
else:
|
||||||
|
return self._list("/backups", "backups")
|
||||||
|
|
||||||
|
def delete(self, backup):
|
||||||
|
"""Delete a volume backup.
|
||||||
|
|
||||||
|
:param backup: The :class:`VolumeBackup` to delete.
|
||||||
|
"""
|
||||||
|
self._delete("/backups/%s" % base.getid(backup))
|
43
cinderclient/v1/volume_backups_restore.py
Normal file
43
cinderclient/v1/volume_backups_restore.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Volume Backups Restore interface (1.1 extension).
|
||||||
|
|
||||||
|
This is part of the Volume Backups interface.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeBackupsRestore(base.Resource):
|
||||||
|
"""A Volume Backups Restore represents a restore operation."""
|
||||||
|
def __repr__(self):
|
||||||
|
return "<VolumeBackupsRestore: %s>" % self.id
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeBackupRestoreManager(base.ManagerWithFind):
|
||||||
|
"""Manage :class:`VolumeBackupsRestore` resources."""
|
||||||
|
resource_class = VolumeBackupsRestore
|
||||||
|
|
||||||
|
def restore(self, backup_id, volume_id=None):
|
||||||
|
"""Restore a backup to a volume.
|
||||||
|
|
||||||
|
:param backup_id: The ID of the backup to restore.
|
||||||
|
:param volume_id: The ID of the volume to restore the backup to.
|
||||||
|
:rtype: :class:`Restore`
|
||||||
|
"""
|
||||||
|
body = {'restore': {'volume_id': volume_id}}
|
||||||
|
return self._create("/backups/%s/restore" % backup_id,
|
||||||
|
body, "restore")
|
130
cinderclient/v1/volume_snapshots.py
Normal file
130
cinderclient/v1/volume_snapshots.py
Normal file
@ -0,0 +1,130 @@
|
|||||||
|
# Copyright 2011 Denali Systems, Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Volume snapshot interface (1.1 extension).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import urllib
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class Snapshot(base.Resource):
|
||||||
|
"""
|
||||||
|
A Snapshot is a point-in-time snapshot of an openstack volume.
|
||||||
|
"""
|
||||||
|
def __repr__(self):
|
||||||
|
return "<Snapshot: %s>" % self.id
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
"""
|
||||||
|
Delete this snapshot.
|
||||||
|
"""
|
||||||
|
self.manager.delete(self)
|
||||||
|
|
||||||
|
def update(self, **kwargs):
|
||||||
|
"""
|
||||||
|
Update the display_name or display_description for this snapshot.
|
||||||
|
"""
|
||||||
|
self.manager.update(self, **kwargs)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def progress(self):
|
||||||
|
return self._info.get('os-extended-snapshot-attributes:progress')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def project_id(self):
|
||||||
|
return self._info.get('os-extended-snapshot-attributes:project_id')
|
||||||
|
|
||||||
|
|
||||||
|
class SnapshotManager(base.ManagerWithFind):
|
||||||
|
"""
|
||||||
|
Manage :class:`Snapshot` resources.
|
||||||
|
"""
|
||||||
|
resource_class = Snapshot
|
||||||
|
|
||||||
|
def create(self, volume_id, force=False,
|
||||||
|
display_name=None, display_description=None):
|
||||||
|
|
||||||
|
"""
|
||||||
|
Create a snapshot of the given volume.
|
||||||
|
|
||||||
|
:param volume_id: The ID of the volume to snapshot.
|
||||||
|
:param force: If force is True, create a snapshot even if the volume is
|
||||||
|
attached to an instance. Default is False.
|
||||||
|
:param display_name: Name of the snapshot
|
||||||
|
:param display_description: Description of the snapshot
|
||||||
|
:rtype: :class:`Snapshot`
|
||||||
|
"""
|
||||||
|
body = {'snapshot': {'volume_id': volume_id,
|
||||||
|
'force': force,
|
||||||
|
'display_name': display_name,
|
||||||
|
'display_description': display_description}}
|
||||||
|
return self._create('/snapshots', body, 'snapshot')
|
||||||
|
|
||||||
|
def get(self, snapshot_id):
|
||||||
|
"""
|
||||||
|
Get a snapshot.
|
||||||
|
|
||||||
|
:param snapshot_id: The ID of the snapshot to get.
|
||||||
|
:rtype: :class:`Snapshot`
|
||||||
|
"""
|
||||||
|
return self._get("/snapshots/%s" % snapshot_id, "snapshot")
|
||||||
|
|
||||||
|
def list(self, detailed=True, search_opts=None):
|
||||||
|
"""
|
||||||
|
Get a list of all snapshots.
|
||||||
|
|
||||||
|
:rtype: list of :class:`Snapshot`
|
||||||
|
"""
|
||||||
|
|
||||||
|
if search_opts is None:
|
||||||
|
search_opts = {}
|
||||||
|
|
||||||
|
qparams = {}
|
||||||
|
|
||||||
|
for opt, val in search_opts.iteritems():
|
||||||
|
if val:
|
||||||
|
qparams[opt] = val
|
||||||
|
|
||||||
|
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
|
||||||
|
|
||||||
|
detail = ""
|
||||||
|
if detailed:
|
||||||
|
detail = "/detail"
|
||||||
|
|
||||||
|
return self._list("/snapshots%s%s" % (detail, query_string),
|
||||||
|
"snapshots")
|
||||||
|
|
||||||
|
def delete(self, snapshot):
|
||||||
|
"""
|
||||||
|
Delete a snapshot.
|
||||||
|
|
||||||
|
:param snapshot: The :class:`Snapshot` to delete.
|
||||||
|
"""
|
||||||
|
self._delete("/snapshots/%s" % base.getid(snapshot))
|
||||||
|
|
||||||
|
def update(self, snapshot, **kwargs):
|
||||||
|
"""
|
||||||
|
Update the display_name or display_description for a snapshot.
|
||||||
|
|
||||||
|
:param snapshot: The :class:`Snapshot` to delete.
|
||||||
|
"""
|
||||||
|
if not kwargs:
|
||||||
|
return
|
||||||
|
|
||||||
|
body = {"snapshot": kwargs}
|
||||||
|
|
||||||
|
self._update("/snapshots/%s" % base.getid(snapshot), body)
|
122
cinderclient/v1/volume_types.py
Normal file
122
cinderclient/v1/volume_types.py
Normal file
@ -0,0 +1,122 @@
|
|||||||
|
# Copyright (c) 2011 Rackspace US, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
Volume Type interface.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeType(base.Resource):
|
||||||
|
"""
|
||||||
|
A Volume Type is the type of volume to be created
|
||||||
|
"""
|
||||||
|
def __repr__(self):
|
||||||
|
return "<VolumeType: %s>" % self.name
|
||||||
|
|
||||||
|
def get_keys(self):
|
||||||
|
"""
|
||||||
|
Get extra specs from a volume type.
|
||||||
|
|
||||||
|
:param vol_type: The :class:`VolumeType` to get extra specs from
|
||||||
|
"""
|
||||||
|
_resp, body = self.manager.api.client.get(
|
||||||
|
"/types/%s/extra_specs" %
|
||||||
|
base.getid(self))
|
||||||
|
return body["extra_specs"]
|
||||||
|
|
||||||
|
def set_keys(self, metadata):
|
||||||
|
"""
|
||||||
|
Set extra specs on a volume type.
|
||||||
|
|
||||||
|
:param type : The :class:`VolumeType` to set extra spec on
|
||||||
|
:param metadata: A dict of key/value pairs to be set
|
||||||
|
"""
|
||||||
|
body = {'extra_specs': metadata}
|
||||||
|
return self.manager._create(
|
||||||
|
"/types/%s/extra_specs" % base.getid(self),
|
||||||
|
body,
|
||||||
|
"extra_specs",
|
||||||
|
return_raw=True)
|
||||||
|
|
||||||
|
def unset_keys(self, keys):
|
||||||
|
"""
|
||||||
|
Unset extra specs on a volue type.
|
||||||
|
|
||||||
|
:param type_id: The :class:`VolumeType` to unset extra spec on
|
||||||
|
:param keys: A list of keys to be unset
|
||||||
|
"""
|
||||||
|
|
||||||
|
# NOTE(jdg): This wasn't actually doing all of the keys before
|
||||||
|
# the return in the loop resulted in ony ONE key being unset.
|
||||||
|
# since on success the return was NONE, we'll only interrupt the loop
|
||||||
|
# and return if there's an error
|
||||||
|
resp = None
|
||||||
|
for k in keys:
|
||||||
|
resp = self.manager._delete(
|
||||||
|
"/types/%s/extra_specs/%s" % (
|
||||||
|
base.getid(self), k))
|
||||||
|
if resp is not None:
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypeManager(base.ManagerWithFind):
|
||||||
|
"""
|
||||||
|
Manage :class:`VolumeType` resources.
|
||||||
|
"""
|
||||||
|
resource_class = VolumeType
|
||||||
|
|
||||||
|
def list(self):
|
||||||
|
"""
|
||||||
|
Get a list of all volume types.
|
||||||
|
|
||||||
|
:rtype: list of :class:`VolumeType`.
|
||||||
|
"""
|
||||||
|
return self._list("/types", "volume_types")
|
||||||
|
|
||||||
|
def get(self, volume_type):
|
||||||
|
"""
|
||||||
|
Get a specific volume type.
|
||||||
|
|
||||||
|
:param volume_type: The ID of the :class:`VolumeType` to get.
|
||||||
|
:rtype: :class:`VolumeType`
|
||||||
|
"""
|
||||||
|
return self._get("/types/%s" % base.getid(volume_type), "volume_type")
|
||||||
|
|
||||||
|
def delete(self, volume_type):
|
||||||
|
"""
|
||||||
|
Delete a specific volume_type.
|
||||||
|
|
||||||
|
:param volume_type: The ID of the :class:`VolumeType` to get.
|
||||||
|
"""
|
||||||
|
self._delete("/types/%s" % base.getid(volume_type))
|
||||||
|
|
||||||
|
def create(self, name):
|
||||||
|
"""
|
||||||
|
Create a volume type.
|
||||||
|
|
||||||
|
:param name: Descriptive name of the volume type
|
||||||
|
:rtype: :class:`VolumeType`
|
||||||
|
"""
|
||||||
|
|
||||||
|
body = {
|
||||||
|
"volume_type": {
|
||||||
|
"name": name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return self._create("/types", body, "volume_type")
|
328
cinderclient/v1/volumes.py
Normal file
328
cinderclient/v1/volumes.py
Normal file
@ -0,0 +1,328 @@
|
|||||||
|
# Copyright 2011 Denali Systems, Inc.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Volume interface (1.1 extension).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import urllib
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class Volume(base.Resource):
|
||||||
|
"""A volume is an extra block level storage to the OpenStack instances."""
|
||||||
|
def __repr__(self):
|
||||||
|
return "<Volume: %s>" % self.id
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
"""Delete this volume."""
|
||||||
|
self.manager.delete(self)
|
||||||
|
|
||||||
|
def update(self, **kwargs):
|
||||||
|
"""Update the display_name or display_description for this volume."""
|
||||||
|
self.manager.update(self, **kwargs)
|
||||||
|
|
||||||
|
def attach(self, instance_uuid, mountpoint):
|
||||||
|
"""Set attachment metadata.
|
||||||
|
|
||||||
|
:param instance_uuid: uuid of the attaching instance.
|
||||||
|
:param mountpoint: mountpoint on the attaching instance.
|
||||||
|
"""
|
||||||
|
return self.manager.attach(self, instance_uuid, mountpoint)
|
||||||
|
|
||||||
|
def detach(self):
|
||||||
|
"""Clear attachment metadata."""
|
||||||
|
return self.manager.detach(self)
|
||||||
|
|
||||||
|
def reserve(self, volume):
|
||||||
|
"""Reserve this volume."""
|
||||||
|
return self.manager.reserve(self)
|
||||||
|
|
||||||
|
def unreserve(self, volume):
|
||||||
|
"""Unreserve this volume."""
|
||||||
|
return self.manager.unreserve(self)
|
||||||
|
|
||||||
|
def begin_detaching(self, volume):
|
||||||
|
"""Begin detaching volume."""
|
||||||
|
return self.manager.begin_detaching(self)
|
||||||
|
|
||||||
|
def roll_detaching(self, volume):
|
||||||
|
"""Roll detaching volume."""
|
||||||
|
return self.manager.roll_detaching(self)
|
||||||
|
|
||||||
|
def initialize_connection(self, volume, connector):
|
||||||
|
"""Initialize a volume connection.
|
||||||
|
|
||||||
|
:param connector: connector dict from nova.
|
||||||
|
"""
|
||||||
|
return self.manager.initialize_connection(self, connector)
|
||||||
|
|
||||||
|
def terminate_connection(self, volume, connector):
|
||||||
|
"""Terminate a volume connection.
|
||||||
|
|
||||||
|
:param connector: connector dict from nova.
|
||||||
|
"""
|
||||||
|
return self.manager.terminate_connection(self, connector)
|
||||||
|
|
||||||
|
def set_metadata(self, volume, metadata):
|
||||||
|
"""Set or Append metadata to a volume.
|
||||||
|
|
||||||
|
:param type : The :class: `Volume` to set metadata on
|
||||||
|
:param metadata: A dict of key/value pairs to set
|
||||||
|
"""
|
||||||
|
return self.manager.set_metadata(self, metadata)
|
||||||
|
|
||||||
|
def upload_to_image(self, force, image_name, container_format,
|
||||||
|
disk_format):
|
||||||
|
"""Upload a volume to image service as an image."""
|
||||||
|
self.manager.upload_to_image(self, force, image_name, container_format,
|
||||||
|
disk_format)
|
||||||
|
|
||||||
|
def force_delete(self):
|
||||||
|
"""Delete the specified volume ignoring its current state.
|
||||||
|
|
||||||
|
:param volume: The UUID of the volume to force-delete.
|
||||||
|
"""
|
||||||
|
self.manager.force_delete(self)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeManager(base.ManagerWithFind):
|
||||||
|
"""
|
||||||
|
Manage :class:`Volume` resources.
|
||||||
|
"""
|
||||||
|
resource_class = Volume
|
||||||
|
|
||||||
|
def create(self, size, snapshot_id=None, source_volid=None,
|
||||||
|
display_name=None, display_description=None,
|
||||||
|
volume_type=None, user_id=None,
|
||||||
|
project_id=None, availability_zone=None,
|
||||||
|
metadata=None, imageRef=None):
|
||||||
|
"""
|
||||||
|
Create a volume.
|
||||||
|
|
||||||
|
:param size: Size of volume in GB
|
||||||
|
:param snapshot_id: ID of the snapshot
|
||||||
|
:param display_name: Name of the volume
|
||||||
|
:param display_description: Description of the volume
|
||||||
|
:param volume_type: Type of volume
|
||||||
|
:rtype: :class:`Volume`
|
||||||
|
:param user_id: User id derived from context
|
||||||
|
:param project_id: Project id derived from context
|
||||||
|
:param availability_zone: Availability Zone to use
|
||||||
|
:param metadata: Optional metadata to set on volume creation
|
||||||
|
:param imageRef: reference to an image stored in glance
|
||||||
|
:param source_volid: ID of source volume to clone from
|
||||||
|
"""
|
||||||
|
|
||||||
|
if metadata is None:
|
||||||
|
volume_metadata = {}
|
||||||
|
else:
|
||||||
|
volume_metadata = metadata
|
||||||
|
|
||||||
|
body = {'volume': {'size': size,
|
||||||
|
'snapshot_id': snapshot_id,
|
||||||
|
'display_name': display_name,
|
||||||
|
'display_description': display_description,
|
||||||
|
'volume_type': volume_type,
|
||||||
|
'user_id': user_id,
|
||||||
|
'project_id': project_id,
|
||||||
|
'availability_zone': availability_zone,
|
||||||
|
'status': "creating",
|
||||||
|
'attach_status': "detached",
|
||||||
|
'metadata': volume_metadata,
|
||||||
|
'imageRef': imageRef,
|
||||||
|
'source_volid': source_volid,
|
||||||
|
}}
|
||||||
|
return self._create('/volumes', body, 'volume')
|
||||||
|
|
||||||
|
def get(self, volume_id):
|
||||||
|
"""
|
||||||
|
Get a volume.
|
||||||
|
|
||||||
|
:param volume_id: The ID of the volume to delete.
|
||||||
|
:rtype: :class:`Volume`
|
||||||
|
"""
|
||||||
|
return self._get("/volumes/%s" % volume_id, "volume")
|
||||||
|
|
||||||
|
def list(self, detailed=True, search_opts=None):
|
||||||
|
"""
|
||||||
|
Get a list of all volumes.
|
||||||
|
|
||||||
|
:rtype: list of :class:`Volume`
|
||||||
|
"""
|
||||||
|
if search_opts is None:
|
||||||
|
search_opts = {}
|
||||||
|
|
||||||
|
qparams = {}
|
||||||
|
|
||||||
|
for opt, val in search_opts.iteritems():
|
||||||
|
if val:
|
||||||
|
qparams[opt] = val
|
||||||
|
|
||||||
|
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
|
||||||
|
|
||||||
|
detail = ""
|
||||||
|
if detailed:
|
||||||
|
detail = "/detail"
|
||||||
|
|
||||||
|
return self._list("/volumes%s%s" % (detail, query_string),
|
||||||
|
"volumes")
|
||||||
|
|
||||||
|
def delete(self, volume):
|
||||||
|
"""
|
||||||
|
Delete a volume.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` to delete.
|
||||||
|
"""
|
||||||
|
self._delete("/volumes/%s" % base.getid(volume))
|
||||||
|
|
||||||
|
def update(self, volume, **kwargs):
|
||||||
|
"""
|
||||||
|
Update the display_name or display_description for a volume.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` to delete.
|
||||||
|
"""
|
||||||
|
if not kwargs:
|
||||||
|
return
|
||||||
|
|
||||||
|
body = {"volume": kwargs}
|
||||||
|
|
||||||
|
self._update("/volumes/%s" % base.getid(volume), body)
|
||||||
|
|
||||||
|
def _action(self, action, volume, info=None, **kwargs):
|
||||||
|
"""
|
||||||
|
Perform a volume "action."
|
||||||
|
"""
|
||||||
|
body = {action: info}
|
||||||
|
self.run_hooks('modify_body_for_action', body, **kwargs)
|
||||||
|
url = '/volumes/%s/action' % base.getid(volume)
|
||||||
|
return self.api.client.post(url, body=body)
|
||||||
|
|
||||||
|
def attach(self, volume, instance_uuid, mountpoint):
|
||||||
|
"""
|
||||||
|
Set attachment metadata.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID)
|
||||||
|
you would like to attach.
|
||||||
|
:param instance_uuid: uuid of the attaching instance.
|
||||||
|
:param mountpoint: mountpoint on the attaching instance.
|
||||||
|
"""
|
||||||
|
return self._action('os-attach',
|
||||||
|
volume,
|
||||||
|
{'instance_uuid': instance_uuid,
|
||||||
|
'mountpoint': mountpoint})
|
||||||
|
|
||||||
|
def detach(self, volume):
|
||||||
|
"""
|
||||||
|
Clear attachment metadata.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID)
|
||||||
|
you would like to detach.
|
||||||
|
"""
|
||||||
|
return self._action('os-detach', volume)
|
||||||
|
|
||||||
|
def reserve(self, volume):
|
||||||
|
"""
|
||||||
|
Reserve this volume.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID)
|
||||||
|
you would like to reserve.
|
||||||
|
"""
|
||||||
|
return self._action('os-reserve', volume)
|
||||||
|
|
||||||
|
def unreserve(self, volume):
|
||||||
|
"""
|
||||||
|
Unreserve this volume.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID)
|
||||||
|
you would like to unreserve.
|
||||||
|
"""
|
||||||
|
return self._action('os-unreserve', volume)
|
||||||
|
|
||||||
|
def begin_detaching(self, volume):
|
||||||
|
"""
|
||||||
|
Begin detaching this volume.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID)
|
||||||
|
you would like to detach.
|
||||||
|
"""
|
||||||
|
return self._action('os-begin_detaching', volume)
|
||||||
|
|
||||||
|
def roll_detaching(self, volume):
|
||||||
|
"""
|
||||||
|
Roll detaching this volume.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID)
|
||||||
|
you would like to roll detaching.
|
||||||
|
"""
|
||||||
|
return self._action('os-roll_detaching', volume)
|
||||||
|
|
||||||
|
def initialize_connection(self, volume, connector):
|
||||||
|
"""
|
||||||
|
Initialize a volume connection.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID).
|
||||||
|
:param connector: connector dict from nova.
|
||||||
|
"""
|
||||||
|
return self._action('os-initialize_connection', volume,
|
||||||
|
{'connector': connector})[1]['connection_info']
|
||||||
|
|
||||||
|
def terminate_connection(self, volume, connector):
|
||||||
|
"""
|
||||||
|
Terminate a volume connection.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` (or its ID).
|
||||||
|
:param connector: connector dict from nova.
|
||||||
|
"""
|
||||||
|
self._action('os-terminate_connection', volume,
|
||||||
|
{'connector': connector})
|
||||||
|
|
||||||
|
def set_metadata(self, volume, metadata):
|
||||||
|
"""
|
||||||
|
Update/Set a volumes metadata.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume`.
|
||||||
|
:param metadata: A list of keys to be set.
|
||||||
|
"""
|
||||||
|
body = {'metadata': metadata}
|
||||||
|
return self._create("/volumes/%s/metadata" % base.getid(volume),
|
||||||
|
body, "metadata")
|
||||||
|
|
||||||
|
def delete_metadata(self, volume, keys):
|
||||||
|
"""
|
||||||
|
Delete specified keys from volumes metadata.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume`.
|
||||||
|
:param metadata: A list of keys to be removed.
|
||||||
|
"""
|
||||||
|
for k in keys:
|
||||||
|
self._delete("/volumes/%s/metadata/%s" % (base.getid(volume), k))
|
||||||
|
|
||||||
|
def upload_to_image(self, volume, force, image_name, container_format,
|
||||||
|
disk_format):
|
||||||
|
"""
|
||||||
|
Upload volume to image service as image.
|
||||||
|
|
||||||
|
:param volume: The :class:`Volume` to upload.
|
||||||
|
"""
|
||||||
|
return self._action('os-volume_upload_image',
|
||||||
|
volume,
|
||||||
|
{'force': force,
|
||||||
|
'image_name': image_name,
|
||||||
|
'container_format': container_format,
|
||||||
|
'disk_format': disk_format})
|
||||||
|
|
||||||
|
def force_delete(self, volume):
|
||||||
|
return self._action('os-force_delete', base.getid(volume))
|
17
cinderclient/v2/__init__.py
Normal file
17
cinderclient/v2/__init__.py
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Copyright (c) 2013 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinderclient.v2.client import Client
|
86
cinderclient/v2/client.py
Normal file
86
cinderclient/v2/client.py
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
from cinderclient import client
|
||||||
|
from cinderclient.v2 import limits
|
||||||
|
from cinderclient.v2 import quota_classes
|
||||||
|
from cinderclient.v2 import quotas
|
||||||
|
from cinderclient.v2 import shares
|
||||||
|
from cinderclient.v2 import share_snapshots
|
||||||
|
from cinderclient.v2 import volumes
|
||||||
|
from cinderclient.v2 import volume_snapshots
|
||||||
|
from cinderclient.v2 import volume_types
|
||||||
|
from cinderclient.v2 import volume_backups
|
||||||
|
from cinderclient.v2 import volume_backups_restore
|
||||||
|
|
||||||
|
|
||||||
|
class Client(object):
|
||||||
|
"""Top-level object to access the OpenStack Volume API.
|
||||||
|
|
||||||
|
Create an instance with your creds::
|
||||||
|
|
||||||
|
>>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
|
||||||
|
|
||||||
|
Then call methods on its managers::
|
||||||
|
|
||||||
|
>>> client.volumes.list()
|
||||||
|
...
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, username, api_key, project_id=None, auth_url='',
|
||||||
|
insecure=False, timeout=None, tenant_id=None,
|
||||||
|
proxy_tenant_id=None, proxy_token=None, region_name=None,
|
||||||
|
endpoint_type='publicURL', extensions=None,
|
||||||
|
service_type='volume', service_name=None,
|
||||||
|
volume_service_name=None, retries=None,
|
||||||
|
http_log_debug=False,
|
||||||
|
cacert=None):
|
||||||
|
# FIXME(comstud): Rename the api_key argument above when we
|
||||||
|
# know it's not being used as keyword argument
|
||||||
|
password = api_key
|
||||||
|
self.limits = limits.LimitsManager(self)
|
||||||
|
|
||||||
|
# extensions
|
||||||
|
self.volumes = volumes.VolumeManager(self)
|
||||||
|
self.volume_snapshots = volume_snapshots.SnapshotManager(self)
|
||||||
|
self.volume_types = volume_types.VolumeTypeManager(self)
|
||||||
|
self.quota_classes = quota_classes.QuotaClassSetManager(self)
|
||||||
|
self.quotas = quotas.QuotaSetManager(self)
|
||||||
|
self.backups = volume_backups.VolumeBackupManager(self)
|
||||||
|
self.restores = volume_backups_restore.VolumeBackupRestoreManager(self)
|
||||||
|
self.shares = shares.ShareManager(self)
|
||||||
|
self.share_snapshots = share_snapshots.ShareSnapshotManager(self)
|
||||||
|
|
||||||
|
# Add in any extensions...
|
||||||
|
if extensions:
|
||||||
|
for extension in extensions:
|
||||||
|
if extension.manager_class:
|
||||||
|
setattr(self, extension.name,
|
||||||
|
extension.manager_class(self))
|
||||||
|
|
||||||
|
self.client = client.HTTPClient(
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
project_id,
|
||||||
|
auth_url,
|
||||||
|
insecure=insecure,
|
||||||
|
timeout=timeout,
|
||||||
|
tenant_id=tenant_id,
|
||||||
|
proxy_token=proxy_token,
|
||||||
|
proxy_tenant_id=proxy_tenant_id,
|
||||||
|
region_name=region_name,
|
||||||
|
endpoint_type=endpoint_type,
|
||||||
|
service_type=service_type,
|
||||||
|
service_name=service_name,
|
||||||
|
volume_service_name=volume_service_name,
|
||||||
|
retries=retries,
|
||||||
|
http_log_debug=http_log_debug,
|
||||||
|
cacert=cacert)
|
||||||
|
|
||||||
|
def authenticate(self):
|
||||||
|
"""Authenticate against the server.
|
||||||
|
|
||||||
|
Normally this is called automatically when you first access the API,
|
||||||
|
but you can call this method to force authentication right now.
|
||||||
|
|
||||||
|
Returns on success; raises :exc:`exceptions.Unauthorized` if the
|
||||||
|
credentials are wrong.
|
||||||
|
"""
|
||||||
|
self.client.authenticate()
|
15
cinderclient/v2/contrib/__init__.py
Normal file
15
cinderclient/v2/contrib/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Copyright (c) 2013 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
47
cinderclient/v2/contrib/list_extensions.py
Normal file
47
cinderclient/v2/contrib/list_extensions.py
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
# Copyright 2013 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
from cinderclient import utils
|
||||||
|
|
||||||
|
|
||||||
|
class ListExtResource(base.Resource):
|
||||||
|
@property
|
||||||
|
def summary(self):
|
||||||
|
descr = self.description.strip()
|
||||||
|
if not descr:
|
||||||
|
return '??'
|
||||||
|
lines = descr.split("\n")
|
||||||
|
if len(lines) == 1:
|
||||||
|
return lines[0]
|
||||||
|
else:
|
||||||
|
return lines[0] + "..."
|
||||||
|
|
||||||
|
|
||||||
|
class ListExtManager(base.Manager):
|
||||||
|
resource_class = ListExtResource
|
||||||
|
|
||||||
|
def show_all(self):
|
||||||
|
return self._list("/extensions", 'extensions')
|
||||||
|
|
||||||
|
|
||||||
|
@utils.service_type('volume')
|
||||||
|
def do_list_extensions(client, _args):
|
||||||
|
"""
|
||||||
|
List all the os-api extensions that are available.
|
||||||
|
"""
|
||||||
|
extensions = client.list_extensions.show_all()
|
||||||
|
fields = ["Name", "Summary", "Alias", "Updated"]
|
||||||
|
utils.print_list(extensions, fields)
|
78
cinderclient/v2/limits.py
Normal file
78
cinderclient/v2/limits.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
# Copyright 2013 OpenStack LLC.
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class Limits(base.Resource):
|
||||||
|
"""A collection of RateLimit and AbsoluteLimit objects"""
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<Limits>"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def absolute(self):
|
||||||
|
for (name, value) in self._info['absolute'].items():
|
||||||
|
yield AbsoluteLimit(name, value)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def rate(self):
|
||||||
|
for group in self._info['rate']:
|
||||||
|
uri = group['uri']
|
||||||
|
regex = group['regex']
|
||||||
|
for rate in group['limit']:
|
||||||
|
yield RateLimit(rate['verb'], uri, regex, rate['value'],
|
||||||
|
rate['remaining'], rate['unit'],
|
||||||
|
rate['next-available'])
|
||||||
|
|
||||||
|
|
||||||
|
class RateLimit(object):
|
||||||
|
"""Data model that represents a flattened view of a single rate limit"""
|
||||||
|
|
||||||
|
def __init__(self, verb, uri, regex, value, remain,
|
||||||
|
unit, next_available):
|
||||||
|
self.verb = verb
|
||||||
|
self.uri = uri
|
||||||
|
self.regex = regex
|
||||||
|
self.value = value
|
||||||
|
self.remain = remain
|
||||||
|
self.unit = unit
|
||||||
|
self.next_available = next_available
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return self.uri == other.uri \
|
||||||
|
and self.regex == other.regex \
|
||||||
|
and self.value == other.value \
|
||||||
|
and self.verb == other.verb \
|
||||||
|
and self.remain == other.remain \
|
||||||
|
and self.unit == other.unit \
|
||||||
|
and self.next_available == other.next_available
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<RateLimit: method=%s uri=%s>" % (self.method, self.uri)
|
||||||
|
|
||||||
|
|
||||||
|
class AbsoluteLimit(object):
|
||||||
|
"""Data model that represents a single absolute limit"""
|
||||||
|
|
||||||
|
def __init__(self, name, value):
|
||||||
|
self.name = name
|
||||||
|
self.value = value
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return self.value == other.value and self.name == other.name
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<AbsoluteLimit: name=%s>" % (self.name)
|
||||||
|
|
||||||
|
|
||||||
|
class LimitsManager(base.Manager):
|
||||||
|
"""Manager object used to interact with limits resource"""
|
||||||
|
|
||||||
|
resource_class = Limits
|
||||||
|
|
||||||
|
def get(self):
|
||||||
|
"""Get a specific extension.
|
||||||
|
|
||||||
|
:rtype: :class:`Limits`
|
||||||
|
"""
|
||||||
|
return self._get("/limits", "limits")
|
51
cinderclient/v2/quota_classes.py
Normal file
51
cinderclient/v2/quota_classes.py
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
# Copyright 2013 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class QuotaClassSet(base.Resource):
|
||||||
|
|
||||||
|
@property
|
||||||
|
def id(self):
|
||||||
|
"""Needed by base.Resource to self-refresh and be indexed"""
|
||||||
|
return self.class_name
|
||||||
|
|
||||||
|
def update(self, *args, **kwargs):
|
||||||
|
self.manager.update(self.class_name, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class QuotaClassSetManager(base.ManagerWithFind):
|
||||||
|
resource_class = QuotaClassSet
|
||||||
|
|
||||||
|
def get(self, class_name):
|
||||||
|
return self._get("/os-quota-class-sets/%s" % (class_name),
|
||||||
|
"quota_class_set")
|
||||||
|
|
||||||
|
def update(self,
|
||||||
|
class_name,
|
||||||
|
volumes=None,
|
||||||
|
gigabytes=None):
|
||||||
|
|
||||||
|
body = {'quota_class_set': {
|
||||||
|
'class_name': class_name,
|
||||||
|
'volumes': volumes,
|
||||||
|
'gigabytes': gigabytes}}
|
||||||
|
|
||||||
|
for key in body['quota_class_set'].keys():
|
||||||
|
if body['quota_class_set'][key] is None:
|
||||||
|
body['quota_class_set'].pop(key)
|
||||||
|
|
||||||
|
self._update('/os-quota-class-sets/%s' % (class_name), body)
|
54
cinderclient/v2/quotas.py
Normal file
54
cinderclient/v2/quotas.py
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
# Copyright 2013 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
|
||||||
|
|
||||||
|
class QuotaSet(base.Resource):
|
||||||
|
|
||||||
|
@property
|
||||||
|
def id(self):
|
||||||
|
"""Needed by base.Resource to self-refresh and be indexed"""
|
||||||
|
return self.tenant_id
|
||||||
|
|
||||||
|
def update(self, *args, **kwargs):
|
||||||
|
self.manager.update(self.tenant_id, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class QuotaSetManager(base.ManagerWithFind):
|
||||||
|
resource_class = QuotaSet
|
||||||
|
|
||||||
|
def get(self, tenant_id):
|
||||||
|
if hasattr(tenant_id, 'tenant_id'):
|
||||||
|
tenant_id = tenant_id.tenant_id
|
||||||
|
return self._get("/os-quota-sets/%s" % (tenant_id), "quota_set")
|
||||||
|
|
||||||
|
def update(self, tenant_id, volumes=None, snapshots=None, gigabytes=None):
|
||||||
|
|
||||||
|
body = {'quota_set': {
|
||||||
|
'tenant_id': tenant_id,
|
||||||
|
'volumes': volumes,
|
||||||
|
'snapshots': snapshots,
|
||||||
|
'gigabytes': gigabytes}}
|
||||||
|
|
||||||
|
for key in body['quota_set'].keys():
|
||||||
|
if body['quota_set'][key] is None:
|
||||||
|
body['quota_set'].pop(key)
|
||||||
|
|
||||||
|
self._update('/os-quota-sets/%s' % (tenant_id), body)
|
||||||
|
|
||||||
|
def defaults(self, tenant_id):
|
||||||
|
return self._get('/os-quota-sets/%s/defaults' % tenant_id,
|
||||||
|
'quota_set')
|
91
cinderclient/v2/share_snapshots.py
Normal file
91
cinderclient/v2/share_snapshots.py
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
# Copyright 2012 NetApp
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""Interface for shares extention."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import urllib
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
from cinderclient import utils
|
||||||
|
|
||||||
|
|
||||||
|
class ShareSnapshot(base.Resource):
|
||||||
|
"""Represent a snapshot of a share."""
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<ShareSnapshot: %s>" % self.id
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
"""Delete this snapshot."""
|
||||||
|
self.manager.delete(self)
|
||||||
|
|
||||||
|
|
||||||
|
class ShareSnapshotManager(base.ManagerWithFind):
|
||||||
|
"""Manage :class:`ShareSnapshot` resources.
|
||||||
|
"""
|
||||||
|
resource_class = ShareSnapshot
|
||||||
|
|
||||||
|
def create(self, share_id, force=False, name=None, description=None):
|
||||||
|
"""Create a snapshot of the given share.
|
||||||
|
|
||||||
|
:param share_id: The ID of the share to snapshot.
|
||||||
|
:param force: If force is True, create a snapshot even if the
|
||||||
|
share is busy. Default is False.
|
||||||
|
:param name: Name of the snapshot
|
||||||
|
:param description: Description of the snapshot
|
||||||
|
:rtype: :class:`ShareSnapshot`
|
||||||
|
"""
|
||||||
|
body = {'share-snapshot': {'share_id': share_id,
|
||||||
|
'force': force,
|
||||||
|
'name': name,
|
||||||
|
'description': description}}
|
||||||
|
return self._create('/share-snapshots', body, 'share-snapshot')
|
||||||
|
|
||||||
|
def get(self, snapshot_id):
|
||||||
|
"""Get a snapshot.
|
||||||
|
|
||||||
|
:param snapshot_id: The ID of the snapshot to get.
|
||||||
|
:rtype: :class:`ShareSnapshot`
|
||||||
|
"""
|
||||||
|
return self._get('/share-snapshots/%s' % snapshot_id, 'share-snapshot')
|
||||||
|
|
||||||
|
def list(self, detailed=True, search_opts=None):
|
||||||
|
"""Get a list of all snapshots of shares.
|
||||||
|
|
||||||
|
:rtype: list of :class:`ShareSnapshot`
|
||||||
|
"""
|
||||||
|
if search_opts:
|
||||||
|
query_string = urllib.urlencode([(key, value)
|
||||||
|
for (key, value)
|
||||||
|
in search_opts.items()
|
||||||
|
if value])
|
||||||
|
if query_string:
|
||||||
|
query_string = "?%s" % (query_string,)
|
||||||
|
else:
|
||||||
|
query_string = ''
|
||||||
|
|
||||||
|
if detailed:
|
||||||
|
path = "/share-snapshots/detail%s" % (query_string,)
|
||||||
|
else:
|
||||||
|
path = "/share-snapshots%s" % (query_string,)
|
||||||
|
|
||||||
|
return self._list(path, 'share-snapshots')
|
||||||
|
|
||||||
|
def delete(self, snapshot):
|
||||||
|
"""Delete a snapshot of a share.
|
||||||
|
|
||||||
|
:param share: The :class:`ShareSnapshot` to delete.
|
||||||
|
"""
|
||||||
|
self._delete("/share-snapshots/%s" % base.getid(snapshot))
|
182
cinderclient/v2/shares.py
Normal file
182
cinderclient/v2/shares.py
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
# Copyright 2012 NetApp
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""Interface for shares extention."""
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import urllib
|
||||||
|
|
||||||
|
from cinderclient import base
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from cinderclient import utils
|
||||||
|
|
||||||
|
|
||||||
|
class Share(base.Resource):
|
||||||
|
"""A share is an extra block level storage to the OpenStack instances."""
|
||||||
|
def __repr__(self):
|
||||||
|
return "<Share: %s>" % self.id
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
"""Delete this share."""
|
||||||
|
self.manager.delete(self)
|
||||||
|
|
||||||
|
def allow(self, access_type, access):
|
||||||
|
"""Allow access to a share."""
|
||||||
|
self._validate_access(access_type, access)
|
||||||
|
return self.manager.allow(self, access_type, access)
|
||||||
|
|
||||||
|
def deny(self, id):
|
||||||
|
"""Deny access from IP to a share."""
|
||||||
|
return self.manager.deny(self, id)
|
||||||
|
|
||||||
|
def access_list(self):
|
||||||
|
"""Deny access from IP to a share."""
|
||||||
|
return self.manager.access_list(self)
|
||||||
|
|
||||||
|
def _validate_access(self, access_type, access):
|
||||||
|
if access_type == 'ip':
|
||||||
|
self._validate_ip_range(access)
|
||||||
|
elif access_type == 'passwd':
|
||||||
|
self._validate_username(access)
|
||||||
|
else:
|
||||||
|
raise exceptions.CommandError(
|
||||||
|
'Only ip and passwd type are supported')
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _validate_username(access):
|
||||||
|
valid_useraname_re = '\w{4,32}'
|
||||||
|
username = access
|
||||||
|
if not re.match(valid_useraname_re, username):
|
||||||
|
exc_str = _('Invalid user name. Must be alphanum 4-32 chars long')
|
||||||
|
raise exceptions.CommandError(exc_str)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _validate_ip_range(ip_range):
|
||||||
|
ip_range = ip_range.split('/')
|
||||||
|
exc_str = ('Supported ip format examples:\n'
|
||||||
|
'\t10.0.0.2, 10.0.0.*, 10.0.0.0/24')
|
||||||
|
if len(ip_range) > 2:
|
||||||
|
raise exceptions.CommandError(exc_str)
|
||||||
|
allow_asterisk = (len(ip_range) == 1)
|
||||||
|
ip_range = ip_range[0].split('.')
|
||||||
|
if len(ip_range) != 4:
|
||||||
|
raise exceptions.CommandError(exc_str)
|
||||||
|
for item in ip_range:
|
||||||
|
try:
|
||||||
|
if 0 <= int(item) <= 255:
|
||||||
|
continue
|
||||||
|
raise ValueError()
|
||||||
|
except ValueError:
|
||||||
|
if not (allow_asterisk and item == '*'):
|
||||||
|
raise exceptions.CommandError(exc_str)
|
||||||
|
|
||||||
|
|
||||||
|
class ShareManager(base.ManagerWithFind):
|
||||||
|
"""Manage :class:`Share` resources."""
|
||||||
|
resource_class = Share
|
||||||
|
|
||||||
|
def create(self, share_proto, size, snapshot_id=None, name=None,
|
||||||
|
description=None):
|
||||||
|
"""Create NAS.
|
||||||
|
|
||||||
|
:param size: Size of NAS in GB
|
||||||
|
:param snapshot_id: ID of the snapshot
|
||||||
|
:param name: Name of the NAS
|
||||||
|
:param description: Short description of a share
|
||||||
|
:param share_proto: Type of NAS (NFS or CIFS)
|
||||||
|
:rtype: :class:`Share`
|
||||||
|
"""
|
||||||
|
body = {'share': {'size': size,
|
||||||
|
'snapshot_id': snapshot_id,
|
||||||
|
'name': name,
|
||||||
|
'description': description,
|
||||||
|
'share_proto': share_proto}}
|
||||||
|
return self._create('/shares', body, 'share')
|
||||||
|
|
||||||
|
def get(self, share_id):
|
||||||
|
"""Get a share.
|
||||||
|
|
||||||
|
:param share_id: The ID of the share to delete.
|
||||||
|
:rtype: :class:`Share`
|
||||||
|
"""
|
||||||
|
return self._get("/shares/%s" % share_id, "share")
|
||||||
|
|
||||||
|
def list(self, detailed=True, search_opts=None):
|
||||||
|
"""Get a list of all shares.
|
||||||
|
|
||||||
|
:rtype: list of :class:`Share`
|
||||||
|
"""
|
||||||
|
if search_opts:
|
||||||
|
query_string = urllib.urlencode([(key, value)
|
||||||
|
for (key, value)
|
||||||
|
in search_opts.items()
|
||||||
|
if value])
|
||||||
|
if query_string:
|
||||||
|
query_string = "?%s" % (query_string,)
|
||||||
|
else:
|
||||||
|
query_string = ''
|
||||||
|
|
||||||
|
if detailed:
|
||||||
|
path = "/shares/detail%s" % (query_string,)
|
||||||
|
else:
|
||||||
|
path = "/shares%s" % (query_string,)
|
||||||
|
|
||||||
|
return self._list(path, 'shares')
|
||||||
|
|
||||||
|
def delete(self, share):
|
||||||
|
"""Delete a share.
|
||||||
|
|
||||||
|
:param share: The :class:`Share` to delete.
|
||||||
|
"""
|
||||||
|
self._delete("/shares/%s" % base.getid(share))
|
||||||
|
|
||||||
|
def allow(self, share, access_type, access):
|
||||||
|
"""Allow access from IP to a shares.
|
||||||
|
|
||||||
|
:param share: The :class:`Share` to delete.
|
||||||
|
:param access_type: string that represents access type ('ip','domain')
|
||||||
|
:param access: string that represents access ('127.0.0.1')
|
||||||
|
"""
|
||||||
|
return self._action('os-allow_access', share,
|
||||||
|
{'access_type': access_type,
|
||||||
|
'access_to': access})
|
||||||
|
|
||||||
|
def deny(self, share, id):
|
||||||
|
"""Deny access from IP to a shares.
|
||||||
|
|
||||||
|
:param share: The :class:`Share` to delete.
|
||||||
|
:param ip: string that represents ip address
|
||||||
|
"""
|
||||||
|
return self._action('os-deny_access', share, {'access_id': id})
|
||||||
|
|
||||||
|
def access_list(self, share):
|
||||||
|
"""Get access list to the share."""
|
||||||
|
access_list = self._action("os-access_list", share)[1]["access_list"]
|
||||||
|
if access_list:
|
||||||
|
t = collections.namedtuple('Access', access_list[0].keys())
|
||||||
|
return [t(*value.values()) for value in access_list]
|
||||||
|
else:
|
||||||
|
return []
|
||||||
|
|
||||||
|
def _action(self, action, share, info=None, **kwargs):
|
||||||
|
"""Perform a share 'action'."""
|
||||||
|
body = {action: info}
|
||||||
|
self.run_hooks('modify_body_for_action', body, **kwargs)
|
||||||
|
url = '/shares/%s/action' % base.getid(share)
|
||||||
|
return self.api.client.post(url, body=body)
|
||||||
|
|
||||||
|
|
||||||
|
#########################
|
1033
cinderclient/v2/shell.py
Normal file
1033
cinderclient/v2/shell.py
Normal file
File diff suppressed because it is too large
Load Diff
76
cinderclient/v2/volume_backups.py
Normal file
76
cinderclient/v2/volume_backups.py
Normal file
@ -0,0 +1,76 @@
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Volume Backups interface (1.1 extension).
"""

from cinderclient import base


class VolumeBackup(base.Resource):
    """A volume backup is a block level backup of a volume."""
    def __repr__(self):
        return "<VolumeBackup: %s>" % self.id

    def delete(self):
        """Delete this volume backup."""
        return self.manager.delete(self)


class VolumeBackupManager(base.ManagerWithFind):
    """Manage :class:`VolumeBackup` resources."""
    resource_class = VolumeBackup

    def create(self, volume_id, container=None,
               name=None, description=None):
        """Create a volume backup.

        :param volume_id: The ID of the volume to backup.
        :param container: The name of the backup service container.
        :param name: The name of the backup.
        :param description: The description of the backup.
        :rtype: :class:`VolumeBackup`
        """
        body = {'backup': {'volume_id': volume_id,
                           'container': container,
                           'name': name,
                           'description': description}}
        return self._create('/backups', body, 'backup')

    def get(self, backup_id):
        """Show details of a volume backup.

        :param backup_id: The ID of the backup to display.
        :rtype: :class:`VolumeBackup`
        """
        return self._get("/backups/%s" % backup_id, "backup")

    def list(self, detailed=True):
        """Get a list of all volume backups.

        :rtype: list of :class:`VolumeBackup`
        """
        if detailed is True:
            return self._list("/backups/detail", "backups")
        else:
            return self._list("/backups", "backups")

    def delete(self, backup):
        """Delete a volume backup.

        :param backup: The :class:`VolumeBackup` to delete.
        """
        self._delete("/backups/%s" % base.getid(backup))
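
A short, hedged example of driving the backup manager above through the versioned client factory; the credentials, the ``backups`` attribute name, and the volume ID are placeholders for illustration::

    # Sketch only: assumes the v2 client exposes this manager as `backups`.
    from cinderclient import client
    cinder = client.Client('2', 'user', 'password', 'project',
                           'http://auth.example.com:5000/v2.0')

    backup = cinder.backups.create('ce06d0a8-5c1b-4e2c-81d2-39eca6bbfb70',
                                   name='nightly', description='nightly copy')
    print(cinder.backups.get(backup.id))   # show one backup
    print(cinder.backups.list())           # detailed listing by default
    cinder.backups.delete(backup)          # accepts the object or its ID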
43  cinderclient/v2/volume_backups_restore.py  Normal file
@@ -0,0 +1,43 @@
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Volume Backups Restore interface (1.1 extension).

This is part of the Volume Backups interface.
"""

from cinderclient import base


class VolumeBackupsRestore(base.Resource):
    """A Volume Backups Restore represents a restore operation."""
    def __repr__(self):
        return "<VolumeBackupsRestore: %s>" % self.id


class VolumeBackupRestoreManager(base.ManagerWithFind):
    """Manage :class:`VolumeBackupsRestore` resources."""
    resource_class = VolumeBackupsRestore

    def restore(self, backup_id, volume_id=None):
        """Restore a backup to a volume.

        :param backup_id: The ID of the backup to restore.
        :param volume_id: The ID of the volume to restore the backup to.
        :rtype: :class:`Restore`
        """
        body = {'restore': {'volume_id': volume_id}}
        return self._create("/backups/%s/restore" % backup_id,
                            body, "restore")
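
A correspondingly small sketch of the restore call; the manager attribute name (``restores``) and the IDs are assumptions for illustration::

    # Sketch: `cinder` and `backup` as in the previous sketch.
    restore = cinder.restores.restore(backup.id)      # restore into a new volume
    restore = cinder.restores.restore(backup.id,      # or into an existing one
                                      volume_id='<existing-volume-id>')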
116  cinderclient/v2/volume_snapshots.py  Normal file
@@ -0,0 +1,116 @@
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Volume snapshot interface (1.1 extension)."""

import urllib

from cinderclient import base


class Snapshot(base.Resource):
    """A Snapshot is a point-in-time snapshot of an openstack volume."""
    def __repr__(self):
        return "<Snapshot: %s>" % self.id

    def delete(self):
        """Delete this snapshot."""
        self.manager.delete(self)

    def update(self, **kwargs):
        """Update the name or description for this snapshot."""
        self.manager.update(self, **kwargs)

    @property
    def progress(self):
        return self._info.get('os-extended-snapshot-attributes:progress')

    @property
    def project_id(self):
        return self._info.get('os-extended-snapshot-attributes:project_id')


class SnapshotManager(base.ManagerWithFind):
    """Manage :class:`Snapshot` resources."""
    resource_class = Snapshot

    def create(self, volume_id, force=False,
               name=None, description=None):

        """Create a snapshot of the given volume.

        :param volume_id: The ID of the volume to snapshot.
        :param force: If force is True, create a snapshot even if the volume
                      is attached to an instance. Default is False.
        :param name: Name of the snapshot
        :param description: Description of the snapshot
        :rtype: :class:`Snapshot`
        """
        body = {'snapshot': {'volume_id': volume_id,
                             'force': force,
                             'name': name,
                             'description': description}}
        return self._create('/snapshots', body, 'snapshot')

    def get(self, snapshot_id):
        """Get a snapshot.

        :param snapshot_id: The ID of the snapshot to get.
        :rtype: :class:`Snapshot`
        """
        return self._get("/snapshots/%s" % snapshot_id, "snapshot")

    def list(self, detailed=True, search_opts=None):
        """Get a list of all snapshots.

        :rtype: list of :class:`Snapshot`
        """

        if search_opts is None:
            search_opts = {}

        qparams = {}

        for opt, val in search_opts.iteritems():
            if val:
                qparams[opt] = val

        query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""

        detail = ""
        if detailed:
            detail = "/detail"

        return self._list("/snapshots%s%s" % (detail, query_string),
                          "snapshots")

    def delete(self, snapshot):
        """Delete a snapshot.

        :param snapshot: The :class:`Snapshot` to delete.
        """
        self._delete("/snapshots/%s" % base.getid(snapshot))

    def update(self, snapshot, **kwargs):
        """Update the name or description for a snapshot.

        :param snapshot: The :class:`Snapshot` to update.
        """
        if not kwargs:
            return

        body = {"snapshot": kwargs}

        self._update("/snapshots/%s" % base.getid(snapshot), body)
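
A hedged sketch of how ``search_opts`` in ``SnapshotManager.list()`` turns into a query string; the filter keys shown are illustrative and the manager attribute name (``volume_snapshots``) is an assumption::

    # Sketch: empty values are dropped, the rest are URL-encoded, so this
    # issues roughly GET /snapshots/detail?status=available
    snaps = cinder.volume_snapshots.list(
        search_opts={'status': 'available', 'volume_id': None})
    for snap in snaps:
        print(snap.progress)   # comes from the extended snapshot attributes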
108  cinderclient/v2/volume_types.py  Normal file
@@ -0,0 +1,108 @@
# Copyright 2013 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""Volume Type interface."""

from cinderclient import base


class VolumeType(base.Resource):
    """A Volume Type is the type of volume to be created."""
    def __repr__(self):
        return "<VolumeType: %s>" % self.name

    def get_keys(self):
        """Get extra specs from a volume type.

        :param vol_type: The :class:`VolumeType` to get extra specs from
        """
        _resp, body = self.manager.api.client.get(
            "/types/%s/extra_specs" %
            base.getid(self))
        return body["extra_specs"]

    def set_keys(self, metadata):
        """Set extra specs on a volume type.

        :param type: The :class:`VolumeType` to set extra specs on
        :param metadata: A dict of key/value pairs to be set
        """
        body = {'extra_specs': metadata}
        return self.manager._create(
            "/types/%s/extra_specs" % base.getid(self),
            body,
            "extra_specs",
            return_raw=True)

    def unset_keys(self, keys):
        """Unset extra specs on a volume type.

        :param type_id: The :class:`VolumeType` to unset extra specs on
        :param keys: A list of keys to be unset
        """

        # NOTE(jdg): This wasn't actually doing all of the keys before;
        # the return in the loop resulted in only ONE key being unset.
        # Since on success the return was None, we'll only interrupt the loop
        # and return if there's an error.
        for k in keys:
            resp = self.manager._delete(
                "/types/%s/extra_specs/%s" % (
                base.getid(self), k))
            if resp is not None:
                return resp


class VolumeTypeManager(base.ManagerWithFind):
    """Manage :class:`VolumeType` resources."""
    resource_class = VolumeType

    def list(self):
        """Get a list of all volume types.

        :rtype: list of :class:`VolumeType`.
        """
        return self._list("/types", "volume_types")

    def get(self, volume_type):
        """Get a specific volume type.

        :param volume_type: The ID of the :class:`VolumeType` to get.
        :rtype: :class:`VolumeType`
        """
        return self._get("/types/%s" % base.getid(volume_type), "volume_type")

    def delete(self, volume_type):
        """Delete a specific volume_type.

        :param volume_type: The ID of the :class:`VolumeType` to delete.
        """
        self._delete("/types/%s" % base.getid(volume_type))

    def create(self, name):
        """Create a volume type.

        :param name: Descriptive name of the volume type
        :rtype: :class:`VolumeType`
        """

        body = {
            "volume_type": {
                "name": name,
            }
        }

        return self._create("/types", body, "volume_type")
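
A brief, hedged round trip through the extra-spec helpers above, assuming the manager is exposed as ``volume_types`` and using an illustrative spec key::

    # Sketch: create a type, attach one extra spec, read it back, remove it.
    gold = cinder.volume_types.create('gold')
    gold.set_keys({'volume_backend_name': 'fast-backend'})  # illustrative key
    print(gold.get_keys())       # {'volume_backend_name': 'fast-backend'}
    gold.unset_keys(['volume_backend_name'])
    cinder.volume_types.delete(gold)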
308  cinderclient/v2/volumes.py  Normal file
@@ -0,0 +1,308 @@
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Volume interface (v2 extension)."""

import urllib

from cinderclient import base


class Volume(base.Resource):
    """A volume is an extra block level storage to the OpenStack instances."""
    def __repr__(self):
        return "<Volume: %s>" % self.id

    def delete(self):
        """Delete this volume."""
        self.manager.delete(self)

    def update(self, **kwargs):
        """Update the name or description for this volume."""
        self.manager.update(self, **kwargs)

    def attach(self, instance_uuid, mountpoint):
        """Set attachment metadata.

        :param instance_uuid: uuid of the attaching instance.
        :param mountpoint: mountpoint on the attaching instance.
        """
        return self.manager.attach(self, instance_uuid, mountpoint)

    def detach(self):
        """Clear attachment metadata."""
        return self.manager.detach(self)

    def reserve(self, volume):
        """Reserve this volume."""
        return self.manager.reserve(self)

    def unreserve(self, volume):
        """Unreserve this volume."""
        return self.manager.unreserve(self)

    def begin_detaching(self, volume):
        """Begin detaching volume."""
        return self.manager.begin_detaching(self)

    def roll_detaching(self, volume):
        """Roll detaching volume."""
        return self.manager.roll_detaching(self)

    def initialize_connection(self, volume, connector):
        """Initialize a volume connection.

        :param connector: connector dict from nova.
        """
        return self.manager.initialize_connection(self, connector)

    def terminate_connection(self, volume, connector):
        """Terminate a volume connection.

        :param connector: connector dict from nova.
        """
        return self.manager.terminate_connection(self, connector)

    def set_metadata(self, volume, metadata):
        """Set or append metadata to a volume.

        :param volume: The :class:`Volume` to set metadata on
        :param metadata: A dict of key/value pairs to set
        """
        return self.manager.set_metadata(self, metadata)

    def upload_to_image(self, force, image_name, container_format,
                        disk_format):
        """Upload a volume to image service as an image."""
        self.manager.upload_to_image(self, force, image_name,
                                     container_format, disk_format)

    def force_delete(self):
        """Delete this volume ignoring its current state."""
        self.manager.force_delete(self)


class VolumeManager(base.ManagerWithFind):
    """Manage :class:`Volume` resources."""
    resource_class = Volume

    def create(self, size, snapshot_id=None, source_volid=None,
               name=None, description=None,
               volume_type=None, user_id=None,
               project_id=None, availability_zone=None,
               metadata=None, imageRef=None):
        """Create a volume.

        :param size: Size of volume in GB
        :param snapshot_id: ID of the snapshot
        :param name: Name of the volume
        :param description: Description of the volume
        :param volume_type: Type of volume
        :param user_id: User id derived from context
        :param project_id: Project id derived from context
        :param availability_zone: Availability Zone to use
        :param metadata: Optional metadata to set on volume creation
        :param imageRef: reference to an image stored in glance
        :param source_volid: ID of source volume to clone from
        :rtype: :class:`Volume`
        """

        if metadata is None:
            volume_metadata = {}
        else:
            volume_metadata = metadata

        body = {'volume': {'size': size,
                           'snapshot_id': snapshot_id,
                           'name': name,
                           'description': description,
                           'volume_type': volume_type,
                           'user_id': user_id,
                           'project_id': project_id,
                           'availability_zone': availability_zone,
                           'status': "creating",
                           'attach_status': "detached",
                           'metadata': volume_metadata,
                           'imageRef': imageRef,
                           'source_volid': source_volid,
                           }}
        return self._create('/volumes', body, 'volume')

    def get(self, volume_id):
        """Get a volume.

        :param volume_id: The ID of the volume to get.
        :rtype: :class:`Volume`
        """
        return self._get("/volumes/%s" % volume_id, "volume")

    def list(self, detailed=True, search_opts=None):
        """Get a list of all volumes.

        :rtype: list of :class:`Volume`
        """
        if search_opts is None:
            search_opts = {}

        qparams = {}

        for opt, val in search_opts.iteritems():
            if val:
                qparams[opt] = val

        query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""

        detail = ""
        if detailed:
            detail = "/detail"

        return self._list("/volumes%s%s" % (detail, query_string),
                          "volumes")

    def delete(self, volume):
        """Delete a volume.

        :param volume: The :class:`Volume` to delete.
        """
        self._delete("/volumes/%s" % base.getid(volume))

    def update(self, volume, **kwargs):
        """Update the name or description for a volume.

        :param volume: The :class:`Volume` to update.
        """
        if not kwargs:
            return

        body = {"volume": kwargs}

        self._update("/volumes/%s" % base.getid(volume), body)

    def _action(self, action, volume, info=None, **kwargs):
        """Perform a volume "action."
        """
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/volumes/%s/action' % base.getid(volume)
        return self.api.client.post(url, body=body)

    def attach(self, volume, instance_uuid, mountpoint):
        """Set attachment metadata.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to attach.
        :param instance_uuid: uuid of the attaching instance.
        :param mountpoint: mountpoint on the attaching instance.
        """
        return self._action('os-attach',
                            volume,
                            {'instance_uuid': instance_uuid,
                             'mountpoint': mountpoint})

    def detach(self, volume):
        """Clear attachment metadata.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to detach.
        """
        return self._action('os-detach', volume)

    def reserve(self, volume):
        """Reserve this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to reserve.
        """
        return self._action('os-reserve', volume)

    def unreserve(self, volume):
        """Unreserve this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to unreserve.
        """
        return self._action('os-unreserve', volume)

    def begin_detaching(self, volume):
        """Begin detaching this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to detach.
        """
        return self._action('os-begin_detaching', volume)

    def roll_detaching(self, volume):
        """Roll detaching this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to roll detaching.
        """
        return self._action('os-roll_detaching', volume)

    def initialize_connection(self, volume, connector):
        """Initialize a volume connection.

        :param volume: The :class:`Volume` (or its ID).
        :param connector: connector dict from nova.
        """
        return self._action('os-initialize_connection', volume,
                            {'connector': connector})[1]['connection_info']

    def terminate_connection(self, volume, connector):
        """Terminate a volume connection.

        :param volume: The :class:`Volume` (or its ID).
        :param connector: connector dict from nova.
        """
        self._action('os-terminate_connection', volume,
                     {'connector': connector})

    def set_metadata(self, volume, metadata):
        """Update/Set a volume's metadata.

        :param volume: The :class:`Volume`.
        :param metadata: A dict of key/value pairs to be set.
        """
        body = {'metadata': metadata}
        return self._create("/volumes/%s/metadata" % base.getid(volume),
                            body, "metadata")

    def delete_metadata(self, volume, keys):
        """Delete specified keys from a volume's metadata.

        :param volume: The :class:`Volume`.
        :param keys: A list of keys to be removed.
        """
        for k in keys:
            self._delete("/volumes/%s/metadata/%s" % (base.getid(volume), k))

    def upload_to_image(self, volume, force, image_name, container_format,
                        disk_format):
        """Upload volume to image service as image.

        :param volume: The :class:`Volume` to upload.
        """
        return self._action('os-volume_upload_image',
                            volume,
                            {'force': force,
                             'image_name': image_name,
                             'container_format': container_format,
                             'disk_format': disk_format})

    def force_delete(self, volume):
        return self._action('os-force_delete', base.getid(volume))
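
Finally, a hedged end-to-end sketch of the volume lifecycle this manager exposes; the instance UUID and mountpoint are placeholders::

    # Sketch: `cinder` as in the earlier sketches (an authenticated v2 client).
    # Create, record an attachment, tag, detach, delete.
    vol = cinder.volumes.create(size=1, name='scratch')
    cinder.volumes.attach(vol, 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', '/dev/vdb')
    cinder.volumes.set_metadata(vol, {'purpose': 'scratch'})
    cinder.volumes.detach(vol)
    cinder.volumes.delete(vol)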
1  doc/.gitignore  vendored  Normal file
@@ -0,0 +1 @@
build/
90  doc/Makefile  Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# Makefile for Sphinx documentation
|
||||||
|
#
|
||||||
|
|
||||||
|
# You can set these variables from the command line.
|
||||||
|
SPHINXOPTS =
|
||||||
|
SPHINXBUILD = sphinx-build
|
||||||
|
SPHINXSOURCE = source
|
||||||
|
PAPER =
|
||||||
|
BUILDDIR = build
|
||||||
|
|
||||||
|
# Internal variables.
|
||||||
|
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||||
|
PAPEROPT_letter = -D latex_paper_size=letter
|
||||||
|
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE)
|
||||||
|
|
||||||
|
.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
|
||||||
|
|
||||||
|
help:
|
||||||
|
@echo "Please use \`make <target>' where <target> is one of"
|
||||||
|
@echo " html to make standalone HTML files"
|
||||||
|
@echo " dirhtml to make HTML files named index.html in directories"
|
||||||
|
@echo " pickle to make pickle files"
|
||||||
|
@echo " json to make JSON files"
|
||||||
|
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||||
|
@echo " qthelp to make HTML files and a qthelp project"
|
||||||
|
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||||
|
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||||
|
@echo " linkcheck to check all external links for integrity"
|
||||||
|
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||||
|
|
||||||
|
clean:
|
||||||
|
-rm -rf $(BUILDDIR)/*
|
||||||
|
|
||||||
|
html:
|
||||||
|
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||||
|
@echo
|
||||||
|
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||||
|
|
||||||
|
dirhtml:
|
||||||
|
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||||
|
@echo
|
||||||
|
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||||
|
|
||||||
|
pickle:
|
||||||
|
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||||
|
@echo
|
||||||
|
@echo "Build finished; now you can process the pickle files."
|
||||||
|
|
||||||
|
json:
|
||||||
|
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||||
|
@echo
|
||||||
|
@echo "Build finished; now you can process the JSON files."
|
||||||
|
|
||||||
|
htmlhelp:
|
||||||
|
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||||
|
@echo
|
||||||
|
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||||
|
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||||
|
|
||||||
|
qthelp:
|
||||||
|
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||||
|
@echo
|
||||||
|
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||||
|
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||||
|
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-cinderclient.qhcp"
|
||||||
|
@echo "To view the help file:"
|
||||||
|
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-cinderclient.qhc"
|
||||||
|
|
||||||
|
latex:
|
||||||
|
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||||
|
@echo
|
||||||
|
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||||
|
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
|
||||||
|
"run these through (pdf)latex."
|
||||||
|
|
||||||
|
changes:
|
||||||
|
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||||
|
@echo
|
||||||
|
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||||
|
|
||||||
|
linkcheck:
|
||||||
|
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||||
|
@echo
|
||||||
|
@echo "Link check complete; look for any errors in the above output " \
|
||||||
|
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||||
|
|
||||||
|
doctest:
|
||||||
|
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||||
|
@echo "Testing of doctests in the sources finished, look at the " \
|
||||||
|
"results in $(BUILDDIR)/doctest/output.txt."
|
205  doc/source/conf.py  Normal file
@@ -0,0 +1,205 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# python-cinderclient documentation build configuration file, created by
|
||||||
|
# sphinx-quickstart on Sun Dec 6 14:19:25 2009.
|
||||||
|
#
|
||||||
|
# This file is execfile()d with current directory set to its containing dir.
|
||||||
|
#
|
||||||
|
# Note that not all possible configuration values are present in this
|
||||||
|
# autogenerated file.
|
||||||
|
#
|
||||||
|
# All configuration values have a default; values that are commented out
|
||||||
|
# serve to show the default.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# If extensions (or modules to document with autodoc) are in another directory,
|
||||||
|
# add these directories to sys.path here. If the directory is relative to the
|
||||||
|
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||||
|
#sys.path.append(os.path.abspath('.'))
|
||||||
|
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
|
||||||
|
sys.path.insert(0, ROOT)
|
||||||
|
|
||||||
|
# -- General configuration ----------------------------------------------------
|
||||||
|
|
||||||
|
# Add any Sphinx extension module names here, as strings. They can be
|
||||||
|
# extensions
|
||||||
|
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||||
|
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
|
||||||
|
|
||||||
|
# Add any paths that contain templates here, relative to this directory.
|
||||||
|
templates_path = ['_templates']
|
||||||
|
|
||||||
|
# The suffix of source filenames.
|
||||||
|
source_suffix = '.rst'
|
||||||
|
|
||||||
|
# The encoding of source files.
|
||||||
|
#source_encoding = 'utf-8'
|
||||||
|
|
||||||
|
# The master toctree document.
|
||||||
|
master_doc = 'index'
|
||||||
|
|
||||||
|
# General information about the project.
|
||||||
|
project = u'python-cinderclient'
|
||||||
|
copyright = u'Rackspace, based on work by Jacob Kaplan-Moss'
|
||||||
|
|
||||||
|
# The version info for the project you're documenting, acts as replacement for
|
||||||
|
# |version| and |release|, also used in various other places throughout the
|
||||||
|
# built documents.
|
||||||
|
#
|
||||||
|
# The short X.Y version.
|
||||||
|
version = '2.6'
|
||||||
|
# The full version, including alpha/beta/rc tags.
|
||||||
|
release = '2.6.10'
|
||||||
|
|
||||||
|
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||||
|
# for a list of supported languages.
|
||||||
|
#language = None
|
||||||
|
|
||||||
|
# There are two options for replacing |today|: either, you set today to some
|
||||||
|
# non-false value, then it is used:
|
||||||
|
#today = ''
|
||||||
|
# Else, today_fmt is used as the format for a strftime call.
|
||||||
|
#today_fmt = '%B %d, %Y'
|
||||||
|
|
||||||
|
# List of documents that shouldn't be included in the build.
|
||||||
|
#unused_docs = []
|
||||||
|
|
||||||
|
# List of directories, relative to source directory, that shouldn't be searched
|
||||||
|
# for source files.
|
||||||
|
exclude_trees = []
|
||||||
|
|
||||||
|
# The reST default role (used for this markup: `text`) to use for all
|
||||||
|
# documents.
|
||||||
|
#default_role = None
|
||||||
|
|
||||||
|
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||||
|
add_function_parentheses = True
|
||||||
|
|
||||||
|
# If true, the current module name will be prepended to all description
|
||||||
|
# unit titles (such as .. function::).
|
||||||
|
add_module_names = True
|
||||||
|
|
||||||
|
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||||
|
# output. They are ignored by default.
|
||||||
|
#show_authors = False
|
||||||
|
|
||||||
|
# The name of the Pygments (syntax highlighting) style to use.
|
||||||
|
pygments_style = 'sphinx'
|
||||||
|
|
||||||
|
# A list of ignored prefixes for module index sorting.
|
||||||
|
#modindex_common_prefix = []
|
||||||
|
|
||||||
|
|
||||||
|
# -- Options for HTML output --------------------------------------------------
|
||||||
|
|
||||||
|
# The theme to use for HTML and HTML Help pages. Major themes that come with
|
||||||
|
# Sphinx are currently 'default' and 'sphinxdoc'.
|
||||||
|
html_theme = 'nature'
|
||||||
|
|
||||||
|
# Theme options are theme-specific and customize the look and feel of a theme
|
||||||
|
# further. For a list of options available for each theme, see the
|
||||||
|
# documentation.
|
||||||
|
#html_theme_options = {}
|
||||||
|
|
||||||
|
# Add any paths that contain custom themes here, relative to this directory.
|
||||||
|
#html_theme_path = []
|
||||||
|
|
||||||
|
# The name for this set of Sphinx documents. If None, it defaults to
|
||||||
|
# "<project> v<release> documentation".
|
||||||
|
#html_title = None
|
||||||
|
|
||||||
|
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||||
|
#html_short_title = None
|
||||||
|
|
||||||
|
# The name of an image file (relative to this directory) to place at the top
|
||||||
|
# of the sidebar.
|
||||||
|
#html_logo = None
|
||||||
|
|
||||||
|
# The name of an image file (within the static path) to use as favicon of the
|
||||||
|
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||||
|
# pixels large.
|
||||||
|
#html_favicon = None
|
||||||
|
|
||||||
|
# Add any paths that contain custom static files (such as style sheets) here,
|
||||||
|
# relative to this directory. They are copied after the builtin static files,
|
||||||
|
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||||
|
html_static_path = ['_static']
|
||||||
|
|
||||||
|
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||||
|
# using the given strftime format.
|
||||||
|
#html_last_updated_fmt = '%b %d, %Y'
|
||||||
|
|
||||||
|
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||||
|
# typographically correct entities.
|
||||||
|
#html_use_smartypants = True
|
||||||
|
|
||||||
|
# Custom sidebar templates, maps document names to template names.
|
||||||
|
#html_sidebars = {}
|
||||||
|
|
||||||
|
# Additional templates that should be rendered to pages, maps page names to
|
||||||
|
# template names.
|
||||||
|
#html_additional_pages = {}
|
||||||
|
|
||||||
|
# If false, no module index is generated.
|
||||||
|
#html_use_modindex = True
|
||||||
|
|
||||||
|
# If false, no index is generated.
|
||||||
|
#html_use_index = True
|
||||||
|
|
||||||
|
# If true, the index is split into individual pages for each letter.
|
||||||
|
#html_split_index = False
|
||||||
|
|
||||||
|
# If true, links to the reST sources are added to the pages.
|
||||||
|
#html_show_sourcelink = True
|
||||||
|
|
||||||
|
# If true, an OpenSearch description file will be output, and all pages will
|
||||||
|
# contain a <link> tag referring to it. The value of this option must be the
|
||||||
|
# base URL from which the finished HTML is served.
|
||||||
|
#html_use_opensearch = ''
|
||||||
|
|
||||||
|
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
|
||||||
|
#html_file_suffix = ''
|
||||||
|
|
||||||
|
# Output file base name for HTML help builder.
|
||||||
|
htmlhelp_basename = 'python-cinderclientdoc'
|
||||||
|
|
||||||
|
|
||||||
|
# -- Options for LaTeX output -------------------------------------------------
|
||||||
|
|
||||||
|
# The paper size ('letter' or 'a4').
|
||||||
|
#latex_paper_size = 'letter'
|
||||||
|
|
||||||
|
# The font size ('10pt', '11pt' or '12pt').
|
||||||
|
#latex_font_size = '10pt'
|
||||||
|
|
||||||
|
# Grouping the document tree into LaTeX files. List of tuples
|
||||||
|
# (source start file, target name, title, author, documentclass [howto/manual])
|
||||||
|
# .
|
||||||
|
latex_documents = [
|
||||||
|
('index', 'python-cinderclient.tex', u'python-cinderclient Documentation',
|
||||||
|
u'Rackspace - based on work by Jacob Kaplan-Moss', 'manual'),
|
||||||
|
]
|
||||||
|
|
||||||
|
# The name of an image file (relative to this directory) to place at the top of
|
||||||
|
# the title page.
|
||||||
|
#latex_logo = None
|
||||||
|
|
||||||
|
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||||
|
# not chapters.
|
||||||
|
#latex_use_parts = False
|
||||||
|
|
||||||
|
# Additional stuff for the LaTeX preamble.
|
||||||
|
#latex_preamble = ''
|
||||||
|
|
||||||
|
# Documents to append as an appendix to all manuals.
|
||||||
|
#latex_appendices = []
|
||||||
|
|
||||||
|
# If false, no module index is generated.
|
||||||
|
#latex_use_modindex = True
|
||||||
|
|
||||||
|
|
||||||
|
# Example configuration for intersphinx: refer to the Python standard library.
|
||||||
|
intersphinx_mapping = {'http://docs.python.org/': None}
|
60  doc/source/index.rst  Normal file
@@ -0,0 +1,60 @@
Python API
==========
In order to use the Python API directly, you must first obtain an auth token and identify which endpoint you wish to speak to. Once you have done so, you can use the API like so::

    >>> from cinderclient import client
    >>> cinder = client.Client('1', $OS_USER_NAME, $OS_PASSWORD, $OS_TENANT_NAME, $OS_AUTH_URL)
    >>> cinder.volumes.list()
    []
    >>> myvol = cinder.volumes.create(display_name="test-vol", size=1)
    >>> myvol.id
    ce06d0a8-5c1b-4e2c-81d2-39eca6bbfb70
    >>> cinder.volumes.list()
    [<Volume: ce06d0a8-5c1b-4e2c-81d2-39eca6bbfb70>]
    >>> myvol.delete()

Command-line Tool
=================
In order to use the CLI, you must provide your OpenStack username, password, tenant, and auth endpoint. Use the corresponding configuration options (``--os-username``, ``--os-password``, ``--os-tenant-id``, and ``--os-auth-url``) or set them in environment variables::

    export OS_USERNAME=user
    export OS_PASSWORD=pass
    export OS_TENANT_ID=b363706f891f48019483f8bd6503c54b
    export OS_AUTH_URL=http://auth.example.com:5000/v2.0

Once you've configured your authentication parameters, you can run ``cinder help`` to see a complete listing of available commands.


Release Notes
=============
1.0.4
-----
* Added support for backup-service commands

.. _1163546: http://bugs.launchpad.net/python-cinderclient/+bug/1163546
.. _1161857: http://bugs.launchpad.net/python-cinderclient/+bug/1161857
.. _1160898: http://bugs.launchpad.net/python-cinderclient/+bug/1160898
.. _1156994: http://bugs.launchpad.net/python-cinderclient/+bug/1156994

1.0.3
-----

* Added support for V2 Cinder API
* Corrected upload-volume-to-image help messaging
* Align handling of metadata args for all methods
* Update OSLO version
* Correct parsing of volume metadata
* Enable force delete of volumes and snapshots in error state
* Implement clone volume API call
* Add list-extensions call to cinderclient
* Add bootable column to list output
* Add retries to cinderclient operations
* Add Type/Extra-Specs support
* Add volume and snapshot rename commands

.. _1155655: http://bugs.launchpad.net/python-cinderclient/+bug/1155655
.. _1130730: http://bugs.launchpad.net/python-cinderclient/+bug/1130730
.. _1068521: http://bugs.launchpad.net/python-cinderclient/+bug/1068521
.. _1052161: http://bugs.launchpad.net/python-cinderclient/+bug/1052161
.. _1071003: http://bugs.launchpad.net/python-cinderclient/+bug/1071003
.. _1065275: http://bugs.launchpad.net/python-cinderclient/+bug/1065275
.. _1053432: http://bugs.launchpad.net/python-cinderclient/+bug/1053432
49  doc/source/shell.rst  Normal file
@@ -0,0 +1,49 @@
The :program:`cinder` shell utility
=========================================

.. program:: cinder
.. highlight:: bash

The :program:`cinder` shell utility interacts with the OpenStack Cinder API
from the command line. It supports the entirety of the OpenStack Cinder API.

You'll need to provide :program:`cinder` with your OpenStack username and
API key. You can do this with the :option:`--os-username`, :option:`--os-password`
and :option:`--os-tenant-name` options, but it's easier to just set them as
environment variables:

.. envvar:: OS_USERNAME or CINDER_USERNAME

    Your OpenStack Cinder username.

.. envvar:: OS_PASSWORD or CINDER_PASSWORD

    Your password.

.. envvar:: OS_TENANT_NAME or CINDER_PROJECT_ID

    The project (tenant) to work on.

.. envvar:: OS_AUTH_URL or CINDER_URL

    The OpenStack API server URL.

.. envvar:: OS_VOLUME_API_VERSION

    The OpenStack Block Storage API version.

For example, in Bash you'd use::

    export OS_USERNAME=yourname
    export OS_PASSWORD=yadayadayada
    export OS_TENANT_NAME=myproject
    export OS_AUTH_URL=http://...
    export OS_VOLUME_API_VERSION=1

From there, all shell commands take the form::

    cinder <command> [arguments...]

Run :program:`cinder help` to get a full list of all possible commands,
and run :program:`cinder help <command>` to get detailed help for that
command.
7  openstack-common.conf  Normal file
@@ -0,0 +1,7 @@
[DEFAULT]

# The list of modules to copy from openstack-common
modules=setup,version,strutils

# The base module to hold the copy of openstack.common
base=cinderclient
195  run_tests.sh  Executable file
@@ -0,0 +1,195 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
function usage {
|
||||||
|
echo "Usage: $0 [OPTION]..."
|
||||||
|
echo "Run python-cinderclient test suite"
|
||||||
|
echo ""
|
||||||
|
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
|
||||||
|
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
|
||||||
|
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
|
||||||
|
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
|
||||||
|
echo " -p, --pep8 Just run pep8"
|
||||||
|
echo " -P, --no-pep8 Don't run pep8"
|
||||||
|
echo " -c, --coverage Generate coverage report"
|
||||||
|
echo " -h, --help Print this usage message"
|
||||||
|
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
|
||||||
|
echo ""
|
||||||
|
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
|
||||||
|
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
|
||||||
|
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
|
||||||
|
exit
|
||||||
|
}
|
||||||
|
|
||||||
|
function process_option {
|
||||||
|
case "$1" in
|
||||||
|
-h|--help) usage;;
|
||||||
|
-V|--virtual-env) always_venv=1; never_venv=0;;
|
||||||
|
-N|--no-virtual-env) always_venv=0; never_venv=1;;
|
||||||
|
-s|--no-site-packages) no_site_packages=1;;
|
||||||
|
-f|--force) force=1;;
|
||||||
|
-p|--pep8) just_pep8=1;;
|
||||||
|
-P|--no-pep8) no_pep8=1;;
|
||||||
|
-c|--coverage) coverage=1;;
|
||||||
|
-d|--debug) debug=1;;
|
||||||
|
-*) testropts="$testropts $1";;
|
||||||
|
*) testrargs="$testrargs $1"
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
venv=.venv
|
||||||
|
with_venv=tools/with_venv.sh
|
||||||
|
always_venv=0
|
||||||
|
never_venv=0
|
||||||
|
force=0
|
||||||
|
no_site_packages=0
|
||||||
|
installvenvopts=
|
||||||
|
testrargs=
|
||||||
|
testropts=
|
||||||
|
wrapper=""
|
||||||
|
just_pep8=0
|
||||||
|
no_pep8=0
|
||||||
|
coverage=0
|
||||||
|
debug=0
|
||||||
|
|
||||||
|
LANG=en_US.UTF-8
|
||||||
|
LANGUAGE=en_US:en
|
||||||
|
LC_ALL=C
|
||||||
|
|
||||||
|
for arg in "$@"; do
|
||||||
|
process_option $arg
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ $no_site_packages -eq 1 ]; then
|
||||||
|
installvenvopts="--no-site-packages"
|
||||||
|
fi
|
||||||
|
|
||||||
|
function init_testr {
|
||||||
|
if [ ! -d .testrepository ]; then
|
||||||
|
${wrapper} testr init
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function run_tests {
|
||||||
|
# Cleanup *pyc
|
||||||
|
${wrapper} find . -type f -name "*.pyc" -delete
|
||||||
|
|
||||||
|
if [ $debug -eq 1 ]; then
|
||||||
|
if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
|
||||||
|
# Default to running all tests if specific test is not
|
||||||
|
# provided.
|
||||||
|
testrargs="discover ./tests"
|
||||||
|
fi
|
||||||
|
${wrapper} python -m testtools.run $testropts $testrargs
|
||||||
|
|
||||||
|
# Short circuit because all of the testr and coverage stuff
|
||||||
|
# below does not make sense when running testtools.run for
|
||||||
|
# debugging purposes.
|
||||||
|
return $?
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $coverage -eq 1 ]; then
|
||||||
|
# Do not test test_coverage_ext when gathering coverage.
|
||||||
|
if [ "x$testrargs" = "x" ]; then
|
||||||
|
testrargs="^(?!.*test_coverage_ext).*$"
|
||||||
|
fi
|
||||||
|
export PYTHON="${wrapper} coverage run --source cinderclient --parallel-mode"
|
||||||
|
fi
|
||||||
|
# Just run the test suites in current environment
|
||||||
|
set +e
|
||||||
|
TESTRTESTS="$TESTRTESTS $testrargs"
|
||||||
|
echo "Running \`${wrapper} $TESTRTESTS\`"
|
||||||
|
${wrapper} $TESTRTESTS
|
||||||
|
RESULT=$?
|
||||||
|
set -e
|
||||||
|
|
||||||
|
copy_subunit_log
|
||||||
|
|
||||||
|
return $RESULT
|
||||||
|
}
|
||||||
|
|
||||||
|
function copy_subunit_log {
|
||||||
|
LOGNAME=`cat .testrepository/next-stream`
|
||||||
|
LOGNAME=$(($LOGNAME - 1))
|
||||||
|
LOGNAME=".testrepository/${LOGNAME}"
|
||||||
|
cp $LOGNAME subunit.log
|
||||||
|
}
|
||||||
|
|
||||||
|
function run_pep8 {
|
||||||
|
echo "Running pep8 ..."
|
||||||
|
srcfiles="cinderclient tests"
|
||||||
|
# Just run PEP8 in current environment
|
||||||
|
#
|
||||||
|
# NOTE(sirp): W602 (deprecated 3-arg raise) is being ignored for the
|
||||||
|
# following reasons:
|
||||||
|
#
|
||||||
|
# 1. It's needed to preserve traceback information when re-raising
|
||||||
|
# exceptions; this is needed b/c Eventlet will clear exceptions when
|
||||||
|
# switching contexts.
|
||||||
|
#
|
||||||
|
# 2. There doesn't appear to be an alternative, "pep8-tool" compatible way of doing this
|
||||||
|
# in Python 2 (in Python 3 `with_traceback` could be used).
|
||||||
|
#
|
||||||
|
# 3. Can find no corroborating evidence that this is deprecated in Python 2
|
||||||
|
# other than what the PEP8 tool claims. It is deprecated in Python 3, so,
|
||||||
|
# perhaps the mistake was thinking that the deprecation applied to Python 2
|
||||||
|
# as well.
|
||||||
|
pep8_opts="--ignore=E202,W602 --repeat"
|
||||||
|
${wrapper} pep8 ${pep8_opts} ${srcfiles}
|
||||||
|
}
|
||||||
|
|
||||||
|
TESTRTESTS="testr run --parallel $testropts"
|
||||||
|
|
||||||
|
if [ $never_venv -eq 0 ]
|
||||||
|
then
|
||||||
|
# Remove the virtual environment if --force used
|
||||||
|
if [ $force -eq 1 ]; then
|
||||||
|
echo "Cleaning virtualenv..."
|
||||||
|
rm -rf ${venv}
|
||||||
|
fi
|
||||||
|
if [ -e ${venv} ]; then
|
||||||
|
wrapper="${with_venv}"
|
||||||
|
else
|
||||||
|
if [ $always_venv -eq 1 ]; then
|
||||||
|
# Automatically install the virtualenv
|
||||||
|
python tools/install_venv.py $installvenvopts
|
||||||
|
wrapper="${with_venv}"
|
||||||
|
else
|
||||||
|
echo -e "No virtual environment found...create one? (Y/n) \c"
|
||||||
|
read use_ve
|
||||||
|
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
|
||||||
|
# Install the virtualenv and run the test suite in it
|
||||||
|
python tools/install_venv.py $installvenvopts
|
||||||
|
wrapper=${with_venv}
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Delete old coverage data from previous runs
|
||||||
|
if [ $coverage -eq 1 ]; then
|
||||||
|
${wrapper} coverage erase
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $just_pep8 -eq 1 ]; then
|
||||||
|
run_pep8
|
||||||
|
exit
|
||||||
|
fi
|
||||||
|
|
||||||
|
init_testr
|
||||||
|
run_tests
|
||||||
|
|
||||||
|
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
|
||||||
|
# not when we're running tests individually.
|
||||||
|
if [ -z "$testrargs" ]; then
|
||||||
|
if [ $no_pep8 -eq 0 ]; then
|
||||||
|
run_pep8
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $coverage -eq 1 ]; then
|
||||||
|
echo "Generating coverage report in covhtml/"
|
||||||
|
${wrapper} coverage combine
|
||||||
|
${wrapper} coverage html --include='cinderclient/*' --omit='cinderclient/openstack/common/*' -d covhtml -i
|
||||||
|
fi
|
7  setup.cfg  Normal file
@@ -0,0 +1,7 @@
[build_sphinx]
all_files = 1
source-dir = doc/source
build-dir = doc/build

[upload_sphinx]
upload-dir = doc/build/html
60  setup.py  Normal file
@@ -0,0 +1,60 @@
# Copyright 2011 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import setuptools


from cinderclient.openstack.common import setup

requires = setup.parse_requirements()
depend_links = setup.parse_dependency_links()
tests_require = setup.parse_requirements(['tools/test-requires'])
project = 'python-cinderclient'


def read_file(file_name):
    return open(os.path.join(os.path.dirname(__file__), file_name)).read()


setuptools.setup(
    name=project,
    version=setup.get_version(project),
    author="OpenStack Contributors",
    author_email="openstack-dev@lists.openstack.org",
    description="Client library for OpenStack Cinder API.",
    long_description=read_file("README.rst"),
    license="Apache License, Version 2.0",
    url="https://github.com/openstack/python-cinderclient",
    packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
    cmdclass=setup.get_cmdclass(),
    install_requires=requires,
    tests_require=tests_require,
    setup_requires=['setuptools-git>=0.4'],
    include_package_data=True,
    dependency_links=depend_links,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: OpenStack",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python"
    ],
    entry_points={
        "console_scripts": ["cinder = cinderclient.shell:main"]
    }
)
0  tests/__init__.py  Normal file
72  tests/fakes.py  Normal file
@@ -0,0 +1,72 @@
"""
A fake server that "responds" to API methods with pre-canned responses.

All of these responses come from the spec, so if for some reason the spec's
wrong the tests might raise AssertionError. I've indicated in comments the
places where actual behavior differs from the spec.
"""


def assert_has_keys(dict, required=[], optional=[]):
    keys = dict.keys()
    for k in required:
        try:
            assert k in keys
        except AssertionError:
            extra_keys = set(keys).difference(set(required + optional))
            raise AssertionError("found unexpected keys: %s" %
                                 list(extra_keys))


class FakeClient(object):

    def assert_called(self, method, url, body=None, pos=-1, **kwargs):
        """
        Assert that an API method was just called.
        """
        expected = (method, url)
        called = self.client.callstack[pos][0:2]

        assert self.client.callstack, ("Expected %s %s but no calls "
                                       "were made." % expected)

        assert expected == called, 'Expected %s %s; got %s %s' % (
            expected + called)

        if body is not None:
            assert self.client.callstack[pos][2] == body

    def assert_called_anytime(self, method, url, body=None):
        """
        Assert that an API method was called anytime in the test.
        """
        expected = (method, url)

        assert self.client.callstack, ("Expected %s %s but no calls "
                                       "were made." % expected)

        found = False
        for entry in self.client.callstack:
            if expected == entry[0:2]:
                found = True
                break

        assert found, 'Expected %s %s; got %s' % (
            expected, self.client.callstack)

        if body is not None:
            try:
                assert entry[2] == body
            except AssertionError:
                print entry[2]
                print "!="
                print body
                raise

        self.client.callstack = []

    def clear_callstack(self):
        self.client.callstack = []

    def authenticate(self):
        pass
48  tests/test_base.py  Normal file
@@ -0,0 +1,48 @@
from cinderclient import base
from cinderclient import exceptions
from cinderclient.v1 import volumes
from tests import utils
from tests.v1 import fakes


cs = fakes.FakeClient()


class BaseTest(utils.TestCase):

    def test_resource_repr(self):
        r = base.Resource(None, dict(foo="bar", baz="spam"))
        self.assertEqual(repr(r), "<Resource baz=spam, foo=bar>")

    def test_getid(self):
        self.assertEqual(base.getid(4), 4)

        class TmpObject(object):
            id = 4
        self.assertEqual(base.getid(TmpObject), 4)

    def test_eq(self):
        # Two resources of the same type with the same id: equal
        r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
        r2 = base.Resource(None, {'id': 1, 'name': 'hello'})
        self.assertEqual(r1, r2)

        # Two resources of different types: never equal
        r1 = base.Resource(None, {'id': 1})
        r2 = volumes.Volume(None, {'id': 1})
        self.assertNotEqual(r1, r2)

        # Two resources with no ID: equal if their info is equal
        r1 = base.Resource(None, {'name': 'joe', 'age': 12})
        r2 = base.Resource(None, {'name': 'joe', 'age': 12})
        self.assertEqual(r1, r2)

    def test_findall_invalid_attribute(self):
        # Make sure findall with an invalid attribute doesn't cause errors.
        # The following should not raise an exception.
        cs.volumes.findall(vegetable='carrot')

        # However, find() should raise an error
        self.assertRaises(exceptions.NotFound,
                          cs.volumes.find,
                          vegetable='carrot')
tests/test_client.py (new file, 20 lines)
@@ -0,0 +1,20 @@
|
|||||||
|
|
||||||
|
import cinderclient.client
|
||||||
|
import cinderclient.v1.client
|
||||||
|
import cinderclient.v2.client
|
||||||
|
from tests import utils
|
||||||
|
|
||||||
|
|
||||||
|
class ClientTest(utils.TestCase):
|
||||||
|
|
||||||
|
def test_get_client_class_v1(self):
|
||||||
|
output = cinderclient.client.get_client_class('1')
|
||||||
|
self.assertEqual(output, cinderclient.v1.client.Client)
|
||||||
|
|
||||||
|
def test_get_client_class_v2(self):
|
||||||
|
output = cinderclient.client.get_client_class('2')
|
||||||
|
self.assertEqual(output, cinderclient.v2.client.Client)
|
||||||
|
|
||||||
|
def test_get_client_class_unknown(self):
|
||||||
|
self.assertRaises(cinderclient.exceptions.UnsupportedVersion,
|
||||||
|
cinderclient.client.get_client_class, '0')
|
tests/test_http.py (new file, 191 lines)
@@ -0,0 +1,191 @@
|
|||||||
|
import mock
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from cinderclient import client
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from tests import utils
|
||||||
|
|
||||||
|
|
||||||
|
fake_response = utils.TestResponse({
|
||||||
|
"status_code": 200,
|
||||||
|
"text": '{"hi": "there"}',
|
||||||
|
})
|
||||||
|
mock_request = mock.Mock(return_value=(fake_response))
|
||||||
|
|
||||||
|
bad_400_response = utils.TestResponse({
|
||||||
|
"status_code": 400,
|
||||||
|
"text": '{"error": {"message": "n/a", "details": "Terrible!"}}',
|
||||||
|
})
|
||||||
|
bad_400_request = mock.Mock(return_value=(bad_400_response))
|
||||||
|
|
||||||
|
bad_401_response = utils.TestResponse({
|
||||||
|
"status_code": 401,
|
||||||
|
"text": '{"error": {"message": "FAILED!", "details": "DETAILS!"}}',
|
||||||
|
})
|
||||||
|
bad_401_request = mock.Mock(return_value=(bad_401_response))
|
||||||
|
|
||||||
|
bad_500_response = utils.TestResponse({
|
||||||
|
"status_code": 500,
|
||||||
|
"text": '{"error": {"message": "FAILED!", "details": "DETAILS!"}}',
|
||||||
|
})
|
||||||
|
bad_500_request = mock.Mock(return_value=(bad_500_response))
|
||||||
|
|
||||||
|
|
||||||
|
def get_client(retries=0):
|
||||||
|
cl = client.HTTPClient("username", "password",
|
||||||
|
"project_id", "auth_test", retries=retries)
|
||||||
|
return cl
|
||||||
|
|
||||||
|
|
||||||
|
def get_authed_client(retries=0):
|
||||||
|
cl = get_client(retries=retries)
|
||||||
|
cl.management_url = "http://example.com"
|
||||||
|
cl.auth_token = "token"
|
||||||
|
return cl
|
||||||
|
|
||||||
|
|
||||||
|
class ClientTest(utils.TestCase):
|
||||||
|
|
||||||
|
def test_get(self):
|
||||||
|
cl = get_authed_client()
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
@mock.patch('time.time', mock.Mock(return_value=1234))
|
||||||
|
def test_get_call():
|
||||||
|
resp, body = cl.get("/hi")
|
||||||
|
headers = {"X-Auth-Token": "token",
|
||||||
|
"X-Auth-Project-Id": "project_id",
|
||||||
|
"User-Agent": cl.USER_AGENT,
|
||||||
|
'Accept': 'application/json', }
|
||||||
|
mock_request.assert_called_with(
|
||||||
|
"GET",
|
||||||
|
"http://example.com/hi",
|
||||||
|
headers=headers,
|
||||||
|
**self.TEST_REQUEST_BASE)
|
||||||
|
# Automatic JSON parsing
|
||||||
|
self.assertEqual(body, {"hi": "there"})
|
||||||
|
|
||||||
|
test_get_call()
|
||||||
|
|
||||||
|
def test_get_reauth_0_retries(self):
|
||||||
|
cl = get_authed_client(retries=0)
|
||||||
|
|
||||||
|
self.requests = [bad_401_request, mock_request]
|
||||||
|
|
||||||
|
def request(*args, **kwargs):
|
||||||
|
next_request = self.requests.pop(0)
|
||||||
|
return next_request(*args, **kwargs)
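# Each call to request() pops the next canned handler off self.requests, so
# asserting self.requests == [] at the end proves every expected request
# (the 401 and the retried GET) was made exactly once.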
|
||||||
|
|
||||||
|
def reauth():
|
||||||
|
cl.management_url = "http://example.com"
|
||||||
|
cl.auth_token = "token"
|
||||||
|
|
||||||
|
@mock.patch.object(cl, 'authenticate', reauth)
|
||||||
|
@mock.patch.object(requests, "request", request)
|
||||||
|
@mock.patch('time.time', mock.Mock(return_value=1234))
|
||||||
|
def test_get_call():
|
||||||
|
resp, body = cl.get("/hi")
|
||||||
|
|
||||||
|
test_get_call()
|
||||||
|
self.assertEqual(self.requests, [])
|
||||||
|
|
||||||
|
def test_get_retry_500(self):
|
||||||
|
cl = get_authed_client(retries=1)
|
||||||
|
|
||||||
|
self.requests = [bad_500_request, mock_request]
|
||||||
|
|
||||||
|
def request(*args, **kwargs):
|
||||||
|
next_request = self.requests.pop(0)
|
||||||
|
return next_request(*args, **kwargs)
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", request)
|
||||||
|
@mock.patch('time.time', mock.Mock(return_value=1234))
|
||||||
|
def test_get_call():
|
||||||
|
resp, body = cl.get("/hi")
|
||||||
|
|
||||||
|
test_get_call()
|
||||||
|
self.assertEqual(self.requests, [])
|
||||||
|
|
||||||
|
def test_retry_limit(self):
|
||||||
|
cl = get_authed_client(retries=1)
|
||||||
|
|
||||||
|
self.requests = [bad_500_request, bad_500_request, mock_request]
|
||||||
|
|
||||||
|
def request(*args, **kwargs):
|
||||||
|
next_request = self.requests.pop(0)
|
||||||
|
return next_request(*args, **kwargs)
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", request)
|
||||||
|
@mock.patch('time.time', mock.Mock(return_value=1234))
|
||||||
|
def test_get_call():
|
||||||
|
resp, body = cl.get("/hi")
|
||||||
|
|
||||||
|
self.assertRaises(exceptions.ClientException, test_get_call)
|
||||||
|
self.assertEqual(self.requests, [mock_request])
|
||||||
|
|
||||||
|
def test_get_no_retry_400(self):
|
||||||
|
cl = get_authed_client(retries=0)
|
||||||
|
|
||||||
|
self.requests = [bad_400_request, mock_request]
|
||||||
|
|
||||||
|
def request(*args, **kwargs):
|
||||||
|
next_request = self.requests.pop(0)
|
||||||
|
return next_request(*args, **kwargs)
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", request)
|
||||||
|
@mock.patch('time.time', mock.Mock(return_value=1234))
|
||||||
|
def test_get_call():
|
||||||
|
resp, body = cl.get("/hi")
|
||||||
|
|
||||||
|
self.assertRaises(exceptions.BadRequest, test_get_call)
|
||||||
|
self.assertEqual(self.requests, [mock_request])
|
||||||
|
|
||||||
|
def test_get_retry_400_socket(self):
|
||||||
|
cl = get_authed_client(retries=1)
|
||||||
|
|
||||||
|
self.requests = [bad_400_request, mock_request]
|
||||||
|
|
||||||
|
def request(*args, **kwargs):
|
||||||
|
next_request = self.requests.pop(0)
|
||||||
|
return next_request(*args, **kwargs)
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", request)
|
||||||
|
@mock.patch('time.time', mock.Mock(return_value=1234))
|
||||||
|
def test_get_call():
|
||||||
|
resp, body = cl.get("/hi")
|
||||||
|
|
||||||
|
test_get_call()
|
||||||
|
self.assertEqual(self.requests, [])
|
||||||
|
|
||||||
|
def test_post(self):
|
||||||
|
cl = get_authed_client()
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_post_call():
|
||||||
|
cl.post("/hi", body=[1, 2, 3])
|
||||||
|
headers = {
|
||||||
|
"X-Auth-Token": "token",
|
||||||
|
"X-Auth-Project-Id": "project_id",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
'Accept': 'application/json',
|
||||||
|
"User-Agent": cl.USER_AGENT
|
||||||
|
}
|
||||||
|
mock_request.assert_called_with(
|
||||||
|
"POST",
|
||||||
|
"http://example.com/hi",
|
||||||
|
headers=headers,
|
||||||
|
data='[1, 2, 3]',
|
||||||
|
**self.TEST_REQUEST_BASE)
|
||||||
|
|
||||||
|
test_post_call()
|
||||||
|
|
||||||
|
def test_auth_failure(self):
|
||||||
|
cl = get_client()
|
||||||
|
|
||||||
|
# response must not have x-server-management-url header
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
self.assertRaises(exceptions.AuthorizationFailure, cl.authenticate)
|
||||||
|
|
||||||
|
test_auth_call()
|
tests/test_service_catalog.py (new file, 127 lines)
@@ -0,0 +1,127 @@
|
|||||||
|
from cinderclient import exceptions
|
||||||
|
from cinderclient import service_catalog
|
||||||
|
from tests import utils
|
||||||
|
|
||||||
|
|
||||||
|
# Taken directly from keystone/content/common/samples/auth.json
|
||||||
|
# Do not edit this structure. Instead, grab the latest from there.
|
||||||
|
|
||||||
|
SERVICE_CATALOG = {
|
||||||
|
"access": {
|
||||||
|
"token": {
|
||||||
|
"id": "ab48a9efdfedb23ty3494",
|
||||||
|
"expires": "2010-11-01T03:32:15-05:00",
|
||||||
|
"tenant": {
|
||||||
|
"id": "345",
|
||||||
|
"name": "My Project"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"user": {
|
||||||
|
"id": "123",
|
||||||
|
"name": "jqsmith",
|
||||||
|
"roles": [
|
||||||
|
{
|
||||||
|
"id": "234",
|
||||||
|
"name": "compute:admin",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "235",
|
||||||
|
"name": "object-store:admin",
|
||||||
|
"tenantId": "1",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"roles_links": [],
|
||||||
|
},
|
||||||
|
"serviceCatalog": [
|
||||||
|
{
|
||||||
|
"name": "Cloud Servers",
|
||||||
|
"type": "compute",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"tenantId": "1",
|
||||||
|
"publicURL": "https://compute1.host/v1/1234",
|
||||||
|
"internalURL": "https://compute1.host/v1/1234",
|
||||||
|
"region": "North",
|
||||||
|
"versionId": "1.0",
|
||||||
|
"versionInfo": "https://compute1.host/v1/",
|
||||||
|
"versionList": "https://compute1.host/"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"tenantId": "2",
|
||||||
|
"publicURL": "https://compute1.host/v1/3456",
|
||||||
|
"internalURL": "https://compute1.host/v1/3456",
|
||||||
|
"region": "North",
|
||||||
|
"versionId": "1.1",
|
||||||
|
"versionInfo": "https://compute1.host/v1/",
|
||||||
|
"versionList": "https://compute1.host/"
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"endpoints_links": [],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Nova Volumes",
|
||||||
|
"type": "volume",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"tenantId": "1",
|
||||||
|
"publicURL": "https://volume1.host/v1/1234",
|
||||||
|
"internalURL": "https://volume1.host/v1/1234",
|
||||||
|
"region": "South",
|
||||||
|
"versionId": "1.0",
|
||||||
|
"versionInfo": "uri",
|
||||||
|
"versionList": "uri"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"tenantId": "2",
|
||||||
|
"publicURL": "https://volume1.host/v1/3456",
|
||||||
|
"internalURL": "https://volume1.host/v1/3456",
|
||||||
|
"region": "South",
|
||||||
|
"versionId": "1.1",
|
||||||
|
"versionInfo": "https://volume1.host/v1/",
|
||||||
|
"versionList": "https://volume1.host/"
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"endpoints_links": [
|
||||||
|
{
|
||||||
|
"rel": "next",
|
||||||
|
"href": "https://identity1.host/v2.0/endpoints"
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"serviceCatalog_links": [
|
||||||
|
{
|
||||||
|
"rel": "next",
|
||||||
|
"href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
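# ServiceCatalog.url_for (exercised below) filters endpoints by an
# attribute/value pair plus service_type: url_for('tenantId', '1',
# service_type='compute') returns the matching publicURL, a call that
# matches several endpoints raises AmbiguousEndpoints, and no match
# raises EndpointNotFound.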
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceCatalogTest(utils.TestCase):
|
||||||
|
def test_building_a_service_catalog(self):
|
||||||
|
sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)
|
||||||
|
|
||||||
|
self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
|
||||||
|
service_type='compute')
|
||||||
|
self.assertEquals(sc.url_for('tenantId', '1', service_type='compute'),
|
||||||
|
"https://compute1.host/v1/1234")
|
||||||
|
self.assertEquals(sc.url_for('tenantId', '2', service_type='compute'),
|
||||||
|
"https://compute1.host/v1/3456")
|
||||||
|
|
||||||
|
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
|
||||||
|
"region", "South", service_type='compute')
|
||||||
|
|
||||||
|
def test_alternate_service_type(self):
|
||||||
|
sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)
|
||||||
|
|
||||||
|
self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
|
||||||
|
service_type='volume')
|
||||||
|
self.assertEquals(sc.url_for('tenantId', '1', service_type='volume'),
|
||||||
|
"https://volume1.host/v1/1234")
|
||||||
|
self.assertEquals(sc.url_for('tenantId', '2', service_type='volume'),
|
||||||
|
"https://volume1.host/v1/3456")
|
||||||
|
|
||||||
|
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
|
||||||
|
"region", "North", service_type='volume')
|
tests/test_shell.py (new file, 68 lines)
@@ -0,0 +1,68 @@
|
|||||||
|
import cStringIO
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
from testtools import matchers
|
||||||
|
|
||||||
|
from cinderclient import exceptions
|
||||||
|
import cinderclient.shell
|
||||||
|
from tests import utils
|
||||||
|
|
||||||
|
|
||||||
|
class ShellTest(utils.TestCase):
|
||||||
|
|
||||||
|
FAKE_ENV = {
|
||||||
|
'OS_USERNAME': 'username',
|
||||||
|
'OS_PASSWORD': 'password',
|
||||||
|
'OS_TENANT_NAME': 'tenant_name',
|
||||||
|
'OS_AUTH_URL': 'http://no.where',
|
||||||
|
}
|
||||||
|
|
||||||
|
# Patch os.environ to avoid required auth info.
|
||||||
|
def setUp(self):
|
||||||
|
super(ShellTest, self).setUp()
|
||||||
|
for var in self.FAKE_ENV:
|
||||||
|
self.useFixture(fixtures.EnvironmentVariable(var,
|
||||||
|
self.FAKE_ENV[var]))
|
||||||
|
|
||||||
|
def shell(self, argstr):
|
||||||
|
orig = sys.stdout
|
||||||
|
try:
|
||||||
|
sys.stdout = cStringIO.StringIO()
|
||||||
|
_shell = cinderclient.shell.OpenStackCinderShell()
|
||||||
|
_shell.main(argstr.split())
|
||||||
|
except SystemExit:
|
||||||
|
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||||
|
self.assertEqual(exc_value.code, 0)
|
||||||
|
finally:
|
||||||
|
out = sys.stdout.getvalue()
|
||||||
|
sys.stdout.close()
|
||||||
|
sys.stdout = orig
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
def test_help_unknown_command(self):
|
||||||
|
self.assertRaises(exceptions.CommandError, self.shell, 'help foofoo')
|
||||||
|
|
||||||
|
def test_help(self):
|
||||||
|
required = [
|
||||||
|
'.*?^usage: ',
|
||||||
|
'.*?(?m)^\s+create\s+Add a new volume.',
|
||||||
|
'.*?(?m)^See "cinder help COMMAND" for help on a specific command',
|
||||||
|
]
|
||||||
|
help_text = self.shell('help')
|
||||||
|
for r in required:
|
||||||
|
self.assertThat(help_text,
|
||||||
|
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
|
||||||
|
|
||||||
|
def test_help_on_subcommand(self):
|
||||||
|
required = [
|
||||||
|
'.*?^usage: cinder list',
|
||||||
|
'.*?(?m)^List all the volumes.',
|
||||||
|
]
|
||||||
|
help_text = self.shell('help list')
|
||||||
|
for r in required:
|
||||||
|
self.assertThat(help_text,
|
||||||
|
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
|
tests/test_utils.py (new file, 75 lines)
@@ -0,0 +1,75 @@
|
|||||||
|
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from cinderclient import utils
|
||||||
|
from cinderclient import base
|
||||||
|
from tests import utils as test_utils
|
||||||
|
|
||||||
|
UUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0'
|
||||||
|
|
||||||
|
|
||||||
|
class FakeResource(object):
|
||||||
|
|
||||||
|
def __init__(self, _id, properties):
|
||||||
|
self.id = _id
|
||||||
|
try:
|
||||||
|
self.name = properties['name']
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
try:
|
||||||
|
self.display_name = properties['display_name']
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class FakeManager(base.ManagerWithFind):
|
||||||
|
|
||||||
|
resource_class = FakeResource
|
||||||
|
|
||||||
|
resources = [
|
||||||
|
FakeResource('1234', {'name': 'entity_one'}),
|
||||||
|
FakeResource(UUID, {'name': 'entity_two'}),
|
||||||
|
FakeResource('4242', {'display_name': 'entity_three'}),
|
||||||
|
FakeResource('5678', {'name': '9876'})
|
||||||
|
]
|
||||||
|
|
||||||
|
def get(self, resource_id):
|
||||||
|
for resource in self.resources:
|
||||||
|
if resource.id == str(resource_id):
|
||||||
|
return resource
|
||||||
|
raise exceptions.NotFound(resource_id)
|
||||||
|
|
||||||
|
def list(self):
|
||||||
|
return self.resources
|
||||||
|
|
||||||
|
|
||||||
|
class FindResourceTestCase(test_utils.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(FindResourceTestCase, self).setUp()
|
||||||
|
self.manager = FakeManager(None)
|
||||||
|
|
||||||
|
def test_find_none(self):
|
||||||
|
self.assertRaises(exceptions.CommandError,
|
||||||
|
utils.find_resource,
|
||||||
|
self.manager,
|
||||||
|
'asdf')
|
||||||
|
|
||||||
|
def test_find_by_integer_id(self):
|
||||||
|
output = utils.find_resource(self.manager, 1234)
|
||||||
|
self.assertEqual(output, self.manager.get('1234'))
|
||||||
|
|
||||||
|
def test_find_by_str_id(self):
|
||||||
|
output = utils.find_resource(self.manager, '1234')
|
||||||
|
self.assertEqual(output, self.manager.get('1234'))
|
||||||
|
|
||||||
|
def test_find_by_uuid(self):
|
||||||
|
output = utils.find_resource(self.manager, UUID)
|
||||||
|
self.assertEqual(output, self.manager.get(UUID))
|
||||||
|
|
||||||
|
def test_find_by_str_name(self):
|
||||||
|
output = utils.find_resource(self.manager, 'entity_one')
|
||||||
|
self.assertEqual(output, self.manager.get('1234'))
|
||||||
|
|
||||||
|
def test_find_by_str_displayname(self):
|
||||||
|
output = utils.find_resource(self.manager, 'entity_three')
|
||||||
|
self.assertEqual(output, self.manager.get('4242'))
|
tests/utils.py (new file, 45 lines)
@@ -0,0 +1,45 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
import fixtures
|
||||||
|
import requests
|
||||||
|
import testtools
|
||||||
|
|
||||||
|
|
||||||
|
class TestCase(testtools.TestCase):
|
||||||
|
TEST_REQUEST_BASE = {
|
||||||
|
'verify': True,
|
||||||
|
}
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(TestCase, self).setUp()
|
||||||
|
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
|
||||||
|
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
|
||||||
|
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
|
||||||
|
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
|
||||||
|
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
|
||||||
|
os.environ.get('OS_STDERR_CAPTURE') == '1'):
|
||||||
|
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
|
||||||
|
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
|
||||||
|
|
||||||
|
|
||||||
|
class TestResponse(requests.Response):
|
||||||
|
""" Class used to wrap requests.Response and provide some
|
||||||
|
convenience to initialize with a dict """
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self._text = None
|
||||||
|
super(TestResponse, self).__init__()
|
||||||
|
if isinstance(data, dict):
|
||||||
|
self.status_code = data.get('status_code', None)
|
||||||
|
self.headers = data.get('headers', None)
|
||||||
|
# Fake the text attribute to streamline Response creation
|
||||||
|
self._text = data.get('text', None)
|
||||||
|
else:
|
||||||
|
self.status_code = data
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return self.__dict__ == other.__dict__
|
||||||
|
|
||||||
|
@property
|
||||||
|
def text(self):
|
||||||
|
return self._text
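# Typical usage (see tests/test_http.py): build a canned response from a
# plain dict and hand it to a mock in place of requests.request, e.g.
#
#     fake_response = utils.TestResponse({
#         "status_code": 200,
#         "text": '{"hi": "there"}',
#     })
#     mock_request = mock.Mock(return_value=fake_response)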
|
tests/v1/__init__.py (new file, 0 lines)
tests/v1/contrib/__init__.py (new file, 0 lines)
tests/v1/contrib/test_list_extensions.py (new file, 21 lines)
@@ -0,0 +1,21 @@
|
|||||||
|
from cinderclient import extension
|
||||||
|
from cinderclient.v1.contrib import list_extensions
|
||||||
|
|
||||||
|
from tests import utils
|
||||||
|
from tests.v1 import fakes
|
||||||
|
|
||||||
|
|
||||||
|
extensions = [
|
||||||
|
extension.Extension(list_extensions.__name__.split(".")[-1],
|
||||||
|
list_extensions),
|
||||||
|
]
|
||||||
|
cs = fakes.FakeClient(extensions=extensions)
|
||||||
|
|
||||||
|
|
||||||
|
class ListExtensionsTests(utils.TestCase):
|
||||||
|
def test_list_extensions(self):
|
||||||
|
all_exts = cs.list_extensions.show_all()
|
||||||
|
cs.assert_called('GET', '/extensions')
|
||||||
|
self.assertTrue(len(all_exts) > 0)
|
||||||
|
for r in all_exts:
|
||||||
|
self.assertTrue(len(r.summary) > 0)
|
tests/v1/fakes.py (new file, 404 lines)
@@ -0,0 +1,404 @@
|
|||||||
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
||||||
|
# Copyright 2011 OpenStack, LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import urlparse
|
||||||
|
|
||||||
|
from cinderclient import client as base_client
|
||||||
|
from cinderclient.v1 import client
|
||||||
|
from tests import fakes
|
||||||
|
import tests.utils as utils
|
||||||
|
|
||||||
|
|
||||||
|
def _stub_volume(**kwargs):
|
||||||
|
volume = {
|
||||||
|
'id': '1234',
|
||||||
|
'display_name': None,
|
||||||
|
'display_description': None,
|
||||||
|
"attachments": [],
|
||||||
|
"bootable": "false",
|
||||||
|
"availability_zone": "cinder",
|
||||||
|
"created_at": "2012-08-27T00:00:00.000000",
|
||||||
|
"display_description": None,
|
||||||
|
"display_name": None,
|
||||||
|
"id": '00000000-0000-0000-0000-000000000000',
|
||||||
|
"metadata": {},
|
||||||
|
"size": 1,
|
||||||
|
"snapshot_id": None,
|
||||||
|
"status": "available",
|
||||||
|
"volume_type": "None",
|
||||||
|
}
|
||||||
|
volume.update(kwargs)
|
||||||
|
return volume
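# Handlers override individual fields through kwargs, e.g.
# _stub_volume(id='1234') returns the same dict with only 'id' changed.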
|
||||||
|
|
||||||
|
|
||||||
|
def _stub_snapshot(**kwargs):
|
||||||
|
snapshot = {
|
||||||
|
"created_at": "2012-08-28T16:30:31.000000",
|
||||||
|
"display_description": None,
|
||||||
|
"display_name": None,
|
||||||
|
"id": '11111111-1111-1111-1111-111111111111',
|
||||||
|
"size": 1,
|
||||||
|
"status": "available",
|
||||||
|
"volume_id": '00000000-0000-0000-0000-000000000000',
|
||||||
|
}
|
||||||
|
snapshot.update(kwargs)
|
||||||
|
return snapshot
|
||||||
|
|
||||||
|
|
||||||
|
def _self_href(base_uri, tenant_id, backup_id):
|
||||||
|
return '%s/v1/%s/backups/%s' % (base_uri, tenant_id, backup_id)
|
||||||
|
|
||||||
|
|
||||||
|
def _bookmark_href(base_uri, tenant_id, backup_id):
|
||||||
|
return '%s/%s/backups/%s' % (base_uri, tenant_id, backup_id)
|
||||||
|
|
||||||
|
|
||||||
|
def _stub_backup_full(id, base_uri, tenant_id):
|
||||||
|
return {
|
||||||
|
'id': id,
|
||||||
|
'name': 'backup',
|
||||||
|
'description': 'nightly backup',
|
||||||
|
'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b',
|
||||||
|
'container': 'volumebackups',
|
||||||
|
'object_count': 220,
|
||||||
|
'size': 10,
|
||||||
|
'availability_zone': 'az1',
|
||||||
|
'created_at': '2013-04-12T08:16:37.000000',
|
||||||
|
'status': 'available',
|
||||||
|
'links': [
|
||||||
|
{
|
||||||
|
'href': _self_href(base_uri, tenant_id, id),
|
||||||
|
'rel': 'self'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'href': _bookmark_href(base_uri, tenant_id, id),
|
||||||
|
'rel': 'bookmark'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _stub_backup(id, base_uri, tenant_id):
|
||||||
|
return {
|
||||||
|
'id': id,
|
||||||
|
'name': 'backup',
|
||||||
|
'links': [
|
||||||
|
{
|
||||||
|
'href': _self_href(base_uri, tenant_id, id),
|
||||||
|
'rel': 'self'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'href': _bookmark_href(base_uri, tenant_id, id),
|
||||||
|
'rel': 'bookmark'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _stub_restore():
|
||||||
|
return {'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b'}
|
||||||
|
|
||||||
|
|
||||||
|
class FakeClient(fakes.FakeClient, client.Client):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
client.Client.__init__(self, 'username', 'password',
|
||||||
|
'project_id', 'auth_url',
|
||||||
|
extensions=kwargs.get('extensions'))
|
||||||
|
self.client = FakeHTTPClient(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class FakeHTTPClient(base_client.HTTPClient):
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.username = 'username'
|
||||||
|
self.password = 'password'
|
||||||
|
self.auth_url = 'auth_url'
|
||||||
|
self.callstack = []
|
||||||
|
|
||||||
|
def _cs_request(self, url, method, **kwargs):
|
||||||
|
# Check that certain things are called correctly
|
||||||
|
if method in ['GET', 'DELETE']:
|
||||||
|
assert 'body' not in kwargs
|
||||||
|
elif method == 'PUT':
|
||||||
|
assert 'body' in kwargs
|
||||||
|
|
||||||
|
# Call the method
|
||||||
|
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
|
||||||
|
kwargs.update(args)
|
||||||
|
munged_url = url.rsplit('?', 1)[0]
|
||||||
|
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
|
||||||
|
munged_url = munged_url.replace('-', '_')
|
||||||
|
|
||||||
|
callback = "%s_%s" % (method.lower(), munged_url)
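# e.g. "GET /volumes/1234?foo=bar" dispatches to the fake method
# "get_volumes_1234" defined below.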
|
||||||
|
|
||||||
|
if not hasattr(self, callback):
|
||||||
|
raise AssertionError('Called unknown API method: %s %s, '
|
||||||
|
'expected fakes method name: %s' %
|
||||||
|
(method, url, callback))
|
||||||
|
|
||||||
|
# Note the call
|
||||||
|
self.callstack.append((method, url, kwargs.get('body', None)))
|
||||||
|
status, headers, body = getattr(self, callback)(**kwargs)
|
||||||
|
r = utils.TestResponse({
|
||||||
|
"status_code": status,
|
||||||
|
"text": body,
|
||||||
|
"headers": headers,
|
||||||
|
})
|
||||||
|
return r, body
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Snapshots
|
||||||
|
#
|
||||||
|
|
||||||
|
def get_snapshots_detail(self, **kw):
|
||||||
|
return (200, {}, {'snapshots': [
|
||||||
|
_stub_snapshot(),
|
||||||
|
]})
|
||||||
|
|
||||||
|
def get_snapshots_1234(self, **kw):
|
||||||
|
return (200, {}, {'snapshot': _stub_snapshot(id='1234')})
|
||||||
|
|
||||||
|
def put_snapshots_1234(self, **kw):
|
||||||
|
snapshot = _stub_snapshot(id='1234')
|
||||||
|
snapshot.update(kw['body']['snapshot'])
|
||||||
|
return (200, {}, {'snapshot': snapshot})
|
||||||
|
|
||||||
|
#
|
||||||
|
# Volumes
|
||||||
|
#
|
||||||
|
|
||||||
|
def put_volumes_1234(self, **kw):
|
||||||
|
volume = _stub_volume(id='1234')
|
||||||
|
volume.update(kw['body']['volume'])
|
||||||
|
return (200, {}, {'volume': volume})
|
||||||
|
|
||||||
|
def get_volumes(self, **kw):
|
||||||
|
return (200, {}, {"volumes": [
|
||||||
|
{'id': 1234, 'name': 'sample-volume'},
|
||||||
|
{'id': 5678, 'name': 'sample-volume2'}
|
||||||
|
]})
|
||||||
|
|
||||||
|
# TODO(jdg): This will need to change
|
||||||
|
# at the very least it's not complete
|
||||||
|
def get_volumes_detail(self, **kw):
|
||||||
|
return (200, {}, {"volumes": [
|
||||||
|
{'id': 1234,
|
||||||
|
'name': 'sample-volume',
|
||||||
|
'attachments': [{'server_id': 1234}]},
|
||||||
|
]})
|
||||||
|
|
||||||
|
def get_volumes_1234(self, **kw):
|
||||||
|
r = {'volume': self.get_volumes_detail()[2]['volumes'][0]}
|
||||||
|
return (200, {}, r)
|
||||||
|
|
||||||
|
def post_volumes_1234_action(self, body, **kw):
|
||||||
|
_body = None
|
||||||
|
resp = 202
|
||||||
|
assert len(body.keys()) == 1
|
||||||
|
action = body.keys()[0]
|
||||||
|
if action == 'os-attach':
|
||||||
|
assert body[action].keys() == ['instance_uuid', 'mountpoint']
|
||||||
|
elif action == 'os-detach':
|
||||||
|
assert body[action] is None
|
||||||
|
elif action == 'os-reserve':
|
||||||
|
assert body[action] is None
|
||||||
|
elif action == 'os-unreserve':
|
||||||
|
assert body[action] is None
|
||||||
|
elif action == 'os-initialize_connection':
|
||||||
|
assert body[action].keys() == ['connector']
|
||||||
|
return (202, {}, {'connection_info': 'foos'})
|
||||||
|
elif action == 'os-terminate_connection':
|
||||||
|
assert body[action].keys() == ['connector']
|
||||||
|
elif action == 'os-begin_detaching':
|
||||||
|
assert body[action] is None
|
||||||
|
elif action == 'os-roll_detaching':
|
||||||
|
assert body[action] is None
|
||||||
|
else:
|
||||||
|
raise AssertionError("Unexpected server action: %s" % action)
|
||||||
|
return (resp, {}, _body)
|
||||||
|
|
||||||
|
def post_volumes(self, **kw):
|
||||||
|
return (202, {}, {'volume': {}})
|
||||||
|
|
||||||
|
def delete_volumes_1234(self, **kw):
|
||||||
|
return (202, {}, None)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Quotas
|
||||||
|
#
|
||||||
|
|
||||||
|
def get_os_quota_sets_test(self, **kw):
|
||||||
|
return (200, {}, {'quota_set': {
|
||||||
|
'tenant_id': 'test',
|
||||||
|
'metadata_items': [],
|
||||||
|
'volumes': 1,
|
||||||
|
'snapshots': 1,
|
||||||
|
'gigabytes': 1}})
|
||||||
|
|
||||||
|
def get_os_quota_sets_test_defaults(self):
|
||||||
|
return (200, {}, {'quota_set': {
|
||||||
|
'tenant_id': 'test',
|
||||||
|
'metadata_items': [],
|
||||||
|
'volumes': 1,
|
||||||
|
'snapshots': 1,
|
||||||
|
'gigabytes': 1}})
|
||||||
|
|
||||||
|
def put_os_quota_sets_test(self, body, **kw):
|
||||||
|
assert body.keys() == ['quota_set']
|
||||||
|
fakes.assert_has_keys(body['quota_set'],
|
||||||
|
required=['tenant_id'])
|
||||||
|
return (200, {}, {'quota_set': {
|
||||||
|
'tenant_id': 'test',
|
||||||
|
'metadata_items': [],
|
||||||
|
'volumes': 2,
|
||||||
|
'snapshots': 2,
|
||||||
|
'gigabytes': 1}})
|
||||||
|
|
||||||
|
#
|
||||||
|
# Quota Classes
|
||||||
|
#
|
||||||
|
|
||||||
|
def get_os_quota_class_sets_test(self, **kw):
|
||||||
|
return (200, {}, {'quota_class_set': {
|
||||||
|
'class_name': 'test',
|
||||||
|
'metadata_items': [],
|
||||||
|
'volumes': 1,
|
||||||
|
'snapshots': 1,
|
||||||
|
'gigabytes': 1}})
|
||||||
|
|
||||||
|
def put_os_quota_class_sets_test(self, body, **kw):
|
||||||
|
assert body.keys() == ['quota_class_set']
|
||||||
|
fakes.assert_has_keys(body['quota_class_set'],
|
||||||
|
required=['class_name'])
|
||||||
|
return (200, {}, {'quota_class_set': {
|
||||||
|
'class_name': 'test',
|
||||||
|
'metadata_items': [],
|
||||||
|
'volumes': 2,
|
||||||
|
'snapshots': 2,
|
||||||
|
'gigabytes': 1}})
|
||||||
|
|
||||||
|
#
|
||||||
|
# VolumeTypes
|
||||||
|
#
|
||||||
|
def get_types(self, **kw):
|
||||||
|
return (200, {}, {
|
||||||
|
'volume_types': [{'id': 1,
|
||||||
|
'name': 'test-type-1',
|
||||||
|
'extra_specs':{}},
|
||||||
|
{'id': 2,
|
||||||
|
'name': 'test-type-2',
|
||||||
|
'extra_specs':{}}]})
|
||||||
|
|
||||||
|
def get_types_1(self, **kw):
|
||||||
|
return (200, {}, {'volume_type': {'id': 1,
|
||||||
|
'name': 'test-type-1',
|
||||||
|
'extra_specs': {}}})
|
||||||
|
|
||||||
|
def post_types(self, body, **kw):
|
||||||
|
return (202, {}, {'volume_type': {'id': 3,
|
||||||
|
'name': 'test-type-3',
|
||||||
|
'extra_specs': {}}})
|
||||||
|
|
||||||
|
def post_types_1_extra_specs(self, body, **kw):
|
||||||
|
assert body.keys() == ['extra_specs']
|
||||||
|
return (200, {}, {'extra_specs': {'k': 'v'}})
|
||||||
|
|
||||||
|
def delete_types_1_extra_specs_k(self, **kw):
|
||||||
|
return(204, {}, None)
|
||||||
|
|
||||||
|
def delete_types_1(self, **kw):
|
||||||
|
return (202, {}, None)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Set/Unset metadata
|
||||||
|
#
|
||||||
|
def delete_volumes_1234_metadata_test_key(self, **kw):
|
||||||
|
return (204, {}, None)
|
||||||
|
|
||||||
|
def delete_volumes_1234_metadata_key1(self, **kw):
|
||||||
|
return (204, {}, None)
|
||||||
|
|
||||||
|
def delete_volumes_1234_metadata_key2(self, **kw):
|
||||||
|
return (204, {}, None)
|
||||||
|
|
||||||
|
def post_volumes_1234_metadata(self, **kw):
|
||||||
|
return (204, {}, {'metadata': {'test_key': 'test_value'}})
|
||||||
|
|
||||||
|
#
|
||||||
|
# List all extensions
|
||||||
|
#
|
||||||
|
def get_extensions(self, **kw):
|
||||||
|
exts = [
|
||||||
|
{
|
||||||
|
"alias": "FAKE-1",
|
||||||
|
"description": "Fake extension number 1",
|
||||||
|
"links": [],
|
||||||
|
"name": "Fake1",
|
||||||
|
"namespace": ("http://docs.openstack.org/"
|
||||||
|
"/ext/fake1/api/v1.1"),
|
||||||
|
"updated": "2011-06-09T00:00:00+00:00"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"alias": "FAKE-2",
|
||||||
|
"description": "Fake extension number 2",
|
||||||
|
"links": [],
|
||||||
|
"name": "Fake2",
|
||||||
|
"namespace": ("http://docs.openstack.org/"
|
||||||
|
"/ext/fake1/api/v1.1"),
|
||||||
|
"updated": "2011-06-09T00:00:00+00:00"
|
||||||
|
},
|
||||||
|
]
|
||||||
|
return (200, {}, {"extensions": exts, })
|
||||||
|
|
||||||
|
#
|
||||||
|
# VolumeBackups
|
||||||
|
#
|
||||||
|
|
||||||
|
def get_backups_76a17945_3c6f_435c_975b_b5685db10b62(self, **kw):
|
||||||
|
base_uri = 'http://localhost:8776'
|
||||||
|
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
|
||||||
|
backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
|
||||||
|
return (200, {},
|
||||||
|
{'backup': _stub_backup_full(backup1, base_uri, tenant_id)})
|
||||||
|
|
||||||
|
def get_backups_detail(self, **kw):
|
||||||
|
base_uri = 'http://localhost:8776'
|
||||||
|
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
|
||||||
|
backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
|
||||||
|
backup2 = 'd09534c6-08b8-4441-9e87-8976f3a8f699'
|
||||||
|
return (200, {},
|
||||||
|
{'backups': [
|
||||||
|
_stub_backup_full(backup1, base_uri, tenant_id),
|
||||||
|
_stub_backup_full(backup2, base_uri, tenant_id)]})
|
||||||
|
|
||||||
|
def delete_backups_76a17945_3c6f_435c_975b_b5685db10b62(self, **kw):
|
||||||
|
return (202, {}, None)
|
||||||
|
|
||||||
|
def post_backups(self, **kw):
|
||||||
|
base_uri = 'http://localhost:8776'
|
||||||
|
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
|
||||||
|
backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
|
||||||
|
return (202, {},
|
||||||
|
{'backup': _stub_backup(backup1, base_uri, tenant_id)})
|
||||||
|
|
||||||
|
def post_backups_76a17945_3c6f_435c_975b_b5685db10b62_restore(self, **kw):
|
||||||
|
return (200, {},
|
||||||
|
{'restore': _stub_restore()})
|
tests/v1/shares/__init__.py (new file, 0 lines)
tests/v1/shares/fakes.py (new file, 85 lines)
@@ -0,0 +1,85 @@
|
|||||||
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
||||||
|
# Copyright 2011 OpenStack, LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from cinderclient.v1 import shares as shares_ext_module
|
||||||
|
from cinderclient.v1 import client
|
||||||
|
from tests.v1 import fakes
|
||||||
|
|
||||||
|
|
||||||
|
class FakeClient(fakes.FakeClient):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
client.Client.__init__(self, 'username', 'password',
|
||||||
|
'project_id', 'auth_url',
|
||||||
|
extensions=kwargs.get('extensions'))
|
||||||
|
self.client = FakeHTTPClient(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class FakeHTTPClient(fakes.FakeHTTPClient):
|
||||||
|
|
||||||
|
def get_shares_1234(self, **kw):
|
||||||
|
share = {'share': {'id': 1234, 'name': 'sharename'}}
|
||||||
|
return (200, {}, share)
|
||||||
|
|
||||||
|
def get_shares_detail(self):
|
||||||
|
shares = {'shares': [{'id': 1234,
|
||||||
|
'name': 'sharename',
|
||||||
|
'attachments': [{'server_id': 111}]}]}
|
||||||
|
return (200, {}, shares)
|
||||||
|
|
||||||
|
def get_share_snapshots_1234(self, **kw):
|
||||||
|
snapshot = {'share-snapshot': {'id': 1234, 'name': 'sharename'}}
|
||||||
|
return (200, {}, snapshot)
|
||||||
|
|
||||||
|
def get_share_snapshots_detail(self):
|
||||||
|
snapshots = {'share-snapshots': [{
|
||||||
|
'id': 1234,
|
||||||
|
'created_at': '2012-08-27T00:00:00.000000',
|
||||||
|
'share_size': 1,
|
||||||
|
'share_id': 4321,
|
||||||
|
'status': 'available',
|
||||||
|
'name': 'sharename',
|
||||||
|
'display_description': 'description',
|
||||||
|
'share_proto': 'type',
|
||||||
|
'export_location': 'location',
|
||||||
|
}]}
|
||||||
|
return (200, {}, snapshots)
|
||||||
|
|
||||||
|
def post_shares_1234_action(self, body, **kw):
|
||||||
|
_body = None
|
||||||
|
resp = 202
|
||||||
|
assert len(body.keys()) == 1
|
||||||
|
action = body.keys()[0]
|
||||||
|
if action == 'os-allow_access':
|
||||||
|
assert body[action].keys() == ['access_type', 'access_to']
|
||||||
|
elif action == 'os-deny_access':
|
||||||
|
assert body[action].keys() == ['access_id']
|
||||||
|
elif action == 'os-access_list':
|
||||||
|
assert body[action] is None
|
||||||
|
else:
|
||||||
|
raise AssertionError("Unexpected share action: %s" % action)
|
||||||
|
return (resp, {}, _body)
|
||||||
|
|
||||||
|
def post_shares(self, **kwargs):
|
||||||
|
return (202, {}, {'share': {}})
|
||||||
|
|
||||||
|
def post_share_snapshots(self, **kwargs):
|
||||||
|
return (202, {}, {'share-snapshot': {}})
|
||||||
|
|
||||||
|
def delete_shares_1234(self, **kw):
|
||||||
|
return (202, {}, None)
|
||||||
|
|
||||||
|
def delete_share_snapshots_1234(self, **kwargs):
|
||||||
|
return (202, {}, None)
|
tests/v1/shares/test_share_snapshots.py (new file, 44 lines)
@@ -0,0 +1,44 @@
|
|||||||
|
# Copyright 2010 Jacob Kaplan-Moss
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinderclient import extension
|
||||||
|
from cinderclient.v1 import share_snapshots
|
||||||
|
|
||||||
|
from tests import utils
|
||||||
|
from tests.v1.shares import fakes
|
||||||
|
|
||||||
|
|
||||||
|
extensions = [
|
||||||
|
extension.Extension('share_snapshots', share_snapshots),
|
||||||
|
]
|
||||||
|
cs = fakes.FakeClient(extensions=extensions)
|
||||||
|
|
||||||
|
|
||||||
|
class ShareSnapshotsTest(utils.TestCase):
|
||||||
|
|
||||||
|
def test_create_share_snapshot(self):
|
||||||
|
cs.share_snapshots.create(1234)
|
||||||
|
cs.assert_called('POST', '/share-snapshots')
|
||||||
|
|
||||||
|
def test_delete_share(self):
|
||||||
|
snapshot = cs.share_snapshots.get(1234)
|
||||||
|
cs.share_snapshots.delete(snapshot)
|
||||||
|
cs.assert_called('DELETE', '/share-snapshots/1234')
|
||||||
|
|
||||||
|
def test_list_shares(self):
|
||||||
|
cs.share_snapshots.list()
|
||||||
|
cs.assert_called('GET', '/share-snapshots/detail')
|
tests/v1/shares/test_shares.py (new file, 54 lines)
@@ -0,0 +1,54 @@
|
|||||||
|
# Copyright 2010 Jacob Kaplan-Moss
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinderclient import extension
|
||||||
|
from cinderclient.v1 import shares
|
||||||
|
|
||||||
|
from tests import utils
|
||||||
|
from tests.v1.shares import fakes
|
||||||
|
|
||||||
|
|
||||||
|
extensions = [
|
||||||
|
extension.Extension('shares', shares),
|
||||||
|
]
|
||||||
|
cs = fakes.FakeClient(extensions=extensions)
|
||||||
|
|
||||||
|
|
||||||
|
class SharesTest(utils.TestCase):
|
||||||
|
|
||||||
|
def test_create_nfs_share(self):
|
||||||
|
cs.shares.create('nfs', 1)
|
||||||
|
cs.assert_called('POST', '/shares')
|
||||||
|
|
||||||
|
def test_create_cifs_share(self):
|
||||||
|
cs.shares.create('cifs', 2)
|
||||||
|
cs.assert_called('POST', '/shares')
|
||||||
|
|
||||||
|
def test_delete_share(self):
|
||||||
|
share = cs.shares.get('1234')
|
||||||
|
cs.shares.delete(share)
|
||||||
|
cs.assert_called('DELETE', '/shares/1234')
|
||||||
|
|
||||||
|
def test_list_shares(self):
|
||||||
|
cs.shares.list()
|
||||||
|
cs.assert_called('GET', '/shares/detail')
|
||||||
|
|
||||||
|
def test_allow_access_to_share(self):
|
||||||
|
share = cs.shares.get(1234)
|
||||||
|
ip = '192.168.0.1'
|
||||||
|
cs.shares.allow(share, 'ip', ip)
|
||||||
|
cs.assert_called('POST', '/shares/1234/action')
|
tests/v1/test_auth.py (new file, 374 lines)
@@ -0,0 +1,374 @@
|
|||||||
|
import json
|
||||||
|
import mock
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from cinderclient.v1 import client
|
||||||
|
from cinderclient import exceptions
|
||||||
|
from tests import utils
|
||||||
|
|
||||||
|
|
||||||
|
class AuthenticateAgainstKeystoneTests(utils.TestCase):
|
||||||
|
def test_authenticate_success(self):
|
||||||
|
cs = client.Client("username", "password", "project_id",
|
||||||
|
"auth_url/v2.0", service_type='compute')
|
||||||
|
resp = {
|
||||||
|
"access": {
|
||||||
|
"token": {
|
||||||
|
"expires": "12345",
|
||||||
|
"id": "FAKE_ID",
|
||||||
|
},
|
||||||
|
"serviceCatalog": [
|
||||||
|
{
|
||||||
|
"type": "compute",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"region": "RegionOne",
|
||||||
|
"adminURL": "http://localhost:8774/v1",
|
||||||
|
"internalURL": "http://localhost:8774/v1",
|
||||||
|
"publicURL": "http://localhost:8774/v1/",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
auth_response = utils.TestResponse({
|
||||||
|
"status_code": 200,
|
||||||
|
"text": json.dumps(resp),
|
||||||
|
})
|
||||||
|
|
||||||
|
mock_request = mock.Mock(return_value=(auth_response))
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
cs.client.authenticate()
|
||||||
|
headers = {
|
||||||
|
'User-Agent': cs.client.USER_AGENT,
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Accept': 'application/json',
|
||||||
|
}
|
||||||
|
body = {
|
||||||
|
'auth': {
|
||||||
|
'passwordCredentials': {
|
||||||
|
'username': cs.client.user,
|
||||||
|
'password': cs.client.password,
|
||||||
|
},
|
||||||
|
'tenantName': cs.client.projectid,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
token_url = cs.client.auth_url + "/tokens"
|
||||||
|
mock_request.assert_called_with(
|
||||||
|
"POST",
|
||||||
|
token_url,
|
||||||
|
headers=headers,
|
||||||
|
data=json.dumps(body),
|
||||||
|
allow_redirects=True,
|
||||||
|
**self.TEST_REQUEST_BASE)
|
||||||
|
|
||||||
|
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
|
||||||
|
public_url = endpoints[0]["publicURL"].rstrip('/')
|
||||||
|
self.assertEqual(cs.client.management_url, public_url)
|
||||||
|
token_id = resp["access"]["token"]["id"]
|
||||||
|
self.assertEqual(cs.client.auth_token, token_id)
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_authenticate_tenant_id(self):
|
||||||
|
cs = client.Client("username", "password", auth_url="auth_url/v2.0",
|
||||||
|
tenant_id='tenant_id', service_type='compute')
|
||||||
|
resp = {
|
||||||
|
"access": {
|
||||||
|
"token": {
|
||||||
|
"expires": "12345",
|
||||||
|
"id": "FAKE_ID",
|
||||||
|
"tenant": {
|
||||||
|
"description": None,
|
||||||
|
"enabled": True,
|
||||||
|
"id": "tenant_id",
|
||||||
|
"name": "demo"
|
||||||
|
} # tenant associated with token
|
||||||
|
},
|
||||||
|
"serviceCatalog": [
|
||||||
|
{
|
||||||
|
"type": "compute",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"region": "RegionOne",
|
||||||
|
"adminURL": "http://localhost:8774/v1",
|
||||||
|
"internalURL": "http://localhost:8774/v1",
|
||||||
|
"publicURL": "http://localhost:8774/v1/",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
auth_response = utils.TestResponse({
|
||||||
|
"status_code": 200,
|
||||||
|
"text": json.dumps(resp),
|
||||||
|
})
|
||||||
|
|
||||||
|
mock_request = mock.Mock(return_value=(auth_response))
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
cs.client.authenticate()
|
||||||
|
headers = {
|
||||||
|
'User-Agent': cs.client.USER_AGENT,
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Accept': 'application/json',
|
||||||
|
}
|
||||||
|
body = {
|
||||||
|
'auth': {
|
||||||
|
'passwordCredentials': {
|
||||||
|
'username': cs.client.user,
|
||||||
|
'password': cs.client.password,
|
||||||
|
},
|
||||||
|
'tenantId': cs.client.tenant_id,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
token_url = cs.client.auth_url + "/tokens"
|
||||||
|
mock_request.assert_called_with(
|
||||||
|
"POST",
|
||||||
|
token_url,
|
||||||
|
headers=headers,
|
||||||
|
data=json.dumps(body),
|
||||||
|
allow_redirects=True,
|
||||||
|
**self.TEST_REQUEST_BASE)
|
||||||
|
|
||||||
|
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
|
||||||
|
public_url = endpoints[0]["publicURL"].rstrip('/')
|
||||||
|
self.assertEqual(cs.client.management_url, public_url)
|
||||||
|
token_id = resp["access"]["token"]["id"]
|
||||||
|
self.assertEqual(cs.client.auth_token, token_id)
|
||||||
|
tenant_id = resp["access"]["token"]["tenant"]["id"]
|
||||||
|
self.assertEqual(cs.client.tenant_id, tenant_id)
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_authenticate_failure(self):
|
||||||
|
cs = client.Client("username", "password", "project_id",
|
||||||
|
"auth_url/v2.0")
|
||||||
|
resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}}
|
||||||
|
auth_response = utils.TestResponse({
|
||||||
|
"status_code": 401,
|
||||||
|
"text": json.dumps(resp),
|
||||||
|
})
|
||||||
|
|
||||||
|
mock_request = mock.Mock(return_value=(auth_response))
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_auth_redirect(self):
|
||||||
|
cs = client.Client("username", "password", "project_id",
|
||||||
|
"auth_url/v1", service_type='compute')
|
||||||
|
dict_correct_response = {
|
||||||
|
"access": {
|
||||||
|
"token": {
|
||||||
|
"expires": "12345",
|
||||||
|
"id": "FAKE_ID",
|
||||||
|
},
|
||||||
|
"serviceCatalog": [
|
||||||
|
{
|
||||||
|
"type": "compute",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"adminURL": "http://localhost:8774/v1",
|
||||||
|
"region": "RegionOne",
|
||||||
|
"internalURL": "http://localhost:8774/v1",
|
||||||
|
"publicURL": "http://localhost:8774/v1/",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
correct_response = json.dumps(dict_correct_response)
|
||||||
|
dict_responses = [
|
||||||
|
{"headers": {'location':'http://127.0.0.1:5001'},
|
||||||
|
"status_code": 305,
|
||||||
|
"text": "Use proxy"},
|
||||||
|
# Configured on admin port, cinder redirects to v2.0 port.
|
||||||
|
# When connecting to it, keystone auth succeeds via the v1.0
# protocol (through headers) but tokens are returned in the
# body (looks like a keystone bug). Left for compatibility.
|
||||||
|
{"headers": {},
|
||||||
|
"status_code": 200,
|
||||||
|
"text": correct_response},
|
||||||
|
{"headers": {},
|
||||||
|
"status_code": 200,
|
||||||
|
"text": correct_response}
|
||||||
|
]
|
||||||
|
|
||||||
|
responses = [(utils.TestResponse(resp)) for resp in dict_responses]
|
||||||
|
|
||||||
|
def side_effect(*args, **kwargs):
|
||||||
|
return responses.pop(0)
|
||||||
|
|
||||||
|
mock_request = mock.Mock(side_effect=side_effect)
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
cs.client.authenticate()
|
||||||
|
headers = {
|
||||||
|
'User-Agent': cs.client.USER_AGENT,
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Accept': 'application/json',
|
||||||
|
}
|
||||||
|
body = {
|
||||||
|
'auth': {
|
||||||
|
'passwordCredentials': {
|
||||||
|
'username': cs.client.user,
|
||||||
|
'password': cs.client.password,
|
||||||
|
},
|
||||||
|
'tenantName': cs.client.projectid,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
token_url = cs.client.auth_url + "/tokens"
|
||||||
|
mock_request.assert_called_with(
|
||||||
|
"POST",
|
||||||
|
token_url,
|
||||||
|
headers=headers,
|
||||||
|
data=json.dumps(body),
|
||||||
|
allow_redirects=True,
|
||||||
|
**self.TEST_REQUEST_BASE)
|
||||||
|
|
||||||
|
resp = dict_correct_response
|
||||||
|
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
|
||||||
|
public_url = endpoints[0]["publicURL"].rstrip('/')
|
||||||
|
self.assertEqual(cs.client.management_url, public_url)
|
||||||
|
token_id = resp["access"]["token"]["id"]
|
||||||
|
self.assertEqual(cs.client.auth_token, token_id)
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_ambiguous_endpoints(self):
|
||||||
|
cs = client.Client("username", "password", "project_id",
|
||||||
|
"auth_url/v2.0", service_type='compute')
|
||||||
|
resp = {
|
||||||
|
"access": {
|
||||||
|
"token": {
|
||||||
|
"expires": "12345",
|
||||||
|
"id": "FAKE_ID",
|
||||||
|
},
|
||||||
|
"serviceCatalog": [
|
||||||
|
{
|
||||||
|
"adminURL": "http://localhost:8774/v1",
|
||||||
|
"type": "compute",
|
||||||
|
"name": "Compute CLoud",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"region": "RegionOne",
|
||||||
|
"internalURL": "http://localhost:8774/v1",
|
||||||
|
"publicURL": "http://localhost:8774/v1/",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"adminURL": "http://localhost:8774/v1",
|
||||||
|
"type": "compute",
|
||||||
|
"name": "Hyper-compute Cloud",
|
||||||
|
"endpoints": [
|
||||||
|
{
|
||||||
|
"internalURL": "http://localhost:8774/v1",
|
||||||
|
"publicURL": "http://localhost:8774/v1/",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
auth_response = utils.TestResponse({
|
||||||
|
"status_code": 200,
|
||||||
|
"text": json.dumps(resp),
|
||||||
|
})
|
||||||
|
|
||||||
|
mock_request = mock.Mock(return_value=(auth_response))
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
self.assertRaises(exceptions.AmbiguousEndpoints,
|
||||||
|
cs.client.authenticate)
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
|
||||||
|
class AuthenticationTests(utils.TestCase):
|
||||||
|
def test_authenticate_success(self):
|
||||||
|
cs = client.Client("username", "password", "project_id", "auth_url")
|
||||||
|
management_url = 'https://localhost/v1.1/443470'
|
||||||
|
auth_response = utils.TestResponse({
|
||||||
|
'status_code': 204,
|
||||||
|
'headers': {
|
||||||
|
'x-server-management-url': management_url,
|
||||||
|
'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1',
|
||||||
|
},
|
||||||
|
})
|
||||||
|
mock_request = mock.Mock(return_value=(auth_response))
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
cs.client.authenticate()
|
||||||
|
headers = {
|
||||||
|
'Accept': 'application/json',
|
||||||
|
'X-Auth-User': 'username',
|
||||||
|
'X-Auth-Key': 'password',
|
||||||
|
'X-Auth-Project-Id': 'project_id',
|
||||||
|
'User-Agent': cs.client.USER_AGENT
|
||||||
|
}
|
||||||
|
mock_request.assert_called_with(
|
||||||
|
"GET",
|
||||||
|
cs.client.auth_url,
|
||||||
|
headers=headers,
|
||||||
|
**self.TEST_REQUEST_BASE)
|
||||||
|
|
||||||
|
self.assertEqual(cs.client.management_url,
|
||||||
|
auth_response.headers['x-server-management-url'])
|
||||||
|
self.assertEqual(cs.client.auth_token,
|
||||||
|
auth_response.headers['x-auth-token'])
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_authenticate_failure(self):
|
||||||
|
cs = client.Client("username", "password", "project_id", "auth_url")
|
||||||
|
auth_response = utils.TestResponse({"status_code": 401})
|
||||||
|
mock_request = mock.Mock(return_value=(auth_response))
|
||||||
|
|
||||||
|
@mock.patch.object(requests, "request", mock_request)
|
||||||
|
def test_auth_call():
|
||||||
|
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_auth_automatic(self):
|
||||||
|
cs = client.Client("username", "password", "project_id", "auth_url")
|
||||||
|
http_client = cs.client
|
||||||
|
http_client.management_url = ''
|
||||||
|
mock_request = mock.Mock(return_value=(None, None))
|
||||||
|
|
||||||
|
@mock.patch.object(http_client, 'request', mock_request)
|
||||||
|
@mock.patch.object(http_client, 'authenticate')
|
||||||
|
def test_auth_call(m):
|
||||||
|
http_client.get('/')
|
||||||
|
m.assert_called()
|
||||||
|
mock_request.assert_called()
|
||||||
|
|
||||||
|
test_auth_call()
|
||||||
|
|
||||||
|
def test_auth_manual(self):
|
||||||
|
cs = client.Client("username", "password", "project_id", "auth_url")
|
||||||
|
|
||||||
|
@mock.patch.object(cs.client, 'authenticate')
|
||||||
|
def test_auth_call(m):
|
||||||
|
cs.authenticate()
|
||||||
|
m.assert_called()
|
||||||
|
|
||||||
|
test_auth_call()
|
tests/v1/test_quota_classes.py (new file, 42 lines)
@@ -0,0 +1,42 @@
|
|||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from tests import utils
|
||||||
|
from tests.v1 import fakes
|
||||||
|
|
||||||
|
|
||||||
|
cs = fakes.FakeClient()
|
||||||
|
|
||||||
|
|
||||||
|
class QuotaClassSetsTest(utils.TestCase):
|
||||||
|
|
||||||
|
def test_class_quotas_get(self):
|
||||||
|
class_name = 'test'
|
||||||
|
cs.quota_classes.get(class_name)
|
||||||
|
cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name)
|
||||||
|
|
||||||
|
def test_update_quota(self):
|
||||||
|
q = cs.quota_classes.get('test')
|
||||||
|
q.update(volumes=2)
|
||||||
|
cs.assert_called('PUT', '/os-quota-class-sets/test')
|
||||||
|
|
||||||
|
def test_refresh_quota(self):
|
||||||
|
q = cs.quota_classes.get('test')
|
||||||
|
q2 = cs.quota_classes.get('test')
|
||||||
|
self.assertEqual(q.volumes, q2.volumes)
|
||||||
|
q2.volumes = 0
|
||||||
|
self.assertNotEqual(q.volumes, q2.volumes)
|
||||||
|
q2.get()
|
||||||
|
self.assertEqual(q.volumes, q2.volumes)
|
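Editor's note: these quota tests drive a module-level fakes.FakeClient() and then check the recorded HTTP traffic with cs.assert_called(method, url). That helper lives in tests/fakes.py, which is not shown in this section. A rough sketch of the idea, assuming the (method, url, body) callstack format that FakeHTTPClient._cs_request appends later in this commit; the real helper may differ:

    # Hypothetical sketch of the assert_called() helper used by these tests.
    def assert_called(self, method, url, body=None, pos=-1):
        """Check that the recorded call at position `pos` matches method/url."""
        assert self.client.callstack, "No API calls were made"
        called = self.client.callstack[pos][0:2]
        assert (method, url) == called, "%r != %r" % ((method, url), called)
        if body is not None:
            assert self.client.callstack[pos][2] == body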
52
tests/v1/test_quotas.py
Normal file
@ -0,0 +1,52 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tests import utils
from tests.v1 import fakes


cs = fakes.FakeClient()


class QuotaSetsTest(utils.TestCase):

    def test_tenant_quotas_get(self):
        tenant_id = 'test'
        cs.quotas.get(tenant_id)
        cs.assert_called('GET', '/os-quota-sets/%s' % tenant_id)

    def test_tenant_quotas_defaults(self):
        tenant_id = 'test'
        cs.quotas.defaults(tenant_id)
        cs.assert_called('GET', '/os-quota-sets/%s/defaults' % tenant_id)

    def test_update_quota(self):
        q = cs.quotas.get('test')
        q.update(volumes=2)
        q.update(snapshots=2)
        cs.assert_called('PUT', '/os-quota-sets/test')

    def test_refresh_quota(self):
        q = cs.quotas.get('test')
        q2 = cs.quotas.get('test')
        self.assertEqual(q.volumes, q2.volumes)
        self.assertEqual(q.snapshots, q2.snapshots)
        q2.volumes = 0
        self.assertNotEqual(q.volumes, q2.volumes)
        q2.snapshots = 0
        self.assertNotEqual(q.snapshots, q2.snapshots)
        q2.get()
        self.assertEqual(q.volumes, q2.volumes)
        self.assertEqual(q.snapshots, q2.snapshots)
183
tests/v1/test_shell.py
Normal file
@ -0,0 +1,183 @@
# Copyright 2010 Jacob Kaplan-Moss

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import fixtures

from cinderclient import client
from cinderclient import shell
from cinderclient.v1 import shell as shell_v1
from tests.v1 import fakes
from tests import utils


class ShellTest(utils.TestCase):

    FAKE_ENV = {
        'CINDER_USERNAME': 'username',
        'CINDER_PASSWORD': 'password',
        'CINDER_PROJECT_ID': 'project_id',
        'OS_VOLUME_API_VERSION': '1.1',
        'CINDER_URL': 'http://no.where',
    }

    # Patch os.environ to avoid required auth info.
    def setUp(self):
        """Run before each test."""
        super(ShellTest, self).setUp()
        for var in self.FAKE_ENV:
            self.useFixture(fixtures.EnvironmentVariable(var,
                                                         self.FAKE_ENV[var]))

        self.shell = shell.OpenStackCinderShell()

        # HACK(bcwaldon): replace this when we start using stubs
        self.old_get_client_class = client.get_client_class
        client.get_client_class = lambda *_: fakes.FakeClient

    def tearDown(self):
        # For some methods like test_image_meta_bad_action we are
        # testing a SystemExit to be thrown and object self.shell has
        # no time to get instantiated, which is OK in this case, so
        # we make sure the method is there before launching it.
        if hasattr(self.shell, 'cs'):
            self.shell.cs.clear_callstack()

        # HACK(bcwaldon): replace this when we start using stubs
        client.get_client_class = self.old_get_client_class
        super(ShellTest, self).tearDown()

    def run_command(self, cmd):
        self.shell.main(cmd.split())

    def assert_called(self, method, url, body=None, **kwargs):
        return self.shell.cs.assert_called(method, url, body, **kwargs)

    def assert_called_anytime(self, method, url, body=None):
        return self.shell.cs.assert_called_anytime(method, url, body)

    def test_extract_metadata(self):
        # mimic the result of argparse's parse_args() method
        class Arguments:
            def __init__(self, metadata=[]):
                self.metadata = metadata

        inputs = [
            ([], {}),
            (["key=value"], {"key": "value"}),
            (["key"], {"key": None}),
            (["k1=v1", "k2=v2"], {"k1": "v1", "k2": "v2"}),
            (["k1=v1", "k2"], {"k1": "v1", "k2": None}),
            (["k1", "k2=v2"], {"k1": None, "k2": "v2"})
        ]

        for input in inputs:
            args = Arguments(metadata=input[0])
            self.assertEquals(shell_v1._extract_metadata(args), input[1])

    def test_list(self):
        self.run_command('list')
        # NOTE(jdg): we default to detail currently
        self.assert_called('GET', '/volumes/detail')

    def test_list_filter_status(self):
        self.run_command('list --status=available')
        self.assert_called('GET', '/volumes/detail?status=available')

    def test_list_filter_display_name(self):
        self.run_command('list --display-name=1234')
        self.assert_called('GET', '/volumes/detail?display_name=1234')

    def test_list_all_tenants(self):
        self.run_command('list --all-tenants=1')
        self.assert_called('GET', '/volumes/detail?all_tenants=1')

    def test_show(self):
        self.run_command('show 1234')
        self.assert_called('GET', '/volumes/1234')

    def test_delete(self):
        self.run_command('delete 1234')
        self.assert_called('DELETE', '/volumes/1234')

    def test_snapshot_list_filter_volume_id(self):
        self.run_command('snapshot-list --volume-id=1234')
        self.assert_called('GET', '/snapshots/detail?volume_id=1234')

    def test_snapshot_list_filter_status_and_volume_id(self):
        self.run_command('snapshot-list --status=available --volume-id=1234')
        self.assert_called('GET', '/snapshots/detail?'
                           'status=available&volume_id=1234')

    def test_rename(self):
        # basic rename with positional arguments
        self.run_command('rename 1234 new-name')
        expected = {'volume': {'display_name': 'new-name'}}
        self.assert_called('PUT', '/volumes/1234', body=expected)
        # change description only
        self.run_command('rename 1234 --display-description=new-description')
        expected = {'volume': {'display_description': 'new-description'}}
        self.assert_called('PUT', '/volumes/1234', body=expected)
        # rename and change description
        self.run_command('rename 1234 new-name '
                         '--display-description=new-description')
        expected = {'volume': {
            'display_name': 'new-name',
            'display_description': 'new-description',
        }}
        self.assert_called('PUT', '/volumes/1234', body=expected)
        # noop, the only call will be the lookup
        self.run_command('rename 1234')
        self.assert_called('GET', '/volumes/1234')

    def test_rename_snapshot(self):
        # basic rename with positional arguments
        self.run_command('snapshot-rename 1234 new-name')
        expected = {'snapshot': {'display_name': 'new-name'}}
        self.assert_called('PUT', '/snapshots/1234', body=expected)
        # change description only
        self.run_command('snapshot-rename 1234 '
                         '--display-description=new-description')
        expected = {'snapshot': {'display_description': 'new-description'}}
        self.assert_called('PUT', '/snapshots/1234', body=expected)
        # snapshot-rename and change description
        self.run_command('snapshot-rename 1234 new-name '
                         '--display-description=new-description')
        expected = {'snapshot': {
            'display_name': 'new-name',
            'display_description': 'new-description',
        }}
        self.assert_called('PUT', '/snapshots/1234', body=expected)
        # noop, the only call will be the lookup
        self.run_command('snapshot-rename 1234')
        self.assert_called('GET', '/snapshots/1234')

    def test_set_metadata_set(self):
        self.run_command('metadata 1234 set key1=val1 key2=val2')
        self.assert_called('POST', '/volumes/1234/metadata',
                           {'metadata': {'key1': 'val1', 'key2': 'val2'}})

    def test_set_metadata_delete_dict(self):
        self.run_command('metadata 1234 unset key1=val1 key2=val2')
        self.assert_called('DELETE', '/volumes/1234/metadata/key1')
        self.assert_called('DELETE', '/volumes/1234/metadata/key2', pos=-2)

    def test_set_metadata_delete_keys(self):
        self.run_command('metadata 1234 unset key1 key2')
        self.assert_called('DELETE', '/volumes/1234/metadata/key1')
        self.assert_called('DELETE', '/volumes/1234/metadata/key2', pos=-2)
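Editor's note: test_extract_metadata above pins the expected parsing behaviour: "key=value" items become dict entries and a bare "key" maps to None. A minimal sketch of logic consistent with that table, for orientation only; the actual cinderclient.v1.shell._extract_metadata is not shown in this section and may be implemented differently:

    # Sketch of metadata parsing consistent with the test table above.
    def _extract_metadata(args):
        metadata = {}
        for item in args.metadata:
            if '=' in item:
                key, value = item.split('=', 1)
            else:
                key, value = item, None
            metadata[key] = value
        return metadata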
35
tests/v1/test_types.py
Normal file
@ -0,0 +1,35 @@
from cinderclient import exceptions
from cinderclient.v1 import volume_types
from tests import utils
from tests.v1 import fakes

cs = fakes.FakeClient()


class TypesTest(utils.TestCase):
    def test_list_types(self):
        tl = cs.volume_types.list()
        cs.assert_called('GET', '/types')
        for t in tl:
            self.assertTrue(isinstance(t, volume_types.VolumeType))

    def test_create(self):
        t = cs.volume_types.create('test-type-3')
        cs.assert_called('POST', '/types')
        self.assertTrue(isinstance(t, volume_types.VolumeType))

    def test_set_key(self):
        t = cs.volume_types.get(1)
        t.set_keys({'k': 'v'})
        cs.assert_called('POST',
                         '/types/1/extra_specs',
                         {'extra_specs': {'k': 'v'}})

    def test_unset_keys(self):
        t = cs.volume_types.get(1)
        t.unset_keys(['k'])
        cs.assert_called('DELETE', '/types/1/extra_specs/k')

    def test_delete(self):
        cs.volume_types.delete(1)
        cs.assert_called('DELETE', '/types/1')
53
tests/v1/test_volume_backups.py
Normal file
@ -0,0 +1,53 @@
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tests import utils
from tests.v1 import fakes


cs = fakes.FakeClient()


class VolumeBackupsTest(utils.TestCase):

    def test_create(self):
        cs.backups.create('2b695faf-b963-40c8-8464-274008fbcef4')
        cs.assert_called('POST', '/backups')

    def test_get(self):
        backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
        cs.backups.get(backup_id)
        cs.assert_called('GET', '/backups/%s' % backup_id)

    def test_list(self):
        cs.backups.list()
        cs.assert_called('GET', '/backups/detail')

    def test_delete(self):
        b = cs.backups.list()[0]
        b.delete()
        cs.assert_called('DELETE',
                         '/backups/76a17945-3c6f-435c-975b-b5685db10b62')
        cs.backups.delete('76a17945-3c6f-435c-975b-b5685db10b62')
        cs.assert_called('DELETE',
                         '/backups/76a17945-3c6f-435c-975b-b5685db10b62')
        cs.backups.delete(b)
        cs.assert_called('DELETE',
                         '/backups/76a17945-3c6f-435c-975b-b5685db10b62')

    def test_restore(self):
        backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
        cs.restores.restore(backup_id)
        cs.assert_called('POST', '/backups/%s/restore' % backup_id)
71
tests/v1/test_volumes.py
Normal file
@ -0,0 +1,71 @@
from tests import utils
from tests.v1 import fakes


cs = fakes.FakeClient()


class VolumesTest(utils.TestCase):

    def test_delete_volume(self):
        v = cs.volumes.list()[0]
        v.delete()
        cs.assert_called('DELETE', '/volumes/1234')
        cs.volumes.delete('1234')
        cs.assert_called('DELETE', '/volumes/1234')
        cs.volumes.delete(v)
        cs.assert_called('DELETE', '/volumes/1234')

    def test_create_volume(self):
        cs.volumes.create(1)
        cs.assert_called('POST', '/volumes')

    def test_attach(self):
        v = cs.volumes.get('1234')
        cs.volumes.attach(v, 1, '/dev/vdc')
        cs.assert_called('POST', '/volumes/1234/action')

    def test_detach(self):
        v = cs.volumes.get('1234')
        cs.volumes.detach(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_reserve(self):
        v = cs.volumes.get('1234')
        cs.volumes.reserve(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_unreserve(self):
        v = cs.volumes.get('1234')
        cs.volumes.unreserve(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_begin_detaching(self):
        v = cs.volumes.get('1234')
        cs.volumes.begin_detaching(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_roll_detaching(self):
        v = cs.volumes.get('1234')
        cs.volumes.roll_detaching(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_initialize_connection(self):
        v = cs.volumes.get('1234')
        cs.volumes.initialize_connection(v, {})
        cs.assert_called('POST', '/volumes/1234/action')

    def test_terminate_connection(self):
        v = cs.volumes.get('1234')
        cs.volumes.terminate_connection(v, {})
        cs.assert_called('POST', '/volumes/1234/action')

    def test_set_metadata(self):
        cs.volumes.set_metadata(1234, {'k1': 'v1'})
        cs.assert_called('POST', '/volumes/1234/metadata',
                         {'metadata': {'k1': 'v1'}})

    def test_delete_metadata(self):
        keys = ['key1']
        cs.volumes.delete_metadata(1234, keys)
        cs.assert_called('DELETE', '/volumes/1234/metadata/key1')
1
tests/v1/testfile.txt
Normal file
@ -0,0 +1 @@
BLAH
15
tests/v2/__init__.py
Normal file
@ -0,0 +1,15 @@
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
0
tests/v2/contrib/__init__.py
Normal file
36
tests/v2/contrib/test_list_extensions.py
Normal file
@ -0,0 +1,36 @@
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import extension
from cinderclient.v2.contrib import list_extensions
from tests import utils
from tests.v1 import fakes


extensions = [
    extension.Extension(list_extensions.__name__.split(".")[-1],
                        list_extensions),
]
cs = fakes.FakeClient(extensions=extensions)


class ListExtensionsTests(utils.TestCase):
    def test_list_extensions(self):
        all_exts = cs.list_extensions.show_all()
        cs.assert_called('GET', '/extensions')
        self.assertTrue(len(all_exts) > 0)
        for r in all_exts:
            self.assertTrue(len(r.summary) > 0)
411
tests/v2/fakes.py
Normal file
@ -0,0 +1,411 @@
# Copyright 2013 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import urlparse

from cinderclient import client as base_client
from cinderclient.v2 import client
from tests import fakes
import tests.utils as utils


def _stub_volume(**kwargs):
    volume = {
        'id': '1234',
        'name': None,
        'description': None,
        "attachments": [],
        "bootable": "false",
        "availability_zone": "cinder",
        "created_at": "2012-08-27T00:00:00.000000",
        "id": '00000000-0000-0000-0000-000000000000',
        "metadata": {},
        "size": 1,
        "snapshot_id": None,
        "status": "available",
        "volume_type": "None",
        "links": [
            {
                "href": "http://localhost/v2/fake/volumes/1234",
                "rel": "self"
            },
            {
                "href": "http://localhost/fake/volumes/1234",
                "rel": "bookmark"
            }
        ],
    }
    volume.update(kwargs)
    return volume


def _stub_snapshot(**kwargs):
    snapshot = {
        "created_at": "2012-08-28T16:30:31.000000",
        "display_description": None,
        "display_name": None,
        "id": '11111111-1111-1111-1111-111111111111',
        "size": 1,
        "status": "available",
        "volume_id": '00000000-0000-0000-0000-000000000000',
    }
    snapshot.update(kwargs)
    return snapshot


def _self_href(base_uri, tenant_id, backup_id):
    return '%s/v2/%s/backups/%s' % (base_uri, tenant_id, backup_id)


def _bookmark_href(base_uri, tenant_id, backup_id):
    return '%s/%s/backups/%s' % (base_uri, tenant_id, backup_id)


def _stub_backup_full(id, base_uri, tenant_id):
    return {
        'id': id,
        'name': 'backup',
        'description': 'nightly backup',
        'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b',
        'container': 'volumebackups',
        'object_count': 220,
        'size': 10,
        'availability_zone': 'az1',
        'created_at': '2013-04-12T08:16:37.000000',
        'status': 'available',
        'links': [
            {
                'href': _self_href(base_uri, tenant_id, id),
                'rel': 'self'
            },
            {
                'href': _bookmark_href(base_uri, tenant_id, id),
                'rel': 'bookmark'
            }
        ]
    }


def _stub_backup(id, base_uri, tenant_id):
    return {
        'id': id,
        'name': 'backup',
        'links': [
            {
                'href': _self_href(base_uri, tenant_id, id),
                'rel': 'self'
            },
            {
                'href': _bookmark_href(base_uri, tenant_id, id),
                'rel': 'bookmark'
            }
        ]
    }


def _stub_restore():
    return {'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b'}


class FakeClient(fakes.FakeClient, client.Client):

    def __init__(self, *args, **kwargs):
        client.Client.__init__(self, 'username', 'password',
                               'project_id', 'auth_url',
                               extensions=kwargs.get('extensions'))
        self.client = FakeHTTPClient(**kwargs)


class FakeHTTPClient(base_client.HTTPClient):

    def __init__(self, **kwargs):
        self.username = 'username'
        self.password = 'password'
        self.auth_url = 'auth_url'
        self.callstack = []

    def _cs_request(self, url, method, **kwargs):
        # Check that certain things are called correctly
        if method in ['GET', 'DELETE']:
            assert 'body' not in kwargs
        elif method == 'PUT':
            assert 'body' in kwargs

        # Call the method
        args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
        kwargs.update(args)
        munged_url = url.rsplit('?', 1)[0]
        munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
        munged_url = munged_url.replace('-', '_')

        callback = "%s_%s" % (method.lower(), munged_url)

        if not hasattr(self, callback):
            raise AssertionError('Called unknown API method: %s %s, '
                                 'expected fakes method name: %s' %
                                 (method, url, callback))

        # Note the call
        self.callstack.append((method, url, kwargs.get('body', None)))
        status, headers, body = getattr(self, callback)(**kwargs)
        r = utils.TestResponse({
            "status_code": status,
            "text": body,
            "headers": headers,
        })
        return r, body

        if hasattr(status, 'items'):
            return utils.TestResponse(status), body
        else:
            return utils.TestResponse({"status": status}), body

    #
    # Snapshots
    #

    def get_snapshots_detail(self, **kw):
        return (200, {}, {'snapshots': [
            _stub_snapshot(),
        ]})

    def get_snapshots_1234(self, **kw):
        return (200, {}, {'snapshot': _stub_snapshot(id='1234')})

    def put_snapshots_1234(self, **kw):
        snapshot = _stub_snapshot(id='1234')
        snapshot.update(kw['body']['snapshot'])
        return (200, {}, {'snapshot': snapshot})

    #
    # Volumes
    #

    def put_volumes_1234(self, **kw):
        volume = _stub_volume(id='1234')
        volume.update(kw['body']['volume'])
        return (200, {}, {'volume': volume})

    def get_volumes(self, **kw):
        return (200, {}, {"volumes": [
            {'id': 1234, 'name': 'sample-volume'},
            {'id': 5678, 'name': 'sample-volume2'}
        ]})

    # TODO(jdg): This will need to change
    # at the very least it's not complete
    def get_volumes_detail(self, **kw):
        return (200, {}, {"volumes": [
            {'id': 1234,
             'name': 'sample-volume',
             'attachments': [{'server_id': 1234}]},
        ]})

    def get_volumes_1234(self, **kw):
        r = {'volume': self.get_volumes_detail()[2]['volumes'][0]}
        return (200, {}, r)

    def post_volumes_1234_action(self, body, **kw):
        _body = None
        resp = 202
        assert len(body.keys()) == 1
        action = body.keys()[0]
        if action == 'os-attach':
            assert body[action].keys() == ['instance_uuid', 'mountpoint']
        elif action == 'os-detach':
            assert body[action] is None
        elif action == 'os-reserve':
            assert body[action] is None
        elif action == 'os-unreserve':
            assert body[action] is None
        elif action == 'os-initialize_connection':
            assert body[action].keys() == ['connector']
            return (202, {}, {'connection_info': 'foos'})
        elif action == 'os-terminate_connection':
            assert body[action].keys() == ['connector']
        elif action == 'os-begin_detaching':
            assert body[action] is None
        elif action == 'os-roll_detaching':
            assert body[action] is None
        else:
            raise AssertionError("Unexpected server action: %s" % action)
        return (resp, {}, _body)

    def post_volumes(self, **kw):
        return (202, {}, {'volume': {}})

    def delete_volumes_1234(self, **kw):
        return (202, {}, None)

    #
    # Quotas
    #

    def get_os_quota_sets_test(self, **kw):
        return (200, {}, {'quota_set': {
            'tenant_id': 'test',
            'metadata_items': [],
            'volumes': 1,
            'snapshots': 1,
            'gigabytes': 1}})

    def get_os_quota_sets_test_defaults(self):
        return (200, {}, {'quota_set': {
            'tenant_id': 'test',
            'metadata_items': [],
            'volumes': 1,
            'snapshots': 1,
            'gigabytes': 1}})

    def put_os_quota_sets_test(self, body, **kw):
        assert body.keys() == ['quota_set']
        fakes.assert_has_keys(body['quota_set'],
                              required=['tenant_id'])
        return (200, {}, {'quota_set': {
            'tenant_id': 'test',
            'metadata_items': [],
            'volumes': 2,
            'snapshots': 2,
            'gigabytes': 1}})

    #
    # Quota Classes
    #

    def get_os_quota_class_sets_test(self, **kw):
        return (200, {}, {'quota_class_set': {
            'class_name': 'test',
            'metadata_items': [],
            'volumes': 1,
            'snapshots': 1,
            'gigabytes': 1}})

    def put_os_quota_class_sets_test(self, body, **kw):
        assert body.keys() == ['quota_class_set']
        fakes.assert_has_keys(body['quota_class_set'],
                              required=['class_name'])
        return (200, {}, {'quota_class_set': {
            'class_name': 'test',
            'metadata_items': [],
            'volumes': 2,
            'snapshots': 2,
            'gigabytes': 1}})

    #
    # VolumeTypes
    #
    def get_types(self, **kw):
        return (200, {}, {
            'volume_types': [{'id': 1,
                              'name': 'test-type-1',
                              'extra_specs': {}},
                             {'id': 2,
                              'name': 'test-type-2',
                              'extra_specs': {}}]})

    def get_types_1(self, **kw):
        return (200, {}, {'volume_type': {'id': 1,
                                          'name': 'test-type-1',
                                          'extra_specs': {}}})

    def post_types(self, body, **kw):
        return (202, {}, {'volume_type': {'id': 3,
                                          'name': 'test-type-3',
                                          'extra_specs': {}}})

    def post_types_1_extra_specs(self, body, **kw):
        assert body.keys() == ['extra_specs']
        return (200, {}, {'extra_specs': {'k': 'v'}})

    def delete_types_1_extra_specs_k(self, **kw):
        return(204, {}, None)

    def delete_types_1(self, **kw):
        return (202, {}, None)

    #
    # Set/Unset metadata
    #
    def delete_volumes_1234_metadata_test_key(self, **kw):
        return (204, {}, None)

    def delete_volumes_1234_metadata_key1(self, **kw):
        return (204, {}, None)

    def delete_volumes_1234_metadata_key2(self, **kw):
        return (204, {}, None)

    def post_volumes_1234_metadata(self, **kw):
        return (204, {}, {'metadata': {'test_key': 'test_value'}})

    #
    # List all extensions
    #
    def get_extensions(self, **kw):
        exts = [
            {
                "alias": "FAKE-1",
                "description": "Fake extension number 1",
                "links": [],
                "name": "Fake1",
                "namespace": ("http://docs.openstack.org/"
                              "/ext/fake1/api/v1.1"),
                "updated": "2011-06-09T00:00:00+00:00"
            },
            {
                "alias": "FAKE-2",
                "description": "Fake extension number 2",
                "links": [],
                "name": "Fake2",
                "namespace": ("http://docs.openstack.org/"
                              "/ext/fake1/api/v1.1"),
                "updated": "2011-06-09T00:00:00+00:00"
            },
        ]
        return (200, {}, {"extensions": exts, })

    #
    # VolumeBackups
    #

    def get_backups_76a17945_3c6f_435c_975b_b5685db10b62(self, **kw):
        base_uri = 'http://localhost:8776'
        tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
        backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
        return (200, {},
                {'backup': _stub_backup_full(backup1, base_uri, tenant_id)})

    def get_backups_detail(self, **kw):
        base_uri = 'http://localhost:8776'
        tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
        backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
        backup2 = 'd09534c6-08b8-4441-9e87-8976f3a8f699'
        return (200, {},
                {'backups': [
                    _stub_backup_full(backup1, base_uri, tenant_id),
                    _stub_backup_full(backup2, base_uri, tenant_id)]})

    def delete_backups_76a17945_3c6f_435c_975b_b5685db10b62(self, **kw):
        return (202, {}, None)

    def post_backups(self, **kw):
        base_uri = 'http://localhost:8776'
        tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
        backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
        return (202, {},
                {'backup': _stub_backup(backup1, base_uri, tenant_id)})

    def post_backups_76a17945_3c6f_435c_975b_b5685db10b62_restore(self, **kw):
        return (200, {},
                {'restore': _stub_restore()})
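Editor's note: a worked example of how FakeHTTPClient._cs_request above dispatches requests may help. The query string is dropped, slashes, dots and dashes fold into underscores, and the HTTP method is prefixed, so each request maps directly onto one of the stub methods defined above:

    # Worked example of the URL munging in FakeHTTPClient._cs_request:
    #   method = 'GET', url = '/os-quota-sets/test/defaults?foo=bar'
    #   munged_url = 'os_quota_sets_test_defaults'
    #   callback   = 'get_os_quota_sets_test_defaults'
    # which resolves to the stub defined above, so the fake returns its
    # canned (status, headers, body) tuple without any real HTTP traffic.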
0
tests/v2/shares/__init__.py
Normal file
85
tests/v2/shares/fakes.py
Normal file
@ -0,0 +1,85 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cinderclient.v2 import shares as shares_ext_module
from cinderclient.v2 import client
from tests.v2 import fakes


class FakeClient(fakes.FakeClient):

    def __init__(self, *args, **kwargs):
        client.Client.__init__(self, 'username', 'password',
                               'project_id', 'auth_url',
                               extensions=kwargs.get('extensions'))
        self.client = FakeHTTPClient(**kwargs)


class FakeHTTPClient(fakes.FakeHTTPClient):

    def get_shares_1234(self, **kw):
        share = {'share': {'id': 1234, 'name': 'sharename'}}
        return (200, {}, share)

    def get_shares_detail(self):
        shares = {'shares': [{'id': 1234,
                              'name': 'sharename',
                              'attachments': [{'server_id': 111}]}]}
        return (200, {}, shares)

    def get_share_snapshots_1234(self, **kw):
        snapshot = {'share-snapshot': {'id': 1234, 'name': 'sharename'}}
        return (200, {}, snapshot)

    def get_share_snapshots_detail(self):
        snapshots = {'share-snapshots': [{
            'id': 1234,
            'created_at': '2012-08-27T00:00:00.000000',
            'share_size': 1,
            'share_id': 4321,
            'status': 'available',
            'name': 'sharename',
            'display_description': 'description',
            'share_proto': 'type',
            'export_location': 'location',
        }]}
        return (200, {}, snapshots)

    def post_shares_1234_action(self, body, **kw):
        _body = None
        resp = 202
        assert len(body.keys()) == 1
        action = body.keys()[0]
        if action == 'os-allow_access':
            assert body[action].keys() == ['access_type', 'access_to']
        elif action == 'os-deny_access':
            assert body[action].keys() == ['access_id']
        elif action == 'os-access_list':
            assert body[action] is None
        else:
            raise AssertionError("Unexpected share action: %s" % action)
        return (resp, {}, _body)

    def post_shares(self, **kwargs):
        return (202, {}, {'share': {}})

    def post_share_snapshots(self, **kwargs):
        return (202, {}, {'share-snapshot': {}})

    def delete_shares_1234(self, **kw):
        return (202, {}, None)

    def delete_share_snapshots_1234(self, **kwargs):
        return (202, {}, None)
44
tests/v2/shares/test_share_snapshots.py
Normal file
@ -0,0 +1,44 @@
# Copyright 2010 Jacob Kaplan-Moss

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import extension
from cinderclient.v2 import share_snapshots

from tests import utils
from tests.v2.shares import fakes


extensions = [
    extension.Extension('share_snapshots', share_snapshots),
]
cs = fakes.FakeClient(extensions=extensions)


class ShareSnapshotsTest(utils.TestCase):

    def test_create_share_snapshot(self):
        cs.share_snapshots.create(1234)
        cs.assert_called('POST', '/share-snapshots')

    def test_delete_share(self):
        snapshot = cs.share_snapshots.get(1234)
        cs.share_snapshots.delete(snapshot)
        cs.assert_called('DELETE', '/share-snapshots/1234')

    def test_list_shares(self):
        cs.share_snapshots.list()
        cs.assert_called('GET', '/share-snapshots/detail')
54
tests/v2/shares/test_shares.py
Normal file
@ -0,0 +1,54 @@
# Copyright 2010 Jacob Kaplan-Moss

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import extension
from cinderclient.v2 import shares

from tests import utils
from tests.v2.shares import fakes


extensions = [
    extension.Extension('shares', shares),
]
cs = fakes.FakeClient(extensions=extensions)


class SharesTest(utils.TestCase):

    def test_create_nfs_share(self):
        cs.shares.create('nfs', 1)
        cs.assert_called('POST', '/shares')

    def test_create_cifs_share(self):
        cs.shares.create('cifs', 2)
        cs.assert_called('POST', '/shares')

    def test_delete_share(self):
        share = cs.shares.get('1234')
        cs.shares.delete(share)
        cs.assert_called('DELETE', '/shares/1234')

    def test_list_shares(self):
        cs.shares.list()
        cs.assert_called('GET', '/shares/detail')

    def test_allow_access_to_share(self):
        share = cs.shares.get(1234)
        ip = '192.168.0.1'
        cs.shares.allow(share, 'ip', ip)
        cs.assert_called('POST', '/shares/1234/action')
390
tests/v2/test_auth.py
Normal file
@ -0,0 +1,390 @@
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

import mock
import requests

from cinderclient import exceptions
from cinderclient.v2 import client
from tests import utils


class AuthenticateAgainstKeystoneTests(utils.TestCase):
    def test_authenticate_success(self):
        cs = client.Client("username", "password", "project_id",
                           "auth_url/v2.0", service_type='compute')
        resp = {
            "access": {
                "token": {
                    "expires": "12345",
                    "id": "FAKE_ID",
                },
                "serviceCatalog": [
                    {
                        "type": "compute",
                        "endpoints": [
                            {
                                "region": "RegionOne",
                                "adminURL": "http://localhost:8774/v2",
                                "internalURL": "http://localhost:8774/v2",
                                "publicURL": "http://localhost:8774/v2/",
                            },
                        ],
                    },
                ],
            },
        }
        auth_response = utils.TestResponse({
            "status_code": 200,
            "text": json.dumps(resp),
        })

        mock_request = mock.Mock(return_value=(auth_response))

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'User-Agent': cs.client.USER_AGENT,
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            }
            body = {
                'auth': {
                    'passwordCredentials': {
                        'username': cs.client.user,
                        'password': cs.client.password,
                    },
                    'tenantName': cs.client.projectid,
                },
            }

            token_url = cs.client.auth_url + "/tokens"
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data=json.dumps(body),
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)

            endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
            public_url = endpoints[0]["publicURL"].rstrip('/')
            self.assertEqual(cs.client.management_url, public_url)
            token_id = resp["access"]["token"]["id"]
            self.assertEqual(cs.client.auth_token, token_id)

        test_auth_call()

    def test_authenticate_tenant_id(self):
        cs = client.Client("username", "password", auth_url="auth_url/v2.0",
                           tenant_id='tenant_id', service_type='compute')
        resp = {
            "access": {
                "token": {
                    "expires": "12345",
                    "id": "FAKE_ID",
                    "tenant": {
                        "description": None,
                        "enabled": True,
                        "id": "tenant_id",
                        "name": "demo"
                    }  # tenant associated with token
                },
                "serviceCatalog": [
                    {
                        "type": "compute",
                        "endpoints": [
                            {
                                "region": "RegionOne",
                                "adminURL": "http://localhost:8774/v2",
                                "internalURL": "http://localhost:8774/v2",
                                "publicURL": "http://localhost:8774/v2/",
                            },
                        ],
                    },
                ],
            },
        }
        auth_response = utils.TestResponse({
            "status_code": 200,
            "text": json.dumps(resp),
        })

        mock_request = mock.Mock(return_value=(auth_response))

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'User-Agent': cs.client.USER_AGENT,
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            }
            body = {
                'auth': {
                    'passwordCredentials': {
                        'username': cs.client.user,
                        'password': cs.client.password,
                    },
                    'tenantId': cs.client.tenant_id,
                },
            }

            token_url = cs.client.auth_url + "/tokens"
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data=json.dumps(body),
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)

            endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
            public_url = endpoints[0]["publicURL"].rstrip('/')
            self.assertEqual(cs.client.management_url, public_url)
            token_id = resp["access"]["token"]["id"]
            self.assertEqual(cs.client.auth_token, token_id)
            tenant_id = resp["access"]["token"]["tenant"]["id"]
            self.assertEqual(cs.client.tenant_id, tenant_id)

        test_auth_call()

    def test_authenticate_failure(self):
        cs = client.Client("username", "password", "project_id",
                           "auth_url/v2.0")
        resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}}
        auth_response = utils.TestResponse({
            "status_code": 401,
            "text": json.dumps(resp),
        })

        mock_request = mock.Mock(return_value=(auth_response))

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)

        test_auth_call()

    def test_auth_redirect(self):
        cs = client.Client("username", "password", "project_id",
                           "auth_url/v2", service_type='compute')
        dict_correct_response = {
            "access": {
                "token": {
                    "expires": "12345",
                    "id": "FAKE_ID",
                },
                "serviceCatalog": [
                    {
                        "type": "compute",
                        "endpoints": [
                            {
                                "adminURL": "http://localhost:8774/v2",
                                "region": "RegionOne",
                                "internalURL": "http://localhost:8774/v2",
                                "publicURL": "http://localhost:8774/v2/",
                            },
                        ],
                    },
                ],
            },
        }
        correct_response = json.dumps(dict_correct_response)
        dict_responses = [
            {"headers": {'location': 'http://127.0.0.1:5001'},
             "status_code": 305,
             "text": "Use proxy"},
            # Configured on admin port, cinder redirects to the v2.0 port.
            # When trying to connect on it, keystone auth succeeds by the
            # v1.0 protocol (through headers) but tokens are being returned
            # in the body (looks like a keystone bug). Left for compatibility.
            {"headers": {},
             "status_code": 200,
             "text": correct_response},
            {"headers": {},
             "status_code": 200,
             "text": correct_response}
        ]

        responses = [(utils.TestResponse(resp)) for resp in dict_responses]

        def side_effect(*args, **kwargs):
            return responses.pop(0)

        mock_request = mock.Mock(side_effect=side_effect)

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'User-Agent': cs.client.USER_AGENT,
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            }
            body = {
                'auth': {
                    'passwordCredentials': {
                        'username': cs.client.user,
                        'password': cs.client.password,
                    },
                    'tenantName': cs.client.projectid,
                },
            }

            token_url = cs.client.auth_url + "/tokens"
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data=json.dumps(body),
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)

            resp = dict_correct_response
            endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
            public_url = endpoints[0]["publicURL"].rstrip('/')
            self.assertEqual(cs.client.management_url, public_url)
            token_id = resp["access"]["token"]["id"]
            self.assertEqual(cs.client.auth_token, token_id)

        test_auth_call()

    def test_ambiguous_endpoints(self):
        cs = client.Client("username", "password", "project_id",
                           "auth_url/v2.0", service_type='compute')
        resp = {
            "access": {
                "token": {
                    "expires": "12345",
                    "id": "FAKE_ID",
                },
                "serviceCatalog": [
                    {
                        "adminURL": "http://localhost:8774/v2",
                        "type": "compute",
                        "name": "Compute CLoud",
                        "endpoints": [
                            {
                                "region": "RegionOne",
                                "internalURL": "http://localhost:8774/v2",
                                "publicURL": "http://localhost:8774/v2/",
                            },
                        ],
                    },
                    {
                        "adminURL": "http://localhost:8774/v2",
                        "type": "compute",
                        "name": "Hyper-compute Cloud",
                        "endpoints": [
                            {
                                "internalURL": "http://localhost:8774/v2",
                                "publicURL": "http://localhost:8774/v2/",
                            },
                        ],
                    },
                ],
            },
        }
        auth_response = utils.TestResponse({
            "status_code": 200,
            "text": json.dumps(resp),
        })

        mock_request = mock.Mock(return_value=(auth_response))

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            self.assertRaises(exceptions.AmbiguousEndpoints,
                              cs.client.authenticate)

        test_auth_call()


class AuthenticationTests(utils.TestCase):
    def test_authenticate_success(self):
        cs = client.Client("username", "password", "project_id", "auth_url")
        management_url = 'https://localhost/v2.1/443470'
        auth_response = utils.TestResponse({
            'status_code': 204,
            'headers': {
                'x-server-management-url': management_url,
                'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1',
            },
        })
        mock_request = mock.Mock(return_value=(auth_response))

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'Accept': 'application/json',
                'X-Auth-User': 'username',
                'X-Auth-Key': 'password',
                'X-Auth-Project-Id': 'project_id',
                'User-Agent': cs.client.USER_AGENT
            }
            mock_request.assert_called_with(
                "GET",
                cs.client.auth_url,
                headers=headers,
                **self.TEST_REQUEST_BASE)

            self.assertEqual(cs.client.management_url,
                             auth_response.headers['x-server-management-url'])
            self.assertEqual(cs.client.auth_token,
                             auth_response.headers['x-auth-token'])

        test_auth_call()

    def test_authenticate_failure(self):
        cs = client.Client("username", "password", "project_id", "auth_url")
        auth_response = utils.TestResponse({"status_code": 401})
        mock_request = mock.Mock(return_value=(auth_response))

        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)

        test_auth_call()

    def test_auth_automatic(self):
        cs = client.Client("username", "password", "project_id", "auth_url")
        http_client = cs.client
        http_client.management_url = ''
        mock_request = mock.Mock(return_value=(None, None))

        @mock.patch.object(http_client, 'request', mock_request)
        @mock.patch.object(http_client, 'authenticate')
        def test_auth_call(m):
            http_client.get('/')
            m.assert_called()
            mock_request.assert_called()

        test_auth_call()

    def test_auth_manual(self):
        cs = client.Client("username", "password", "project_id", "auth_url")

        @mock.patch.object(cs.client, 'authenticate')
        def test_auth_call(m):
            cs.authenticate()
            m.assert_called()

        test_auth_call()
42
tests/v2/test_quota_classes.py
Normal file
@ -0,0 +1,42 @@
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tests import utils
from tests.v2 import fakes


cs = fakes.FakeClient()


class QuotaClassSetsTest(utils.TestCase):

    def test_class_quotas_get(self):
        class_name = 'test'
        cs.quota_classes.get(class_name)
        cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name)

    def test_update_quota(self):
        q = cs.quota_classes.get('test')
        q.update(volumes=2)
        cs.assert_called('PUT', '/os-quota-class-sets/test')

    def test_refresh_quota(self):
        q = cs.quota_classes.get('test')
        q2 = cs.quota_classes.get('test')
        self.assertEqual(q.volumes, q2.volumes)
        q2.volumes = 0
        self.assertNotEqual(q.volumes, q2.volumes)
        q2.get()
        self.assertEqual(q.volumes, q2.volumes)
52
tests/v2/test_quotas.py
Normal file
@@ -0,0 +1,52 @@
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tests import utils
from tests.v2 import fakes


cs = fakes.FakeClient()


class QuotaSetsTest(utils.TestCase):

    def test_tenant_quotas_get(self):
        tenant_id = 'test'
        cs.quotas.get(tenant_id)
        cs.assert_called('GET', '/os-quota-sets/%s' % tenant_id)

    def test_tenant_quotas_defaults(self):
        tenant_id = 'test'
        cs.quotas.defaults(tenant_id)
        cs.assert_called('GET', '/os-quota-sets/%s/defaults' % tenant_id)

    def test_update_quota(self):
        q = cs.quotas.get('test')
        q.update(volumes=2)
        q.update(snapshots=2)
        cs.assert_called('PUT', '/os-quota-sets/test')

    def test_refresh_quota(self):
        q = cs.quotas.get('test')
        q2 = cs.quotas.get('test')
        self.assertEqual(q.volumes, q2.volumes)
        self.assertEqual(q.snapshots, q2.snapshots)
        q2.volumes = 0
        self.assertNotEqual(q.volumes, q2.volumes)
        q2.snapshots = 0
        self.assertNotEqual(q.snapshots, q2.snapshots)
        q2.get()
        self.assertEqual(q.volumes, q2.volumes)
        self.assertEqual(q.snapshots, q2.snapshots)
159
tests/v2/test_shell.py
Normal file
@@ -0,0 +1,159 @@
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures

from cinderclient import client
from cinderclient import shell
from tests import utils
from tests.v2 import fakes


class ShellTest(utils.TestCase):

    FAKE_ENV = {
        'CINDER_USERNAME': 'username',
        'CINDER_PASSWORD': 'password',
        'CINDER_PROJECT_ID': 'project_id',
        'OS_VOLUME_API_VERSION': '2',
        'CINDER_URL': 'http://no.where',
    }

    # Patch os.environ to avoid required auth info.
    def setUp(self):
        """Run before each test."""
        super(ShellTest, self).setUp()
        for var in self.FAKE_ENV:
            self.useFixture(fixtures.EnvironmentVariable(var,
                                                         self.FAKE_ENV[var]))

        self.shell = shell.OpenStackCinderShell()

        #HACK(bcwaldon): replace this when we start using stubs
        self.old_get_client_class = client.get_client_class
        client.get_client_class = lambda *_: fakes.FakeClient

    def tearDown(self):
        # Tests such as test_image_meta_bad_action expect a SystemExit, so
        # self.shell.cs may never have been instantiated, which is fine;
        # make sure the attribute exists before using it.
        if hasattr(self.shell, 'cs'):
            self.shell.cs.clear_callstack()

        #HACK(bcwaldon): replace this when we start using stubs
        client.get_client_class = self.old_get_client_class
        super(ShellTest, self).tearDown()

    def run_command(self, cmd):
        self.shell.main(cmd.split())

    def assert_called(self, method, url, body=None, **kwargs):
        return self.shell.cs.assert_called(method, url, body, **kwargs)

    def assert_called_anytime(self, method, url, body=None):
        return self.shell.cs.assert_called_anytime(method, url, body)

    def test_list(self):
        self.run_command('list')
        # NOTE(jdg): we default to detail currently
        self.assert_called('GET', '/volumes/detail')

    def test_list_filter_status(self):
        self.run_command('list --status=available')
        self.assert_called('GET', '/volumes/detail?status=available')

    def test_list_filter_name(self):
        self.run_command('list --name=1234')
        self.assert_called('GET', '/volumes/detail?name=1234')

    def test_list_all_tenants(self):
        self.run_command('list --all-tenants=1')
        self.assert_called('GET', '/volumes/detail?all_tenants=1')

    def test_show(self):
        self.run_command('show 1234')
        self.assert_called('GET', '/volumes/1234')

    def test_delete(self):
        self.run_command('delete 1234')
        self.assert_called('DELETE', '/volumes/1234')

    def test_snapshot_list_filter_volume_id(self):
        self.run_command('snapshot-list --volume-id=1234')
        self.assert_called('GET', '/snapshots/detail?volume_id=1234')

    def test_snapshot_list_filter_status_and_volume_id(self):
        self.run_command('snapshot-list --status=available --volume-id=1234')
        self.assert_called('GET', '/snapshots/detail?'
                           'status=available&volume_id=1234')

    def test_rename(self):
        # basic rename with positional arguments
        self.run_command('rename 1234 new-name')
        expected = {'volume': {'name': 'new-name'}}
        self.assert_called('PUT', '/volumes/1234', body=expected)
        # change description only
        self.run_command('rename 1234 --description=new-description')
        expected = {'volume': {'description': 'new-description'}}
        self.assert_called('PUT', '/volumes/1234', body=expected)
        # rename and change description
        self.run_command('rename 1234 new-name '
                         '--description=new-description')
        expected = {'volume': {
            'name': 'new-name',
            'description': 'new-description',
        }}
        self.assert_called('PUT', '/volumes/1234', body=expected)
        # noop, the only call will be the lookup
        self.run_command('rename 1234')
        self.assert_called('GET', '/volumes/1234')

    def test_rename_snapshot(self):
        # basic rename with positional arguments
        self.run_command('snapshot-rename 1234 new-name')
        expected = {'snapshot': {'name': 'new-name'}}
        self.assert_called('PUT', '/snapshots/1234', body=expected)
        # change description only
        self.run_command('snapshot-rename 1234 '
                         '--description=new-description')
        expected = {'snapshot': {'description': 'new-description'}}
        self.assert_called('PUT', '/snapshots/1234', body=expected)
        # snapshot-rename and change description
        self.run_command('snapshot-rename 1234 new-name '
                         '--description=new-description')
        expected = {'snapshot': {
            'name': 'new-name',
            'description': 'new-description',
        }}
        self.assert_called('PUT', '/snapshots/1234', body=expected)
        # noop, the only call will be the lookup
        self.run_command('snapshot-rename 1234')
        self.assert_called('GET', '/snapshots/1234')

    def test_set_metadata_set(self):
        self.run_command('metadata 1234 set key1=val1 key2=val2')
        self.assert_called('POST', '/volumes/1234/metadata',
                           {'metadata': {'key1': 'val1', 'key2': 'val2'}})

    def test_set_metadata_delete_dict(self):
        self.run_command('metadata 1234 unset key1=val1 key2=val2')
        self.assert_called('DELETE', '/volumes/1234/metadata/key1')
        self.assert_called('DELETE', '/volumes/1234/metadata/key2', pos=-2)

    def test_set_metadata_delete_keys(self):
        self.run_command('metadata 1234 unset key1 key2')
        self.assert_called('DELETE', '/volumes/1234/metadata/key1')
        self.assert_called('DELETE', '/volumes/1234/metadata/key2', pos=-2)
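The setUp/tearDown pair above swaps client.get_client_class by hand, and the HACK(bcwaldon) comments ask for that to be replaced later. A minimal sketch of one way to do it with the fixtures library this module already imports (not part of this diff; the dotted path cinderclient.client.get_client_class is assumed from the "from cinderclient import client" usage above):

    # (Illustrative sketch, not part of this commit.)
    def setUp(self):
        """Run before each test."""
        super(ShellTest, self).setUp()
        for var in self.FAKE_ENV:
            self.useFixture(fixtures.EnvironmentVariable(var,
                                                         self.FAKE_ENV[var]))
        self.shell = shell.OpenStackCinderShell()
        # MonkeyPatch undoes itself when the fixture is cleaned up, so the
        # manual save/restore in tearDown would no longer be needed.
        self.useFixture(fixtures.MonkeyPatch(
            'cinderclient.client.get_client_class',
            lambda *_: fakes.FakeClient))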
50
tests/v2/test_types.py
Normal file
@@ -0,0 +1,50 @@
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient.v2 import volume_types
from tests import utils
from tests.v2 import fakes

cs = fakes.FakeClient()


class TypesTest(utils.TestCase):
    def test_list_types(self):
        tl = cs.volume_types.list()
        cs.assert_called('GET', '/types')
        for t in tl:
            self.assertTrue(isinstance(t, volume_types.VolumeType))

    def test_create(self):
        t = cs.volume_types.create('test-type-3')
        cs.assert_called('POST', '/types')
        self.assertTrue(isinstance(t, volume_types.VolumeType))

    def test_set_key(self):
        t = cs.volume_types.get(1)
        t.set_keys({'k': 'v'})
        cs.assert_called('POST',
                         '/types/1/extra_specs',
                         {'extra_specs': {'k': 'v'}})

    def test_unset_keys(self):
        t = cs.volume_types.get(1)
        t.unset_keys(['k'])
        cs.assert_called('DELETE', '/types/1/extra_specs/k')

    def test_delete(self):
        cs.volume_types.delete(1)
        cs.assert_called('DELETE', '/types/1')
53
tests/v2/test_volume_backups.py
Normal file
@@ -0,0 +1,53 @@
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tests import utils
from tests.v2 import fakes


cs = fakes.FakeClient()


class VolumeBackupsTest(utils.TestCase):

    def test_create(self):
        cs.backups.create('2b695faf-b963-40c8-8464-274008fbcef4')
        cs.assert_called('POST', '/backups')

    def test_get(self):
        backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
        cs.backups.get(backup_id)
        cs.assert_called('GET', '/backups/%s' % backup_id)

    def test_list(self):
        cs.backups.list()
        cs.assert_called('GET', '/backups/detail')

    def test_delete(self):
        b = cs.backups.list()[0]
        b.delete()
        cs.assert_called('DELETE',
                         '/backups/76a17945-3c6f-435c-975b-b5685db10b62')
        cs.backups.delete('76a17945-3c6f-435c-975b-b5685db10b62')
        cs.assert_called('DELETE',
                         '/backups/76a17945-3c6f-435c-975b-b5685db10b62')
        cs.backups.delete(b)
        cs.assert_called('DELETE',
                         '/backups/76a17945-3c6f-435c-975b-b5685db10b62')

    def test_restore(self):
        backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
        cs.restores.restore(backup_id)
        cs.assert_called('POST', '/backups/%s/restore' % backup_id)
87
tests/v2/test_volumes.py
Normal file
@@ -0,0 +1,87 @@
# Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tests import utils
from tests.v2 import fakes


cs = fakes.FakeClient()


class VolumesTest(utils.TestCase):

    def test_delete_volume(self):
        v = cs.volumes.list()[0]
        v.delete()
        cs.assert_called('DELETE', '/volumes/1234')
        cs.volumes.delete('1234')
        cs.assert_called('DELETE', '/volumes/1234')
        cs.volumes.delete(v)
        cs.assert_called('DELETE', '/volumes/1234')

    def test_create_volume(self):
        cs.volumes.create(1)
        cs.assert_called('POST', '/volumes')

    def test_attach(self):
        v = cs.volumes.get('1234')
        cs.volumes.attach(v, 1, '/dev/vdc')
        cs.assert_called('POST', '/volumes/1234/action')

    def test_detach(self):
        v = cs.volumes.get('1234')
        cs.volumes.detach(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_reserve(self):
        v = cs.volumes.get('1234')
        cs.volumes.reserve(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_unreserve(self):
        v = cs.volumes.get('1234')
        cs.volumes.unreserve(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_begin_detaching(self):
        v = cs.volumes.get('1234')
        cs.volumes.begin_detaching(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_roll_detaching(self):
        v = cs.volumes.get('1234')
        cs.volumes.roll_detaching(v)
        cs.assert_called('POST', '/volumes/1234/action')

    def test_initialize_connection(self):
        v = cs.volumes.get('1234')
        cs.volumes.initialize_connection(v, {})
        cs.assert_called('POST', '/volumes/1234/action')

    def test_terminate_connection(self):
        v = cs.volumes.get('1234')
        cs.volumes.terminate_connection(v, {})
        cs.assert_called('POST', '/volumes/1234/action')

    def test_set_metadata(self):
        cs.volumes.set_metadata(1234, {'k1': 'v2'})
        cs.assert_called('POST', '/volumes/1234/metadata',
                         {'metadata': {'k1': 'v2'}})

    def test_delete_metadata(self):
        keys = ['key1']
        cs.volumes.delete_metadata(1234, keys)
        cs.assert_called('DELETE', '/volumes/1234/metadata/key1')
15
tools/cinder.bash_completion
Normal file
@@ -0,0 +1,15 @@
_cinder()
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"

    opts="$(cinder bash_completion)"

    COMPLETION_CACHE=~/.cinderclient/*/*-cache
    opts+=" "$(cat $COMPLETION_CACHE 2> /dev/null | tr '\n' ' ')

    COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
}
complete -F _cinder cinder
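For the completion function to take effect, this file would typically be sourced from the user's shell profile or dropped into the system completion directory (for example /etc/bash_completion.d/cinder); the commit itself does not install it anywhere.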
3
tools/generate_authors.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

git shortlog -se | cut -c8-
245
tools/install_venv.py
Normal file
@@ -0,0 +1,245 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Installation script for python-cinderclient's development virtualenv
"""

import optparse
import os
import subprocess
import sys
import platform


ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires')
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])


def die(message, *args):
    print >> sys.stderr, message % args
    sys.exit(1)


def check_python_version():
    if sys.version_info < (2, 6):
        die("Need Python Version >= 2.6")


def run_command_with_code(cmd, redirect_output=True, check_exit_code=True):
    """
    Runs a command in an out-of-process shell, returning the
    output of that command. Working directory is ROOT.
    """
    if redirect_output:
        stdout = subprocess.PIPE
    else:
        stdout = None

    proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
    output = proc.communicate()[0]
    if check_exit_code and proc.returncode != 0:
        die('Command "%s" failed.\n%s', ' '.join(cmd), output)
    return (output, proc.returncode)


def run_command(cmd, redirect_output=True, check_exit_code=True):
    return run_command_with_code(cmd, redirect_output, check_exit_code)[0]


class Distro(object):

    def check_cmd(self, cmd):
        return bool(run_command(['which', cmd], check_exit_code=False).strip())

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print 'Installing virtualenv via easy_install...',
            if run_command(['easy_install', 'virtualenv']):
                print 'Succeeded'
                return
            else:
                print 'Failed'

        die('ERROR: virtualenv not found.\n\nDevelopment'
            ' requires virtualenv, please install it using your'
            ' favorite package management tool')

    def post_process(self):
        """Any distribution-specific post-processing gets done here.

        In particular, this is useful for applying patches to code inside
        the venv."""
        pass


class Debian(Distro):
    """This covers all Debian-based distributions."""

    def check_pkg(self, pkg):
        return run_command_with_code(['dpkg', '-l', pkg],
                                     check_exit_code=False)[1] == 0

    def apt_install(self, pkg, **kwargs):
        run_command(['sudo', 'apt-get', 'install', '-y', pkg], **kwargs)

    def apply_patch(self, originalfile, patchfile):
        run_command(['patch', originalfile, patchfile])

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if not self.check_pkg('python-virtualenv'):
            self.apt_install('python-virtualenv', check_exit_code=False)

        super(Debian, self).install_virtualenv()


class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux"""

    def check_pkg(self, pkg):
        return run_command_with_code(['rpm', '-q', pkg],
                                     check_exit_code=False)[1] == 0

    def yum_install(self, pkg, **kwargs):
        run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs)

    def apply_patch(self, originalfile, patchfile):
        run_command(['patch', originalfile, patchfile])

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if not self.check_pkg('python-virtualenv'):
            self.yum_install('python-virtualenv', check_exit_code=False)

        super(Fedora, self).install_virtualenv()


def get_distro():
    if os.path.exists('/etc/fedora-release') or \
            os.path.exists('/etc/redhat-release'):
        return Fedora()
    elif os.path.exists('/etc/debian_version'):
        return Debian()
    else:
        return Distro()


def check_dependencies():
    get_distro().install_virtualenv()


def create_virtualenv(venv=VENV, no_site_packages=True):
    """Creates the virtual environment and installs PIP only into the
    virtual environment
    """
    print 'Creating venv...',
    if no_site_packages:
        run_command(['virtualenv', '-q', '--no-site-packages', VENV])
    else:
        run_command(['virtualenv', '-q', VENV])
    print 'done.'
    print 'Installing pip in virtualenv...',
    if not run_command(['tools/with_venv.sh', 'easy_install',
                        'pip>1.0']).strip():
        die("Failed to install pip.")
    print 'done.'


def pip_install(*args):
    run_command(['tools/with_venv.sh',
                 'pip', 'install', '--upgrade'] + list(args),
                redirect_output=False)


def install_dependencies(venv=VENV):
    print 'Installing dependencies with pip (this can take a while)...'

    # First things first, make sure our venv has the latest pip and
    # distribute.
    pip_install('pip')
    pip_install('distribute')

    pip_install('-r', PIP_REQUIRES)
    pip_install('-r', TEST_REQUIRES)
    # Tell the virtual env how to "import cinder"
    pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
                           "cinderclient.pth")
    f = open(pthfile, 'w')
    f.write("%s\n" % ROOT)


def post_process():
    get_distro().post_process()


def print_help():
    help = """
 python-cinderclient development environment setup is complete.

 python-cinderclient development uses virtualenv to track and manage Python
 dependencies while in development and testing.

 To activate the python-cinderclient virtualenv for the extent of your
 current shell session you can run:

 $ source .venv/bin/activate

 Or, if you prefer, you can run commands in the virtualenv on a case by case
 basis by running:

 $ tools/with_venv.sh <your command>

 Also, make test will automatically use the virtualenv.
 """
    print help


def parse_args():
    """Parse command-line arguments"""
    parser = optparse.OptionParser()
    parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
                      default=False, action="store_true",
                      help="Do not inherit packages from global Python install")
    return parser.parse_args()


def main(argv):
    (options, args) = parse_args()
    check_python_version()
    check_dependencies()
    create_virtualenv(no_site_packages=options.no_site_packages)
    install_dependencies()
    post_process()
    print_help()


if __name__ == '__main__':
    main(sys.argv)
Some files were not shown because too many files have changed in this diff.