Initial split from python-novaclient.

This commit is contained in:
Jenkins 2012-05-21 16:32:35 -04:00 committed by Monty Taylor
commit 471704df64
62 changed files with 5949 additions and 0 deletions

11
.gitignore vendored Normal file

@ -0,0 +1,11 @@
.coverage
.venv
*,cover
cover
*.pyc
.idea
*.swp
*~
build
dist
python_novaclient.egg-info

4
.gitreview Normal file

@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/python-cinderclient.git

15
.mailmap Normal file

@ -0,0 +1,15 @@
Antony Messerli <amesserl@rackspace.com> root <root@debian.ohthree.com>
<amesserl@rackspace.com> <root@debian.ohthree.com>
<brian.waldon@rackspace.com> <bcwaldon@gmail.com>
Chris Behrens <cbehrens+github@codestud.com> comstud <cbehrens+github@codestud.com>
<cbehrens+github@codestud.com> <cbehrens@codestud.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com> jerdfelt <johannes@erdfelt.com>
<johannes.erdfelt@rackspace.com> <johannes@erdfelt.com>
<josh@jk0.org> <jkearney@nova.(none)>
<sandy@darksecretsoftware.com> <sandy.walsh@rackspace.com>
<sandy@darksecretsoftware.com> <sandy@sandywalsh.com>
Andy Smith <github@anarkystic.com> termie <github@anarkystic.com>
<chmouel.boudjnah@rackspace.co.uk> <chmouel@chmouel.com>
<matt.dietz@rackspace.com> <matthew.dietz@gmail.com>
Nikolay Sokolov <nsokolov@griddynamics.com> Nokolay Sokolov <nsokolov@griddynamics.com>
Nikolay Sokolov <nsokolov@griddynamics.com> Nokolay Sokolov <chemikadze@gmail.com>

60
AUTHORS Normal file

@ -0,0 +1,60 @@
Aaron Lee <aaron.lee@rackspace.com>
Alex Meade <alex.meade@rackspace.com>
Alvaro Lopez Garcia <aloga@ifca.unican.es>
Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy Smith <github@anarkystic.com>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <amesserl@rackspace.com>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Brian Lamar <brian.lamar@rackspace.com>
Brian Waldon <brian.waldon@rackspace.com>
Chmouel Boudjnah <chmouel.boudjnah@rackspace.co.uk>
Chris Behrens <cbehrens+github@codestud.com>
Christian Berendt <berendt@b1-systems.de>
Christopher MacGown <ignoti+github@gmail.com>
Chuck Thier <cthier@gmail.com>
Cole Robinson <crobinso@redhat.com>
Dan Prince <dprince@redhat.com>
Dan Wendlandt <dan@nicira.com>
Dave Walker <Dave.Walker@canonical.com>
Dean Troyer <dtroyer@gmail.com>
Ed Leafe <ed@leafe.com>
Edouard Thuleau <edouard1.thuleau@orange.com>
Eldar Nugaev <eldr@ya.ru>
François Charlier <francois.charlier@ecindernce.com>
Gabriel Hurley <gabriel@strikeawe.com>
Gaurav Gupta <gaurav@denali-systems.com>
Hengqing Hu <hudayou@hotmail.com>
Ilya Alekseyev <ilyaalekseyev@acm.org>
Jake Dahn <admin@jakedahn.com>
James E. Blair <james.blair@rackspace.com>
Jason Kölker <jason@koelker.net>
Jason Straw <jason.straw@rackspace.com>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
John Garbutt <john.garbutt@citrix.com>
Josh Kearney <josh@jk0.org>
Juan G. Hernando Rivero <ghe.rivero@stackops.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kiall Mac Innes <kiall@managedit.ie>
Kirill Shileev <kshileev@griddynamics.com>
Lvov Maxim <mlvov@mirantis.com>
Matt Dietz <matt.dietz@rackspace.com>
Matt Stephenson <mattstep@mattstep.net>
Michael Basnight <mbasnight@gmail.com>
Nicholas Mistry <nmistry@gmail.com>
Nikolay Sokolov <nsokolov@griddynamics.com>
Pádraig Brady <pbrady@redhat.com>
Pavel Shkitin <pshkitin@griddynamics.com>
Peng Yong <ppyy@pubyun.com>
Rick Harris <rconradharris@gmail.com>
Robie Basak <robie.basak@canonical.com>
Russell Bryant <rbryant@redhat.com>
Sandy Walsh <sandy@darksecretsoftware.com>
Unmesh Gurjar <unmesh.gurjar@vertex.co.in>
William Wolf <throughnothing@gmail.com>
Yaguang Tang <heut2008@gmail.com>
Zhongyue Luo <lzyeval@gmail.com>
Scott Moser <smoser@ubuntu.com>
Paul Voccio <paul@substation9.com>

115
HACKING Normal file

@ -0,0 +1,115 @@
Nova Style Commandments
=======================
Step 1: Read http://www.python.org/dev/peps/pep-0008/
Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
Step 3: Read on
Imports
-------
- thou shalt not import objects, only modules
- thou shalt not import more than one module per line
- thou shalt not make relative imports
- thou shalt organize your imports according to the following template
::
# vim: tabstop=4 shiftwidth=4 softtabstop=4
{{stdlib imports in human alphabetical order}}
\n
{{cinder imports in human alphabetical order}}
\n
\n
{{begin your code}}
General
-------
- thou shalt put two newlines twixt toplevel code (funcs, classes, etc)
- thou shalt put one newline twixt methods in classes and anywhere else
- thou shalt not write "except:", use "except Exception:" at the very least
- thou shalt include your name with TODOs as in "TODO(termie)"
- thou shalt not name anything the same name as a builtin or reserved word
- thou shalt not violate causality in our time cone, or else
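A short sketch that pulls several of these commandments together (the function
and class names here are purely illustrative)::
import hashlib


# TODO(termie): cover the error paths in a follow-up change
def file_checksum(path):
    """Return the md5 hex digest of the file at path."""
    try:
        return hashlib.md5(open(path).read()).hexdigest()
    except Exception:
        return None


class ChecksumCache(object):
    """Cache checksums keyed by path."""

    def __init__(self):
        self._cache = {}

    def store(self, path, digest):
        self._cache[path] = digest

    def lookup(self, path):
        return self._cache.get(path)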
Human Alphabetical Order Examples
---------------------------------
::
import httplib
import logging
import random
import StringIO
import time
import unittest
from cinder import flags
from cinder import test
from cinder.auth import users
from cinder.endpoint import api
from cinder.endpoint import cloud
Docstrings
----------
"""A one line docstring looks like this and ends in a period."""
"""A multiline docstring has a one-line summary, less than 80 characters.
Then a new paragraph after a newline that explains in more detail any
general information about the function, class or method. Example usages
are also great to have here if it is a complex class or function. After
you have finished your descriptions add an extra newline and close the
quotations.
When writing the docstring for a class, an extra line should be placed
after the closing quotations. For more in-depth explanations for these
decisions see http://www.python.org/dev/peps/pep-0257/
If you are going to describe parameters and return values, use Sphinx, the
appropriate syntax is as follows.
:param foo: the foo parameter
:param bar: the bar parameter
:returns: description of the return value
"""
Text encoding
-------------
- All text within python code should be of type 'unicode'.
WRONG:
>>> s = 'foo'
>>> s
'foo'
>>> type(s)
<type 'str'>
RIGHT:
>>> u = u'foo'
>>> u
u'foo'
>>> type(u)
<type 'unicode'>
- Transitions between internal unicode and external strings should always
be immediately and explicitly encoded or decoded.
- All external text that is not explicitly encoded (database storage,
commandline arguments, etc.) should be presumed to be encoded as utf-8.
WRONG:
mystring = infile.readline()
myreturnstring = do_some_magic_with(mystring)
outfile.write(myreturnstring)
RIGHT:
mystring = infile.readline()
mytext = mystring.decode('utf-8')
returntext = do_some_magic_with(mytext)
returnstring = returntext.encode('utf-8')
outfile.write(returnstring)

208
LICENSE Normal file

@ -0,0 +1,208 @@
Copyright (c) 2009 Jacob Kaplan-Moss - initial codebase (< v2.1)
Copyright (c) 2011 Rackspace - OpenStack extensions (>= v2.1)
All rights reserved.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
--- License for python-cinderclient versions prior to 2.1 ---
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of this project nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

8
MANIFEST.in Normal file

@ -0,0 +1,8 @@
include AUTHORS
include HACKING
include LICENSE
include README.rst
include run_tests.sh tox.ini
recursive-include docs *
recursive-include tests *
recursive-include tools *

155
README.rst Normal file

@ -0,0 +1,155 @@
Python bindings to the OpenStack Volume API
===========================================
This is a client for the OpenStack Volume API. There's a Python API (the
``cinderclient`` module), and a command-line script (``cinder``). Each
implements 100% of the OpenStack Volume API.
[PENDING] `Full documentation is available`__.
__ http://packages.python.org/python-cinderclient/
You'll also probably want to read the `OpenStack Compute API Developer Guide`__ --
the first bit, at least -- to get an idea of the concepts. Rackspace is doing
the cloud hosting thing a bit differently from Amazon, and if you get the
concepts this library should make more sense.
__ http://docs.openstack.org/api/
The project is hosted on `Launchpad`_, where bugs can be filed. The code is
hosted on `Github`_. Patches must be submitted using `Gerrit`_, *not* Github
pull requests.
.. _Github: https://github.com/openstack/python-cinderclient
.. _Launchpad: https://launchpad.net/python-cinderclient
.. _Gerrit: http://wiki.openstack.org/GerritWorkflow
This code is a fork of `Jacobian's python-cloudservers`__. If you need support
for the Rackspace API solely, or need the BSD license, you should use that
repository. python-cinderclient is licensed under the Apache License like the
rest of OpenStack.
__ http://github.com/jacobian/python-cloudservers
.. contents:: Contents:
:local:
Command-line API
----------------
Installing this package gets you a shell command, ``cinder``, that you
can use to interact with any Rackspace compatible API (including OpenStack).
You'll need to provide your OpenStack username and password. You can do this
with the ``--os_username``, ``--os_password`` and ``--os_tenant_name``
params, but it's easier to just set them as environment variables::
export OS_USERNAME=openstack
export OS_PASSWORD=yadayada
export OS_TENANT_NAME=myproject
You will also need to define the authentication URL with ``--os_auth_url`` and
the version of the API with ``--os_volume_api_version``, or set them as
environment variables as well::
export OS_AUTH_URL=http://example.com:8776/v1/
export OS_VOLUME_API_VERSION=1
If you are using Keystone, you need to point ``OS_AUTH_URL`` at the Keystone
endpoint::
export OS_AUTH_URL=http://example.com:5000/v2.0/
Since Keystone can return multiple regions in the Service Catalog, you
can specify the one you want with ``--os_region_name`` (or
``export OS_REGION_NAME``). It defaults to the first in the list returned.
You'll find complete documentation on the shell by running
``cinder help``::
usage: cinder [--debug] [--os_username OS_USERNAME] [--os_password OS_PASSWORD]
[--os_tenant_name OS_TENANT_NAME] [--os_auth_url OS_AUTH_URL]
[--os_region_name OS_REGION_NAME] [--service_type SERVICE_TYPE]
[--service_name SERVICE_NAME] [--endpoint_type ENDPOINT_TYPE]
[--os_volume_api_version VERSION] [--username USERNAME]
[--region_name REGION_NAME] [--apikey APIKEY]
[--projectid PROJECTID] [--url URL]
<subcommand> ...
Command-line interface to the OpenStack Volume API.
Positional arguments:
<subcommand>
create Add a new volume.
credentials Show user credentials returned from auth
delete Remove a volume.
endpoints Discover endpoints that get returned from the
authentication service
list List all the volumes.
show Show details about a volume.
snapshot-create Add a new snapshot.
snapshot-delete Remove a snapshot.
snapshot-list List all the snapshots.
snapshot-show Show details about a snapshot.
type-create Create a new volume type.
type-delete Delete a specific volume type.
type-list Print a list of available 'volume types'.
bash-completion Prints all of the commands and options to stdout so that
the cinder.bash_completion script doesn't have to hard code them.
help Display help about this program or one of its
subcommands.
Optional arguments:
--debug Print debugging output
--os_username OS_USERNAME
Defaults to env[OS_USERNAME].
--os_password OS_PASSWORD
Defaults to env[OS_PASSWORD].
--os_tenant_name OS_TENANT_NAME
Defaults to env[OS_TENANT_NAME].
--os_auth_url OS_AUTH_URL
Defaults to env[OS_AUTH_URL].
--os_region_name OS_REGION_NAME
Defaults to env[OS_REGION_NAME].
--service_type SERVICE_TYPE
Defaults to compute for most actions
--service_name SERVICE_NAME
Defaults to env[CINDER_SERVICE_NAME]
--endpoint_type ENDPOINT_TYPE
Defaults to env[CINDER_ENDPOINT_TYPE] or publicURL.
--os_volume_api_version VERSION
Accepts 1, defaults to env[OS_VOLUME_API_VERSION].
--username USERNAME Deprecated
--region_name REGION_NAME
Deprecated
--apikey APIKEY, --password APIKEY
Deprecated
--projectid PROJECTID, --tenant_name PROJECTID
Deprecated
--url URL, --auth_url URL
Deprecated
See "cinder help COMMAND" for help on a specific command.
Python API
----------
[PENDING] There's also a `complete Python API`__.
__ http://packages.python.org/python-cinderclient/
Quick-start using keystone::
# use v2.0 auth with http://example.com:5000/v2.0/
>>> from cinderclient.v1 import client
>>> nt = client.Client(USER, PASS, TENANT, AUTH_URL, service_type="compute")
>>> nt.flavors.list()
[...]
>>> nt.servers.list()
[...]
>>> nt.keypairs.list()
[...]
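The volume client exposes volume-oriented managers rather than the compute
ones shown above; a rough sketch, with manager and argument names assumed
rather than guaranteed (check the ``cinderclient.v1`` client for the exact
attributes)::
>>> vol = nt.volumes.create(1, display_name="example")  # size in GB
>>> nt.volumes.list()
[...]
>>> nt.volumes.delete(vol)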
What's new?
-----------
[PENDING] See `the release notes <http://packages.python.org/python-cinderclient/releases.html>`_.

0
cinderclient/__init__.py Normal file

293
cinderclient/base.py Normal file

@ -0,0 +1,293 @@
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import contextlib
import hashlib
import os
from cinderclient import exceptions
from cinderclient import utils
# Python 2.4 compat
try:
all
except NameError:
def all(iterable):
return True not in (not x for x in iterable)
def getid(obj):
"""
Abstracts the common pattern of allowing both an object or an object's ID
as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
class Manager(utils.HookableMixin):
"""
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, api):
self.api = api
def _list(self, url, response_key, obj_class=None, body=None):
resp = None
if body:
resp, body = self.api.client.post(url, body=body)
else:
resp, body = self.api.client.get(url)
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
if isinstance(data, dict):
try:
data = data['values']
except KeyError:
pass
with self.completion_cache('human_id', obj_class, mode="w"):
with self.completion_cache('uuid', obj_class, mode="w"):
return [obj_class(self, res, loaded=True)
for res in data if res]
@contextlib.contextmanager
def completion_cache(self, cache_type, obj_class, mode):
"""
The completion cache stores items that can be used for bash
autocompletion, like UUIDs or human-friendly IDs.
A resource listing will clear and repopulate the cache.
A resource create will append to the cache.
Delete is not handled because listings are assumed to be performed
often enough to keep the cache reasonably up-to-date.
"""
base_dir = utils.env('CINDERCLIENT_UUID_CACHE_DIR',
default="~/.cinderclient")
# NOTE(sirp): Keep separate UUID caches for each username + endpoint
# pair
username = utils.env('OS_USERNAME', 'CINDER_USERNAME')
url = utils.env('OS_URL', 'CINDER_URL')
uniqifier = hashlib.md5(username + url).hexdigest()
cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier))
try:
os.makedirs(cache_dir, 0755)
except OSError:
# NOTE(kiall): This is typically either permission denied while
# attempting to create the directory, or the directory
# already exists. Either way, don't fail.
pass
resource = obj_class.__name__.lower()
filename = "%s-%s-cache" % (resource, cache_type.replace('_', '-'))
path = os.path.join(cache_dir, filename)
cache_attr = "_%s_cache" % cache_type
try:
setattr(self, cache_attr, open(path, mode))
except IOError:
# NOTE(kiall): This is typically a permission denied while
# attempting to write the cache file.
pass
try:
yield
finally:
cache = getattr(self, cache_attr, None)
if cache:
cache.close()
delattr(self, cache_attr)
def write_to_completion_cache(self, cache_type, val):
cache = getattr(self, "_%s_cache" % cache_type, None)
if cache:
cache.write("%s\n" % val)
def _get(self, url, response_key=None):
resp, body = self.api.client.get(url)
if response_key:
return self.resource_class(self, body[response_key], loaded=True)
else:
return self.resource_class(self, body, loaded=True)
def _create(self, url, body, response_key, return_raw=False, **kwargs):
self.run_hooks('modify_body_for_create', body, **kwargs)
resp, body = self.api.client.post(url, body=body)
if return_raw:
return body[response_key]
with self.completion_cache('human_id', self.resource_class, mode="a"):
with self.completion_cache('uuid', self.resource_class, mode="a"):
return self.resource_class(self, body[response_key])
def _delete(self, url):
resp, body = self.api.client.delete(url)
def _update(self, url, body, **kwargs):
self.run_hooks('modify_body_for_update', body, **kwargs)
resp, body = self.api.client.put(url, body=body)
return body
class ManagerWithFind(Manager):
"""
Like a `Manager`, but with additional `find()`/`findall()` methods.
"""
def find(self, **kwargs):
"""
Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = "No %s matching %s." % (self.resource_class.__name__, kwargs)
raise exceptions.NotFound(404, msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch
else:
return matches[0]
def findall(self, **kwargs):
"""
Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
def list(self):
raise NotImplementedError
class Resource(object):
"""
A resource represents a particular instance of an object (server, flavor,
etc). This is pretty much just a bag for attributes.
:param manager: Manager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
HUMAN_ID = False
def __init__(self, manager, info, loaded=False):
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
# NOTE(sirp): ensure `id` is already present because if it isn't we'll
# enter an infinite loop of __getattr__ -> get -> __init__ ->
# __getattr__ -> ...
if 'id' in self.__dict__ and len(str(self.id)) == 36:
self.manager.write_to_completion_cache('uuid', self.id)
human_id = self.human_id
if human_id:
self.manager.write_to_completion_cache('human_id', human_id)
@property
def human_id(self):
"""Subclasses may override this to provide a pretty ID which can be used
for bash completion.
"""
if 'name' in self.__dict__ and self.HUMAN_ID:
return utils.slugify(self.name)
return None
def _add_details(self, info):
for (k, v) in info.iteritems():
try:
setattr(self, k, v)
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
#NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def get(self):
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
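# Illustrative only -- not part of this module. A minimal sketch of how a
# concrete manager and resource are typically built on top of the classes
# above; the Volume name and the /volumes URLs are assumptions made for the
# example rather than anything defined in this file.
class Volume(Resource):
    """A block storage volume (example resource)."""
    HUMAN_ID = True


class VolumeManager(ManagerWithFind):
    """Example manager wiring the CRUD helpers to /volumes endpoints."""
    resource_class = Volume

    def get(self, volume):
        return self._get("/volumes/%s" % getid(volume), "volume")

    def list(self):
        return self._list("/volumes/detail", "volumes")

    def delete(self, volume):
        self._delete("/volumes/%s" % getid(volume))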

330
cinderclient/client.py Normal file

@ -0,0 +1,330 @@
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
import httplib2
import logging
import os
import urlparse
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
from cinderclient import exceptions
from cinderclient import service_catalog
from cinderclient import utils
_logger = logging.getLogger(__name__)
if 'CINDERCLIENT_DEBUG' in os.environ and os.environ['CINDERCLIENT_DEBUG']:
ch = logging.StreamHandler()
_logger.setLevel(logging.DEBUG)
_logger.addHandler(ch)
class HTTPClient(httplib2.Http):
USER_AGENT = 'python-cinderclient'
def __init__(self, user, password, projectid, auth_url, insecure=False,
timeout=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', service_type=None,
service_name=None, volume_service_name=None):
super(HTTPClient, self).__init__(timeout=timeout)
self.user = user
self.password = password
self.projectid = projectid
self.auth_url = auth_url.rstrip('/')
self.version = 'v1'
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_type = service_type
self.service_name = service_name
self.volume_service_name = volume_service_name
self.management_url = None
self.auth_token = None
self.proxy_token = proxy_token
self.proxy_tenant_id = proxy_tenant_id
# httplib2 overrides
self.force_exception_to_status_code = True
self.disable_ssl_certificate_validation = insecure
def http_log(self, args, kwargs, resp, body):
if not _logger.isEnabledFor(logging.DEBUG):
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
_logger.debug("REQ: %s\n" % "".join(string_parts))
if 'body' in kwargs:
_logger.debug("REQ BODY: %s\n" % (kwargs['body']))
_logger.debug("RESP:%s %s\n", resp, body)
def request(self, *args, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['body'] = json.dumps(kwargs['body'])
resp, body = super(HTTPClient, self).request(*args, **kwargs)
self.http_log(args, kwargs, resp, body)
if body:
try:
body = json.loads(body)
except ValueError:
pass
else:
body = None
if resp.status >= 400:
raise exceptions.from_response(resp, body)
return resp, body
def _cs_request(self, url, method, **kwargs):
if not self.management_url:
self.authenticate()
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
if self.projectid:
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
resp, body = self.request(self.management_url + url, method,
**kwargs)
return resp, body
except exceptions.Unauthorized, ex:
try:
self.authenticate()
resp, body = self.request(self.management_url + url, method,
**kwargs)
return resp, body
except exceptions.Unauthorized:
raise ex
def get(self, url, **kwargs):
return self._cs_request(url, 'GET', **kwargs)
def post(self, url, **kwargs):
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
return self._cs_request(url, 'DELETE', **kwargs)
def _extract_service_catalog(self, url, resp, body, extract_token=True):
"""See what the auth service told us and process the response.
We may get redirected to another site, fail or actually get
back a service catalog with a token and our endpoints."""
if resp.status == 200: # content must always be present
try:
self.auth_url = url
self.service_catalog = \
service_catalog.ServiceCatalog(body)
if extract_token:
self.auth_token = self.service_catalog.get_token()
management_url = self.service_catalog.url_for(
attr='region',
filter_value=self.region_name,
endpoint_type=self.endpoint_type,
service_type=self.service_type,
service_name=self.service_name,
volume_service_name=self.volume_service_name,)
self.management_url = management_url.rstrip('/')
return None
except exceptions.AmbiguousEndpoints:
print "Found more than one valid endpoint. Use a more " \
"restrictive filter"
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print "Could not find any suitable endpoint. Correct region?"
raise
elif resp.status == 305:
return resp['location']
else:
raise exceptions.from_response(resp, body)
def _fetch_endpoints_from_auth(self, url):
"""We have a token, but don't know the final endpoint for
the region. We have to go back to the auth service and
ask again. This request requires an admin-level token
to work. The proxy token supplied could be from a low-level enduser.
We can't get this from the keystone service endpoint, we have to use
the admin endpoint.
This will overwrite our admin token with the user token.
"""
# GET ...:5001/v2.0/tokens/#####/endpoints
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
% (self.proxy_token, self.proxy_tenant_id)])
_logger.debug("Using Endpoint URL: %s" % url)
resp, body = self.request(url, "GET",
headers={'X-Auth_Token': self.auth_token})
return self._extract_service_catalog(url, resp, body,
extract_token=False)
def authenticate(self):
magic_tuple = urlparse.urlsplit(self.auth_url)
scheme, netloc, path, query, frag = magic_tuple
port = magic_tuple.port
if port is None:
port = 80
path_parts = path.split('/')
for part in path_parts:
if len(part) > 0 and part[0] == 'v':
self.version = part
break
# TODO(sandy): Assume admin endpoint is 35357 for now.
# Ideally this is going to have to be provided by the service catalog.
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
admin_url = urlparse.urlunsplit(
(scheme, new_netloc, path, query, frag))
auth_url = self.auth_url
if self.version == "v2.0":
while auth_url:
if "CINDER_RAX_AUTH" in os.environ:
auth_url = self._rax_auth(auth_url)
else:
auth_url = self._v2_auth(auth_url)
# Are we acting on behalf of another user via an
# existing token? If so, our actual endpoints may
# be different than that of the admin token.
if self.proxy_token:
self._fetch_endpoints_from_auth(admin_url)
# Since keystone no longer returns the user token
# with the endpoints any more, we need to replace
# our service account token with the user token.
self.auth_token = self.proxy_token
else:
try:
while auth_url:
auth_url = self._v1_auth(auth_url)
# In some configurations cinder redirects to the v2.0 keystone
# endpoint. Also, the new location does not contain a real
# endpoint, only a hostname and port.
except exceptions.AuthorizationFailure:
if auth_url.find('v2.0') < 0:
auth_url = auth_url + '/v2.0'
self._v2_auth(auth_url)
def _v1_auth(self, url):
if self.proxy_token:
raise exceptions.NoTokenLookupException()
headers = {'X-Auth-User': self.user,
'X-Auth-Key': self.password}
if self.projectid:
headers['X-Auth-Project-Id'] = self.projectid
resp, body = self.request(url, 'GET', headers=headers)
if resp.status in (200, 204): # in some cases we get No Content
try:
mgmt_header = 'x-server-management-url'
self.management_url = resp[mgmt_header].rstrip('/')
self.auth_token = resp['x-auth-token']
self.auth_url = url
except KeyError:
raise exceptions.AuthorizationFailure()
elif resp.status == 305:
return resp['location']
else:
raise exceptions.from_response(resp, body)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
body = {"auth": {
"passwordCredentials": {"username": self.user,
"password": self.password}}}
if self.projectid:
body['auth']['tenantName'] = self.projectid
self._authenticate(url, body)
def _rax_auth(self, url):
"""Authenticate against the Rackspace auth service."""
body = {"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": self.user,
"apiKey": self.password,
"tenantName": self.projectid}}}
self._authenticate(url, body)
def _authenticate(self, url, body):
"""Authenticate and extract the service catalog."""
token_url = url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
tmp_follow_all_redirects = self.follow_all_redirects
self.follow_all_redirects = True
try:
resp, body = self.request(token_url, "POST", body=body)
finally:
self.follow_all_redirects = tmp_follow_all_redirects
return self._extract_service_catalog(url, resp, body)
def get_client_class(version):
version_map = {
'1': 'cinderclient.v1.client.Client',
}
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = "Invalid client version '%s'. must be one of: %s" % (
(version, ', '.join(version_map.keys())))
raise exceptions.UnsupportedVersion(msg)
return utils.import_class(client_path)
def Client(version, *args, **kwargs):
client_class = get_client_class(version)
return client_class(*args, **kwargs)
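# Illustrative only -- an example of how the version-dispatching factory above
# is typically used. The credential values are placeholders and the keyword
# arguments are assumed to mirror HTTPClient's signature.
def _example_usage():
    cs = Client('1', 'demo', 'secret', 'demo-project',
                'http://keystone.example.com:5000/v2.0/',
                service_type='volume')
    cs.authenticate()
    return cs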

146
cinderclient/exceptions.py Normal file

@ -0,0 +1,146 @@
# Copyright 2010 Jacob Kaplan-Moss
"""
Exception definitions.
"""
class UnsupportedVersion(Exception):
"""Indicates that the user is trying to use an unsupported
version of the API"""
pass
class CommandError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoUniqueMatch(Exception):
pass
class NoTokenLookupException(Exception):
"""This form of authentication does not support looking up
endpoints from an existing token."""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class AmbiguousEndpoints(Exception):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
self.endpoints = endpoints
def __str__(self):
return "AmbiguousEndpoints: %s" % repr(self.endpoints)
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
def __init__(self, code, message=None, details=None, request_id=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
self.request_id = request_id
def __str__(self):
formatted_string = "%s (HTTP %s)" % (self.message, self.code)
if self.request_id:
formatted_string += " (Request-ID: %s)" % self.request_id
return formatted_string
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class OverLimit(ClientException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
# NotImplemented is a python builtin.
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we cannot do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict((c.http_status, c) for c in [BadRequest, Unauthorized,
Forbidden, NotFound, OverLimit, HTTPNotImplemented])
def from_response(response, body):
"""
Return an instance of ClientException or one of its subclasses
based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status != 200:
raise exceptions.from_response(resp, body)
"""
cls = _code_map.get(response.status, ClientException)
request_id = response.get('x-compute-request-id')
if body:
message = "n/a"
details = "n/a"
if hasattr(body, 'keys'):
error = body[body.keys()[0]]
message = error.get('message', None)
details = error.get('details', None)
return cls(code=response.status, message=message, details=details,
request_id=request_id)
else:
return cls(code=response.status, request_id=request_id)

39
cinderclient/extension.py Normal file

@ -0,0 +1,39 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import base
from cinderclient import utils
class Extension(utils.HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
def __init__(self, name, module):
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
elif utils.safe_issubclass(attr_value, base.Manager):
self.manager_class = attr_value
def __repr__(self):
return "<Extension '%s'>" % self.name
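# Illustrative only -- the shape of an extension module that this descriptor
# understands. A module found on the python path (named
# *python_cinderclient_ext) or under a version's contrib/ directory needs
# little more than a Manager subclass and, optionally, the supported hook
# callables; everything below is a hypothetical example, not part of this file.
def __pre_parse_args__():
    # Hook invoked by the shell before the CLI arguments are parsed.
    pass


class ExampleManager(base.Manager):
    """Manager contributed by the hypothetical extension."""
    resource_class = base.Resource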

77
cinderclient/service_catalog.py Normal file

@ -0,0 +1,77 @@
# Copyright 2011 OpenStack LLC.
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cinderclient.exceptions
class ServiceCatalog(object):
"""Helper methods for dealing with a Keystone Service Catalog."""
def __init__(self, resource_dict):
self.catalog = resource_dict
def get_token(self):
return self.catalog['access']['token']['id']
def url_for(self, attr=None, filter_value=None,
service_type=None, endpoint_type='publicURL',
service_name=None, volume_service_name=None):
"""Fetch the public URL from the Compute service for
a particular endpoint attribute. If none given, return
the first. See tests for sample service catalog."""
matching_endpoints = []
if 'endpoints' in self.catalog:
# We have a bastardized service catalog. Treat it special. :/
for endpoint in self.catalog['endpoints']:
if not filter_value or endpoint[attr] == filter_value:
matching_endpoints.append(endpoint)
if not matching_endpoints:
raise cinderclient.exceptions.EndpointNotFound()
# We don't always get a service catalog back ...
if not 'serviceCatalog' in self.catalog['access']:
return None
# Full catalog ...
catalog = self.catalog['access']['serviceCatalog']
for service in catalog:
if service.get("type") != service_type:
continue
if (service_name and service_type == 'compute' and
service.get('name') != service_name):
continue
if (volume_service_name and service_type == 'volume' and
service.get('name') != volume_service_name):
continue
endpoints = service['endpoints']
for endpoint in endpoints:
if not filter_value or endpoint.get(attr) == filter_value:
endpoint["serviceName"] = service.get("name")
matching_endpoints.append(endpoint)
if not matching_endpoints:
raise cinderclient.exceptions.EndpointNotFound()
elif len(matching_endpoints) > 1:
raise cinderclient.exceptions.AmbiguousEndpoints(
endpoints=matching_endpoints)
else:
return matching_endpoints[0][endpoint_type]
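# Illustrative only -- roughly the catalog shape url_for() expects and one way
# it is typically called; the token and endpoint values are placeholders.
_EXAMPLE_CATALOG = {
    'access': {
        'token': {'id': 'example-token'},
        'serviceCatalog': [{
            'type': 'volume',
            'name': 'cinder',
            'endpoints': [{
                'region': 'RegionOne',
                'publicURL': 'http://volume.example.com:8776/v1/tenant',
            }],
        }],
    },
}
# ServiceCatalog(_EXAMPLE_CATALOG).url_for(attr='region',
#                                          filter_value='RegionOne',
#                                          service_type='volume')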

435
cinderclient/shell.py Normal file

@ -0,0 +1,435 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Volume API.
"""
import argparse
import glob
import httplib2
import imp
import itertools
import os
import pkgutil
import sys
import logging
from cinderclient import client
from cinderclient import exceptions as exc
import cinderclient.extension
from cinderclient import utils
from cinderclient.v1 import shell as shell_v1
DEFAULT_OS_VOLUME_API_VERSION = "1"
DEFAULT_CINDER_ENDPOINT_TYPE = 'publicURL'
DEFAULT_CINDER_SERVICE_TYPE = 'compute'
logger = logging.getLogger(__name__)
class CinderClientArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(CinderClientArgumentParser, self).__init__(*args, **kwargs)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
"""
self.print_usage(sys.stderr)
#FIXME(lzyeval): if changes occur in argparse.ArgParser._check_value
choose_from = ' (choose from'
progparts = self.prog.partition(' ')
self.exit(2, "error: %(errmsg)s\nTry '%(mainp)s help %(subp)s'"
" for more information.\n" %
{'errmsg': message.split(choose_from)[0],
'mainp': progparts[0],
'subp': progparts[2]})
class OpenStackCinderShell(object):
def get_base_parser(self):
parser = CinderClientArgumentParser(
prog='cinder',
description=__doc__.strip(),
epilog='See "cinder help COMMAND" '\
'for help on a specific command.',
add_help=False,
formatter_class=OpenStackHelpFormatter,
)
# Global arguments
parser.add_argument('-h', '--help',
action='store_true',
help=argparse.SUPPRESS,
)
parser.add_argument('--debug',
default=False,
action='store_true',
help="Print debugging output")
parser.add_argument('--os_username',
default=utils.env('OS_USERNAME', 'CINDER_USERNAME'),
help='Defaults to env[OS_USERNAME].')
parser.add_argument('--os_password',
default=utils.env('OS_PASSWORD', 'CINDER_PASSWORD'),
help='Defaults to env[OS_PASSWORD].')
parser.add_argument('--os_tenant_name',
default=utils.env('OS_TENANT_NAME', 'CINDER_PROJECT_ID'),
help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os_auth_url',
default=utils.env('OS_AUTH_URL', 'CINDER_URL'),
help='Defaults to env[OS_AUTH_URL].')
parser.add_argument('--os_region_name',
default=utils.env('OS_REGION_NAME', 'CINDER_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME].')
parser.add_argument('--service_type',
help='Defaults to compute for most actions')
parser.add_argument('--service_name',
default=utils.env('CINDER_SERVICE_NAME'),
help='Defaults to env[CINDER_SERVICE_NAME]')
parser.add_argument('--volume_service_name',
default=utils.env('CINDER_VOLUME_SERVICE_NAME'),
help='Defaults to env[CINDER_VOLUME_SERVICE_NAME]')
parser.add_argument('--endpoint_type',
default=utils.env('CINDER_ENDPOINT_TYPE',
default=DEFAULT_CINDER_ENDPOINT_TYPE),
help='Defaults to env[CINDER_ENDPOINT_TYPE] or '
+ DEFAULT_CINDER_ENDPOINT_TYPE + '.')
parser.add_argument('--os_volume_api_version',
default=utils.env('OS_VOLUME_API_VERSION',
default=DEFAULT_OS_VOLUME_API_VERSION),
help='Accepts 1, defaults to env[OS_VOLUME_API_VERSION].')
parser.add_argument('--insecure',
default=utils.env('CINDERCLIENT_INSECURE', default=False),
action='store_true',
help=argparse.SUPPRESS)
# FIXME(dtroyer): The args below are here for diablo compatibility,
# remove them in the folsom cycle
# alias for --os_username, left in for backwards compatibility
parser.add_argument('--username',
help='Deprecated')
# alias for --os_region_name, left in for backwards compatibility
parser.add_argument('--region_name',
help='Deprecated')
# alias for --os_password, left in for backwards compatibility
parser.add_argument('--apikey', '--password', dest='apikey',
default=utils.env('CINDER_API_KEY'),
help='Deprecated')
# alias for --os_tenant_name, left in for backward compatibility
parser.add_argument('--projectid', '--tenant_name', dest='projectid',
default=utils.env('CINDER_PROJECT_ID'),
help='Deprecated')
# alias for --os_auth_url, left in for backward compatibility
parser.add_argument('--url', '--auth_url', dest='url',
default=utils.env('CINDER_URL'),
help='Deprecated')
return parser
def get_subcommand_parser(self, version):
parser = self.get_base_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
try:
actions_module = {
'1.1': shell_v1,
'2': shell_v1,
}[version]
except KeyError:
actions_module = shell_v1
self._find_actions(subparsers, actions_module)
self._find_actions(subparsers, self)
for extension in self.extensions:
self._find_actions(subparsers, extension.module)
self._add_bash_completion_subparser(subparsers)
return parser
def _discover_extensions(self, version):
extensions = []
for name, module in itertools.chain(
self._discover_via_python_path(version),
self._discover_via_contrib_path(version)):
extension = cinderclient.extension.Extension(name, module)
extensions.append(extension)
return extensions
def _discover_via_python_path(self, version):
for (module_loader, name, ispkg) in pkgutil.iter_modules():
if name.endswith('python_cinderclient_ext'):
if not hasattr(module_loader, 'load_module'):
# Python 2.6 compat: actually get an ImpImporter obj
module_loader = module_loader.find_module(name)
module = module_loader.load_module(name)
yield name, module
def _discover_via_contrib_path(self, version):
module_path = os.path.dirname(os.path.abspath(__file__))
version_str = "v%s" % version.replace('.', '_')
ext_path = os.path.join(module_path, version_str, 'contrib')
ext_glob = os.path.join(ext_path, "*.py")
for ext_path in glob.iglob(ext_glob):
name = os.path.basename(ext_path)[:-3]
if name == "__init__":
continue
module = imp.load_source(name, ext_path)
yield name, module
def _add_bash_completion_subparser(self, subparsers):
subparser = subparsers.add_parser('bash_completion',
add_help=False,
formatter_class=OpenStackHelpFormatter
)
self.subcommands['bash_completion'] = subparser
subparser.set_defaults(func=self.do_bash_completion)
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
# I prefer commands to be hyphen-separated instead of underscore-separated.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(command,
help=help,
description=desc,
add_help=False,
formatter_class=OpenStackHelpFormatter
)
subparser.add_argument('-h', '--help',
action='help',
help=argparse.SUPPRESS,
)
self.subcommands[command] = subparser
for (args, kwargs) in arguments:
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
def setup_debugging(self, debug):
if not debug:
return
streamhandler = logging.StreamHandler()
streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
streamhandler.setFormatter(logging.Formatter(streamformat))
logger.setLevel(logging.DEBUG)
logger.addHandler(streamhandler)
httplib2.debuglevel = 1
def main(self, argv):
# Parse args once to find version
parser = self.get_base_parser()
(options, args) = parser.parse_known_args(argv)
self.setup_debugging(options.debug)
# build available subcommands based on version
self.extensions = self._discover_extensions(
options.os_volume_api_version)
self._run_extension_hooks('__pre_parse_args__')
subcommand_parser = self.get_subcommand_parser(
options.os_volume_api_version)
self.parser = subcommand_parser
if options.help and len(args) == 0:
subcommand_parser.print_help()
return 0
args = subcommand_parser.parse_args(argv)
self._run_extension_hooks('__post_parse_args__', args)
# Short-circuit and deal with help right away.
if args.func == self.do_help:
self.do_help(args)
return 0
elif args.func == self.do_bash_completion:
self.do_bash_completion(args)
return 0
(os_username, os_password, os_tenant_name, os_auth_url,
os_region_name, endpoint_type, insecure,
service_type, service_name, volume_service_name,
username, apikey, projectid, url, region_name) = (
args.os_username, args.os_password,
args.os_tenant_name, args.os_auth_url,
args.os_region_name, args.endpoint_type,
args.insecure, args.service_type, args.service_name,
args.volume_service_name, args.username,
args.apikey, args.projectid,
args.url, args.region_name)
if not endpoint_type:
endpoint_type = DEFAULT_CINDER_ENDPOINT_TYPE
if not service_type:
service_type = DEFAULT_CINDER_SERVICE_TYPE
service_type = utils.get_service_type(args.func) or service_type
# FIXME(usrleon): the project id should be required here just like
# os_username and os_password, but it is left optional for compatibility.
if not utils.isunauthenticated(args.func):
if not os_username:
if not username:
raise exc.CommandError("You must provide a username "
"via either --os_username or env[OS_USERNAME]")
else:
os_username = username
if not os_password:
if not apikey:
raise exc.CommandError("You must provide a password "
"via either --os_password or via "
"env[OS_PASSWORD]")
else:
os_password = apikey
if not os_tenant_name:
if not projectid:
raise exc.CommandError("You must provide a tenant name "
"via either --os_tenant_name or "
"env[OS_TENANT_NAME]")
else:
os_tenant_name = projectid
if not os_auth_url:
if not url:
raise exc.CommandError("You must provide an auth url "
"via either --os_auth_url or env[OS_AUTH_URL]")
else:
os_auth_url = url
if not os_region_name and region_name:
os_region_name = region_name
if not os_tenant_name:
raise exc.CommandError("You must provide a tenant name "
"via either --os_tenant_name or env[OS_TENANT_NAME]")
if not os_auth_url:
raise exc.CommandError("You must provide an auth url "
"via either --os_auth_url or env[OS_AUTH_URL]")
self.cs = client.Client(options.os_volume_api_version, os_username,
os_password, os_tenant_name, os_auth_url, insecure,
region_name=os_region_name, endpoint_type=endpoint_type,
extensions=self.extensions, service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name)
try:
if not utils.isunauthenticated(args.func):
self.cs.authenticate()
except exc.Unauthorized:
raise exc.CommandError("Invalid OpenStack Nova credentials.")
except exc.AuthorizationFailure:
raise exc.CommandError("Unable to authorize user")
args.func(self.cs, args)
def _run_extension_hooks(self, hook_type, *args, **kwargs):
"""Run hooks for all registered extensions."""
for extension in self.extensions:
extension.run_hooks(hook_type, *args, **kwargs)
def do_bash_completion(self, args):
"""
Prints all of the commands and options to stdout so that the
cinder.bash_completion script doesn't have to hard code them.
"""
commands = set()
options = set()
for sc_str, sc in self.subcommands.items():
commands.add(sc_str)
for option in sc._optionals._option_string_actions.keys():
options.add(option)
commands.remove('bash-completion')
commands.remove('bash_completion')
print ' '.join(commands | options)
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
"""
Display help about this program or one of its subcommands.
"""
if args.command:
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError("'%s' is not a valid subcommand" %
args.command)
else:
self.parser.print_help()
# I'm picky about my shell help.
class OpenStackHelpFormatter(argparse.HelpFormatter):
def start_section(self, heading):
# Title-case the headings
heading = '%s%s' % (heading[0].upper(), heading[1:])
super(OpenStackHelpFormatter, self).start_section(heading)
def main():
try:
OpenStackCinderShell().main(sys.argv[1:])
except Exception, e:
logger.debug(e, exc_info=1)
print >> sys.stderr, "ERROR: %s" % str(e)
sys.exit(1)
if __name__ == "__main__":
main()

261
cinderclient/utils.py Normal file
View File

@ -0,0 +1,261 @@
import os
import re
import sys
import uuid
import prettytable
from cinderclient import exceptions
def arg(*args, **kwargs):
"""Decorator for CLI args."""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
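# Example usage, as in cinderclient/v1/shell.py:
#
#     @utils.arg('volume', metavar='<volume>', help='ID of the volume.')
#     def do_show(cs, args):
#         ...
#
# Each @arg call stores its (args, kwargs) on the function via add_arg();
# the shell later replays them as argparse add_argument() calls.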
def env(*vars, **kwargs):
"""
Returns the value of the first environment variable that is set to a
non-empty string. If none are, returns the 'default' keyword arg or ''.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
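# Minimal sketch (the value is illustrative):
#
#     env('OS_USERNAME', default='')
#
# returns the value of OS_USERNAME if it is set to a non-empty string,
# otherwise the default ''.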
def add_arg(f, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(f, 'arguments'):
f.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in f.arguments:
# Because of the semantics of decorator composition, if we just append
# to the options list positional options will appear to be backwards.
f.arguments.insert(0, (args, kwargs))
def add_resource_manager_extra_kwargs_hook(f, hook):
"""Adds hook to bind CLI arguments to ResourceManager calls.
The `do_foo` calls in shell.py will receive CLI args and then in turn pass
them through to the ResourceManager. Before passing through the args, the
hooks registered here will be called, giving us a chance to add extra
kwargs (taken from the command-line) to what's passed to the
ResourceManager.
"""
if not hasattr(f, 'resource_manager_kwargs_hooks'):
f.resource_manager_kwargs_hooks = []
names = [h.__name__ for h in f.resource_manager_kwargs_hooks]
if hook.__name__ not in names:
f.resource_manager_kwargs_hooks.append(hook)
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
"""Return extra_kwargs by calling resource manager kwargs hooks."""
hooks = getattr(f, "resource_manager_kwargs_hooks", [])
extra_kwargs = {}
for hook in hooks:
hook_name = hook.__name__
hook_kwargs = hook(args)
conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys())
if conflicting_keys and not allow_conflicts:
raise Exception("Hook '%(hook_name)s' is attempting to redefine"
" attributes '%(conflicting_keys)s'" % locals())
extra_kwargs.update(hook_kwargs)
return extra_kwargs
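# Sketch of how the two helpers above fit together; the hook function and
# the kwarg it adds are illustrative, not part of this module:
#
#     def my_hook(args):
#         return {'extra_kwarg': args.some_option}
#
#     utils.add_resource_manager_extra_kwargs_hook(do_create, my_hook)
#     # later, inside do_create:
#     extra_kwargs = utils.get_resource_manager_extra_kwargs(do_create, args)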
def unauthenticated(f):
"""
Adds 'unauthenticated' attribute to decorated function.
Usage:
@unauthenticated
def mymethod(f):
...
"""
f.unauthenticated = True
return f
def isunauthenticated(f):
"""
Checks whether the function is marked as not requiring authentication
with the @unauthenticated decorator. Returns True if it is, False
otherwise.
"""
return getattr(f, 'unauthenticated', False)
def service_type(stype):
"""
Adds 'service_type' attribute to decorated function.
Usage:
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""
Retrieves service type from function
"""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def print_list(objs, fields, formatters={}):
mixed_case_fields = ['serverId']
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
print pt.get_string(sortby=fields[0])
def print_dict(d, property="Property"):
pt = prettytable.PrettyTable([property, 'Value'], caching=False)
pt.aligns = ['l', 'l']
[pt.add_row(list(r)) for r in d.iteritems()]
print pt.get_string(sortby=property)
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exceptions.NotFound:
pass
# now try to get entity as uuid
try:
uuid.UUID(str(name_or_id))
return manager.get(name_or_id)
except (ValueError, exceptions.NotFound):
pass
try:
try:
return manager.find(human_id=name_or_id)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
return manager.find(name=name_or_id)
except exceptions.NotFound:
try:
# Volumes do not have a 'name' attribute, only 'display_name'
return manager.find(display_name=name_or_id)
except exceptions.NotFound:
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = ("Multiple %s matches found for '%s', use an ID to be more"
" specific." % (manager.resource_class.__name__.lower(),
name_or_id))
raise exceptions.CommandError(msg)
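# Lookup order used above: integer ID, then UUID, then human_id, then name,
# and finally display_name (volumes carry display_name rather than name).
# So utils.find_resource(cs.volumes, 'my-vol') falls through to the
# display_name lookup when 'my-vol' is neither an integer nor a UUID.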
def _format_servers_list_networks(server):
output = []
for (network, addresses) in server.networks.items():
if len(addresses) == 0:
continue
addresses_csv = ', '.join(addresses)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(output)
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
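# Usage sketch: shell.py calls run_hooks('__pre_parse_args__') and
# run_hooks('__post_parse_args__', args) on each loaded extension, so a
# class mixing this in can register callbacks with, for example
# (MyExtension and my_callback are illustrative names):
#
#     MyExtension.add_hook('__post_parse_args__', my_callback)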
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
if not isinstance(value, unicode):
value = unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)

View File

@ -0,0 +1,17 @@
# Copyright (c) 2012 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v1.client import Client

71
cinderclient/v1/client.py Normal file
View File

@ -0,0 +1,71 @@
from cinderclient import client
from cinderclient.v1 import volumes
from cinderclient.v1 import volume_snapshots
from cinderclient.v1 import volume_types
class Client(object):
"""
Top-level object to access the OpenStack Volume API.
Create an instance with your creds::
>>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
Then call methods on its managers::
>>> client.volumes.list()
...
>>> client.volume_types.list()
...
"""
# FIXME(jesse): project_id isn't required to authenticate
def __init__(self, username, api_key, project_id, auth_url,
insecure=False, timeout=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', extensions=None,
service_type='compute', service_name=None,
volume_service_name=None):
# FIXME(comstud): Rename the api_key argument above when we
# know it's not being used as keyword argument
password = api_key
# extensions
self.volumes = volumes.VolumeManager(self)
self.volume_snapshots = volume_snapshots.SnapshotManager(self)
self.volume_types = volume_types.VolumeTypeManager(self)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
self.client = client.HTTPClient(username,
password,
project_id,
auth_url,
insecure=insecure,
timeout=timeout,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name)
def authenticate(self):
"""
Authenticate against the server.
Normally this is called automatically when you first access the API,
but you can call this method to force authentication right now.
Returns on success; raises :exc:`exceptions.Unauthorized` if the
credentials are wrong.
"""
self.client.authenticate()


241
cinderclient/v1/shell.py Normal file
View File

@ -0,0 +1,241 @@
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
from cinderclient import utils
def _poll_for_status(poll_fn, obj_id, action, final_ok_states,
poll_period=5, show_progress=True):
"""Block while an action is being performed, periodically printing
progress.
"""
def print_progress(progress):
if show_progress:
msg = ('\rInstance %(action)s... %(progress)s%% complete'
% dict(action=action, progress=progress))
else:
msg = '\rInstance %(action)s...' % dict(action=action)
sys.stdout.write(msg)
sys.stdout.flush()
print
while True:
obj = poll_fn(obj_id)
status = obj.status.lower()
progress = getattr(obj, 'progress', None) or 0
if status in final_ok_states:
print_progress(100)
print "\nFinished"
break
elif status == "error":
print "\nError %(action)s instance" % locals()
break
else:
print_progress(progress)
time.sleep(poll_period)
def _find_volume(cs, volume):
"""Get a volume by ID."""
return utils.find_resource(cs.volumes, volume)
def _find_volume_snapshot(cs, snapshot):
"""Get a volume snapshot by ID."""
return utils.find_resource(cs.volume_snapshots, snapshot)
def _print_volume(cs, volume):
utils.print_dict(volume._info)
def _print_volume_snapshot(cs, snapshot):
utils.print_dict(snapshot._info)
def _translate_volume_keys(collection):
convert = [('displayName', 'display_name'), ('volumeType', 'volume_type')]
for item in collection:
keys = item.__dict__.keys()
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
def _translate_volume_snapshot_keys(collection):
convert = [('displayName', 'display_name'), ('volumeId', 'volume_id')]
for item in collection:
keys = item.__dict__.keys()
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
@utils.service_type('volume')
def do_list(cs, args):
"""List all the volumes."""
volumes = cs.volumes.list()
_translate_volume_keys(volumes)
# Create a list of servers to which the volume is attached
for vol in volumes:
servers = [s.get('server_id') for s in vol.attachments]
setattr(vol, 'attached_to', ','.join(map(str, servers)))
utils.print_list(volumes, ['ID', 'Status', 'Display Name',
'Size', 'Volume Type', 'Attached to'])
@utils.arg('volume', metavar='<volume>', help='ID of the volume.')
@utils.service_type('volume')
def do_show(cs, args):
"""Show details about a volume."""
volume = _find_volume(cs, args.volume)
_print_volume(cs, volume)
@utils.arg('size',
metavar='<size>',
type=int,
help='Size of volume in GB')
@utils.arg('--snapshot_id',
metavar='<snapshot_id>',
help='Optional snapshot id to create the volume from. (Default=None)',
default=None)
@utils.arg('--display_name', metavar='<display_name>',
help='Optional volume name. (Default=None)',
default=None)
@utils.arg('--display_description', metavar='<display_description>',
help='Optional volume description. (Default=None)',
default=None)
@utils.arg('--volume_type',
metavar='<volume_type>',
help='Optional volume type. (Default=None)',
default=None)
@utils.service_type('volume')
def do_create(cs, args):
"""Add a new volume."""
cs.volumes.create(args.size,
args.snapshot_id,
args.display_name,
args.display_description,
args.volume_type)
@utils.arg('volume', metavar='<volume>', help='ID of the volume to delete.')
@utils.service_type('volume')
def do_delete(cs, args):
"""Remove a volume."""
volume = _find_volume(cs, args.volume)
volume.delete()
@utils.service_type('volume')
def do_snapshot_list(cs, args):
"""List all the snapshots."""
snapshots = cs.volume_snapshots.list()
_translate_volume_snapshot_keys(snapshots)
utils.print_list(snapshots, ['ID', 'Volume ID', 'Status', 'Display Name',
'Size'])
@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.service_type('volume')
def do_snapshot_show(cs, args):
"""Show details about a snapshot."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
_print_volume_snapshot(cs, snapshot)
@utils.arg('volume_id',
metavar='<volume_id>',
help='ID of the volume to snapshot')
@utils.arg('--force',
metavar='<True|False>',
help='Optional flag to indicate whether to snapshot a volume even if it is '
'attached to an instance. (Default=False)',
default=False)
@utils.arg('--display_name', metavar='<display_name>',
help='Optional snapshot name. (Default=None)',
default=None)
@utils.arg('--display_description', metavar='<display_description>',
help='Optional snapshot description. (Default=None)',
default=None)
@utils.service_type('volume')
def do_snapshot_create(cs, args):
"""Add a new snapshot."""
cs.volume_snapshots.create(args.volume_id,
args.force,
args.display_name,
args.display_description)
@utils.arg('snapshot_id',
metavar='<snapshot_id>',
help='ID of the snapshot to delete.')
@utils.service_type('volume')
def do_snapshot_delete(cs, args):
"""Remove a snapshot."""
snapshot = _find_volume_snapshot(cs, args.snapshot_id)
snapshot.delete()
def _print_volume_type_list(vtypes):
utils.print_list(vtypes, ['ID', 'Name'])
@utils.service_type('volume')
def do_type_list(cs, args):
"""Print a list of available 'volume types'."""
vtypes = cs.volume_types.list()
_print_volume_type_list(vtypes)
@utils.arg('name',
metavar='<name>',
help="Name of the new flavor")
@utils.service_type('volume')
def do_type_create(cs, args):
"""Create a new volume type."""
vtype = cs.volume_types.create(args.name)
_print_volume_type_list([vtype])
@utils.arg('id',
metavar='<id>',
help="Unique ID of the volume type to delete")
@utils.service_type('volume')
def do_type_delete(cs, args):
"""Delete a specific flavor"""
cs.volume_types.delete(args.id)
def do_endpoints(cs, args):
"""Discover endpoints that get returned from the authenticate services"""
catalog = cs.client.service_catalog.catalog
for e in catalog['access']['serviceCatalog']:
utils.print_dict(e['endpoints'][0], e['name'])
def do_credentials(cs, args):
"""Show user credentials returned from auth"""
catalog = cs.client.service_catalog.catalog
utils.print_dict(catalog['access']['user'], "User Credentials")
utils.print_dict(catalog['access']['token'], "Token")

View File

@ -0,0 +1,88 @@
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume snapshot interface (1.1 extension).
"""
from cinderclient import base
class Snapshot(base.Resource):
"""
A Snapshot is a point-in-time snapshot of an OpenStack volume.
"""
def __repr__(self):
return "<Snapshot: %s>" % self.id
def delete(self):
"""
Delete this snapshot.
"""
self.manager.delete(self)
class SnapshotManager(base.ManagerWithFind):
"""
Manage :class:`Snapshot` resources.
"""
resource_class = Snapshot
def create(self, volume_id, force=False,
display_name=None, display_description=None):
"""
Create a snapshot of the given volume.
:param volume_id: The ID of the volume to snapshot.
:param force: If force is True, create a snapshot even if the volume is
attached to an instance. Default is False.
:param display_name: Name of the snapshot
:param display_description: Description of the snapshot
:rtype: :class:`Snapshot`
"""
body = {'snapshot': {'volume_id': volume_id,
'force': force,
'display_name': display_name,
'display_description': display_description}}
return self._create('/snapshots', body, 'snapshot')
def get(self, snapshot_id):
"""
Get a snapshot.
:param snapshot_id: The ID of the snapshot to get.
:rtype: :class:`Snapshot`
"""
return self._get("/snapshots/%s" % snapshot_id, "snapshot")
def list(self, detailed=True):
"""
Get a list of all snapshots.
:rtype: list of :class:`Snapshot`
"""
if detailed is True:
return self._list("/snapshots/detail", "snapshots")
else:
return self._list("/snapshots", "snapshots")
def delete(self, snapshot):
"""
Delete a snapshot.
:param snapshot: The :class:`Snapshot` to delete.
"""
self._delete("/snapshots/%s" % base.getid(snapshot))

View File

@ -0,0 +1,77 @@
# Copyright (c) 2011 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volume Type interface.
"""
from cinderclient import base
class VolumeType(base.Resource):
"""
A Volume Type is the type of volume to be created.
"""
def __repr__(self):
return "<Volume Type: %s>" % self.name
class VolumeTypeManager(base.ManagerWithFind):
"""
Manage :class:`VolumeType` resources.
"""
resource_class = VolumeType
def list(self):
"""
Get a list of all volume types.
:rtype: list of :class:`VolumeType`.
"""
return self._list("/types", "volume_types")
def get(self, volume_type):
"""
Get a specific volume type.
:param volume_type: The ID of the :class:`VolumeType` to get.
:rtype: :class:`VolumeType`
"""
return self._get("/types/%s" % base.getid(volume_type), "volume_type")
def delete(self, volume_type):
"""
Delete a specific volume_type.
:param volume_type: The ID of the :class:`VolumeType` to delete.
"""
self._delete("/types/%s" % base.getid(volume_type))
def create(self, name):
"""
Create a volume type.
:param name: Descriptive name of the volume type
:rtype: :class:`VolumeType`
"""
body = {
"volume_type": {
"name": name,
}
}
return self._create("/types", body, "volume_type")

135
cinderclient/v1/volumes.py Normal file
View File

@ -0,0 +1,135 @@
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume interface (1.1 extension).
"""
from cinderclient import base
class Volume(base.Resource):
"""
A volume is extra block-level storage that can be attached to OpenStack instances.
"""
def __repr__(self):
return "<Volume: %s>" % self.id
def delete(self):
"""
Delete this volume.
"""
self.manager.delete(self)
class VolumeManager(base.ManagerWithFind):
"""
Manage :class:`Volume` resources.
"""
resource_class = Volume
def create(self, size, snapshot_id=None,
display_name=None, display_description=None,
volume_type=None):
"""
Create a volume.
:param size: Size of volume in GB
:param snapshot_id: ID of the snapshot
:param display_name: Name of the volume
:param display_description: Description of the volume
:param volume_type: Type of volume
:rtype: :class:`Volume`
"""
body = {'volume': {'size': size,
'snapshot_id': snapshot_id,
'display_name': display_name,
'display_description': display_description,
'volume_type': volume_type}}
return self._create('/volumes', body, 'volume')
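# Usage sketch mirroring do_create/do_show in cinderclient/v1/shell.py
# (the display name is illustrative): create a 1 GB volume, then fetch it
# back by ID.
#
#     vol = cs.volumes.create(1, display_name='test-vol')
#     cs.volumes.get(vol.id)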
def get(self, volume_id):
"""
Get a volume.
:param volume_id: The ID of the volume to get.
:rtype: :class:`Volume`
"""
return self._get("/volumes/%s" % volume_id, "volume")
def list(self, detailed=True):
"""
Get a list of all volumes.
:rtype: list of :class:`Volume`
"""
if detailed is True:
return self._list("/volumes/detail", "volumes")
else:
return self._list("/volumes", "volumes")
def delete(self, volume):
"""
Delete a volume.
:param volume: The :class:`Volume` to delete.
"""
self._delete("/volumes/%s" % base.getid(volume))
def create_server_volume(self, server_id, volume_id, device):
"""
Attach a volume identified by the volume ID to the given server ID
:param server_id: The ID of the server
:param volume_id: The ID of the volume to attach.
:param device: The device name
:rtype: :class:`Volume`
"""
body = {'volumeAttachment': {'volumeId': volume_id,
'device': device}}
return self._create("/servers/%s/os-volume_attachments" % server_id,
body, "volumeAttachment")
def get_server_volume(self, server_id, attachment_id):
"""
Get the volume identified by the attachment ID that is attached to
the given server.
:param server_id: The ID of the server
:param attachment_id: The ID of the attachment
:rtype: :class:`Volume`
"""
return self._get("/servers/%s/os-volume_attachments/%s" % (server_id,
attachment_id,), "volumeAttachment")
def get_server_volumes(self, server_id):
"""
Get a list of all the attached volumes for the given server ID
:param server_id: The ID of the server
:rtype: list of :class:`Volume`
"""
return self._list("/servers/%s/os-volume_attachments" % server_id,
"volumeAttachments")
def delete_server_volume(self, server_id, attachment_id):
"""
Detach a volume identified by the attachment ID from the given server
:param server_id: The ID of the server
:param attachment_id: The ID of the attachment
"""
self._delete("/servers/%s/os-volume_attachments/%s" %
(server_id, attachment_id,))

1
docs/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
_build/

89
docs/Makefile Normal file
View File

@ -0,0 +1,89 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-cinderclient.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-cinderclient.qhc"
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
"run these through (pdf)latex."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

67
docs/api.rst Normal file
View File

@ -0,0 +1,67 @@
The :mod:`cinderclient` Python API
==================================
.. module:: cinderclient
:synopsis: A client for the OpenStack Nova API.
.. currentmodule:: cinderclient
Usage
-----
First create an instance of :class:`OpenStack` with your credentials::
>>> from cinderclient import OpenStack
>>> cinder = OpenStack(USERNAME, PASSWORD, AUTH_URL)
Then call methods on the :class:`OpenStack` object:
.. class:: OpenStack
.. attribute:: backup_schedules
A :class:`BackupScheduleManager` -- manage automatic backup images.
.. attribute:: flavors
A :class:`FlavorManager` -- query available "flavors" (hardware
configurations).
.. attribute:: images
An :class:`ImageManager` -- query and create server disk images.
.. attribute:: ipgroups
A :class:`IPGroupManager` -- manage shared public IP addresses.
.. attribute:: servers
A :class:`ServerManager` -- start, stop, and manage virtual machines.
.. automethod:: authenticate
For example::
>>> cinder.servers.list()
[<Server: buildslave-ubuntu-9.10>]
>>> cinder.flavors.list()
[<Flavor: 256 server>,
<Flavor: 512 server>,
<Flavor: 1GB server>,
<Flavor: 2GB server>,
<Flavor: 4GB server>,
<Flavor: 8GB server>,
<Flavor: 15.5GB server>]
>>> fl = cinder.flavors.find(ram=512)
>>> cinder.servers.create("my-server", flavor=fl)
<Server: my-server>
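For the volume managers that this package actually provides (see
``cinderclient/v1/client.py``), a minimal sketch of equivalent usage,
assuming the v1 ``Client`` signature shown in that module, looks like::
>>> from cinderclient.v1.client import Client
>>> cinder = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
>>> cinder.volumes.list()
...
>>> cinder.volume_types.list()
...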
For more information, see the reference:
.. toctree::
:maxdepth: 2
ref/index

198
docs/conf.py Normal file
View File

@ -0,0 +1,198 @@
# -*- coding: utf-8 -*-
#
# python-cinderclient documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 6 14:19:25 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-cinderclient'
copyright = u'Rackspace, based on work by Jacob Kaplan-Moss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.6'
# The full version, including alpha/beta/rc tags.
release = '2.6.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-cinderclientdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-cinderclient.tex', u'python-cinderclient Documentation',
u'Rackspace - based on work by Jacob Kaplan-Moss', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

45
docs/index.rst Normal file
View File

@ -0,0 +1,45 @@
Python bindings to the OpenStack Nova API
==================================================
This is a client for the OpenStack Nova API. There's :doc:`a Python API
<api>` (the :mod:`cinderclient` module), and a :doc:`command-line script
<shell>` (installed as :program:`cinder`). Each implements the entire
OpenStack Nova API.
You'll need an `OpenStack Nova` account, which you can get by using `cinder-manage`.
.. seealso::
You may want to read `Rackspace's API guide`__ (PDF) -- the first bit, at
least -- to get an idea of the concepts. Rackspace is doing the cloud
hosting thing a bit differently from Amazon, and if you get the concepts
this library should make more sense.
__ http://docs.rackspacecloud.com/servers/api/cs-devguide-latest.pdf
Contents:
.. toctree::
:maxdepth: 2
shell
api
ref/index
releases
Contributing
============
Development takes place `on GitHub`__; please file bugs/pull requests there.
__ https://github.com/rackspace/python-cinderclient
Run tests with ``python setup.py test``.
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -0,0 +1,60 @@
Backup schedules
================
.. currentmodule:: cinderclient
Rackspace allows scheduling of weekly and/or daily backups for virtual
servers. You can access these backup schedules either off the API object as
:attr:`OpenStack.backup_schedules`, or directly off a particular
:class:`Server` instance as :attr:`Server.backup_schedule`.
Classes
-------
.. autoclass:: BackupScheduleManager
:members: create, delete, update, get
.. autoclass:: BackupSchedule
:members: update, delete
.. attribute:: enabled
Is this backup enabled? (boolean)
.. attribute:: weekly
The day of week upon which to perform a weekly backup.
.. attribute:: daily
The daily time period during which to perform a daily backup.
Constants
---------
Constants for selecting weekly backup days:
.. data:: BACKUP_WEEKLY_DISABLED
.. data:: BACKUP_WEEKLY_SUNDAY
.. data:: BACKUP_WEEKLY_MONDAY
.. data:: BACKUP_WEEKLY_TUESDAY
.. data:: BACKUP_WEEKLY_WEDNESDAY
.. data:: BACKUP_WEEKLY_THURSDAY
.. data:: BACKUP_WEEKLY_FRIDAY
.. data:: BACKUP_WEEKLY_SATURDAY
Constants for selecting hourly backup windows:
.. data:: BACKUP_DAILY_DISABLED
.. data:: BACKUP_DAILY_H_0000_0200
.. data:: BACKUP_DAILY_H_0200_0400
.. data:: BACKUP_DAILY_H_0400_0600
.. data:: BACKUP_DAILY_H_0600_0800
.. data:: BACKUP_DAILY_H_0800_1000
.. data:: BACKUP_DAILY_H_1000_1200
.. data:: BACKUP_DAILY_H_1200_1400
.. data:: BACKUP_DAILY_H_1400_1600
.. data:: BACKUP_DAILY_H_1600_1800
.. data:: BACKUP_DAILY_H_1800_2000
.. data:: BACKUP_DAILY_H_2000_2200
.. data:: BACKUP_DAILY_H_2200_0000

14
docs/ref/exceptions.rst Normal file
View File

@ -0,0 +1,14 @@
Exceptions
==========
.. currentmodule:: cinderclient
Exceptions
----------
Exceptions that the API might throw:
.. automodule:: cinderclient
:members: OpenStackException, BadRequest, Unauthorized, Forbidden,
NotFound, OverLimit

35
docs/ref/flavors.rst Normal file
View File

@ -0,0 +1,35 @@
Flavors
=======
From Rackspace's API documentation:
A flavor is an available hardware configuration for a server. Each flavor
has a unique combination of disk space, memory capacity and priority for
CPU time.
Classes
-------
.. currentmodule:: cinderclient
.. autoclass:: FlavorManager
:members: get, list, find, findall
.. autoclass:: Flavor
:members:
.. attribute:: id
This flavor's ID.
.. attribute:: name
A human-readable name for this flavor.
.. attribute:: ram
The amount of RAM this flavor has, in MB.
.. attribute:: disk
The amount of disk space this flavor has, in MB

54
docs/ref/images.rst Normal file
View File

@ -0,0 +1,54 @@
Images
======
.. currentmodule:: cinderclient
An "image" is a snapshot from which you can create new server instances.
From Rackspace's own API documentation:
An image is a collection of files used to create or rebuild a server.
Rackspace provides a number of pre-built OS images by default. You may
also create custom images from cloud servers you have launched. These
custom images are useful for backup purposes or for producing "gold"
server images if you plan to deploy a particular server configuration
frequently.
Classes
-------
.. autoclass:: ImageManager
:members: get, list, find, findall, create, delete
.. autoclass:: Image
:members: delete
.. attribute:: id
This image's ID.
.. attribute:: name
This image's name.
.. attribute:: created
The date/time this image was created.
.. attribute:: updated
The date/time this instance was updated.
.. attribute:: status
The status of this image (usually ``SAVING`` or ``ACTIVE``).
.. attribute:: progress
During saving of an image this'll be set to something between
0 and 100, representing a rough percentage done.
.. attribute:: serverId
If this image was created from a :class:`Server` then this attribute
will be set to the ID of the server whence this image came.

12
docs/ref/index.rst Normal file
View File

@ -0,0 +1,12 @@
API Reference
=============
.. toctree::
:maxdepth: 1
backup_schedules
exceptions
flavors
images
ipgroups
servers

46
docs/ref/ipgroups.rst Normal file
View File

@ -0,0 +1,46 @@
Shared IP addresses
===================
From the Rackspace API guide:
Public IP addresses can be shared across multiple servers for use in
various high availability scenarios. When an IP address is shared to
another server, the cloud network restrictions are modified to allow each
server to listen to and respond on that IP address (you may optionally
specify that the target server network configuration be modified). Shared
IP addresses can be used with many standard heartbeat facilities (e.g.
``keepalived``) that monitor for failure and manage IP failover.
A shared IP group is a collection of servers that can share IPs with other
members of the group. Any server in a group can share one or more public
IPs with any other server in the group. With the exception of the first
server in a shared IP group, servers must be launched into shared IP
groups. A server may only be a member of one shared IP group.
.. seealso::
Use :meth:`Server.share_ip` and :meth:`Server.unshare_ip` to share and unshare
IPs in a group.
Classes
-------
.. currentmodule:: cinderclient
.. autoclass:: IPGroupManager
:members: get, list, find, findall, create, delete
.. autoclass:: IPGroup
:members: delete
.. attribute:: id
Shared group ID.
.. attribute:: name
Name of the group.
.. attribute:: servers
A list of server IDs in this group.

73
docs/ref/servers.rst Normal file
View File

@ -0,0 +1,73 @@
Servers
=======
A virtual machine instance.
Classes
-------
.. currentmodule:: cinderclient
.. autoclass:: ServerManager
:members: get, list, find, findall, create, update, delete, share_ip,
unshare_ip, reboot, rebuild, resize, confirm_resize,
revert_resize
.. autoclass:: Server
:members: update, delete, share_ip, unshare_ip, reboot, rebuild, resize,
confirm_resize, revert_resize
.. attribute:: id
This server's ID.
.. attribute:: name
The name you gave the server when you booted it.
.. attribute:: imageId
The :class:`Image` this server was booted with.
.. attribute:: flavorId
This server's current :class:`Flavor`.
.. attribute:: hostId
Rackspace doesn't document this value. It appears to be a SHA1 hash.
.. attribute:: status
The server's status (``BOOTING``, ``ACTIVE``, etc).
.. attribute:: progress
When booting, resizing, updating, etc., this will be set to a
value between 0 and 100 giving a rough estimate of the progress
of the current operation.
.. attribute:: addresses
The public and private IP addresses of this server. This'll be a dict
of the form::
{
"public" : ["67.23.10.138"],
"private" : ["10.176.42.19"]
}
You *can* get more than one public/private IP provisioned, but not
directly from the API; you'll need to open a support ticket.
.. attribute:: metadata
The metadata dict you gave when creating the server.
Constants
---------
Reboot types:
.. data:: REBOOT_SOFT
.. data:: REBOOT_HARD

99
docs/releases.rst Normal file
View File

@ -0,0 +1,99 @@
=============
Release notes
=============
2.5.8 (July 11, 2011)
=====================
* returns all public/private ips, not just first one
* better 'cinder list' search options
2.5.7 - 2.5.6 = minor tweaks
2.5.5 (June 21, 2011)
=====================
* zone-boot min/max instance count added thanks to comstud
* create for user added thanks to cerberus
* fixed tests
2.5.3 (June 15, 2011)
=====================
* ProjectID can be None for backwards compatibility.
* README/docs updated for projectId thanks to usrleon
2.5.1 (June 10, 2011)
=====================
* ProjectID now part of authentication
2.5.0 (June 3, 2011)
=================
* better logging thanks to GridDynamics
2.4.4 (June 1, 2011)
=================
* added support for GET /servers with reservation_id (and /servers/detail)
2.4.3 (May 27, 2011)
=================
* added support for POST /zones/select (client only, not cmdline)
2.4 (March 7, 2011)
=================
* added Jacob Kaplan-Moss copyright notices to older/untouched files.
2.3 (March 2, 2011)
=================
* package renamed to python-cinderclient. Module to cinderclient
2.2 (March 1, 2011)
=================
* removed some license/copyright notices from source that wasn't
significantly changed.
2.1 (Feb 28, 2011)
=================
* shell renamed to cinder from cindertools
* license changed from BSD to Apache
2.0 (Feb 7, 2011)
=================
* Forked from https://github.com/jacobian/python-cloudservers
* Rebranded to python-cindertools
* Auth URL support
* New OpenStack specific commands added (pause, suspend, etc)
1.2 (August 15, 2010)
=====================
* Support for Python 2.4 - 2.7.
* Improved output of :program:`cloudservers ipgroup-list`.
* Made ``cloudservers boot --ipgroup <name>`` work (as well as ``--ipgroup
<id>``).
1.1 (May 6, 2010)
=================
* Added a ``--files`` option to :program:`cloudservers boot` supporting
the upload of (up to five) files at boot time.
* Added a ``--key`` option to :program:`cloudservers boot` to key the server
with an SSH public key at boot time. This is just a shortcut for ``--files``,
but it's a useful shortcut.
* Changed the default server image to Ubuntu 10.04 LTS.

52
docs/shell.rst Normal file
View File

@ -0,0 +1,52 @@
The :program:`cinder` shell utility
=========================================
.. program:: cinder
.. highlight:: bash
The :program:`cinder` shell utility interacts with OpenStack Nova API
from the command line. It supports the entirety of the OpenStack Nova API.
First, you'll need an OpenStack Nova account and an API key. You get this
by using the `cinder-manage` command in OpenStack Nova.
You'll need to provide :program:`cinder` with your OpenStack username and
API key. You can do this with the :option:`--os_username`, :option:`--os_password`
and :option:`--os_tenant_name` options, but it's easier to just set them as
environment variables:
.. envvar:: OS_USERNAME
Your OpenStack Nova username.
.. envvar:: OS_PASSWORD
Your password.
.. envvar:: OS_TENANT_NAME
Project for work.
.. envvar:: OS_AUTH_URL
The OpenStack API server URL.
.. envvar:: OS_COMPUTE_API_VERSION
The OpenStack API version.
For example, in Bash you'd use::
export OS_USERNAME=yourname
export OS_PASSWORD=yadayadayada
export OS_TENANT_NAME=myproject
export OS_AUTH_URL=http://...
export OS_COMPUTE_API_VERSION=1.1
From there, all shell commands take the form::
cinder <command> [arguments...]
Run :program:`cinder help` to get a full list of all possible commands,
and run :program:`cinder help <command>` to get detailed help for that
command.
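As a quick check, assuming the subcommands defined in
``cinderclient/v1/shell.py``, a typical session looks like::
cinder list
cinder create 1 --display_name test-vol
cinder show test-vol
cinder delete test-vol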

154
run_tests.sh Executable file
View File

@ -0,0 +1,154 @@
#!/bin/bash
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run python-cinderclient test suite"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
echo " -P, --no-pep8 Don't run pep8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_option {
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-s|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
venv=.venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
noseargs=
noseopts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=cinderclient"
fi
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function run_tests {
# Cleanup *.pyc
${wrapper} find . -type f -name "*.pyc" -delete
# Just run the test suites in current environment
${wrapper} $NOSETESTS
# If we get some short import error right away, print the error log directly
RESULT=$?
return $RESULT
}
function run_pep8 {
echo "Running pep8 ..."
srcfiles="cinderclient tests"
# Just run PEP8 in current environment
#
# NOTE(sirp): W602 (deprecated 3-arg raise) is being ignored for the
# following reasons:
#
# 1. It's needed to preserve traceback information when re-raising
# exceptions; this is needed b/c Eventlet will clear exceptions when
# switching contexts.
#
# 2. There doesn't appear to be an alternative, "pep8-tool" compatible way of doing this
# in Python 2 (in Python 3 `with_traceback` could be used).
#
# 3. Can find no corroborating evidence that this is deprecated in Python 2
# other than what the PEP8 tool claims. It is deprecated in Python 3, so,
# perhaps the mistake was thinking that the deprecation applied to Python 2
# as well.
pep8_opts="--ignore=E202,W602 --repeat"
${wrapper} pep8 ${pep8_opts} ${srcfiles}
}
NOSETESTS="nosetests $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
${wrapper} coverage html -d covhtml -i
fi

13
setup.cfg Normal file
View File

@ -0,0 +1,13 @@
[nosetests]
cover-package = cinderclient
cover-html = true
cover-erase = true
cover-inclusive = true
[build_sphinx]
source-dir = docs/
build-dir = docs/_build
all_files = 1
[upload_sphinx]
upload-dir = docs/_build/html

56
setup.py Normal file
View File

@ -0,0 +1,56 @@
# Copyright 2011 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
import sys
requirements = ["httplib2", "prettytable"]
if sys.version_info < (2, 6):
requirements.append("simplejson")
if sys.version_info < (2, 7):
requirements.append("argparse")
def read_file(file_name):
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
setuptools.setup(
name="python-cinderclient",
version="2012.2",
author="Rackspace, based on work by Jacob Kaplan-Moss",
author_email="github@racklabs.com",
description="Client library for OpenStack Nova API.",
long_description=read_file("README.rst"),
license="Apache License, Version 2.0",
url="https://github.com/openstack/python-cinderclient",
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=requirements,
tests_require=["nose", "mock"],
test_suite="nose.collector",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python"
],
entry_points={
"console_scripts": ["cinder = cinderclient.shell:main"]
}
)

0
tests/__init__.py Normal file
View File

71
tests/fakes.py Normal file
View File

@ -0,0 +1,71 @@
"""
A fake server that "responds" to API methods with pre-canned responses.
All of these responses come from the spec, so if for some reason the spec's
wrong, the tests might raise AssertionError. I've indicated in comments the
places where actual behavior differs from the spec.
"""
def assert_has_keys(dict, required=[], optional=[]):
keys = dict.keys()
for k in required:
try:
assert k in keys
except AssertionError:
extra_keys = set(keys).difference(set(required + optional))
raise AssertionError("found unexpected keys: %s" %
list(extra_keys))
class FakeClient(object):
def assert_called(self, method, url, body=None, pos=-1):
"""
Assert that an API method was just called.
"""
expected = (method, url)
called = self.client.callstack[pos][0:2]
assert self.client.callstack, \
"Expected %s %s but no calls were made." % expected
assert expected == called, 'Expected %s %s; got %s %s' % \
(expected + called)
if body is not None:
assert self.client.callstack[pos][2] == body
def assert_called_anytime(self, method, url, body=None):
"""
Assert that an API method was called at any time during the test.
"""
expected = (method, url)
assert self.client.callstack, \
"Expected %s %s but no calls were made." % expected
found = False
for entry in self.client.callstack:
if expected == entry[0:2]:
found = True
break
assert found, 'Expected %s %s; got %s' % \
(expected, self.client.callstack)
if body is not None:
try:
assert entry[2] == body
except AssertionError:
print entry[2]
print "!="
print body
raise
self.client.callstack = []
def clear_callstack(self):
self.client.callstack = []
def authenticate(self):
pass

48
tests/test_base.py Normal file
View File

@ -0,0 +1,48 @@
from cinderclient import base
from cinderclient import exceptions
from cinderclient.v1 import volumes
from tests import utils
from tests.v1 import fakes
cs = fakes.FakeClient()
class BaseTest(utils.TestCase):
def test_resource_repr(self):
r = base.Resource(None, dict(foo="bar", baz="spam"))
self.assertEqual(repr(r), "<Resource baz=spam, foo=bar>")
def test_getid(self):
self.assertEqual(base.getid(4), 4)
class TmpObject(object):
id = 4
self.assertEqual(base.getid(TmpObject), 4)
def test_eq(self):
# Two resources of the same type with the same id: equal
r1 = base.Resource(None, {'id': 1, 'name': 'hi'})
r2 = base.Resource(None, {'id': 1, 'name': 'hello'})
self.assertEqual(r1, r2)
# Two resources of different types: never equal
r1 = base.Resource(None, {'id': 1})
r2 = volumes.Volume(None, {'id': 1})
self.assertNotEqual(r1, r2)
# Two resources with no ID: equal if their info is equal
r1 = base.Resource(None, {'name': 'joe', 'age': 12})
r2 = base.Resource(None, {'name': 'joe', 'age': 12})
self.assertEqual(r1, r2)
def test_findall_invalid_attribute(self):
# Make sure findall with an invalid attribute doesn't cause errors.
# The following should not raise an exception.
cs.volumes.findall(vegetable='carrot')
# However, find() should raise an error
self.assertRaises(exceptions.NotFound,
cs.volumes.find,
vegetable='carrot')
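
The comments in test_eq above spell out the equality contract for base.Resource; a plausible sketch of that rule (an assumption about the behaviour being tested, not code from this commit; the _info attribute name is assumed):

def resources_equal(a, b):
    # same concrete class required
    if type(a) is not type(b):
        return False
    # compare by id when both sides carry one
    if hasattr(a, 'id') and hasattr(b, 'id'):
        return a.id == b.id
    # otherwise fall back to the raw info dict
    return a._info == b._info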

18
tests/test_client.py Normal file
View File

@@ -0,0 +1,18 @@
import cinderclient.client
import cinderclient.v1.client
from tests import utils
class ClientTest(utils.TestCase):
def setUp(self):
pass
def test_get_client_class_v1(self):
output = cinderclient.client.get_client_class('1')
self.assertEqual(output, cinderclient.v1.client.Client)
def test_get_client_class_unknown(self):
self.assertRaises(cinderclient.exceptions.UnsupportedVersion,
cinderclient.client.get_client_class, '0')

74
tests/test_http.py Normal file
View File

@@ -0,0 +1,74 @@
import httplib2
import mock
from cinderclient import client
from cinderclient import exceptions
from tests import utils
fake_response = httplib2.Response({"status": 200})
fake_body = '{"hi": "there"}'
mock_request = mock.Mock(return_value=(fake_response, fake_body))
def get_client():
cl = client.HTTPClient("username", "password",
"project_id", "auth_test")
return cl
def get_authed_client():
cl = get_client()
cl.management_url = "http://example.com"
cl.auth_token = "token"
return cl
class ClientTest(utils.TestCase):
def test_get(self):
cl = get_authed_client()
@mock.patch.object(httplib2.Http, "request", mock_request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
headers = {"X-Auth-Token": "token",
"X-Auth-Project-Id": "project_id",
"User-Agent": cl.USER_AGENT,
'Accept': 'application/json',
}
mock_request.assert_called_with("http://example.com/hi",
"GET", headers=headers)
# Automatic JSON parsing
self.assertEqual(body, {"hi": "there"})
test_get_call()
def test_post(self):
cl = get_authed_client()
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_post_call():
cl.post("/hi", body=[1, 2, 3])
headers = {
"X-Auth-Token": "token",
"X-Auth-Project-Id": "project_id",
"Content-Type": "application/json",
'Accept': 'application/json',
"User-Agent": cl.USER_AGENT
}
mock_request.assert_called_with("http://example.com/hi", "POST",
headers=headers, body='[1, 2, 3]')
test_post_call()
def test_auth_failure(self):
cl = get_client()
# response must not have x-server-management-url header
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.AuthorizationFailure, cl.authenticate)
test_auth_call()

127
tests/test_service_catalog.py Normal file
View File

@@ -0,0 +1,127 @@
from cinderclient import exceptions
from cinderclient import service_catalog
from tests import utils
# Taken directly from keystone/content/common/samples/auth.json
# Do not edit this structure. Instead, grab the latest from there.
SERVICE_CATALOG = {
"access": {
"token": {
"id": "ab48a9efdfedb23ty3494",
"expires": "2010-11-01T03:32:15-05:00",
"tenant": {
"id": "345",
"name": "My Project"
}
},
"user": {
"id": "123",
"name": "jqsmith",
"roles": [
{
"id": "234",
"name": "compute:admin",
},
{
"id": "235",
"name": "object-store:admin",
"tenantId": "1",
}
],
"roles_links": [],
},
"serviceCatalog": [
{
"name": "Cloud Servers",
"type": "compute",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://compute1.host/v1/1234",
"internalURL": "https://compute1.host/v1/1234",
"region": "North",
"versionId": "1.0",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
{
"tenantId": "2",
"publicURL": "https://compute1.host/v1/3456",
"internalURL": "https://compute1.host/v1/3456",
"region": "North",
"versionId": "1.1",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
],
"endpoints_links": [],
},
{
"name": "Nova Volumes",
"type": "volume",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v1/1234",
"internalURL": "https://volume1.host/v1/1234",
"region": "South",
"versionId": "1.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v1/3456",
"internalURL": "https://volume1.host/v1/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v1/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
"href": "https://identity1.host/v2.0/endpoints"
},
],
},
],
"serviceCatalog_links": [
{
"rel": "next",
"href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
},
],
},
}
class ServiceCatalogTest(utils.TestCase):
def test_building_a_service_catalog(self):
sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)
self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
service_type='compute')
self.assertEquals(sc.url_for('tenantId', '1', service_type='compute'),
"https://compute1.host/v1/1234")
self.assertEquals(sc.url_for('tenantId', '2', service_type='compute'),
"https://compute1.host/v1/3456")
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
"region", "South", service_type='compute')
def test_alternate_service_type(self):
sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)
self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
service_type='volume')
self.assertEquals(sc.url_for('tenantId', '1', service_type='volume'),
"https://volume1.host/v1/1234")
self.assertEquals(sc.url_for('tenantId', '2', service_type='volume'),
"https://volume1.host/v1/3456")
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
"region", "North", service_type='volume')

75
tests/test_shell.py Normal file
View File

@@ -0,0 +1,75 @@
import cStringIO
import os
import httplib2
import sys
from cinderclient import exceptions
import cinderclient.shell
from tests import utils
class ShellTest(utils.TestCase):
# Patch os.environ to avoid required auth info.
def setUp(self):
global _old_env
fake_env = {
'OS_USERNAME': 'username',
'OS_PASSWORD': 'password',
'OS_TENANT_NAME': 'tenant_name',
'OS_AUTH_URL': 'http://no.where',
}
_old_env, os.environ = os.environ, fake_env.copy()
def shell(self, argstr):
orig = sys.stdout
try:
sys.stdout = cStringIO.StringIO()
_shell = cinderclient.shell.OpenStackCinderShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(exc_value.code, 0)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
return out
def tearDown(self):
global _old_env
os.environ = _old_env
def test_help_unknown_command(self):
self.assertRaises(exceptions.CommandError, self.shell, 'help foofoo')
def test_debug(self):
httplib2.debuglevel = 0
self.shell('--debug help')
assert httplib2.debuglevel == 1
def test_help(self):
required = [
'^usage: ',
'(?m)^\s+create\s+Add a new volume.',
'(?m)^See "cinder help COMMAND" for help on a specific command',
]
for argstr in ['--help', 'help']:
help_text = self.shell(argstr)
for r in required:
self.assertRegexpMatches(help_text, r)
def test_help_on_subcommand(self):
required = [
'^usage: cinder list',
'(?m)^List all the volumes.',
]
argstrings = [
'list --help',
'help list',
]
for argstr in argstrings:
help_text = self.shell(argstr)
for r in required:
self.assertRegexpMatches(help_text, r)

74
tests/test_utils.py Normal file
View File

@@ -0,0 +1,74 @@
from cinderclient import exceptions
from cinderclient import utils
from cinderclient import base
from tests import utils as test_utils
UUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0'
class FakeResource(object):
def __init__(self, _id, properties):
self.id = _id
try:
self.name = properties['name']
except KeyError:
pass
try:
self.display_name = properties['display_name']
except KeyError:
pass
class FakeManager(base.ManagerWithFind):
resource_class = FakeResource
resources = [
FakeResource('1234', {'name': 'entity_one'}),
FakeResource(UUID, {'name': 'entity_two'}),
FakeResource('4242', {'display_name': 'entity_three'}),
FakeResource('5678', {'name': '9876'})
]
def get(self, resource_id):
for resource in self.resources:
if resource.id == str(resource_id):
return resource
raise exceptions.NotFound(resource_id)
def list(self):
return self.resources
class FindResourceTestCase(test_utils.TestCase):
def setUp(self):
self.manager = FakeManager(None)
def test_find_none(self):
self.assertRaises(exceptions.CommandError,
utils.find_resource,
self.manager,
'asdf')
def test_find_by_integer_id(self):
output = utils.find_resource(self.manager, 1234)
self.assertEqual(output, self.manager.get('1234'))
def test_find_by_str_id(self):
output = utils.find_resource(self.manager, '1234')
self.assertEqual(output, self.manager.get('1234'))
def test_find_by_uuid(self):
output = utils.find_resource(self.manager, UUID)
self.assertEqual(output, self.manager.get(UUID))
def test_find_by_str_name(self):
output = utils.find_resource(self.manager, 'entity_one')
self.assertEqual(output, self.manager.get('1234'))
def test_find_by_str_displayname(self):
output = utils.find_resource(self.manager, 'entity_three')
self.assertEqual(output, self.manager.get('4242'))
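
These cases suggest the lookup order utils.find_resource follows: id first, then name, then display_name. A minimal sketch of that order (an assumption for illustration; the real helper may differ in detail):

from cinderclient import exceptions


def find_resource_sketch(manager, name_or_id):
    # try a straight lookup by id (integer, digit string, or UUID)
    try:
        return manager.get(name_or_id)
    except exceptions.NotFound:
        pass
    # fall back to name, then display_name
    for attr in ('name', 'display_name'):
        try:
            return manager.find(**{attr: name_or_id})
        except exceptions.NotFound:
            continue
    raise exceptions.CommandError("No resource with a name or ID of "
                                  "'%s' exists." % name_or_id)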

5
tests/utils.py Normal file
View File

@@ -0,0 +1,5 @@
import unittest2
class TestCase(unittest2.TestCase):
pass

0
tests/v1/__init__.py Normal file
View File

765
tests/v1/fakes.py Normal file
View File

@@ -0,0 +1,765 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib2
import urlparse
from cinderclient import client as base_client
from cinderclient.v1 import client
from tests import fakes
class FakeClient(fakes.FakeClient, client.Client):
def __init__(self, *args, **kwargs):
client.Client.__init__(self, 'username', 'password',
'project_id', 'auth_url')
self.client = FakeHTTPClient(**kwargs)
class FakeHTTPClient(base_client.HTTPClient):
def __init__(self, **kwargs):
self.username = 'username'
self.password = 'password'
self.auth_url = 'auth_url'
self.callstack = []
def _cs_request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
assert 'body' not in kwargs
elif method == 'PUT':
assert 'body' in kwargs
# Call the method
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
# Note the call
self.callstack.append((method, url, kwargs.get('body', None)))
status, body = getattr(self, callback)(**kwargs)
if hasattr(status, 'items'):
return httplib2.Response(status), body
else:
return httplib2.Response({"status": status}), body
#
# Limits
#
def get_limits(self, **kw):
return (200, {"limits": {
"rate": [
{
"uri": "*",
"regex": ".*",
"limit": [
{
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"
},
{
"value": 10,
"verb": "PUT",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"
},
{
"value": 100,
"verb": "DELETE",
"remaining": 100,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"
}
]
},
{
"uri": "*/servers",
"regex": "^/servers",
"limit": [
{
"verb": "POST",
"value": 25,
"remaining": 24,
"unit": "DAY",
"next-available": "2011-12-15T22:42:45Z"
}
]
}
],
"absolute": {
"maxTotalRAMSize": 51200,
"maxServerMeta": 5,
"maxImageMeta": 5,
"maxPersonality": 5,
"maxPersonalitySize": 10240
},
},
})
#
# Servers
#
def get_volumes(self, **kw):
return (200, {"volumes": [
{'id': 1234, 'name': 'sample-volume'},
{'id': 5678, 'name': 'sample-volume2'}
]})
# TODO(jdg): This will need to change
# at the very least it's not complete
def get_volumes_detail(self, **kw):
return (200, {"volumes": [
{'id': 1234,
'name': 'sample-volume',
'attachments': [{'server_id': 1234}]
},
]})
def get_volumes_1234(self, **kw):
r = {'volume': self.get_volumes_detail()[1]['volumes'][0]}
return (200, r)
def post_servers(self, body, **kw):
assert set(body.keys()) <= set(['server', 'os:scheduler_hints'])
fakes.assert_has_keys(body['server'],
required=['name', 'imageRef', 'flavorRef'],
optional=['metadata', 'personality'])
if 'personality' in body['server']:
for pfile in body['server']['personality']:
fakes.assert_has_keys(pfile, required=['path', 'contents'])
return (202, self.get_servers_1234()[1])
def get_servers_1234(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_5678(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][1]}
return (200, r)
def put_servers_1234(self, body, **kw):
assert body.keys() == ['server']
fakes.assert_has_keys(body['server'], optional=['name', 'adminPass'])
return (204, None)
def delete_servers_1234(self, **kw):
return (202, None)
def delete_volumes_1234(self, **kw):
return (202, None)
def delete_servers_1234_metadata_test_key(self, **kw):
return (204, None)
def delete_servers_1234_metadata_key1(self, **kw):
return (204, None)
def delete_servers_1234_metadata_key2(self, **kw):
return (204, None)
def post_servers_1234_metadata(self, **kw):
return (204, {'metadata': {'test_key': 'test_value'}})
def get_servers_1234_diagnostics(self, **kw):
return (200, {'data': 'Fake diagnostics'})
def get_servers_1234_actions(self, **kw):
return (200, {'actions': [
{
'action': 'rebuild',
'error': None,
'created_at': '2011-12-30 11:45:36'
},
{
'action': 'reboot',
'error': 'Failed!',
'created_at': '2011-12-30 11:40:29'
},
]})
#
# Server Addresses
#
def get_servers_1234_ips(self, **kw):
return (200, {'addresses':
self.get_servers_1234()[1]['server']['addresses']})
def get_servers_1234_ips_public(self, **kw):
return (200, {'public':
self.get_servers_1234_ips()[1]['addresses']['public']})
def get_servers_1234_ips_private(self, **kw):
return (200, {'private':
self.get_servers_1234_ips()[1]['addresses']['private']})
def delete_servers_1234_ips_public_1_2_3_4(self, **kw):
return (202, None)
#
# Server actions
#
def post_servers_1234_action(self, body, **kw):
_body = None
resp = 202
assert len(body.keys()) == 1
action = body.keys()[0]
if action == 'reboot':
assert body[action].keys() == ['type']
assert body[action]['type'] in ['HARD', 'SOFT']
elif action == 'rebuild':
keys = body[action].keys()
if 'adminPass' in keys:
keys.remove('adminPass')
assert keys == ['imageRef']
_body = self.get_servers_1234()[1]
elif action == 'resize':
assert body[action].keys() == ['flavorRef']
elif action == 'confirmResize':
assert body[action] is None
# This one method returns a different response code
return (204, None)
elif action == 'revertResize':
assert body[action] is None
elif action == 'migrate':
assert body[action] is None
elif action == 'rescue':
assert body[action] is None
elif action == 'unrescue':
assert body[action] is None
elif action == 'lock':
assert body[action] is None
elif action == 'unlock':
assert body[action] is None
elif action == 'addFixedIp':
assert body[action].keys() == ['networkId']
elif action == 'removeFixedIp':
assert body[action].keys() == ['address']
elif action == 'addFloatingIp':
assert body[action].keys() == ['address']
elif action == 'removeFloatingIp':
assert body[action].keys() == ['address']
elif action == 'createImage':
assert set(body[action].keys()) == set(['name', 'metadata'])
resp = dict(status=202, location="http://blah/images/456")
elif action == 'changePassword':
assert body[action].keys() == ['adminPass']
elif action == 'os-getConsoleOutput':
assert body[action].keys() == ['length']
return (202, {'output': 'foo'})
elif action == 'os-getVNCConsole':
assert body[action].keys() == ['type']
elif action == 'os-migrateLive':
assert set(body[action].keys()) == set(['host',
'block_migration',
'disk_over_commit'])
else:
raise AssertionError("Unexpected server action: %s" % action)
return (resp, _body)
#
# Cloudpipe
#
def get_os_cloudpipe(self, **kw):
return (200, {'cloudpipes': [
{'project_id':1}
]})
def post_os_cloudpipe(self, **ks):
return (202, {'instance_id': '9d5824aa-20e6-4b9f-b967-76a699fc51fd'})
#
# Flavors
#
def get_flavors(self, **kw):
return (200, {'flavors': [
{'id': 1, 'name': '256 MB Server'},
{'id': 2, 'name': '512 MB Server'}
]})
def get_flavors_detail(self, **kw):
return (200, {'flavors': [
{'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
'OS-FLV-EXT-DATA:ephemeral': 10},
{'id': 2, 'name': '512 MB Server', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 20}
]})
def get_flavors_1(self, **kw):
return (200, {'flavor': self.get_flavors_detail()[1]['flavors'][0]})
def get_flavors_2(self, **kw):
return (200, {'flavor': self.get_flavors_detail()[1]['flavors'][1]})
def get_flavors_3(self, **kw):
# Diablo has no ephemeral
return (200, {'flavor': {'id': 3, 'name': '256 MB Server',
'ram': 256, 'disk': 10}})
def delete_flavors_flavordelete(self, **kw):
return (202, None)
def post_flavors(self, body, **kw):
return (202, {'flavor': self.get_flavors_detail()[1]['flavors'][0]})
#
# Floating ips
#
def get_os_floating_ip_pools(self):
return (200, {'floating_ip_pools': [{'name': 'foo'}, {'name': 'bar'}]})
def get_os_floating_ips(self, **kw):
return (200, {'floating_ips': [
{'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'},
{'id': 2, 'fixed_ip': '10.0.0.2', 'ip': '11.0.0.2'},
]})
def get_os_floating_ips_1(self, **kw):
return (200, {'floating_ip':
{'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1'}
})
def post_os_floating_ips(self, body, **kw):
if body.get('pool'):
return (200, {'floating_ip':
{'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1',
'pool': 'cinder'}})
else:
return (200, {'floating_ip':
{'id': 1, 'fixed_ip': '10.0.0.1', 'ip': '11.0.0.1',
'pool': None}})
def delete_os_floating_ips_1(self, **kw):
return (204, None)
def get_os_floating_ip_dns(self, **kw):
return (205, {'domain_entries':
[{'domain': 'example.org'},
{'domain': 'example.com'}]})
def get_os_floating_ip_dns_testdomain_entries(self, **kw):
if kw.get('ip'):
return (205, {'dns_entries':
[{'dns_entry':
{'ip': kw.get('ip'),
'name': "host1",
'type': "A",
'domain': 'testdomain'}},
{'dns_entry':
{'ip': kw.get('ip'),
'name': "host2",
'type': "A",
'domain': 'testdomain'}}]})
else:
return (404, None)
def get_os_floating_ip_dns_testdomain_entries_testname(self, **kw):
return (205, {'dns_entry':
{'ip': "10.10.10.10",
'name': 'testname',
'type': "A",
'domain': 'testdomain'}})
def put_os_floating_ip_dns_testdomain(self, body, **kw):
if body['domain_entry']['scope'] == 'private':
fakes.assert_has_keys(body['domain_entry'],
required=['availability_zone', 'scope'])
elif body['domain_entry']['scope'] == 'public':
fakes.assert_has_keys(body['domain_entry'],
required=['project', 'scope'])
else:
fakes.assert_has_keys(body['domain_entry'],
required=['project', 'scope'])
return (205, None)
def put_os_floating_ip_dns_testdomain_entries_testname(self, body, **kw):
fakes.assert_has_keys(body['dns_entry'],
required=['ip', 'dns_type'])
return (205, None)
def delete_os_floating_ip_dns_testdomain(self, **kw):
return (200, None)
def delete_os_floating_ip_dns_testdomain_entries_testname(self, **kw):
return (200, None)
#
# Images
#
def get_images(self, **kw):
return (200, {'images': [
{'id': 1, 'name': 'CentOS 5.2'},
{'id': 2, 'name': 'My Server Backup'}
]})
def get_images_detail(self, **kw):
return (200, {'images': [
{
'id': 1,
'name': 'CentOS 5.2',
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "ACTIVE",
"metadata": {
"test_key": "test_value",
},
"links": {},
},
{
"id": 743,
"name": "My Server Backup",
"serverId": 1234,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {},
}
]})
def get_images_1(self, **kw):
return (200, {'image': self.get_images_detail()[1]['images'][0]})
def get_images_2(self, **kw):
return (200, {'image': self.get_images_detail()[1]['images'][1]})
def post_images(self, body, **kw):
assert body.keys() == ['image']
fakes.assert_has_keys(body['image'], required=['serverId', 'name'])
return (202, self.get_images_1()[1])
def post_images_1_metadata(self, body, **kw):
assert body.keys() == ['metadata']
fakes.assert_has_keys(body['metadata'],
required=['test_key'])
return (200,
{'metadata': self.get_images_1()[1]['image']['metadata']})
def delete_images_1(self, **kw):
return (204, None)
def delete_images_1_metadata_test_key(self, **kw):
return (204, None)
#
# Keypairs
#
def get_os_keypairs(self, **kw):
return (200, {"keypairs": [
{'fingerprint': 'FAKE_KEYPAIR', 'name': 'test'}
]})
def delete_os_keypairs_test(self, **kw):
return (202, None)
def post_os_keypairs(self, body, **kw):
assert body.keys() == ['keypair']
fakes.assert_has_keys(body['keypair'],
required=['name'])
r = {'keypair': self.get_os_keypairs()[1]['keypairs'][0]}
return (202, r)
#
# Virtual Interfaces
#
def get_servers_1234_os_virtual_interfaces(self, **kw):
return (200, {"virtual_interfaces": [
{'id': 'fakeid', 'mac_address': 'fakemac'}
]})
#
# Quotas
#
def get_os_quota_sets_test(self, **kw):
return (200, {'quota_set': {
'tenant_id': 'test',
'metadata_items': [],
'injected_file_content_bytes': 1,
'volumes': 1,
'gigabytes': 1,
'ram': 1,
'floating_ips': 1,
'instances': 1,
'injected_files': 1,
'cores': 1}})
def get_os_quota_sets_test_defaults(self):
return (200, {'quota_set': {
'tenant_id': 'test',
'metadata_items': [],
'injected_file_content_bytes': 1,
'volumes': 1,
'gigabytes': 1,
'ram': 1,
'floating_ips': 1,
'instances': 1,
'injected_files': 1,
'cores': 1}})
def put_os_quota_sets_test(self, body, **kw):
assert body.keys() == ['quota_set']
fakes.assert_has_keys(body['quota_set'],
required=['tenant_id'])
return (200, {'quota_set': {
'tenant_id': 'test',
'metadata_items': [],
'injected_file_content_bytes': 1,
'volumes': 2,
'gigabytes': 1,
'ram': 1,
'floating_ips': 1,
'instances': 1,
'injected_files': 1,
'cores': 1}})
#
# Quota Classes
#
def get_os_quota_class_sets_test(self, **kw):
return (200, {'quota_class_set': {
'class_name': 'test',
'metadata_items': [],
'injected_file_content_bytes': 1,
'volumes': 1,
'gigabytes': 1,
'ram': 1,
'floating_ips': 1,
'instances': 1,
'injected_files': 1,
'cores': 1}})
def put_os_quota_class_sets_test(self, body, **kw):
assert body.keys() == ['quota_class_set']
fakes.assert_has_keys(body['quota_class_set'],
required=['class_name'])
return (200, {'quota_class_set': {
'class_name': 'test',
'metadata_items': [],
'injected_file_content_bytes': 1,
'volumes': 2,
'gigabytes': 1,
'ram': 1,
'floating_ips': 1,
'instances': 1,
'injected_files': 1,
'cores': 1}})
#
# Security Groups
#
def get_os_security_groups(self, **kw):
return (200, {"security_groups": [
{'id': 1, 'name': 'test', 'description': 'FAKE_SECURITY_GROUP'}
]})
def get_os_security_groups_1(self, **kw):
return (200, {"security_group":
{'id': 1, 'name': 'test', 'description': 'FAKE_SECURITY_GROUP'}
})
def delete_os_security_groups_1(self, **kw):
return (202, None)
def post_os_security_groups(self, body, **kw):
assert body.keys() == ['security_group']
fakes.assert_has_keys(body['security_group'],
required=['name', 'description'])
r = {'security_group':
self.get_os_security_groups()[1]['security_groups'][0]}
return (202, r)
#
# Security Group Rules
#
def get_os_security_group_rules(self, **kw):
return (200, {"security_group_rules": [
{'id': 1, 'parent_group_id': 1, 'group_id': 2,
'ip_protocol': 'TCP', 'from_port': '22', 'to_port': 22,
'cidr': '10.0.0.0/8'}
]})
def delete_os_security_group_rules_1(self, **kw):
return (202, None)
def post_os_security_group_rules(self, body, **kw):
assert body.keys() == ['security_group_rule']
fakes.assert_has_keys(body['security_group_rule'],
required=['parent_group_id'],
optional=['group_id', 'ip_protocol', 'from_port',
'to_port', 'cidr'])
r = {'security_group_rule':
self.get_os_security_group_rules()[1]['security_group_rules'][0]}
return (202, r)
#
# Tenant Usage
#
def get_os_simple_tenant_usage(self, **kw):
return (200, {u'tenant_usages': [{
u'total_memory_mb_usage': 25451.762807466665,
u'total_vcpus_usage': 49.71047423333333,
u'total_hours': 49.71047423333333,
u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869',
u'stop': u'2012-01-22 19:48:41.750722',
u'server_usages': [{
u'hours': 49.71047423333333,
u'uptime': 27035, u'local_gb': 0, u'ended_at': None,
u'name': u'f15image1',
u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869',
u'vcpus': 1, u'memory_mb': 512, u'state': u'active',
u'flavor': u'm1.tiny',
u'started_at': u'2012-01-20 18:06:06.479998'}],
u'start': u'2011-12-25 19:48:41.750687',
u'total_local_gb_usage': 0.0}]})
def get_os_simple_tenant_usage_tenantfoo(self, **kw):
return (200, {u'tenant_usage': {
u'total_memory_mb_usage': 25451.762807466665,
u'total_vcpus_usage': 49.71047423333333,
u'total_hours': 49.71047423333333,
u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869',
u'stop': u'2012-01-22 19:48:41.750722',
u'server_usages': [{
u'hours': 49.71047423333333,
u'uptime': 27035, u'local_gb': 0, u'ended_at': None,
u'name': u'f15image1',
u'tenant_id': u'7b0a1d73f8fb41718f3343c207597869',
u'vcpus': 1, u'memory_mb': 512, u'state': u'active',
u'flavor': u'm1.tiny',
u'started_at': u'2012-01-20 18:06:06.479998'}],
u'start': u'2011-12-25 19:48:41.750687',
u'total_local_gb_usage': 0.0}})
#
# Certificates
#
def get_os_certificates_root(self, **kw):
return (200, {'certificate': {'private_key': None, 'data': 'foo'}})
def post_os_certificates(self, **kw):
return (200, {'certificate': {'private_key': 'foo', 'data': 'bar'}})
#
# Aggregates
#
def get_os_aggregates(self, **kw):
return (200, {"aggregates": [
{'id':'1',
'name': 'test',
'availability_zone': 'cinder1'},
{'id':'2',
'name': 'test2',
'availability_zone': 'cinder1'},
]})
def _return_aggregate(self):
r = {'aggregate': self.get_os_aggregates()[1]['aggregates'][0]}
return (200, r)
def get_os_aggregates_1(self, **kw):
return self._return_aggregate()
def post_os_aggregates(self, body, **kw):
return self._return_aggregate()
def put_os_aggregates_1(self, body, **kw):
return self._return_aggregate()
def put_os_aggregates_2(self, body, **kw):
return self._return_aggregate()
def post_os_aggregates_1_action(self, body, **kw):
return self._return_aggregate()
def post_os_aggregates_2_action(self, body, **kw):
return self._return_aggregate()
def delete_os_aggregates_1(self, **kw):
return (202, None)
#
# Hosts
#
def get_os_hosts_host(self, **kw):
return (200, {'host':
[{'resource': {'project': '(total)', 'host': 'dummy',
'cpu': 16, 'memory_mb': 32234, 'disk_gb': 128}},
{'resource': {'project': '(used_now)', 'host': 'dummy',
'cpu': 1, 'memory_mb': 2075, 'disk_gb': 45}},
{'resource': {'project': '(used_max)', 'host': 'dummy',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}},
{'resource': {'project': 'admin', 'host': 'dummy',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}}]})
def get_os_hosts_sample_host(self, **kw):
return (200, {'host': [{'resource': {'host': 'sample_host'}}], })
def put_os_hosts_sample_host_1(self, body, **kw):
return (200, {'host': 'sample-host_1',
'status': 'enabled'})
def put_os_hosts_sample_host_2(self, body, **kw):
return (200, {'host': 'sample-host_2',
'maintenance_mode': 'on_maintenance'})
def put_os_hosts_sample_host_3(self, body, **kw):
return (200, {'host': 'sample-host_3',
'status': 'enabled',
'maintenance_mode': 'on_maintenance'})
def get_os_hosts_sample_host_startup(self, **kw):
return (200, {'host': 'sample_host',
'power_action': 'startup'})
def get_os_hosts_sample_host_reboot(self, **kw):
return (200, {'host': 'sample_host',
'power_action': 'reboot'})
def get_os_hosts_sample_host_shutdown(self, **kw):
return (200, {'host': 'sample_host',
'power_action': 'shutdown'})
def put_os_hosts_sample_host(self, body, **kw):
result = {'host': 'dummy'}
result.update(body)
return (200, result)

297
tests/v1/test_auth.py Normal file
View File

@@ -0,0 +1,297 @@
import httplib2
import json
import mock
from cinderclient.v1 import client
from cinderclient import exceptions
from tests import utils
def to_http_response(resp_dict):
"""Converts dict of response attributes to httplib response."""
resp = httplib2.Response(resp_dict)
for k, v in resp_dict['headers'].items():
resp[k] = v
return resp
class AuthenticateAgainstKeystoneTests(utils.TestCase):
def test_authenticate_success(self):
cs = client.Client("username", "password", "project_id",
"auth_url/v2.0", service_type='compute')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"type": "compute",
"endpoints": [
{
"region": "RegionOne",
"adminURL": "http://localhost:8774/v1",
"internalURL": "http://localhost:8774/v1",
"publicURL": "http://localhost:8774/v1/",
},
],
},
],
},
}
auth_response = httplib2.Response({
"status": 200,
"body": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response,
json.dumps(resp)))
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantName': cs.client.projectid,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(token_url, "POST",
headers=headers,
body=json.dumps(body))
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
test_auth_call()
def test_authenticate_failure(self):
cs = client.Client("username", "password", "project_id",
"auth_url/v2.0")
resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}}
auth_response = httplib2.Response({
"status": 401,
"body": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response,
json.dumps(resp)))
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
test_auth_call()
def test_auth_redirect(self):
cs = client.Client("username", "password", "project_id",
"auth_url/v1", service_type='compute')
dict_correct_response = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"type": "compute",
"endpoints": [
{
"adminURL": "http://localhost:8774/v1",
"region": "RegionOne",
"internalURL": "http://localhost:8774/v1",
"publicURL": "http://localhost:8774/v1/",
},
],
},
],
},
}
correct_response = json.dumps(dict_correct_response)
dict_responses = [
{"headers": {'location':'http://127.0.0.1:5001'},
"status": 305,
"body": "Use proxy"},
# Configured on the admin port, cinder redirects to the v2.0 port.
# When connecting to it, keystone auth succeeds via the v1.0
# protocol (through headers), but tokens are returned in the
# body (looks like a keystone bug). Left here for compatibility.
{"headers": {},
"status": 200,
"body": correct_response},
{"headers": {},
"status": 200,
"body": correct_response}
]
responses = [(to_http_response(resp), resp['body']) \
for resp in dict_responses]
def side_effect(*args, **kwargs):
return responses.pop(0)
mock_request = mock.Mock(side_effect=side_effect)
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'User-Agent': cs.client.USER_AGENT,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
body = {
'auth': {
'passwordCredentials': {
'username': cs.client.user,
'password': cs.client.password,
},
'tenantName': cs.client.projectid,
},
}
token_url = cs.client.auth_url + "/tokens"
mock_request.assert_called_with(token_url, "POST",
headers=headers,
body=json.dumps(body))
resp = dict_correct_response
endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
public_url = endpoints[0]["publicURL"].rstrip('/')
self.assertEqual(cs.client.management_url, public_url)
token_id = resp["access"]["token"]["id"]
self.assertEqual(cs.client.auth_token, token_id)
test_auth_call()
def test_ambiguous_endpoints(self):
cs = client.Client("username", "password", "project_id",
"auth_url/v2.0", service_type='compute')
resp = {
"access": {
"token": {
"expires": "12345",
"id": "FAKE_ID",
},
"serviceCatalog": [
{
"adminURL": "http://localhost:8774/v1",
"type": "compute",
"name": "Compute CLoud",
"endpoints": [
{
"region": "RegionOne",
"internalURL": "http://localhost:8774/v1",
"publicURL": "http://localhost:8774/v1/",
},
],
},
{
"adminURL": "http://localhost:8774/v1",
"type": "compute",
"name": "Hyper-compute Cloud",
"endpoints": [
{
"internalURL": "http://localhost:8774/v1",
"publicURL": "http://localhost:8774/v1/",
},
],
},
],
},
}
auth_response = httplib2.Response({
"status": 200,
"body": json.dumps(resp),
})
mock_request = mock.Mock(return_value=(auth_response,
json.dumps(resp)))
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.AmbiguousEndpoints,
cs.client.authenticate)
test_auth_call()
class AuthenticationTests(utils.TestCase):
def test_authenticate_success(self):
cs = client.Client("username", "password", "project_id", "auth_url")
management_url = 'https://servers.api.rackspacecloud.com/v1.1/443470'
auth_response = httplib2.Response({
'status': 204,
'x-server-management-url': management_url,
'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1',
})
mock_request = mock.Mock(return_value=(auth_response, None))
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
cs.client.authenticate()
headers = {
'Accept': 'application/json',
'X-Auth-User': 'username',
'X-Auth-Key': 'password',
'X-Auth-Project-Id': 'project_id',
'User-Agent': cs.client.USER_AGENT
}
mock_request.assert_called_with(cs.client.auth_url, 'GET',
headers=headers)
self.assertEqual(cs.client.management_url,
auth_response['x-server-management-url'])
self.assertEqual(cs.client.auth_token,
auth_response['x-auth-token'])
test_auth_call()
def test_authenticate_failure(self):
cs = client.Client("username", "password", "project_id", "auth_url")
auth_response = httplib2.Response({'status': 401})
mock_request = mock.Mock(return_value=(auth_response, None))
@mock.patch.object(httplib2.Http, "request", mock_request)
def test_auth_call():
self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
test_auth_call()
def test_auth_automatic(self):
cs = client.Client("username", "password", "project_id", "auth_url")
http_client = cs.client
http_client.management_url = ''
mock_request = mock.Mock(return_value=(None, None))
@mock.patch.object(http_client, 'request', mock_request)
@mock.patch.object(http_client, 'authenticate')
def test_auth_call(m):
http_client.get('/')
m.assert_called()
mock_request.assert_called()
test_auth_call()
def test_auth_manual(self):
cs = client.Client("username", "password", "project_id", "auth_url")
@mock.patch.object(cs.client, 'authenticate')
def test_auth_call(m):
cs.authenticate()
m.assert_called()
test_auth_call()

77
tests/v1/test_shell.py Normal file
View File

@@ -0,0 +1,77 @@
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cinderclient import client
from cinderclient import shell
from tests.v1 import fakes
from tests import utils
class ShellTest(utils.TestCase):
# Patch os.environ to avoid required auth info.
def setUp(self):
"""Run before each test."""
self.old_environment = os.environ.copy()
os.environ = {
'CINDER_USERNAME': 'username',
'CINDER_PASSWORD': 'password',
'CINDER_PROJECT_ID': 'project_id',
'OS_COMPUTE_API_VERSION': '1.1',
'CINDER_URL': 'http://no.where',
}
self.shell = shell.OpenStackCinderShell()
#HACK(bcwaldon): replace this when we start using stubs
self.old_get_client_class = client.get_client_class
client.get_client_class = lambda *_: fakes.FakeClient
def tearDown(self):
os.environ = self.old_environment
# For some methods, like test_image_meta_bad_action, a SystemExit
# is raised before self.shell.cs ever gets instantiated, which is
# OK in this case, so we make sure the attribute is there before
# using it.
if hasattr(self.shell, 'cs'):
self.shell.cs.clear_callstack()
#HACK(bcwaldon): replace this when we start using stubs
client.get_client_class = self.old_get_client_class
def run_command(self, cmd):
self.shell.main(cmd.split())
def assert_called(self, method, url, body=None, **kwargs):
return self.shell.cs.assert_called(method, url, body, **kwargs)
def assert_called_anytime(self, method, url, body=None):
return self.shell.cs.assert_called_anytime(method, url, body)
def test_list(self):
self.run_command('list')
# NOTE(jdg): we default to detail currently
self.assert_called('GET', '/volumes/detail')
def test_show(self):
self.run_command('show 1234')
self.assert_called('GET', '/volumes/1234')
def test_delete(self):
self.run_command('delete 1234')
self.assert_called('DELETE', '/volumes/1234')

1
tests/v1/testfile.txt Normal file
View File

@@ -0,0 +1 @@
BLAH

29
tests/v1/utils.py Normal file
View File

@@ -0,0 +1,29 @@
from nose.tools import ok_
def fail(msg):
raise AssertionError(msg)
def assert_in(thing, seq, msg=None):
msg = msg or "'%s' not found in %s" % (thing, seq)
ok_(thing in seq, msg)
def assert_not_in(thing, seq, msg=None):
msg = msg or "unexpected '%s' found in %s" % (thing, seq)
ok_(thing not in seq, msg)
def assert_has_keys(dict, required=[], optional=[]):
keys = dict.keys()
for k in required:
assert_in(k, keys, "required key %s missing from %s" % (k, dict))
allowed_keys = set(required) | set(optional)
extra_keys = set(keys).difference(set(required + optional))
if extra_keys:
fail("found unexpected keys: %s" % list(extra_keys))
def assert_isinstance(thing, kls):
ok_(isinstance(thing, kls), "%s is not an instance of %s" % (thing, kls))

3
tools/generate_authors.sh Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/bash
git shortlog -se | cut -c8-

244
tools/install_venv.py Normal file
View File

@@ -0,0 +1,244 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for python-cinderclient's development virtualenv
"""
import optparse
import os
import subprocess
import sys
import platform
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
def die(message, *args):
print >> sys.stderr, message % args
sys.exit(1)
def check_python_version():
if sys.version_info < (2, 6):
die("Need Python Version >= 2.6")
def run_command_with_code(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(cmd, redirect_output=True, check_exit_code=True):
return run_command_with_code(cmd, redirect_output, check_exit_code)[0]
class Distro(object):
def check_cmd(self, cmd):
return bool(run_command(['which', cmd], check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print 'Installing virtualenv via easy_install...',
if run_command(['easy_install', 'virtualenv']):
print 'Succeeded'
return
else:
print 'Failed'
die('ERROR: virtualenv not found.\n\nDevelopment'
' requires virtualenv, please install it using your'
' favorite package management tool')
def post_process(self):
"""Any distribution-specific post-processing gets done here.
In particular, this is useful for applying patches to code inside
the venv."""
pass
class Debian(Distro):
"""This covers all Debian-based distributions."""
def check_pkg(self, pkg):
return run_command_with_code(['dpkg', '-l', pkg],
check_exit_code=False)[1] == 0
def apt_install(self, pkg, **kwargs):
run_command(['sudo', 'apt-get', 'install', '-y', pkg], **kwargs)
def apply_patch(self, originalfile, patchfile):
run_command(['patch', originalfile, patchfile])
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.apt_install('python-virtualenv', check_exit_code=False)
super(Debian, self).install_virtualenv()
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux"""
def check_pkg(self, pkg):
return run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def yum_install(self, pkg, **kwargs):
run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs)
def apply_patch(self, originalfile, patchfile):
run_command(['patch', originalfile, patchfile])
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.yum_install('python-virtualenv', check_exit_code=False)
super(Fedora, self).install_virtualenv()
def get_distro():
if os.path.exists('/etc/fedora-release') or \
os.path.exists('/etc/redhat-release'):
return Fedora()
elif os.path.exists('/etc/debian_version'):
return Debian()
else:
return Distro()
def check_dependencies():
get_distro().install_virtualenv()
def create_virtualenv(venv=VENV, no_site_packages=True):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
if no_site_packages:
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
else:
run_command(['virtualenv', '-q', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']).strip():
die("Failed to install pip.")
print 'done.'
def pip_install(*args):
run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
# First things first, make sure our venv has the latest pip and distribute.
pip_install('pip')
pip_install('distribute')
pip_install('-r', PIP_REQUIRES)
# Tell the virtual env how to "import cinderclient"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"cinderclient.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
f.close()
def post_process():
get_distro().post_process()
def print_help():
help = """
python-cinderclient development environment setup is complete.
python-cinderclient development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the python-cinderclient virtualenv for the extent of your current
shell session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def parse_args():
"""Parse command-line arguments"""
parser = optparse.OptionParser()
parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
default=False, action="store_true",
help="Do not inherit packages from global Python install")
return parser.parse_args()
def main(argv):
(options, args) = parse_args()
check_python_version()
check_dependencies()
create_virtualenv(no_site_packages=options.no_site_packages)
install_dependencies()
post_process()
print_help()
if __name__ == '__main__':
main(sys.argv)
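
The Distro.post_process hook above is the extension point for distribution-specific fixes after the virtualenv is built; a hypothetical subclass, assumed to live alongside this script, might look like the following (the patched file and patch path are made up for illustration):

class PatchedFedora(Fedora):
    def post_process(self):
        # apply a local fix to a file installed inside the venv
        broken = os.path.join(VENV, 'lib', PY_VERSION,
                              'site-packages', 'somepkg', 'broken.py')
        self.apply_patch(broken, os.path.join(ROOT, 'tools', 'somepkg.patch'))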

15
tools/cinder.bash_completion Normal file
View File

@@ -0,0 +1,15 @@
_cinder()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts="$(cinder bash_completion)"
COMPLETION_CACHE=~/.cinderclient/*/*-cache
opts+=" "$(cat $COMPLETION_CACHE 2> /dev/null | tr '\n' ' ')
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
}
complete -F _cinder cinder

9
tools/pip-requires Normal file
View File

@@ -0,0 +1,9 @@
argparse
coverage
httplib2
mock
nose
prettytable
simplejson
pep8==0.6.1
unittest2

145
tools/rfc.sh Executable file
View File

@@ -0,0 +1,145 @@
#!/bin/sh -e
# Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
# This initial version of this file was taken from the source tree
# of GlusterFS. It was not directly attributed, but is assumed to be
# Copyright (c) 2010-2011 Gluster, Inc and released under GPLv3.
# Subsequent modifications are Copyright (c) 2011 OpenStack, LLC.
#
# GlusterFS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# GlusterFS is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
branch="master";
set_hooks_commit_msg()
{
top_dir=`git rev-parse --show-toplevel`
f="${top_dir}/.git/hooks/commit-msg";
u="https://review.openstack.org/tools/hooks/commit-msg";
if [ -x "$f" ]; then
return;
fi
curl -o $f $u || wget -O $f $u;
chmod +x $f;
GIT_EDITOR=true git commit --amend
}
add_remote()
{
username=$1
project=$2
echo "No remote set, testing ssh://$username@review.openstack.org:29418"
if project_list=`ssh -p29418 -o StrictHostKeyChecking=no $username@review.openstack.org gerrit ls-projects 2>/dev/null`
then
echo "$username@review.openstack.org:29418 worked."
if echo $project_list | grep $project >/dev/null
then
echo "Creating a git remote called gerrit that maps to:"
echo " ssh://$username@review.openstack.org:29418/$project"
git remote add gerrit ssh://$username@review.openstack.org:29418/$project
else
echo "The current project name, $project, is not a known project."
echo "Please either reclone from github/gerrit or create a"
echo "remote named gerrit that points to the intended project."
return 1
fi
return 0
fi
return 1
}
check_remote()
{
if ! git remote | grep gerrit >/dev/null 2>&1
then
origin_project=`git remote show origin | grep 'Fetch URL' | perl -nle '@fields = split(m|[:/]|); $len = $#fields; print $fields[$len-1], "/", $fields[$len];'`
if add_remote $USERNAME $origin_project
then
return 0
else
echo "Your local name doesn't work on Gerrit."
echo -n "Enter Gerrit username (same as launchpad): "
read gerrit_user
if add_remote $gerrit_user $origin_project
then
return 0
else
echo "Can't infer where gerrit is - please set a remote named"
echo "gerrit manually and then try again."
echo
echo "For more information, please see:"
echo "\thttp://wiki.openstack.org/GerritWorkflow"
exit 1
fi
fi
fi
}
rebase_changes()
{
git fetch;
GIT_EDITOR=true git rebase -i origin/$branch || exit $?;
}
assert_diverge()
{
if ! git diff origin/$branch..HEAD | grep -q .
then
echo "No changes between the current branch and origin/$branch."
exit 1
fi
}
main()
{
set_hooks_commit_msg;
check_remote;
rebase_changes;
assert_diverge;
bug=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]ug|[Ll][Pp])\s*[#:]?\s*(\d+)/) {print "$2"; exit}')
bp=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)/) {print "$2"; exit}')
if [ "$DRY_RUN" = 1 ]; then
drier='echo -e Please use the following command to send your commits to review:\n\n'
else
drier=
fi
local_branch=`git branch | grep -Ei "\* (.*)" | cut -f2 -d' '`
if [ -z "$bug" ]; then
if [ -z "$bp" ]; then
$drier git push gerrit HEAD:refs/for/$branch/$local_branch;
else
$drier git push gerrit HEAD:refs/for/$branch/bp/$bp;
fi
else
$drier git push gerrit HEAD:refs/for/$branch/bug/$bug;
fi
}
main "$@"

4
tools/with_venv.sh Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
TOOLS=`dirname $0`
VENV=$TOOLS/../.venv
source $VENV/bin/activate && "$@"

14
tox.ini Normal file
View File

@@ -0,0 +1,14 @@
[tox]
envlist = py26,py27
[testenv]
deps = -r{toxinidir}/tools/pip-requires
commands = /bin/bash run_tests.sh -N
[testenv:pep8]
deps = pep8
commands = /bin/bash run_tests.sh -N --pep8
[testenv:coverage]
deps = coverage
commands = /bin/bash run_tests.sh -N --coverage