Make pep8 checks a bit stricter.

Along with moving to pep8 1.3.3, we also want to standardize
on what we ignore. This patch gets us most of the way there
by setting the ignore list to:
N4, E125, E126, E711, E712.

Almost all changes made here are white-space/indentation changes.

The removal of Hacking N4 errors from the ignore list will
be handled in a separate patch.

Change-Id: If45f156600485d23769449018590f60b4f69b0c5
This commit is contained in:
John Griffith 2012-11-24 20:17:32 -07:00
parent 0fb98971a7
commit 51418bdd5b
127 changed files with 2754 additions and 2830 deletions

View File

@ -127,8 +127,9 @@ class ShellCommands(object):
Falls back to Python shell if unavailable"""
self.run('python')
@args('--shell', dest="shell", metavar='<bpython|ipython|python >',
help='Python shell')
@args('--shell', dest="shell",
metavar='<bpython|ipython|python >',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
@ -180,7 +181,7 @@ def _db_error(caught_exception):
class HostCommands(object):
"""List hosts"""
"""List hosts."""
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
@ -206,8 +207,9 @@ class DbCommands(object):
def __init__(self):
pass
@args('--version', dest='version', metavar='<version>',
help='Database version')
@args('--version', dest='version',
metavar='<version>',
help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return migration.db_sync(version)
@ -224,9 +226,10 @@ class VersionCommands(object):
pass
def list(self):
print _("%(version)s (%(vcs)s)") % \
{'version': version.version_string(),
'vcs': version.version_string_with_vcs()}
print(
_("%(version)s (%(vcs)s)") %
{'version': version.version_string(),
'vcs': version.version_string_with_vcs()})
def __call__(self):
self.list()
@ -313,7 +316,7 @@ class ImportCommands(object):
columns = table.columns.keys()
for row in src.query(table).all():
data = dict([(str(column), getattr(row, column))
for column in columns])
for column in columns])
dest.add(new_row(**data))
dest.commit()
@ -325,7 +328,7 @@ class ImportCommands(object):
for row in src.query(table).all():
if row.resource == 'gigabytes' or row.resource == 'volumes':
data = dict([(str(column), getattr(row, column))
for column in columns])
for column in columns])
dest.add(new_row(**data))
dest.commit()
@ -352,10 +355,14 @@ class ImportCommands(object):
dest_db = '%s/cinder' % dest_db
self._import_db(src_db, dest_db, backup_db)
@args('--src', dest='src_tgts', metavar='<src tgts>',
help='[login@src_host:]/opt/stack/nova/volumes/')
@args('--dest', dest='dest_tgts', metavar='<dest tgts>',
help='[login@src_host:/opt/stack/cinder/volumes/]')
@args('--src',
dest='src_tgts',
metavar='<src tgts>',
help='[login@src_host:]/opt/stack/nova/volumes/')
@args('--dest',
dest='dest_tgts',
metavar='<dest tgts>',
help='[login@src_host:/opt/stack/cinder/volumes/]')
def copy_ptgt_files(self, src_tgts, dest_tgts=None):
"""Copy persistent scsi tgt files from nova to cinder.
@ -380,10 +387,12 @@ class ImportCommands(object):
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
"""Methods for dealing with a cloud in an odd state."""
@args('--volume', dest='volume_id', metavar='<volume id>',
help='Volume ID')
@args('--volume',
dest='volume_id',
metavar='<volume id>',
help='Volume ID')
def delete(self, volume_id):
"""Delete a volume, bypassing the check that it
must be available."""
@ -407,8 +416,10 @@ class VolumeCommands(object):
{"method": "delete_volume",
"args": {"volume_id": volume['id']}})
@args('--volume', dest='volume_id', metavar='<volume id>',
help='Volume ID')
@args('--volume',
dest='volume_id',
metavar='<volume id>',
help='Volume ID')
def reattach(self, volume_id):
"""Re-attach a volume that has previously been attached
to an instance. Typically called after a compute host
@ -429,7 +440,7 @@ class VolumeCommands(object):
class StorageManagerCommands(object):
"""Class for mangaging Storage Backends and Flavors"""
"""Class for mangaging Storage Backends and Flavors."""
def flavor_list(self, flavor=None):
ctxt = context.get_admin_context()
@ -449,9 +460,9 @@ class StorageManagerCommands(object):
for flav in flavors:
print "%-18s\t%-20s\t%s" % (
flav['id'],
flav['label'],
flav['description'])
flav['id'],
flav['label'],
flav['description'])
def flavor_create(self, label, desc):
# TODO(renukaapte) flavor name must be unique
@ -487,10 +498,10 @@ class StorageManagerCommands(object):
sys.exit(2)
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
_('Flavor id'),
_('SR UUID'),
_('SR Type'),
_('Config Parameters'),)
_('Flavor id'),
_('SR UUID'),
_('SR Type'),
_('Config Parameters'),)
for b in backends:
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
@ -516,8 +527,8 @@ class StorageManagerCommands(object):
print "error: %s" % ex
sys.exit(2)
config_params = " ".join(['%s=%s' %
(key, params[key]) for key in params])
config_params = " ".join(
['%s=%s' % (key, params[key]) for key in params])
if 'sr_uuid' in params:
sr_uuid = params['sr_uuid']
@ -532,11 +543,12 @@ class StorageManagerCommands(object):
c = raw_input('Proceed? (y/n) ')
if c == 'y' or c == 'Y':
try:
db.sm_backend_conf_update(ctxt, backend['id'],
dict(created=False,
flavor_id=flavors['id'],
sr_type=sr_type,
config_params=config_params))
db.sm_backend_conf_update(
ctxt, backend['id'],
dict(created=False,
flavor_id=flavors['id'],
sr_type=sr_type,
config_params=config_params))
except exception.DBError, e:
_db_error(e)
return
@ -578,10 +590,10 @@ class ConfigCommands(object):
class GetLogCommands(object):
"""Get logging information"""
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files"""
"""Get all of the errors from the log files."""
error_found = 0
if FLAGS.logdir:
logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
@ -601,7 +613,7 @@ class GetLogCommands(object):
print "No errors in logfiles!"
def syslog(self, num_entries=10):
"""Get <num_entries> of the cinder syslog events"""
"""Get <num_entries> of the cinder syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
@ -692,8 +704,8 @@ def main():
script_name = argv.pop(0)
if len(argv) < 1:
print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
{'version': version.version_string(),
'vcs': version.version_string_with_vcs()}
{'version': version.version_string(),
'vcs': version.version_string_with_vcs()}
print script_name + " category action [<args>]"
print _("Available categories:")
for k, _v in CATEGORIES:

View File

@ -181,14 +181,10 @@ class ViewBuilder(object):
_collection_name = None
def _get_links(self, request, identifier):
return [{
"rel": "self",
"href": self._get_href_link(request, identifier),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier),
}]
return [{"rel": "self",
"href": self._get_href_link(request, identifier), },
{"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier), }]
def _get_next_link(self, request, identifier):
"""Return href string with proper limit and marker params."""

View File

@ -27,14 +27,15 @@ from cinder import volume
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('volume',
'extended_snapshot_attributes')
authorize = extensions.soft_extension_authorizer(
'volume',
'extended_snapshot_attributes')
class ExtendedSnapshotAttributesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedSnapshotAttributesController, self).__init__(*args,
**kwargs)
**kwargs)
self.volume_api = volume.API()
def _get_snapshots(self, context):

View File

@ -63,10 +63,8 @@ class QuotaClassSetsController(object):
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
return self._format_quota_set(
id,
QUOTAS.get_class_quotas(context, id)
)
return self._format_quota_set(id,
QUOTAS.get_class_quotas(context, id))
@wsgi.serializers(xml=QuotaClassTemplate)
def update(self, req, id, body):

View File

@ -118,8 +118,8 @@ class Quotas(extensions.ExtensionDescriptor):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
QuotaSetsController(),
member_actions={'defaults': 'GET'})
QuotaSetsController(),
member_actions={'defaults': 'GET'})
resources.append(res)
return resources

View File

@ -140,10 +140,10 @@ class Types_extra_specs(extensions.ExtensionDescriptor):
def get_resources(self):
resources = []
res = extensions.ResourceExtension('extra_specs',
VolumeTypeExtraSpecsController(),
parent=dict(
member_name='type',
collection_name='types'))
VolumeTypeExtraSpecsController(),
parent=dict(member_name='type',
collection_name='types')
)
resources.append(res)
return resources

View File

@ -31,7 +31,7 @@ authorize = extensions.extension_authorizer('volume', 'types_manage')
class VolumeTypesManageController(wsgi.Controller):
""" The volume types API controller for the OpenStack API """
"""The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@ -64,7 +64,7 @@ class VolumeTypesManageController(wsgi.Controller):
@wsgi.action("delete")
def _delete(self, req, id):
""" Deletes an existing volume type """
"""Deletes an existing volume type."""
context = req.environ['cinder.context']
authorize(context)
@ -78,7 +78,7 @@ class VolumeTypesManageController(wsgi.Controller):
class Types_manage(extensions.ExtensionDescriptor):
"""Types manage support"""
"""Types manage support."""
name = "TypesManage"
alias = "os-types-manage"

View File

@ -52,7 +52,7 @@ class VolumeToImageSerializer(xmlutil.TemplateBuilder):
class VolumeToImageDeserializer(wsgi.XMLDeserializer):
"""Deserializer to handle xml-formatted requests"""
"""Deserializer to handle xml-formatted requests."""
def default(self, string):
dom = minidom.parseString(string)
action_node = dom.childNodes[0]

View File

@ -31,10 +31,11 @@ from cinder.openstack.common import log as logging
from cinder import wsgi as base_wsgi
use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
use_forwarded_for_opt = cfg.BoolOpt(
'use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
FLAGS = flags.FLAGS
FLAGS.register_opt(use_forwarded_for_opt)

View File

@ -39,7 +39,7 @@ class FaultWrapper(base_wsgi.Middleware):
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
LOG.exception(_("Caught error: %s"), unicode(inner))

View File

@ -49,9 +49,10 @@ class ProjectMapper(APIMapper):
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
routes.Mapper.resource(self,
member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):

View File

@ -16,19 +16,19 @@
"""The hosts admin extension."""
import webob.exc
from xml.dom import minidom
from xml.parsers import expat
from cinder.api.openstack import extensions
from cinder.api.openstack import wsgi
from cinder.api.openstack import xmlutil
from cinder.volume import api as volume_api
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import api as volume_api
from xml.dom import minidom
from xml.parsers import expat
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
@ -176,8 +176,9 @@ class HostController(object):
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
result = self.api.set_host_enabled(context, host=host,
enabled=enabled)
result = self.api.set_host_enabled(context,
host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
@ -230,13 +231,14 @@ class HostController(object):
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
context,
project_id)
resources.append({'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
resources.append(
{'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
@ -254,8 +256,11 @@ class Hosts(extensions.ExtensionDescriptor):
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={'update': 'PUT'},
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
HostController(),
collection_actions={
'update': 'PUT'},
member_actions={
'startup': 'GET',
'shutdown': 'GET',
'reboot': 'GET'})]
return resources

View File

@ -79,7 +79,7 @@ class Request(webob.Request):
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['cinder.best_content_type'] = (content_type or
'application/json')
'application/json')
return self.environ['cinder.best_content_type']
@ -577,8 +577,9 @@ class ResourceExceptionHandler(object):
code=ex_value.code, explanation=unicode(ex_value)))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_('Exception handling resource: %s') % ex_value,
exc_info=exc_info)
LOG.error(_(
'Exception handling resource: %s') %
ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
@ -901,7 +902,7 @@ class Resource(wsgi.Application):
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
@ -1038,17 +1039,16 @@ class Controller(object):
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
_fault_names = {400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""

View File

@ -24,8 +24,9 @@ from cinder.openstack.common import log as logging
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
_option_header_piece_re = re.compile(
r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
@ -171,8 +172,7 @@ class URLMap(paste.urlmap.URLMap):
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url
or path_info.startswith(app_url + '/')):
if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
@ -274,7 +274,7 @@ class URLMap(paste.urlmap.URLMap):
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:

View File

@ -44,8 +44,8 @@ class APIRouter(cinder.api.openstack.APIRouter):
def _setup_routes(self, mapper, ext_mgr):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
controller=self.resources['versions'],
action='show')
controller=self.resources['versions'],
action='show')
mapper.redirect("", "/")

View File

@ -164,15 +164,17 @@ class SnapshotsController(wsgi.Controller):
raise exception.InvalidParameterValue(err=msg)
if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
new_snapshot = self.volume_api.create_snapshot_force(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
new_snapshot = self.volume_api.create_snapshot(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)

View File

@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
""" The volume type & volume types extra specs extension"""
"""The volume type & volume types extra specs extension."""
from webob import exc
@ -50,20 +50,20 @@ class VolumeTypesTemplate(xmlutil.TemplateBuilder):
class VolumeTypesController(wsgi.Controller):
""" The volume types API controller for the OpenStack API """
"""The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@wsgi.serializers(xml=VolumeTypesTemplate)
def index(self, req):
""" Returns the list of volume types """
"""Returns the list of volume types."""
context = req.environ['cinder.context']
vol_types = volume_types.get_all_types(context).values()
return self._view_builder.index(req, vol_types)
@wsgi.serializers(xml=VolumeTypeTemplate)
def show(self, req, id):
""" Return a single volume type item """
"""Return a single volume type item."""
context = req.environ['cinder.context']
try:

View File

@ -296,7 +296,7 @@ class VolumeController(wsgi.Controller):
if req_volume_type:
try:
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
context, req_volume_type)
context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
@ -394,7 +394,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
LOG.debug(log_msg)

View File

@ -44,8 +44,8 @@ class APIRouter(cinder.api.openstack.APIRouter):
def _setup_routes(self, mapper, ext_mgr):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
controller=self.resources['versions'],
action='show')
controller=self.resources['versions'],
action='show')
mapper.redirect("", "/")

View File

@ -164,15 +164,17 @@ class SnapshotsController(wsgi.Controller):
raise exception.InvalidParameterValue(err=msg)
if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
new_snapshot = self.volume_api.create_snapshot_force(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
new_snapshot = self.volume_api.create_snapshot(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)

View File

@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
""" The volume type & volume types extra specs extension"""
"""The volume type & volume types extra specs extension."""
from webob import exc
@ -50,20 +50,20 @@ class VolumeTypesTemplate(xmlutil.TemplateBuilder):
class VolumeTypesController(wsgi.Controller):
""" The volume types API controller for the OpenStack API """
"""The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@wsgi.serializers(xml=VolumeTypesTemplate)
def index(self, req):
""" Returns the list of volume types """
"""Returns the list of volume types."""
context = req.environ['cinder.context']
vol_types = volume_types.get_all_types(context).values()
return self._view_builder.index(req, vol_types)
@wsgi.serializers(xml=VolumeTypeTemplate)
def show(self, req, id):
""" Return a single volume type item """
"""Return a single volume type item."""
context = req.environ['cinder.context']
try:

View File

@ -291,7 +291,7 @@ class VolumeController(wsgi.Controller):
if req_volume_type:
try:
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
context, req_volume_type)
context, req_volume_type)
except exception.VolumeTypeNotFound:
explanation = 'Volume type not found.'
raise exc.HTTPNotFound(explanation=explanation)
@ -389,7 +389,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
LOG.debug(log_msg)

View File

@ -23,8 +23,8 @@ class ViewBuilder(common.ViewBuilder):
def show(self, request, volume_type, brief=False):
"""Trim away extraneous volume type attributes."""
trimmed = dict(id=volume_type.get('id'),
name=volume_type.get('name'),
extra_specs=volume_type.get('extra_specs'))
name=volume_type.get('name'),
extra_specs=volume_type.get('extra_specs'))
return trimmed if brief else dict(volume_type=trimmed)
def index(self, request, volume_types):

View File

@ -38,14 +38,9 @@ class ViewBuilder(object):
version_objs.append({
"id": version['id'],
"status": version['status'],
"links": [
{
"rel": "self",
"href": self.generate_href(req.path),
},
],
"media-types": version['media-types'],
})
"links": [{"rel": "self",
"href": self.generate_href(req.path), }, ],
"media-types": version['media-types'], })
return dict(choices=version_objs)
@ -57,8 +52,7 @@ class ViewBuilder(object):
"id": version['id'],
"status": version['status'],
"updated": version['updated'],
"links": self._build_links(version),
})
"links": self._build_links(version), })
return dict(versions=version_objs)
@ -66,20 +60,15 @@ class ViewBuilder(object):
reval = copy.deepcopy(version)
reval['links'].insert(0, {
"rel": "self",
"href": self.base_url.rstrip('/') + '/',
})
"href": self.base_url.rstrip('/') + '/', })
return dict(version=reval)
def _build_links(self, version_data):
"""Generate a container of links that refer to the provided version."""
href = self.generate_href()
links = [
{
"rel": "self",
"href": href,
},
]
links = [{'rel': 'self',
'href': href, }, ]
return links

View File

@ -26,8 +26,7 @@ LOG = logging.getLogger(__name__)
deprecate_opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal')
]
help='make deprecations fatal')]
FLAGS = flags.FLAGS
FLAGS.register_opts(deprecate_opts)

View File

@ -59,7 +59,7 @@ class RequestContext(object):
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
str(kwargs))
self.user_id = user_id
self.project_id = project_id

View File

@ -61,8 +61,7 @@ db_opts = [
help='Template string to be used to generate volume names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
help='Template string to be used to generate snapshot names'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)
@ -155,8 +154,9 @@ def migration_get(context, migration_id):
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
return IMPL.migration_get_by_instance_and_status(context,
instance_uuid,
status)
def migration_get_all_unconfirmed(context, confirm_window):
@ -378,12 +378,14 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs):
def volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs):
"""Create or update volume type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs)
IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs)
###################
@ -391,8 +393,10 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for the specified volume."""
return IMPL.volume_glance_metadata_create(context, volume_id,
key, value)
return IMPL.volume_glance_metadata_create(context,
volume_id,
key,
value)
def volume_glance_metadata_get(context, volume_id):

View File

@ -20,17 +20,12 @@
"""Implementation of SQLAlchemy backend."""
import datetime
import functools
import uuid
import warnings
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql import func
@ -179,7 +174,7 @@ def model_query(context, *args, **kwargs):
query = query.filter_by(deleted=True)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
query = query.filter_by(project_id=context.project_id)
@ -242,9 +237,12 @@ def service_destroy(context, service_id):
@require_admin_context
def service_get(context, service_id, session=None):
result = model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
first()
result = model_query(
context,
models.Service,
session=session).\
filter_by(id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
@ -263,19 +261,21 @@ def service_get_all(context, disabled=None):
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
return model_query(
context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
result = model_query(
context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
if not result:
raise exception.ServiceNotFound(host=host, topic=topic)
return result
@ -283,9 +283,10 @@ def service_get_by_host_and_topic(context, host, topic):
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
return model_query(
context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
@ -294,11 +295,11 @@ def _service_get_all_topic_subquery(context, session, topic, subq, label):
return model_query(context, models.Service,
func.coalesce(sort_value, 0),
session=session, read_deleted="no").\
filter_by(topic=topic).\
filter_by(disabled=False).\
outerjoin((subq, models.Service.host == subq.c.host)).\
order_by(sort_value).\
all()
filter_by(topic=topic).\
filter_by(disabled=False).\
outerjoin((subq, models.Service.host == subq.c.host)).\
order_by(sort_value).\
all()
@require_admin_context
@ -310,8 +311,8 @@ def service_get_all_volume_sorted(context):
subq = model_query(context, models.Volume.host,
func.sum(models.Volume.size).label(label),
session=session, read_deleted="no").\
group_by(models.Volume.host).\
subquery()
group_by(models.Volume.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
@ -322,9 +323,9 @@ def service_get_all_volume_sorted(context):
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
@ -390,8 +391,8 @@ def _dict_with_extra_specs(inst_type_query):
@require_admin_context
def iscsi_target_count_by_host(context, host):
return model_query(context, models.IscsiTarget).\
filter_by(host=host).\
count()
filter_by(host=host).\
count()
@require_admin_context
@ -414,9 +415,9 @@ def iscsi_target_create_safe(context, values):
def quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
@ -429,8 +430,8 @@ def quota_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
@ -473,9 +474,9 @@ def quota_destroy(context, project_id, resource):
def quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
@ -488,8 +489,8 @@ def quota_class_get_all_by_name(context, class_name):
authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
@ -533,8 +534,8 @@ def quota_class_destroy_all_by_name(context, class_name):
with session.begin():
quota_classes = model_query(context, models.QuotaClass,
session=session, read_deleted="no").\
filter_by(class_name=class_name).\
all()
filter_by(class_name=class_name).\
all()
for quota_class_ref in quota_classes:
quota_class_ref.delete(session=session)
@ -547,9 +548,9 @@ def quota_class_destroy_all_by_name(context, class_name):
def quota_usage_get(context, project_id, resource, session=None):
result = model_query(context, models.QuotaUsage, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
@ -562,8 +563,8 @@ def quota_usage_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
@ -593,8 +594,7 @@ def quota_usage_create(context, project_id, resource, in_use, reserved,
def reservation_get(context, uuid, session=None):
result = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter_by(uuid=uuid).\
first()
filter_by(uuid=uuid).first()
if not result:
raise exception.ReservationNotFound(uuid=uuid)
@ -607,8 +607,7 @@ def reservation_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).all()
result = {'project_id': project_id}
for row in rows:
@ -653,9 +652,9 @@ def _get_quota_usages(context, session):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=context.project_id).\
with_lockmode('update').\
all()
filter_by(project_id=context.project_id).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
@ -798,9 +797,9 @@ def _quota_reservations(session, context, reservations):
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update').\
all()
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update').\
all()
@require_context
@ -844,24 +843,24 @@ def quota_destroy_all_by_project(context, project_id):
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).\
all()
for quota_ref in quotas:
quota_ref.delete(session=session)
quota_usages = model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).\
all()
for quota_usage_ref in quota_usages:
quota_usage_ref.delete(session=session)
reservations = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).\
all()
for reservation_ref in reservations:
reservation_ref.delete(session=session)
@ -874,8 +873,8 @@ def reservation_expire(context):
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter(models.Reservation.expire < current_time).\
all()
filter(models.Reservation.expire < current_time).\
all()
if results:
for reservation in results:
@ -895,10 +894,10 @@ def volume_allocate_iscsi_target(context, volume_id, host):
with session.begin():
iscsi_target_ref = model_query(context, models.IscsiTarget,
session=session, read_deleted="no").\
filter_by(volume=None).\
filter_by(host=host).\
with_lockmode('update').\
first()
filter_by(volume=None).\
filter_by(host=host).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
@ -949,8 +948,8 @@ def volume_data_get_for_host(context, host, session=None):
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
filter_by(host=host).\
first()
filter_by(host=host).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@ -963,8 +962,8 @@ def volume_data_get_for_project(context, project_id, session=None):
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
first()
filter_by(project_id=project_id).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@ -975,19 +974,19 @@ def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
filter_by(volume_id=volume_id).\
update({'volume_id': None})
session.query(models.VolumeMetadata).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
@ -1006,15 +1005,15 @@ def volume_detached(context, volume_id):
def _volume_get_query(context, session=None, project_only=False):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type'))
options(joinedload('volume_metadata')).\
options(joinedload('volume_type'))
@require_context
def volume_get(context, volume_id, session=None):
result = _volume_get_query(context, session=session, project_only=True).\
filter_by(id=volume_id).\
first()
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
@ -1035,10 +1034,10 @@ def volume_get_all_by_host(context, host):
@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
result = model_query(context, models.Volume, read_deleted="no").\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
filter_by(instance_uuid=instance_uuid).\
all()
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
filter_by(instance_uuid=instance_uuid).\
all()
if not result:
return []
@ -1055,8 +1054,8 @@ def volume_get_all_by_project(context, project_id):
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
filter_by(volume_id=volume_id).\
first()
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
@ -1085,7 +1084,7 @@ def volume_update(context, volume_id, values):
def _volume_metadata_get_query(context, volume_id, session=None):
return model_query(context, models.VolumeMetadata,
session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
filter_by(volume_id=volume_id)
@require_context
@ -1113,8 +1112,8 @@ def volume_metadata_delete(context, volume_id, key):
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
result = _volume_metadata_get_query(context, volume_id, session=session).\
filter_by(key=key).\
first()
filter_by(key=key).\
first()
if not result:
raise exception.VolumeMetadataNotFound(metadata_key=key,
@ -1179,19 +1178,19 @@ def snapshot_destroy(context, snapshot_id):
session = get_session()
with session.begin():
session.query(models.Snapshot).\
filter_by(id=snapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
filter_by(id=snapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
filter_by(id=snapshot_id).\
first()
filter_by(id=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
@ -1208,15 +1207,15 @@ def snapshot_get_all(context):
def snapshot_get_all_for_volume(context, volume_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(volume_id=volume_id).all()
filter_by(volume_id=volume_id).all()
@require_context
def snapshot_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.Snapshot).\
filter_by(project_id=project_id).\
all()
filter_by(project_id=project_id).\
all()
@require_context
@ -1227,8 +1226,8 @@ def snapshot_data_get_for_project(context, project_id, session=None):
func.sum(models.Snapshot.volume_size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
first()
filter_by(project_id=project_id).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@ -1268,8 +1267,8 @@ def migration_update(context, id, values):
def migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
@ -1280,9 +1279,9 @@ def migration_get(context, id, session=None):
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
@ -1294,13 +1293,13 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
confirm_window = timeutils.utcnow() - datetime.timedelta(
seconds=confirm_window)
seconds=confirm_window)
return model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
all()
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
all()
##################
@ -1342,9 +1341,9 @@ def volume_type_get_all(context, inactive=False, filters=None):
read_deleted = "yes" if inactive else "no"
rows = model_query(context, models.VolumeTypes,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
order_by("name").\
all()
options(joinedload('extra_specs')).\
order_by("name").\
all()
# TODO(sirp): this patern of converting rows to a result with extra_specs
# is repeated quite a bit, might be worth creating a method for it
@ -1359,9 +1358,9 @@ def volume_type_get_all(context, inactive=False, filters=None):
def volume_type_get(context, id, session=None):
"""Returns a dict describing specific volume_type"""
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
@ -1373,9 +1372,9 @@ def volume_type_get(context, id, session=None):
def volume_type_get_by_name(context, name, session=None):
"""Returns a dict describing specific volume_type"""
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
@ -1391,25 +1390,27 @@ def volume_type_destroy(context, name):
session=session)
volume_type_id = volume_type_ref['id']
session.query(models.VolumeTypes).\
filter_by(id=volume_type_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
filter_by(id=volume_type_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.VolumeTypeExtraSpecs).\
filter_by(volume_type_id=volume_type_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
filter_by(volume_type_id=volume_type_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_get_active_by_window(context, begin, end=None,
project_id=None):
def volume_get_active_by_window(context,
begin,
end=None,
project_id=None):
"""Return volumes that were active during window."""
session = get_session()
query = session.query(models.Volume)
query = query.filter(or_(models.Volume.deleted_at == None,
query = query.filter(or_(models.Volume.deleted_at is None,
models.Volume.deleted_at > begin))
if end:
query = query.filter(models.Volume.created_at < end)
@ -1425,13 +1426,13 @@ def volume_get_active_by_window(context, begin, end=None,
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id)
filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
all()
all()
result = {}
for row in rows:
@ -1453,13 +1454,14 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
def volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
extra_specs_key=key, volume_type_id=volume_type_id)
extra_specs_key=key,
volume_type_id=volume_type_id)
return result
@ -1493,8 +1495,8 @@ def volume_glance_metadata_get(context, volume_id, session=None):
session = get_session()
return session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(deleted=False).all()
filter_by(volume_id=volume_id).\
filter_by(deleted=False).all()
@require_context
@ -1505,8 +1507,8 @@ def volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
session = get_session()
return session.query(models.VolumeGlanceMetadata).\
filter_by(snapshot_id=snapshot_id).\
filter_by(deleted=False).all()
filter_by(snapshot_id=snapshot_id).\
filter_by(deleted=False).all()
@require_context
@ -1523,9 +1525,9 @@ def volume_glance_metadata_create(context, volume_id, key, value,
with session.begin():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
@ -1577,7 +1579,7 @@ def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id,
session = get_session()
metadata = volume_snapshot_glance_metadata_get(context, snapshot_id,
session=session)
session=session)
with session.begin():
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
@ -1628,8 +1630,8 @@ def sm_backend_conf_update(context, sm_backend_id, values):
backend_conf = model_query(context, models.SMBackendConf,
session=session,
read_deleted="yes").\
filter_by(id=sm_backend_id).\
first()
filter_by(id=sm_backend_id).\
first()
if not backend_conf:
raise exception.NotFound(
@ -1648,15 +1650,15 @@ def sm_backend_conf_delete(context, sm_backend_id):
with session.begin():
model_query(context, models.SMBackendConf, session=session,
read_deleted="yes").\
filter_by(id=sm_backend_id).\
delete()
filter_by(id=sm_backend_id).\
delete()
@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
result = model_query(context, models.SMBackendConf, read_deleted="yes").\
filter_by(id=sm_backend_id).\
first()
filter_by(id=sm_backend_id).\
first()
if not result:
raise exception.NotFound(_("No backend config with id "
@ -1668,14 +1670,14 @@ def sm_backend_conf_get(context, sm_backend_id):
@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
return model_query(context, models.SMBackendConf, read_deleted="yes").\
filter_by(sr_uuid=sr_uuid).\
first()
filter_by(sr_uuid=sr_uuid).\
first()
@require_admin_context
def sm_backend_conf_get_all(context):
return model_query(context, models.SMBackendConf, read_deleted="yes").\
all()
all()
####################
@ -1684,7 +1686,7 @@ def sm_backend_conf_get_all(context):
def _sm_flavor_get_query(context, sm_flavor_label, session=None):
return model_query(context, models.SMFlavors, session=session,
read_deleted="yes").\
filter_by(label=sm_flavor_label)
filter_by(label=sm_flavor_label)
@require_admin_context
@ -1716,7 +1718,7 @@ def sm_flavor_get(context, sm_flavor_label):
if not result:
raise exception.NotFound(
_("No sm_flavor called %(sm_flavor)s") % locals())
_("No sm_flavor called %(sm_flavor)s") % locals())
return result
@ -1732,7 +1734,7 @@ def sm_flavor_get_all(context):
def _sm_volume_get_query(context, volume_id, session=None):
return model_query(context, models.SMVolume, session=session,
read_deleted="yes").\
filter_by(id=volume_id)
filter_by(id=volume_id)
def sm_volume_create(context, values):
@ -1760,7 +1762,7 @@ def sm_volume_get(context, volume_id):
if not result:
raise exception.NotFound(
_("No sm_volume with id %(volume_id)s") % locals())
_("No sm_volume with id %(volume_id)s") % locals())
return result

View File

@ -28,23 +28,29 @@ def upgrade(migrate_engine):
# New table
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True),
Column('class_name',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False), index=True),
Column('resource',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('hard_limit', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)),
Column('id', Integer(), primary_key=True),
Column('class_name',
String(length=255,
convert_unicode=True,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False),
index=True),
Column('resource',
String(length=255,
convert_unicode=True,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
Column('hard_limit', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
try:
quota_classes.create()
@ -53,26 +59,27 @@ def upgrade(migrate_engine):
raise
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True),
Column('project_id',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
index=True),
Column('resource',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('in_use', Integer(), nullable=False),
Column('reserved', Integer(), nullable=False),
Column('until_refresh', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)),
Column('id', Integer(), primary_key=True),
Column('project_id',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
index=True),
Column('resource',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('in_use', Integer(), nullable=False),
Column('reserved', Integer(), nullable=False),
Column('until_refresh', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
try:
quota_usages.create()
@ -81,31 +88,37 @@ def upgrade(migrate_engine):
raise
reservations = Table('reservations', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True),
Column('uuid',
String(length=36, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False), nullable=False),
Column('usage_id', Integer(), ForeignKey('quota_usages.id'),
nullable=False),
Column('project_id',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
index=True),
Column('resource',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('delta', Integer(), nullable=False),
Column('expire', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)),
Column('id', Integer(), primary_key=True),
Column('uuid',
String(length=36,
convert_unicode=True,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False),
nullable=False),
Column('usage_id',
Integer(),
ForeignKey('quota_usages.id'),
nullable=False),
Column('project_id',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
index=True),
Column('resource',
String(length=255, convert_unicode=True,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('delta', Integer(), nullable=False),
Column('expire', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
try:
reservations.create()

View File

@ -29,27 +29,31 @@ def upgrade(migrate_engine):
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of tables .
#
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
mysql_engine='InnoDB'
)
snapshots = Table('snapshots', meta,
Column('id', Integer(), primary_key=True, nullable=False),
mysql_engine='InnoDB'
)
volumes = Table('volumes',
meta,
Column('id', Integer(),
primary_key=True, nullable=False),
mysql_engine='InnoDB')
snapshots = Table('snapshots',
meta,
Column('id', Integer(),
primary_key=True, nullable=False),
mysql_engine='InnoDB')
# Create new table
volume_glance_metadata = Table('volume_glance_metadata', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(length=36), ForeignKey('volumes.id')),
Column('snapshot_id', String(length=36),
ForeignKey('snapshots.id')),
Column('key', String(255)),
Column('value', Text),
mysql_engine='InnoDB'
volume_glance_metadata = Table(
'volume_glance_metadata',
meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(length=36), ForeignKey('volumes.id')),
Column('snapshot_id', String(length=36),
ForeignKey('snapshots.id')),
Column('key', String(255)),
Column('value', Text),
mysql_engine='InnoDB'
)
try:

View File

@ -85,7 +85,7 @@ class CinderBase(object):
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict"""
"""Make the model object behave like a dict."""
for k, v in values.iteritems():
setattr(self, k, v)
@ -159,7 +159,7 @@ class Volume(BASE, CinderBase):
class VolumeMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a volume"""
"""Represents a metadata key/value pair for a volume."""
__tablename__ = 'volume_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@ -173,7 +173,7 @@ class VolumeMetadata(BASE, CinderBase):
class VolumeTypes(BASE, CinderBase):
"""Represent possible volume_types of volumes offered"""
"""Represent possible volume_types of volumes offered."""
__tablename__ = "volume_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
@ -187,7 +187,7 @@ class VolumeTypes(BASE, CinderBase):
class VolumeTypeExtraSpecs(BASE, CinderBase):
"""Represents additional specs as key/value pairs for a volume_type"""
"""Represents additional specs as key/value pairs for a volume_type."""
__tablename__ = 'volume_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@ -206,7 +206,7 @@ class VolumeTypeExtraSpecs(BASE, CinderBase):
class VolumeGlanceMetadata(BASE, CinderBase):
"""Glance metadata for a bootable volume"""
"""Glance metadata for a bootable volume."""
__tablename__ = 'volume_glance_metadata'
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), ForeignKey('volumes.id'))
@ -317,7 +317,7 @@ class Snapshot(BASE, CinderBase):
class IscsiTarget(BASE, CinderBase):
"""Represents an iscsi target for a given host"""
"""Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"),
{'mysql_engine': 'InnoDB'})

View File

@ -138,8 +138,8 @@ def get_engine():
_ENGINE.connect()
break
except OperationalError, e:
if (remaining != 'infinite' and remaining == 0) or \
not is_db_connection_error(e.args[0]):
if ((remaining != 'infinite' and remaining == 0) or
not is_db_connection_error(e.args[0])):
raise
return _ENGINE

View File

@ -98,8 +98,7 @@ core_opts = [
help='Directory where cinder binaries are installed'),
cfg.StrOpt('state_path',
default='$pybasedir',
help="Top-level directory for maintaining cinder's state"),
]
help="Top-level directory for maintaining cinder's state"), ]
debug_opts = [
]
@ -122,8 +121,8 @@ global_opts = [
help='A list of the glance api servers available to cinder '
'([hostname|ip]:port)'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number retries when downloading an image from glance'),
default=0,
help='Number retries when downloading an image from glance'),
cfg.StrOpt('scheduler_topic',
default='cinder-scheduler',
help='the topic scheduler nodes listen on'),
@ -217,8 +216,8 @@ global_opts = [
default=60,
help='maximum time since last check-in for up service'),
cfg.StrOpt('volume_api_class',
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth. Supports noauth, keystone, '
@ -228,7 +227,6 @@ global_opts = [
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
cfg.BoolOpt('secure_delete',
default=True,
help='Whether to perform secure delete'),
]
help='Whether to perform secure delete'), ]
FLAGS.register_opts(global_opts)

View File

@ -111,8 +111,8 @@ class GlanceClientWrapper(object):
retry the request according to FLAGS.glance_num_retries.
"""
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
num_attempts = 1 + FLAGS.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
@ -125,12 +125,14 @@ class GlanceClientWrapper(object):
port = self.port
extra = "retrying"
error_msg = _("Error contacting glance server "
"'%(host)s:%(port)s' for '%(method)s', %(extra)s.")
"'%(host)s:%(port)s' for '%(method)s', "
"%(extra)s.")
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg, locals())
raise exception.GlanceConnectionFailed(
host=host, port=port, reason=str(e))
raise exception.GlanceConnectionFailed(host=host,
port=port,
reason=str(e))
LOG.exception(error_msg, locals())
time.sleep(1)
@ -220,8 +222,8 @@ class GlanceImageService(object):
return self._translate_from_glance(recv_service_image_meta)
def update(self, context, image_id, image_meta, data=None,
purge_props=True):
def update(self, context, image_id,
image_meta, data=None, purge_props=True):
"""Modify the given image with the new data."""
image_meta = self._translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
@ -378,7 +380,7 @@ def _reraise_translated_exception():
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
@ -389,7 +391,7 @@ def _translate_image_exception(image_id, exc_value):
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
glanceclient.exc.Unauthorized)):
return exception.NotAuthorized(exc_value)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(exc_value)
@ -419,7 +421,8 @@ def get_remote_image_service(context, image_href):
try:
(image_id, glance_host, glance_port) = _parse_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
host=glance_host, port=glance_port)
host=glance_host,
port=glance_port)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)

View File

@ -214,5 +214,8 @@ class SchedulerDependentManager(Manager):
"""Pass data back to the scheduler at a periodic interval."""
if self.last_capabilities:
LOG.debug(_('Notifying Schedulers of capabilities ...'))
self.scheduler_rpcapi.update_service_capabilities(context,
self.service_name, self.host, self.last_capabilities)
self.scheduler_rpcapi.update_service_capabilities(
context,
self.service_name,
self.host,
self.last_capabilities)

View File

@ -30,8 +30,7 @@ policy_opts = [
help=_('JSON file representing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule checked when requested rule is not found')),
]
help=_('Rule checked when requested rule is not found')), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(policy_opts)

View File

@ -49,8 +49,7 @@ quota_opts = [
help='number of seconds between subsequent usage refreshes'),
cfg.StrOpt('quota_driver',
default='cinder.quota.DbQuotaDriver',
help='default driver to use for quota checks'),
]
help='default driver to use for quota checks'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(quota_opts)
@ -156,9 +155,9 @@ class DbQuotaDriver(object):
continue
quotas[resource.name] = dict(
limit=project_quotas.get(resource.name, class_quotas.get(
resource.name, resource.default)),
)
limit=project_quotas.get(resource.name,
class_quotas.get(resource.name,
resource.default)), )
# Include usages if desired. This is optional because one
# internal consumer of this interface wants to access the
@ -167,8 +166,7 @@ class DbQuotaDriver(object):
usage = project_usages.get(resource.name, {})
quotas[resource.name].update(
in_use=usage.get('in_use', 0),
reserved=usage.get('reserved', 0),
)
reserved=usage.get('reserved', 0), )
return quotas
@ -577,10 +575,10 @@ class QuotaEngine(object):
"""
return self._driver.get_project_quotas(context, self._resources,
project_id,
quota_class=quota_class,
defaults=defaults,
usages=usages)
project_id,
quota_class=quota_class,
defaults=defaults,
usages=usages)
def count(self, context, resource, *args, **kwargs):
"""Count a resource.
@ -729,14 +727,16 @@ class QuotaEngine(object):
def _sync_instances(context, project_id, session):
return dict(zip(('instances', 'cores', 'ram'),
db.instance_data_get_for_project(
context, project_id, session=session)))
db.instance_data_get_for_project(context,
project_id,
session=session)))
def _sync_volumes(context, project_id, session):
return dict(zip(('volumes', 'gigabytes'),
db.volume_data_get_for_project(
context, project_id, session=session)))
db.volume_data_get_for_project(context,
project_id,
session=session)))
QUOTAS = QuotaEngine()
@ -744,8 +744,7 @@ QUOTAS = QuotaEngine()
resources = [
ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
]
ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), ]
QUOTAS.register_resources(resources)

View File

@ -69,4 +69,4 @@ class ChanceScheduler(driver.Scheduler):
updated_volume = driver.volume_update_db(context, volume_id, host)
self.volume_rpcapi.create_volume(context, updated_volume, host,
snapshot_id, image_id)
snapshot_id, image_id)

View File

@ -33,8 +33,7 @@ from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='cinder.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
]
help='The scheduler host manager class to use'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(scheduler_driver_opts)
@ -55,7 +54,7 @@ class Scheduler(object):
def __init__(self):
self.host_manager = importutils.import_object(
FLAGS.scheduler_host_manager)
FLAGS.scheduler_host_manager)
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
def get_host_list(self):
@ -70,7 +69,8 @@ class Scheduler(object):
def update_service_capabilities(self, service_name, host, capabilities):
"""Process a capability update from a service node."""
self.host_manager.update_service_capabilities(service_name,
host, capabilities)
host,
capabilities)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""

View File

@ -36,16 +36,17 @@ from cinder.openstack.common.notifier import api as notifier
LOG = logging.getLogger(__name__)
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='cinder.scheduler.simple.SimpleScheduler',
help='Default driver to use for the scheduler')
scheduler_driver_opt = cfg.StrOpt(
'scheduler_driver',
default='cinder.scheduler.simple.SimpleScheduler',
help='Default driver to use for the scheduler')
FLAGS = flags.FLAGS
FLAGS.register_opt(scheduler_driver_opt)
class SchedulerManager(manager.Manager):
"""Chooses a host to create volumes"""
"""Chooses a host to create volumes."""
RPC_API_VERSION = '1.2'
@ -64,12 +65,13 @@ class SchedulerManager(manager.Manager):
return self.driver.get_service_capabilities()
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None, **kwargs):
host=None, capabilities=None, **kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
self.driver.update_service_capabilities(service_name, host,
capabilities)
self.driver.update_service_capabilities(service_name,
host,
capabilities)
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
@ -86,11 +88,12 @@ class SchedulerManager(manager.Manager):
volume_properties = {'size': size,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id}
request_spec.update({'volume_id': volume_id,
'snapshot_id': snapshot_id,
'image_id': image_id,
'volume_properties': volume_properties,
'volume_type': dict(vol_type).iteritems()})
request_spec.update(
{'volume_id': volume_id,
'snapshot_id': snapshot_id,
'image_id': image_id,
'volume_properties': volume_properties,
'volume_type': dict(vol_type).iteritems()})
self.driver.schedule_create_volume(context, request_spec,
filter_properties)

View File

@ -39,23 +39,26 @@ class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
RPC_API_VERSION = '1.0'
def __init__(self):
super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
default_version=self.RPC_API_VERSION)
super(SchedulerAPI, self).__init__(
topic=FLAGS.scheduler_topic,
default_version=self.RPC_API_VERSION)
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
return self.cast(ctxt, self.make_msg('create_volume',
topic=topic,
volume_id=volume_id,
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties),
version='1.2')
return self.cast(ctxt, self.make_msg(
'create_volume',
topic=topic,
volume_id=volume_id,
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties),
version='1.2')
def update_service_capabilities(self, ctxt, service_name, host,
capabilities):
def update_service_capabilities(self, ctxt,
service_name, host,
capabilities):
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
service_name=service_name, host=host,
capabilities=capabilities))
service_name=service_name, host=host,
capabilities=capabilities))

View File

@ -33,8 +33,7 @@ from cinder import utils
simple_scheduler_opts = [
cfg.IntOpt("max_gigabytes",
default=10000,
help="maximum number of volume gigabytes to allow per host"),
]
help="maximum number of volume gigabytes to allow per host"), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(simple_scheduler_opts)
@ -63,10 +62,11 @@ class SimpleScheduler(chance.ChanceScheduler):
if not utils.service_is_up(service):
raise exception.WillNotSchedule(host=host)
updated_volume = driver.volume_update_db(context, volume_id, host)
self.volume_rpcapi.create_volume(context, updated_volume,
host,
snapshot_id,
image_id)
self.volume_rpcapi.create_volume(context,
updated_volume,
host,
snapshot_id,
image_id)
return None
results = db.service_get_all_volume_sorted(elevated)
@ -81,10 +81,11 @@ class SimpleScheduler(chance.ChanceScheduler):
if utils.service_is_up(service) and not service['disabled']:
updated_volume = driver.volume_update_db(context, volume_id,
service['host'])
self.volume_rpcapi.create_volume(context, updated_volume,
service['host'],
snapshot_id,
image_id)
self.volume_rpcapi.create_volume(context,
updated_volume,
service['host'],
snapshot_id,
image_id)
return None
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)

View File

@ -59,8 +59,7 @@ service_opts = [
help='IP address for OpenStack Volume API to listen'),
cfg.IntOpt('osapi_volume_listen_port',
default=8776,
help='port for os volume api to listen'),
]
help='port for os volume api to listen'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(service_opts)
@ -305,7 +304,7 @@ class Service(object):
state_catalog['availability_zone'] = zone
db.service_update(ctxt,
self.service_id, state_catalog)
self.service_id, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):

View File

@ -46,8 +46,7 @@ test_opts = [
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
help='should we use everything for testing'),
]
help='should we use everything for testing'), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)

View File

@ -131,8 +131,10 @@ class _Win32Colorizer(object):
"""
def __init__(self, stream):
import win32console as win
red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN,
win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY)
red, green, blue, bold = (win.FOREGROUND_RED,
win.FOREGROUND_GREEN,
win.FOREGROUND_BLUE,
win.FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE)
self._colors = {
@ -143,8 +145,7 @@ class _Win32Colorizer(object):
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold
}
'white': red | green | blue | bold}
def supported(cls, stream=sys.stdout):
try:
@ -314,10 +315,10 @@ class CinderTestRunner(core.TextTestRunner):
def _makeResult(self):
return CinderTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config,
show_elapsed=self.show_elapsed)
self.descriptions,
self.verbosity,
self.config,
show_elapsed=self.show_elapsed)
def _writeSlowTests(self, result_):
# Pare out 'fast' tests
@ -359,9 +360,9 @@ def run():
plugins=core.DefaultPluginManager())
runner = CinderTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c,
show_elapsed=not hide_elapsed)
verbosity=c.verbosity,
config=c,
show_elapsed=not hide_elapsed)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))

View File

@ -33,17 +33,15 @@ UUID2 = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {
'id': UUID1,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake',
'progress': '0%'
}
return {'id': UUID1,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake',
'progress': '0%'}
def fake_snapshot_get(self, context, snapshot_id):
@ -80,7 +78,7 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
def assertSnapshotAttributes(self, snapshot, project_id, progress):
self.assertEqual(snapshot.get('%sproject_id' % self.prefix),
project_id)
project_id)
self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress)
def test_show(self):
@ -89,8 +87,8 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
self.assertEqual(res.status_int, 200)
self.assertSnapshotAttributes(self._get_snapshot(res.body),
project_id='fake',
progress='0%')
project_id='fake',
progress='0%')
def test_detail(self):
url = '/v1/fake/snapshots/detail'
@ -99,8 +97,8 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
self.assertEqual(res.status_int, 200)
for i, snapshot in enumerate(self._get_snapshots(res.body)):
self.assertSnapshotAttributes(snapshot,
project_id='fake',
progress='0%')
project_id='fake',
progress='0%')
def test_no_instance_passthrough_404(self):

View File

@ -59,7 +59,7 @@ class VolumeActionsTest(test.TestCase):
app = fakes.wsgi_app()
for _action in self._actions:
req = webob.Request.blank('/v1/fake/volumes/%s/action' %
self.UUID)
self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({_action: None})
req.content_type = 'application/json'
@ -153,15 +153,15 @@ class VolumeImageActionsTest(test.TestCase):
req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
res_dict = self.controller._volume_upload_image(req, id, body)
expected = {'os-volume_upload_image': {'id': id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'status': 'uploading',
'display_description': 'displaydesc',
'size': 1,
'volume_type': {'name': 'vol_type_name'},
'image_id': 1,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'image_name'}}
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'status': 'uploading',
'display_description': 'displaydesc',
'size': 1,
'volume_type': {'name': 'vol_type_name'},
'image_id': 1,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'image_name'}}
self.assertDictMatch(res_dict, expected)
def test_copy_volume_to_image_volumenotfound(self):
@ -185,7 +185,7 @@ class VolumeImageActionsTest(test.TestCase):
def test_copy_volume_to_image_invalidvolume(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
metadata, force):
metadata, force):
raise exception.InvalidVolume
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
@ -206,7 +206,7 @@ class VolumeImageActionsTest(test.TestCase):
def test_copy_volume_to_image_valueerror(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
metadata, force):
metadata, force):
raise ValueError
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
@ -227,7 +227,7 @@ class VolumeImageActionsTest(test.TestCase):
def test_copy_volume_to_image_remoteerror(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
metadata, force):
metadata, force):
raise rpc_common.RemoteError
self.stubs.Set(volume_api.API,
"copy_volume_to_image",

View File

@ -121,7 +121,7 @@ class VolumeTenantAttributeTest(test.TestCase):
res = req.get_response(app())
vol = etree.XML(res.body)
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v1}tenant_id')
'volume_tenant_attribute/api/v1}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)
def test_list_volumes_detail_xml(self):
@ -133,5 +133,5 @@ class VolumeTenantAttributeTest(test.TestCase):
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v1}tenant_id')
'volume_tenant_attribute/api/v1}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)

View File

@ -74,7 +74,7 @@ class Foxinsocks(extensions.ExtensionDescriptor):
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
FoxInSocksController())
resources.append(resource)
return resources
@ -84,8 +84,7 @@ class Foxinsocks(extensions.ExtensionDescriptor):
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
(FoxInSocksFlavorBandsControllerExtension, 'flavors'),
]
(FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)

View File

@ -69,7 +69,7 @@ class TestFaults(test.TestCase):
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {

View File

@ -62,7 +62,7 @@ def fake_wsgi(self, req):
def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
use_no_auth=False, ext_mgr=None):
use_no_auth=False, ext_mgr=None):
if not inner_app_v1:
inner_app_v1 = router.APIRouter(ext_mgr)
@ -72,13 +72,13 @@ def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v1 = fault.FaultWrapper(auth.InjectContext(ctxt,
inner_app_v1))
inner_app_v1))
elif use_no_auth:
api_v1 = fault.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v1)))
limits.RateLimitingMiddleware(inner_app_v1)))
else:
api_v1 = fault.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v1)))
limits.RateLimitingMiddleware(inner_app_v1)))
mapper = urlmap.URLMap()
mapper['/v1'] = api_v1
@ -125,8 +125,10 @@ class HTTPRequest(webob.Request):
kwargs['base_url'] = 'http://localhost/v1'
use_admin_context = kwargs.pop('use_admin_context', False)
out = webob.Request.blank(*args, **kwargs)
out.environ['cinder.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
out.environ['cinder.context'] = FakeRequestContext(
'fake_user',
'fake',
is_admin=use_admin_context)
return out
@ -254,16 +256,14 @@ def stub_volume_get_all_by_project(self, context, search_opts=None):
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot = {'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'}
snapshot.update(kwargs)
return snapshot

View File

@ -441,10 +441,9 @@ class ResourceTest(test.TestCase):
extended = ControllerExtended()
resource.register_actions(extended)
self.assertEqual({
'fooAction': extended._action_foo,
'barAction': extended._action_bar,
}, resource.wsgi_actions)
self.assertEqual({'fooAction': extended._action_foo,
'barAction': extended._action_bar, },
resource.wsgi_actions)
def test_register_extensions(self):
class Controller(object):

View File

@ -15,17 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import datetime
import webob.exc
from cinder.api.openstack.volume.contrib import hosts as os_hosts
from cinder import context
import datetime
from cinder import db
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
from lxml import etree
FLAGS = flags.FLAGS
@ -34,18 +34,18 @@ created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
curr_time = timeutils.utcnow()
SERVICE_LIST = [
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'}]
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'}]
LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
@ -97,7 +97,7 @@ class HostTestCase(test.TestCase):
cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
expected = [host for host in LIST_RESPONSE
if host['service'] == 'cinder-volume']
if host['service'] == 'cinder-volume']
self.assertEqual(cinder_hosts, expected)
def test_list_hosts_with_zone(self):
@ -107,19 +107,22 @@ class HostTestCase(test.TestCase):
def test_bad_status_value(self):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body={'status': 'bad'})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body={'status': 'disablabc'})
self.req, 'test.host.1', body={'status': 'bad'})
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
self.req,
'test.host.1',
body={'status': 'disablabc'})
def test_bad_update_key(self):
bad_body = {'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
self.req, 'test.host.1', body=bad_body)
def test_bad_update_key_and_correct_udpate_key(self):
bad_body = {'status': 'disable', 'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
self.req, 'test.host.1', body=bad_body)
def test_good_udpate_keys(self):
body = {'status': 'disable'}
@ -127,8 +130,11 @@ class HostTestCase(test.TestCase):
self.req, 'test.host.1', body=body)
def test_bad_host(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
self.req, 'bogus_host_name', body={'disabled': 0})
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
'bogus_host_name',
body={'disabled': 0})
def test_show_forbidden(self):
self.req.environ['cinder.context'].is_admin = False

View File

@ -44,10 +44,7 @@ class ExtensionControllerTest(ExtensionTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.ext_list = [
"TypesManage",
"TypesExtraSpecs",
]
self.ext_list = ["TypesManage", "TypesExtraSpecs", ]
self.ext_list.sort()
def test_list_extensions_json(self):
@ -70,15 +67,13 @@ class ExtensionControllerTest(ExtensionTestCase):
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
self.assertEqual(fox_ext, {
'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension',
'alias': 'FOXNSOX',
'links': []
},
)
self.assertEqual(
fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension',
'alias': 'FOXNSOX',
'links': []}, )
for ext in data['extensions']:
url = '/fake/extensions/%s' % ext['alias']
@ -94,13 +89,14 @@ class ExtensionControllerTest(ExtensionTestCase):
self.assertEqual(200, response.status_int)
data = jsonutils.loads(response.body)
self.assertEqual(data['extension'], {
"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
"name": "Fox In Socks",
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension",
"alias": "FOXNSOX",
"links": []})
self.assertEqual(
data['extension'],
{"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
"name": "Fox In Socks",
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension",
"alias": "FOXNSOX",
"links": []})
def test_get_non_existing_extension_json(self):
app = router.APIRouter()
@ -125,10 +121,12 @@ class ExtensionControllerTest(ExtensionTestCase):
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
self.assertEqual(fox_ext.get('namespace'),
self.assertEqual(
fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
self.assertEqual(
fox_ext.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension')
xmlutil.validate_schema(root, 'extensions')
@ -145,10 +143,12 @@ class ExtensionControllerTest(ExtensionTestCase):
self.assertEqual(root.tag.split('extension')[0], NS)
self.assertEqual(root.get('alias'), 'FOXNSOX')
self.assertEqual(root.get('name'), 'Fox In Socks')
self.assertEqual(root.get('namespace'),
self.assertEqual(
root.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(root.findtext('{0}description'.format(NS)),
self.assertEqual(
root.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension')
xmlutil.validate_schema(root, 'extension')

View File

@ -22,17 +22,11 @@ from cinder import test
class SelectorTest(test.TestCase):
obj_for_test = {
'test': {
'name': 'test',
'values': [1, 2, 3],
'attrs': {
'foo': 1,
'bar': 2,
'baz': 3,
},
},
}
obj_for_test = {'test': {'name': 'test',
'values': [1, 2, 3],
'attrs': {'foo': 1,
'bar': 2,
'baz': 3, }, }, }
def test_empty_selector(self):
sel = xmlutil.Selector()
@ -217,11 +211,9 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
children = [xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'), ]
# Extend the parent by those children
elem.extend(children)
@ -234,10 +226,8 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
children2 = [
xmlutil.TemplateElement('child4'),
xmlutil.TemplateElement('child1'),
]
children2 = [xmlutil.TemplateElement('child4'),
xmlutil.TemplateElement('child1'), ]
self.assertRaises(KeyError, elem.extend, children2)
# Also ensure that child4 was not added
@ -252,11 +242,9 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
children = [xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'), ]
# Extend the parent by those children
elem.extend(children)
@ -287,11 +275,9 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
children = [xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'), ]
# Extend the parent by those children
elem.extend(children)
@ -384,10 +370,8 @@ class TemplateElementTest(test.TestCase):
master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
# Create a couple of slave template element
slave_elems = [
xmlutil.TemplateElement('test', attr2=attrs['attr2']),
xmlutil.TemplateElement('test', attr3=attrs['attr3']),
]
slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
# Try the render
elem = master_elem._render(None, None, slave_elems, None)
@ -589,22 +573,13 @@ class TemplateTest(test.TestCase):
def test__serialize(self):
# Our test object to serialize
obj = {
'test': {
'name': 'foobar',
'values': [1, 2, 3, 4],
'attrs': {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
},
'image': {
'name': 'image_foobar',
'id': 42,
},
},
}
obj = {'test': {'name': 'foobar',
'values': [1, 2, 3, 4],
'attrs': {'a': 1,
'b': 2,
'c': 3,
'd': 4, },
'image': {'name': 'image_foobar', 'id': 42, }, }, }
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',

View File

@ -159,10 +159,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
},
],
"absolute": {
"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5,
},
"absolute": {"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
@ -776,26 +774,26 @@ class LimitsViewBuilderTest(test.TestCase):
"injected_file_content_bytes": 5}
def test_build_limits(self):
expected_limits = {"limits": {
"rate": [{
"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-07-21T18:17:06Z"}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": "2011-07-21T18:17:06Z"}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5}}}
tdate = "2011-07-21T18:17:06Z"
expected_limits = \
{"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
@ -827,27 +825,27 @@ class LimitsXMLSerializationTest(test.TestCase):
serializer = limits.LimitsTemplate()
fixture = {
"limits": {
"rate": [{
"uri": "*",
"regex": ".*",
"limit": [{
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": "2011-12-15T22:42:45Z"}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}
"rate": [{
"uri": "*",
"regex": ".*",
"limit": [{
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": "2011-12-15T22:42:45Z"}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
@ -873,8 +871,9 @@ class LimitsXMLSerializationTest(test.TestCase):
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
self.assertEqual(limit.get(key),
str(fixture['limits']['rate'][i]['limit'][j][key]))
self.assertEqual(
limit.get(key),
str(fixture['limits']['rate'][i]['limit'][j][key]))
def test_index_no_limits(self):
serializer = limits.LimitsTemplate()

View File

@ -36,15 +36,13 @@ INVALID_UUID = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {
'id': UUID,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
}
return {'id': UUID,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description', }
def stub_snapshot_create(self, context, volume_id, name, description):
@ -81,47 +79,48 @@ class SnapshotApiTest(test.TestCase):
self.stubs.Set(db, 'snapshot_get_all_by_project',
fakes.stub_snapshot_get_all_by_project)
self.stubs.Set(db, 'snapshot_get_all',
fakes.stub_snapshot_get_all)
fakes.stub_snapshot_get_all)
def test_snapshot_create(self):
self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
"force": False,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
"force": False,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
snapshot['display_name'])
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
snapshot['display_description'])
snapshot['display_description'])
def test_snapshot_create_force(self):
self.stubs.Set(volume.api.API, "create_snapshot_force",
stub_snapshot_create)
self.stubs.Set(volume.api.API,
"create_snapshot_force",
stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
"force": True,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
"force": True,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
snapshot['display_name'])
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
snapshot['display_description'])
snapshot['display_description'])
snapshot = {"volume_id": "12",
"force": "**&&^^%%$$##@@",
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
"force": "**&&^^%%$$##@@",
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
self.assertRaises(exception.InvalidParameterValue,
@ -133,9 +132,7 @@ class SnapshotApiTest(test.TestCase):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
self.stubs.Set(volume.api.API, "update_snapshot",
fakes.stub_snapshot_update)
updates = {
"display_name": "Updated Test Name",
}
updates = {"display_name": "Updated Test Name", }
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
res_dict = self.controller.update(req, UUID, body)
@ -207,8 +204,9 @@ class SnapshotApiTest(test.TestCase):
snapshot_id)
def test_snapshot_detail(self):
self.stubs.Set(volume.api.API, "get_all_snapshots",
stub_snapshot_get_all)
self.stubs.Set(volume.api.API,
"get_all_snapshots",
stub_snapshot_get_all)
req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
resp_dict = self.controller.detail(req)
@ -350,8 +348,7 @@ class SnapshotSerializerTest(test.TestCase):
created_at=datetime.datetime.now(),
display_name='snap_name',
display_description='snap_desc',
volume_id='vol_id',
)
volume_id='vol_id', )
text = serializer.serialize(dict(snapshot=raw_snapshot))
print text
@ -361,24 +358,20 @@ class SnapshotSerializerTest(test.TestCase):
def test_snapshot_index_detail_serializer(self):
serializer = snapshots.SnapshotsTemplate()
raw_snapshots = [dict(
id='snap1_id',
status='snap1_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap1_name',
display_description='snap1_desc',
volume_id='vol1_id',
),
dict(
id='snap2_id',
status='snap2_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap2_name',
display_description='snap2_desc',
volume_id='vol2_id',
)]
raw_snapshots = [dict(id='snap1_id',
status='snap1_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap1_name',
display_description='snap1_desc',
volume_id='vol1_id', ),
dict(id='snap2_id',
status='snap2_status',
size=1024,
created_at=datetime.datetime.now(),
display_name='snap2_name',
display_description='snap2_desc',
volume_id='vol2_id', )]
text = serializer.serialize(dict(snapshots=raw_snapshots))
print text

View File

@ -40,15 +40,13 @@ def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != TEST_SNAPSHOT_UUID:
raise exception.NotFound
return {
'id': snapshot_id,
return {'id': snapshot_id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
}
'display_description': 'Default description', }
class VolumeApiTest(test.TestCase):
@ -89,7 +87,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
1, 1, 1),
'size': 100}}
self.assertEqual(res_dict, expected)
@ -105,8 +103,7 @@ class VolumeApiTest(test.TestCase):
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"volume_type": db_vol_type['name'],
}
"volume_type": db_vol_type['name'], }
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
@ -128,28 +125,29 @@ class VolumeApiTest(test.TestCase):
def test_volume_create_with_image_id(self):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "nova",
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
"imageRef": test_id}
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'nova',
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': '1'}
}
'display_description': 'Volume Test Desc',
'availability_zone': 'nova',
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'image_id': test_id,
'snapshot_id': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': '1'}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
@ -160,11 +158,11 @@ class VolumeApiTest(test.TestCase):
self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
"snapshot_id": TEST_SNAPSHOT_UUID}
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
"snapshot_id": TEST_SNAPSHOT_UUID}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
@ -176,10 +174,10 @@ class VolumeApiTest(test.TestCase):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
@ -191,10 +189,10 @@ class VolumeApiTest(test.TestCase):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
@ -305,7 +303,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
@ -328,7 +326,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
@ -410,7 +408,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
@ -433,7 +431,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
@ -460,7 +458,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
@ -552,20 +550,16 @@ class VolumeSerializerTest(test.TestCase):
size=1024,
availability_zone='vol_availability',
created_at=datetime.datetime.now(),
attachments=[dict(
id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo')],
attachments=[dict(id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
metadata=dict(
foo='bar',
baz='quux',
),
)
metadata=dict(foo='bar',
baz='quux', ), )
text = serializer.serialize(dict(volume=raw_volume))
print text
@ -575,46 +569,36 @@ class VolumeSerializerTest(test.TestCase):
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(
id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
created_at=datetime.datetime.now(),
attachments=[dict(
id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1')],
display_name='vol1_name',
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
metadata=dict(
foo='vol1_foo',
bar='vol1_bar',
),
),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=datetime.datetime.now(),
attachments=[dict(
id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
metadata=dict(
foo='vol2_foo',
bar='vol2_bar',
),
)]
raw_volumes = [dict(id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1')],
display_name='vol1_name',
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
metadata=dict(foo='vol1_foo',
bar='vol1_bar', ), ),
dict(id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
metadata=dict(foo='vol2_foo',
bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
print text
@ -637,11 +621,7 @@ class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
},
}
expected = {"volume": {"size": "1", }, }
self.assertEquals(request['body'], expected)
def test_display_name(self):

View File

@ -159,10 +159,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
},
],
"absolute": {
"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5,
},
"absolute": {"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
@ -590,7 +588,6 @@ class WsgiLimiterTest(BaseLimitTestSuite):
def test_invalid_methods(self):
"""Only POSTs should work."""
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
@ -776,44 +773,26 @@ class LimitsViewBuilderTest(test.TestCase):
"injected_file_content_bytes": 5}
def test_build_limits(self):
tdate = "2011-07-21T18:17:06Z"
expected_limits = {
"limits": {
"rate": [
{
"uri": "*",
"regex": ".*",
"limit": [
{
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-07-21T18:17:06Z"
}
]
},
{
"uri": "*/volumes",
"regex": "^/volumes",
"limit": [
{
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": "2011-07-21T18:17:06Z"
}
]
}
],
"absolute": {
"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5
}
}
}
"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
@ -842,30 +821,26 @@ class LimitsXMLSerializationTest(test.TestCase):
self.assertTrue(has_dec)
def test_index(self):
tdate = "2011-12-15T22:42:45Z"
serializer = limits.LimitsTemplate()
fixture = {
"limits": {
"rate": [{
"uri": "*",
"regex": ".*",
"limit": [{
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": "2011-12-15T22:42:45Z"}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}
fixture = {"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
root = etree.XML(output)
@ -891,8 +866,9 @@ class LimitsXMLSerializationTest(test.TestCase):
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
self.assertEqual(limit.get(key),
str(fixture['limits']['rate'][i]['limit'][j][key]))
self.assertEqual(
limit.get(key),
str(fixture['limits']['rate'][i]['limit'][j][key]))
def test_index_no_limits(self):
serializer = limits.LimitsTemplate()

View File

@ -614,33 +614,24 @@ class VolumeSerializerTest(test.TestCase):
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
metadata=dict(
foo='vol1_foo',
bar='vol1_bar',
),
),
metadata=dict(foo='vol1_foo',
bar='vol1_bar', ), ),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=datetime.datetime.now(),
attachments=[
dict(
id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
attachments=[dict(id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
metadata=dict(
foo='vol2_foo',
bar='vol2_bar',
),
)
]
metadata=dict(foo='vol2_foo',
bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
print text

View File

@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite"""
"""Stubouts, mocks and fixtures for the test suite."""
from cinder import db

View File

@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service"""
"""Implementation of a fake image service."""
import copy
import datetime
@ -44,101 +44,101 @@ class _FakeImageService(object):
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'}}
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)

View File

@ -31,7 +31,7 @@ from cinder.tests.glance import stubs as glance_stubs
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object"""
"""Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
@ -109,11 +109,11 @@ class TestGlanceImageService(test.TestCase):
def _fake_create_glance_client(context, host, port, version):
return client
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
self.stubs.Set(glance,
'_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper(
'fake', 'fake_host', 9292)
client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
@ -131,7 +131,7 @@ class TestGlanceImageService(test.TestCase):
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
"""Ensure instance_id is persisted as an image-property"""
"""Ensure instance_id is persisted as an image-property."""
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
@ -458,7 +458,10 @@ class TestGlanceImageService(test.TestCase):
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
service.download, self.context, image_id, writer)
service.download,
self.context,
image_id,
writer)
# Now lets enable retries. No exception should happen now.
tries = [0]
@ -520,19 +523,19 @@ class TestGlanceImageService(test.TestCase):
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(service, same_id) = glance.get_remote_image_service(
self.context, image_id)
(service, same_id) = glance.get_remote_image_service(self.context,
image_id)
self.assertEquals(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
(service, same_id) = glance.get_remote_image_service(
self.context, image_url)
(service, same_id) = glance.get_remote_image_service(self.context,
image_url)
self.assertEquals(same_id, image_id)
self.assertEquals(service._client.host,
'something-less-likely')
'something-less-likely')
def _create_failing_glance_client(info):

View File

@ -53,7 +53,7 @@ class OpenStackApiAuthorizationException(OpenStackApiException):
if not message:
message = _("Authorization error")
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
response)
class OpenStackApiNotFoundException(OpenStackApiException):
@ -157,8 +157,8 @@ class TestOpenStackClient(object):
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message=_("Unexpected status code"),
response=response)
message=_("Unexpected status code"),
response=response)
return response

View File

@ -21,7 +21,7 @@ CALLED_FUNCTION = []
def example_decorator(name, function):
""" decorator for notify which is used from utils.monkey_patch()
"""decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param function: - object of the function

View File

@ -64,13 +64,18 @@ class SchedulerRpcAPITestCase(test.TestCase):
def test_update_service_capabilities(self):
self._test_scheduler_api('update_service_capabilities',
rpc_method='fanout_cast', service_name='fake_name',
host='fake_host', capabilities='fake_capabilities')
rpc_method='fanout_cast',
service_name='fake_name',
host='fake_host',
capabilities='fake_capabilities')
def test_create_volume(self):
self._test_scheduler_api('create_volume',
rpc_method='cast', topic='topic', volume_id='volume_id',
snapshot_id='snapshot_id', image_id='image_id',
request_spec='fake_request_spec',
filter_properties='filter_properties',
version='1.2')
rpc_method='cast',
topic='topic',
volume_id='volume_id',
snapshot_id='snapshot_id',
image_id='image_id',
request_spec='fake_request_spec',
filter_properties='filter_properties',
version='1.2')

View File

@ -35,7 +35,7 @@ FLAGS = flags.FLAGS
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager"""
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
@ -63,29 +63,34 @@ class SchedulerManagerTestCase(test.TestCase):
host = 'fake_host'
self.mox.StubOutWithMock(self.manager.driver,
'update_service_capabilities')
'update_service_capabilities')
# Test no capabilities passes empty dictionary
self.manager.driver.update_service_capabilities(service_name,
host, {})
host, {})
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host)
result = self.manager.update_service_capabilities(
self.context,
service_name=service_name,
host=host)
self.mox.VerifyAll()
self.mox.ResetAll()
# Test capabilities passes correctly
capabilities = {'fake_capability': 'fake_value'}
self.manager.driver.update_service_capabilities(
service_name, host, capabilities)
self.manager.driver.update_service_capabilities(service_name,
host,
capabilities)
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host,
capabilities=capabilities)
result = self.manager.update_service_capabilities(
self.context,
service_name=service_name, host=host,
capabilities=capabilities)
def test_create_volume_exception_puts_volume_in_error_state(self):
""" Test that a NoValideHost exception for create_volume puts
the volume in 'error' state and eats the exception.
"""Test that a NoValideHost exception for create_volume.
Puts the volume in 'error' state and eats the exception.
"""
fake_volume_id = 1
self._mox_schedule_method_helper('schedule_create_volume')
@ -95,7 +100,8 @@ class SchedulerManagerTestCase(test.TestCase):
volume_id = fake_volume_id
request_spec = {'volume_id': fake_volume_id}
self.manager.driver.schedule_create_volume(self.context,
self.manager.driver.schedule_create_volume(
self.context,
request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
db.volume_update(self.context, fake_volume_id, {'status': 'error'})
@ -112,11 +118,11 @@ class SchedulerManagerTestCase(test.TestCase):
setattr(self.manager.driver, method_name, stub_method)
self.mox.StubOutWithMock(self.manager.driver,
method_name)
method_name)
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class"""
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
@ -132,14 +138,16 @@ class SchedulerTestCase(test.TestCase):
host = 'fake_host'
self.mox.StubOutWithMock(self.driver.host_manager,
'update_service_capabilities')
'update_service_capabilities')
capabilities = {'fake_capability': 'fake_value'}
self.driver.host_manager.update_service_capabilities(
service_name, host, capabilities)
self.driver.host_manager.update_service_capabilities(service_name,
host,
capabilities)
self.mox.ReplayAll()
result = self.driver.update_service_capabilities(service_name,
host, capabilities)
host,
capabilities)
def test_hosts_up(self):
service1 = {'host': 'host1'}
@ -150,7 +158,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(utils, 'service_is_up')
db.service_get_all_by_topic(self.context,
self.topic).AndReturn(services)
self.topic).AndReturn(services)
utils.service_is_up(service1).AndReturn(False)
utils.service_is_up(service2).AndReturn(True)
@ -168,12 +176,12 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods"""
"""Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
@ -185,7 +193,8 @@ class SchedulerDriverModuleTestCase(test.TestCase):
timeutils.utcnow().AndReturn('fake-now')
db.volume_update(self.context, 31337,
{'host': 'fake_host', 'scheduled_at': 'fake-now'})
{'host': 'fake_host',
'scheduled_at': 'fake-now'})
self.mox.ReplayAll()
driver.volume_update_db(self.context, 31337, 'fake_host')

View File

@ -32,11 +32,12 @@ class HpSanISCSITestCase(test.TestCase):
self.connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'host': 'fakehost'}
self.properties = {'target_discoverd': True,
'target_portal': '10.0.1.6:3260',
'target_iqn':
'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
'volume_id': 1}
self.properties = {
'target_discoverd': True,
'target_portal': '10.0.1.6:3260',
'target_iqn':
'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
'volume_id': 1}
def tearDown(self):
super(HpSanISCSITestCase, self).tearDown()

View File

@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint"""
"""Unit tests for the API endpoint."""
import httplib
import StringIO
@ -25,18 +25,18 @@ import webob
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
"""A fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
"""Returns the socket's internal buffer"""
"""Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
"""A fake httplib.HTTPConnection for boto to use
"""A fake httplib.HTTPConnection for boto.
requests made via this connection actually get translated and routed into
our WSGI app, we then wait for the response and turn it back into
@ -71,5 +71,5 @@ class FakeHttplibConnection(object):
return self.sock.response_string
def close(self):
"""Required for compatibility with boto/tornado"""
"""Required for compatibility with boto/tornado."""
pass

View File

@ -31,15 +31,14 @@ class RootwrapTestCase(test.TestCase):
filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
filters.CommandFilter("/nonexistent/cat", "root"),
filters.CommandFilter("/bin/cat", "root") # Keep this one last
]
filters.CommandFilter("/bin/cat", "root")] # Keep this one last
def test_RegExpFilter_match(self):
usercmd = ["ls", "/root"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertFalse(filtermatch is None)
self.assertEqual(filtermatch.get_command(usercmd),
["/bin/ls", "/root"])
["/bin/ls", "/root"])
def test_RegExpFilter_reject(self):
usercmd = ["ls", "root"]
@ -92,7 +91,7 @@ class RootwrapTestCase(test.TestCase):
self.assertTrue(f.match(usercmd) or f2.match(usercmd))
def test_KillFilter_no_raise(self):
"""Makes sure ValueError from bug 926412 is gone"""
"""Makes sure ValueError from bug 926412 is gone."""
f = filters.KillFilter("root", "")
# Providing anything other than kill should be False
usercmd = ['notkill', 999999]
@ -102,7 +101,7 @@ class RootwrapTestCase(test.TestCase):
self.assertFalse(f.match(usercmd))
def test_KillFilter_deleted_exe(self):
"""Makes sure deleted exe's are killed correctly"""
"""Makes sure deleted exe's are killed correctly."""
# See bug #967931.
def fake_readlink(blah):
return '/bin/commandddddd (deleted)'

View File

@ -63,8 +63,10 @@ class ContextTestCase(test.TestCase):
self.stubs.Set(context.LOG, 'warn', fake_warn)
c = context.RequestContext('user', 'project',
extra_arg1='meow', extra_arg2='wuff')
c = context.RequestContext('user',
'project',
extra_arg1='meow',
extra_arg2='wuff')
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])

View File

@ -62,8 +62,8 @@ class FlagsTestCase(test.TestCase):
def test_long_vs_short_flags(self):
FLAGS.clear()
FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
default='val',
help='desc'))
default='val',
help='desc'))
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
args = flags.parse_args(argv, default_config_files=[])
@ -72,8 +72,8 @@ class FlagsTestCase(test.TestCase):
FLAGS.clear()
FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
default=60,
help='desc'))
default=60,
help='desc'))
args = flags.parse_args(argv, default_config_files=[])
self.assertEqual(FLAGS.duplicate_answer, 60)
self.assertEqual(FLAGS.duplicate_answer_long, 'val')

View File

@ -76,7 +76,7 @@ class TargetAdminTestCase(object):
tgtadm = iscsi.get_target_admin()
tgtadm.set_execute(self.fake_execute)
tgtadm.create_iscsi_target(self.target_name, self.tid,
self.lun, self.path)
self.lun, self.path)
tgtadm.show_target(self.tid, iqn=self.target_name)
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
@ -95,8 +95,8 @@ class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
self.flags(iscsi_helper='tgtadm')
self.flags(volumes_dir=self.persist_tempdir)
self.script_template = "\n".join([
'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
def tearDown(self):
try:
@ -113,9 +113,9 @@ class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
TargetAdminTestCase.setUp(self)
self.flags(iscsi_helper='ietadm')
self.script_template = "\n".join([
'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
'--params Path=%(path)s,Type=fileio',
'ietadm --op show --tid=%(tid)s',
'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
'ietadm --op delete --tid=%(tid)s'])
'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
'--params Path=%(path)s,Type=fileio',
'ietadm --op show --tid=%(tid)s',
'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
'ietadm --op delete --tid=%(tid)s'])

View File

@ -76,7 +76,7 @@ def _have_mysql():
class TestMigrations(test.TestCase):
"""Test sqlalchemy-migrate migrations"""
"""Test sqlalchemy-migrate migrations."""
TEST_DATABASES = {}
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
@ -87,7 +87,7 @@ class TestMigrations(test.TestCase):
DEFAULT_CONFIG_FILE)
MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
REPOSITORY = repository.Repository(
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
def setUp(self):
super(TestMigrations, self).setUp()
@ -256,11 +256,12 @@ class TestMigrations(test.TestCase):
# upgrades successfully.
# Place the database under version control
migration_api.version_control(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
self.assertEqual(migration.INIT_VERSION,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
migration_api.upgrade(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION + 1)
@ -268,7 +269,7 @@ class TestMigrations(test.TestCase):
LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
for version in xrange(migration.INIT_VERSION + 2,
TestMigrations.REPOSITORY.latest + 1):
TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version)
if snake_walk:
@ -300,5 +301,5 @@ class TestMigrations(test.TestCase):
TestMigrations.REPOSITORY,
version)
self.assertEqual(version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
migration_api.db_version(engine,
TestMigrations.REPOSITORY))

View File

@ -578,21 +578,21 @@ RESPONSE_PREFIX = """<?xml version="1.0" encoding="UTF-8"?>
RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
'StorageServiceDatasetProvision']
'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
'StorageServiceDatasetProvision']
iter_count = 0
iter_table = {}
class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""HTTP handler that fakes enough stuff to allow the driver to run"""
"""HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(s):
"""Respond to a GET request."""
@ -622,7 +622,7 @@ class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
out.write('</portType>')
out.write('<binding name="DfmBinding" type="na:DfmInterface">')
out.write('<soap:binding style="document" ' +
'transport="http://schemas.xmlsoap.org/soap/http"/>')
'transport="http://schemas.xmlsoap.org/soap/http"/>')
for api in APIS:
out.write('<operation name="%s">' % api)
out.write('<soap:operation soapAction="urn:%s"/>' % api)
@ -641,7 +641,7 @@ class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
request_xml = s.rfile.read(int(s.headers['Content-Length']))
ntap_ns = 'http://www.netapp.com/management/v1'
nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
'na': ntap_ns}
'na': ntap_ns}
root = etree.fromstring(request_xml)
body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
@ -977,7 +977,7 @@ class NetAppDriverTestCase(test.TestCase):
self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
self.VOLUME_TYPE, self.VOLUME_SIZE)
volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
'id': 0, 'provider_auth': None}
'id': 0, 'provider_auth': None}
updates = self.driver._get_export(volume)
self.assertTrue(updates['provider_location'])
volume['provider_location'] = updates['provider_location']
@ -1193,7 +1193,7 @@ class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
out.write('<binding name="CloudStorageBinding" '
'type="na:CloudStorage">')
out.write('<soap:binding style="document" ' +
'transport="http://schemas.xmlsoap.org/soap/http"/>')
'transport="http://schemas.xmlsoap.org/soap/http"/>')
for api in CMODE_APIS:
out.write('<operation name="%s">' % api)
out.write('<soap:operation soapAction=""/>')
@ -1212,7 +1212,7 @@ class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
request_xml = s.rfile.read(int(s.headers['Content-Length']))
ntap_ns = 'http://cloud.netapp.com/'
nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
'na': ntap_ns}
'na': ntap_ns}
root = etree.fromstring(request_xml)
body = root.xpath('/soapenv:Envelope/soapenv:Body',
@ -1322,24 +1322,18 @@ class FakeCmodeHTTPConnection(object):
class NetAppCmodeISCSIDriverTestCase(test.TestCase):
"""Test case for NetAppISCSIDriver"""
volume = {
'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None
}
snapshot = {
'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
'volume_size': 1, 'project_id': 'project'
}
volume_sec = {
'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None
}
volume = {'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
snapshot = {'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
'volume_size': 1, 'project_id': 'project'}
volume_sec = {'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
def setUp(self):
super(NetAppCmodeISCSIDriverTestCase, self).setUp()
@ -1371,7 +1365,7 @@ class NetAppCmodeISCSIDriverTestCase(test.TestCase):
self.volume['provider_location'] = updates['provider_location']
connector = {'initiator': 'init1'}
connection_info = self.driver.initialize_connection(self.volume,
connector)
connector)
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
properties = connection_info['data']
self.driver.terminate_connection(self.volume, connector)

View File

@ -14,7 +14,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)."""
from cinder import context
from cinder import exception
@ -67,7 +67,7 @@ class FakeResponce(object):
class NetappNfsDriverTestCase(test.TestCase):
"""Test case for NetApp specific NFS clone driver"""
"""Test case for NetApp specific NFS clone driver."""
def setUp(self):
self._driver = netapp_nfs.NetAppNFSDriver()
@ -79,13 +79,11 @@ class NetappNfsDriverTestCase(test.TestCase):
def test_check_for_setup_error(self):
mox = self._mox
drv = self._driver
required_flags = [
'netapp_wsdl_url',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port'
]
required_flags = ['netapp_wsdl_url',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
@ -124,7 +122,7 @@ class NetappNfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted"""
"""Test snapshot can be created and deleted."""
mox = self._mox
drv = self._driver
@ -137,7 +135,7 @@ class NetappNfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot"""
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self._mox
volume = FakeVolume(1)
@ -177,8 +175,8 @@ class NetappNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
drv._volume_not_present(IgnoreArg(),
IgnoreArg()).AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())

View File

@ -113,26 +113,21 @@ class TestNexentaDriver(cinder.test.TestCase):
('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
u'Unable to create iscsi target\n'
u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already'
u' configured\n'
u' itadm create-target failed with error 17\n',
),
u' configured\n'
u' itadm create-target failed with error 17\n', ),
('stmf', 'create_targetgroup', ('cinder/volume1',),
u'Unable to create targetgroup: stmfadm: cinder/volume1:'
u' already exists\n',
),
u' already exists\n', ),
('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'),
u'Unable to add member to targetgroup: stmfadm:'
u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n',
),
u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', ),
('scsidisk', 'create_lu', ('cinder/volume1', {}),
u"Unable to create lu with zvol 'cinder/volume1':\n"
u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n",
),
u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", ),
('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', {
'target_group': 'cinder/volume1', 'lun': '0'}),
'target_group': 'cinder/volume1', 'lun': '0'}),
u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n"
u" stmfadm: view entry exists\n",
),
u" stmfadm: view entry exists\n", ),
]
def _stub_export_method(self, module, method, args, error, fail=False):
@ -150,7 +145,8 @@ class TestNexentaDriver(cinder.test.TestCase):
self._stub_all_export_methods()
self.mox.ReplayAll()
retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
self.assertEquals(retval,
self.assertEquals(
retval,
{'provider_location':
'%s:%s,1 %s%s' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
@ -165,7 +161,9 @@ class TestNexentaDriver(cinder.test.TestCase):
fail=True)
self.mox.ReplayAll()
self.assertRaises(nexenta.NexentaException,
self.drv.create_export, {}, self.TEST_VOLUME_REF)
self.drv.create_export,
{},
self.TEST_VOLUME_REF)
return _test_create_export_fail
for i in range(len(_CREATE_EXPORT_METHODS)):
@ -185,8 +183,8 @@ class TestNexentaDriver(cinder.test.TestCase):
def test_remove_export_fail_0(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1').AndRaise(
nexenta.NexentaException())
self.nms_mock.stmf.destroy_targetgroup(
'cinder/volume1').AndRaise(nexenta.NexentaException())
self.nms_mock.iscsitarget.delete_target('iqn:volume1')
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
@ -194,8 +192,8 @@ class TestNexentaDriver(cinder.test.TestCase):
def test_remove_export_fail_1(self):
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise(
nexenta.NexentaException())
self.nms_mock.iscsitarget.delete_target(
'iqn:volume1').AndRaise(nexenta.NexentaException())
self.mox.ReplayAll()
self.drv.remove_export({}, self.TEST_VOLUME_REF)
@ -205,9 +203,9 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
URL_S = 'https://example.com/'
USER = 'user'
PASSWORD = 'password'
HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode(
':'.join((USER, PASSWORD))),),
'Content-Type': 'application/json'}
HEADERS = {'Authorization': 'Basic %s' % (
base64.b64encode(':'.join((USER, PASSWORD))),),
'Content-Type': 'application/json'}
REQUEST = 'the request'
def setUp(self):
@ -222,21 +220,23 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
def test_call(self):
urllib2.Request(self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
'{"error": null, "result": "the result"}')
self.mox.ReplayAll()
result = self.proxy('arg1', 'arg2')
self.assertEquals("the result", result)
def test_call_deep(self):
urllib2.Request(self.URL,
'{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
' "method": "meth"}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
self.URL,
'{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
' "method": "meth"}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
@ -245,12 +245,14 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
self.assertEquals("the result", result)
def test_call_auto(self):
urllib2.Request(self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(self.URL_S,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
self.URL_S,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = 'EOF in headers'
self.resp_mock.read().AndReturn(
'{"error": null, "result": "the result"}')
@ -260,9 +262,10 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
self.assertEquals("the result", result)
def test_call_error(self):
urllib2.Request(self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = ''
self.resp_mock.read().AndReturn(
'{"error": {"message": "the error"}, "result": "the result"}')
@ -271,9 +274,10 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
self.proxy, 'arg1', 'arg2')
def test_call_fail(self):
urllib2.Request(self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
urllib2.Request(
self.URL,
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
self.HEADERS).AndReturn(self.REQUEST)
self.resp_info_mock.status = 'EOF in headers'
self.proxy.auto = False
self.mox.ReplayAll()

View File

@ -14,7 +14,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NFS driver module"""
"""Unit tests for the NFS driver module."""
import __builtin__
import errno
@ -44,7 +44,7 @@ class DumbVolume(object):
class NfsDriverTestCase(test.TestCase):
"""Test case for NFS driver"""
"""Test case for NFS driver."""
TEST_NFS_EXPORT1 = 'nfs-host1:/export'
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
@ -71,7 +71,7 @@ class NfsDriverTestCase(test.TestCase):
self.stubs.Set(obj, attr_name, stub)
def test_path_exists_should_return_true(self):
"""_path_exists should return True if stat returns 0"""
"""_path_exists should return True if stat returns 0."""
mox = self._mox
drv = self._driver
@ -85,14 +85,17 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_path_exists_should_return_false(self):
"""_path_exists should return True if stat doesn't return 0"""
"""_path_exists should return True if stat doesn't return 0."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\
drv._execute(
'stat',
self.TEST_FILE_NAME, run_as_root=True).\
AndRaise(ProcessExecutionError(
stderr="stat: cannot stat `test.txt': No such file or directory"))
stderr="stat: cannot stat `test.txt': No such file "
"or directory"))
mox.ReplayAll()
@ -101,7 +104,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_local_path(self):
"""local_path common use case"""
"""local_path common use case."""
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv = self._driver
@ -114,7 +117,7 @@ class NfsDriverTestCase(test.TestCase):
drv.local_path(volume))
def test_mount_nfs_should_mount_correctly(self):
"""_mount_nfs common case usage"""
"""_mount_nfs common case usage."""
mox = self._mox
drv = self._driver
@ -144,7 +147,7 @@ class NfsDriverTestCase(test.TestCase):
drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
self.TEST_MNT_POINT, run_as_root=True).\
AndRaise(ProcessExecutionError(
stderr='is busy or already mounted'))
stderr='is busy or already mounted'))
mox.ReplayAll()
@ -162,9 +165,13 @@ class NfsDriverTestCase(test.TestCase):
drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
mox.StubOutWithMock(drv, '_execute')
drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
self.TEST_MNT_POINT, run_as_root=True).\
AndRaise(ProcessExecutionError(stderr='is busy or already mounted'))
drv._execute(
'mount',
'-t',
'nfs',
self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, run_as_root=True).\
AndRaise(ProcessExecutionError(stderr='is busy or '
'already mounted'))
mox.ReplayAll()
@ -175,7 +182,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
"""_mount_nfs should create mountpoint if it doesn't exist"""
"""_mount_nfs should create mountpoint if it doesn't exist."""
mox = self._mox
drv = self._driver
@ -193,7 +200,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_mount_nfs_should_not_create_mountpoint_if_already(self):
"""_mount_nfs should not create mountpoint if it already exists"""
"""_mount_nfs should not create mountpoint if it already exists."""
mox = self._mox
drv = self._driver
@ -210,14 +217,14 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_get_hash_str(self):
"""_get_hash_str should calculation correct value"""
"""_get_hash_str should calculation correct value."""
drv = self._driver
self.assertEqual('2f4f60214cf43c595666dd815f0360a4',
drv._get_hash_str(self.TEST_NFS_EXPORT1))
def test_get_mount_point_for_share(self):
"""_get_mount_point_for_share should calculate correct value"""
"""_get_mount_point_for_share should calculate correct value."""
drv = self._driver
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
@ -226,7 +233,7 @@ class NfsDriverTestCase(test.TestCase):
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_available_capacity_with_df(self):
"""_get_available_capacity should calculate correct value"""
"""_get_available_capacity should calculate correct value."""
mox = self._mox
drv = self._driver
@ -255,7 +262,7 @@ class NfsDriverTestCase(test.TestCase):
delattr(nfs.FLAGS, 'nfs_disk_util')
def test_get_available_capacity_with_du(self):
"""_get_available_capacity should calculate correct value"""
"""_get_available_capacity should calculate correct value."""
mox = self._mox
drv = self._driver
@ -316,7 +323,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_ensure_share_mounted(self):
"""_ensure_share_mounted simple use case"""
"""_ensure_share_mounted simple use case."""
mox = self._mox
drv = self._driver
@ -334,7 +341,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
"""_ensure_shares_mounted should save share if mounted with success"""
"""_ensure_shares_mounted should save share if mounted with success."""
mox = self._mox
drv = self._driver
@ -353,7 +360,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
"""_ensure_shares_mounted should not save share if failed to mount"""
"""_ensure_shares_mounted should not save share if failed to mount."""
mox = self._mox
drv = self._driver
@ -371,7 +378,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_setup_should_throw_error_if_shares_config_not_configured(self):
"""do_setup should throw error if shares config is not configured """
"""do_setup should throw error if shares config is not configured."""
drv = self._driver
nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
@ -380,7 +387,7 @@ class NfsDriverTestCase(test.TestCase):
drv.do_setup, IsA(context.RequestContext))
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed """
"""do_setup should throw error if nfs client is not installed."""
mox = self._mox
drv = self._driver
@ -400,7 +407,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
"""_find_share should throw error if there is no mounted shares"""
"""_find_share should throw error if there is no mounted shares."""
drv = self._driver
drv._mounted_shares = []
@ -409,7 +416,7 @@ class NfsDriverTestCase(test.TestCase):
self.TEST_SIZE_IN_GB)
def test_find_share(self):
"""_find_share simple use case"""
"""_find_share simple use case."""
mox = self._mox
drv = self._driver
@ -429,7 +436,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
"""_find_share should throw error if there is no share to host vol"""
"""_find_share should throw error if there is no share to host vol."""
mox = self._mox
drv = self._driver
@ -499,7 +506,7 @@ class NfsDriverTestCase(test.TestCase):
delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
def test_create_volume_should_ensure_nfs_mounted(self):
"""create_volume should ensure shares provided in config are mounted"""
"""create_volume ensures shares provided in config are mounted."""
mox = self._mox
drv = self._driver
@ -519,7 +526,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_create_volume_should_return_provider_location(self):
"""create_volume should return provider_location with found share """
"""create_volume should return provider_location with found share."""
mox = self._mox
drv = self._driver
@ -540,7 +547,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_delete_volume(self):
"""delete_volume simple test case"""
"""delete_volume simple test case."""
mox = self._mox
drv = self._driver
@ -566,7 +573,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted"""
"""delete_volume should ensure that corresponding share is mounted."""
mox = self._mox
drv = self._driver
@ -586,7 +593,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_delete_should_not_delete_if_provider_location_not_provided(self):
"""delete_volume shouldn't try to delete if provider_location missed"""
"""delete_volume shouldn't delete if provider_location missed."""
mox = self._mox
drv = self._driver
@ -605,7 +612,7 @@ class NfsDriverTestCase(test.TestCase):
mox.VerifyAll()
def test_delete_should_not_delete_if_there_is_no_file(self):
"""delete_volume should not try to delete if file missed"""
"""delete_volume should not try to delete if file missed."""
mox = self._mox
drv = self._driver

View File

@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Cinder"""
"""Test of Policy Engine For Cinder."""
import os.path
import StringIO
@ -147,8 +147,8 @@ class PolicyTestCase(test.TestCase):
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
'fake',
roles=['AdMiN'])
'fake',
roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
@ -180,7 +180,7 @@ class DefaultPolicyTestCase(test.TestCase):
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
@ -188,7 +188,7 @@ class DefaultPolicyTestCase(test.TestCase):
def test_default_not_found(self):
self._set_brain("default_noexist")
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:noexist", {})
self.context, "example:noexist", {})
class ContextIsAdminPolicyTestCase(test.TestCase):

File diff suppressed because it is too large Load Diff

View File

@ -46,22 +46,18 @@ class RBDTestCase(test.TestCase):
self.driver = RBDDriver(execute=fake_execute)
def test_good_locations(self):
locations = [
'rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F',
]
locations = ['rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F', ]
map(self.driver._parse_location, locations)
def test_bad_locations(self):
locations = [
'rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///',
]
locations = ['rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
@ -142,13 +138,14 @@ class ManagedRBDTestCase(DriverTestCase):
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume_id = 1
# creating volume testdata
db.volume_create(self.context, {'id': volume_id,
'updated_at': timeutils.utcnow(),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
db.volume_create(self.context,
{'id': volume_id,
'updated_at': timeutils.utcnow(),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
try:
if clone_works:
self.volume.create_volume(self.context,

View File

@ -42,8 +42,7 @@ test_service_opts = [
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"),
]
help="Port number to bind test service to"), ]
flags.FLAGS.register_opts(test_service_opts)
@ -131,15 +130,15 @@ class ServiceTestCase(test.TestCase):
'report_count': 0,
'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1}
'binary': binary,
'topic': topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
@ -164,15 +163,15 @@ class ServiceTestCase(test.TestCase):
'report_count': 0,
'availability_zone': 'nova'}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1}
'binary': binary,
'topic': topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),

View File

@ -472,8 +472,9 @@ class StorwizeSVCManagementSimulator:
rows.append(["IO_group_name", "io_grp0"])
rows.append(["status", "online"])
rows.append(["mdisk_grp_id", "0"])
rows.append(["mdisk_grp_name",
self._flags["storwize_svc_volpool_name"]])
rows.append([
"mdisk_grp_name",
self._flags["storwize_svc_volpool_name"]])
rows.append(["capacity", cap])
rows.append(["type", "striped"])
rows.append(["formatted", "no"])
@ -900,14 +901,14 @@ class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
LOG.debug(_('Run CLI command: %s') % cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
(stdout, stderr) = ret
LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') %
{'out': stdout, 'err': stderr})
LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') % {
'out': stdout, 'err': stderr})
except exception.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s') % {'out': e.stdout,
'err': e.stderr})
'err': e.stderr})
return ret
@ -964,25 +965,25 @@ class StorwizeSVCDriverTestCase(test.TestCase):
# Check for missing san_ip
self.flags(san_ip=None)
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
if self.USESIM != 1:
# Check for invalid ip
self.flags(san_ip="-1.-1.-1.-1")
self.assertRaises(socket.gaierror,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
# Check for unreachable IP
self.flags(san_ip="1.1.1.1")
self.assertRaises(socket.error,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
def test_storwize_svc_connectivity(self):
# Make sure we detect if the pool doesn't exist
no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999)
self.flags(storwize_svc_volpool_name=no_exist_pool)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
FLAGS.reset()
# Check the case where the user didn't configure IP addresses
@ -990,56 +991,56 @@ class StorwizeSVCDriverTestCase(test.TestCase):
if self.USESIM == 1:
self.sim.error_injection("lsnodecanister", "header_mismatch")
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
self.sim.error_injection("lsnodecanister", "remove_field")
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
self.sim.error_injection("lsportip", "ip_no_config")
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
self.sim.error_injection("lsportip", "header_mismatch")
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
self.sim.error_injection("lsportip", "remove_field")
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
self.driver.check_for_setup_error)
# Check with bad parameters
self.flags(san_password=None)
self.flags(san_private_key=None)
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_rsize="invalid")
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_warning="invalid")
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_autoexpand="invalid")
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_grainsize=str(42))
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_flashcopy_timeout=str(601))
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
self.flags(storwize_svc_vol_compression=True)
self.flags(storwize_svc_vol_rsize="-1")
self.assertRaises(exception.InvalidInput,
self.driver._check_flags)
self.driver._check_flags)
FLAGS.reset()
# Finally, check with good parameters
@ -1059,7 +1060,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
# Test timeout and volume cleanup
self.flags(storwize_svc_flashcopy_timeout=str(1))
self.assertRaises(exception.InvalidSnapshot,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot, snapshot)
is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
self.assertEqual(is_volume_defined, False)
FLAGS.reset()
@ -1068,21 +1069,21 @@ class StorwizeSVCDriverTestCase(test.TestCase):
if self.USESIM == 1:
self.sim.error_injection("lsfcmap", "bogus_prepare")
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot, snapshot)
# Test prestartfcmap, startfcmap, and rmfcmap failing
if self.USESIM == 1:
self.sim.error_injection("prestartfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot, snapshot)
self.sim.error_injection("lsfcmap", "speed_up")
self.sim.error_injection("startfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot, snapshot)
self.sim.error_injection("prestartfcmap", "bad_id")
self.sim.error_injection("rmfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot, snapshot)
# Test successful snapshot
self.driver.create_snapshot(snapshot)
@ -1119,7 +1120,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
if self.USESIM == 1:
self.sim.error_injection("prestartfcmap", "bad_id")
self.assertRaises(exception.ProcessExecutionError,
self.driver.create_volume_from_snapshot, volume2, snapshot)
self.driver.create_volume_from_snapshot,
volume2,
snapshot)
# Succeed
if self.USESIM == 1:
@ -1141,7 +1144,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
self.driver.create_volume(volume3)
snapshot["name"] = volume3["name"]
self.assertRaises(exception.InvalidSnapshot,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot,
snapshot)
self.driver._delete_volume(volume1, True)
self.driver._delete_volume(volume3, True)
@ -1150,7 +1154,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
snapshot["volume_name"] = "no_exist"
self.assertRaises(exception.VolumeNotFound,
self.driver.create_snapshot, snapshot)
self.driver.create_snapshot,
snapshot)
def test_storwize_svc_volumes(self):
# Create a first volume
@ -1176,7 +1181,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
# Try to create the volume again (should fail)
self.assertRaises(exception.ProcessExecutionError,
self.driver.create_volume, volume)
self.driver.create_volume,
volume)
# Try to delete a volume that doesn't exist (should not fail)
vol_no_exist = {"name": "i_dont_exist"}
@ -1270,7 +1276,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
if self.USESIM == 1:
self.sim.error_injection("mkvdisk", "no_compression")
self.assertRaises(exception.ProcessExecutionError,
self._create_test_vol)
self._create_test_vol)
FLAGS.reset()
def test_storwize_svc_unicode_host_and_volume_names(self):
@ -1328,7 +1334,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
# Try to delete the 1st volume (should fail because it is mapped)
self.assertRaises(exception.ProcessExecutionError,
self.driver.delete_volume, volume1)
self.driver.delete_volume,
volume1)
# Test no preferred node
self.driver.terminate_connection(volume1, conn)
@ -1346,7 +1353,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
# Try to remove connection from host that doesn't exist (should fail)
conn_no_exist = {"initiator": "i_dont_exist"}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection, volume1, conn_no_exist)
self.driver.terminate_connection,
volume1,
conn_no_exist)
# Try to remove connection from volume that isn't mapped (should print
# message but NOT fail)

View File

@ -20,7 +20,7 @@ from cinder.tests import utils as test_utils
class TestUtilsTestCase(test.TestCase):
def test_get_test_admin_context(self):
"""get_test_admin_context's return value behaves like admin context"""
"""get_test_admin_context's return value behaves like admin context."""
ctxt = test_utils.get_test_admin_context()
# TODO(soren): This should verify the full interface context

View File

@ -350,8 +350,9 @@ class GenericUtilsTestCase(test.TestCase):
self.assertEqual(reloaded_data, fake_contents)
self.reload_called = True
data = utils.read_cached_file("/this/is/a/fake", cache_data,
reload_func=test_reload)
data = utils.read_cached_file("/this/is/a/fake",
cache_data,
reload_func=test_reload)
self.assertEqual(data, fake_contents)
self.assertTrue(self.reload_called)
@ -445,7 +446,8 @@ class MonkeyPatchTestCase(test.TestCase):
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package + 'example_decorator'])
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
@ -467,19 +469,19 @@ class MonkeyPatchTestCase(test.TestCase):
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(package_a + 'example_function_a'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
@ -501,149 +503,126 @@ class AuditPeriodTest(test.TestCase):
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEquals(begin, datetime.datetime(
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
hour=8,
day=5,
month=3,
year=2012))
self.assertEquals(begin,
datetime.datetime(hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEquals(begin, datetime.datetime(
minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
minute=10,
hour=8,
day=5,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEquals(begin, datetime.datetime(
minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
minute=30,
hour=7,
day=5,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEquals(begin, datetime.datetime(
day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
day=5,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEquals(begin, datetime.datetime(
hour=6,
day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
hour=6,
day=5,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(hour=6,
day=4,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEquals(begin, datetime.datetime(
hour=10,
day=3,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(
hour=10,
day=4,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(hour=10,
day=3,
month=3,
year=2012))
self.assertEquals(end, datetime.datetime(hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEquals(begin, datetime.datetime(
day=1,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(
day=1,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(day=1,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEquals(begin, datetime.datetime(
day=2,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(
day=2,
month=3,
year=2012))
self.assertEquals(begin, datetime.datetime(day=2,
month=2,
year=2012))
self.assertEquals(end, datetime.datetime(day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEquals(begin, datetime.datetime(
day=15,
month=1,
year=2012))
self.assertEquals(end, datetime.datetime(
day=15,
month=2,
year=2012))
self.assertEquals(begin, datetime.datetime(day=15,
month=1,
year=2012))
self.assertEquals(end, datetime.datetime(day=15,
month=2,
year=2012))
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEquals(begin, datetime.datetime(
day=1,
month=1,
year=2011))
self.assertEquals(end, datetime.datetime(
day=1,
month=1,
year=2012))
self.assertEquals(begin, datetime.datetime(day=1,
month=1,
year=2011))
self.assertEquals(end, datetime.datetime(day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEquals(begin, datetime.datetime(
day=1,
month=2,
year=2011))
self.assertEquals(end, datetime.datetime(
day=1,
month=2,
year=2012))
self.assertEquals(begin, datetime.datetime(day=1,
month=2,
year=2011))
self.assertEquals(end, datetime.datetime(day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEquals(begin, datetime.datetime(
day=1,
month=6,
year=2010))
self.assertEquals(end, datetime.datetime(
day=1,
month=6,
year=2011))
self.assertEquals(begin, datetime.datetime(day=1,
month=6,
year=2010))
self.assertEquals(end, datetime.datetime(day=1,
month=6,
year=2011))
class FakeSSHClient(object):

View File

@ -20,40 +20,40 @@ from cinder import version
class VersionTestCase(test.TestCase):
"""Test cases for Versions code"""
"""Test cases for Versions code."""
def setUp(self):
"""setup test with unchanging values"""
"""Setup test with unchanging values."""
super(VersionTestCase, self).setUp()
self.version = version
self.version.FINAL = False
self.version.CINDER_VERSION = ['2012', '10']
self.version.YEAR, self.version.COUNT = self.version.CINDER_VERSION
self.version.version_info = {'branch_nick': u'LOCALBRANCH',
'revision_id': 'LOCALREVISION',
'revno': 0}
'revision_id': 'LOCALREVISION',
'revno': 0}
def test_version_string_is_good(self):
"""Ensure version string works"""
"""Ensure version string works."""
self.assertEqual("2012.10-dev", self.version.version_string())
def test_canonical_version_string_is_good(self):
"""Ensure canonical version works"""
"""Ensure canonical version works."""
self.assertEqual("2012.10", self.version.canonical_version_string())
def test_final_version_strings_are_identical(self):
"""Ensure final version strings match only at release"""
"""Ensure final version strings match only at release."""
self.assertNotEqual(self.version.canonical_version_string(),
self.version.version_string())
self.version.version_string())
self.version.FINAL = True
self.assertEqual(self.version.canonical_version_string(),
self.version.version_string())
self.version.version_string())
def test_vcs_version_string_is_good(self):
"""Ensure uninstalled code generates local """
"""Ensure uninstalled code generates local."""
self.assertEqual("LOCALBRANCH:LOCALREVISION",
self.version.vcs_version_string())
self.version.vcs_version_string())
def test_version_string_with_vcs_is_good(self):
"""Ensure uninstalled code get version string"""
"""Ensure uninstalled code get version string."""
self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION",
self.version.version_string_with_vcs())
self.version.version_string_with_vcs())

View File

@ -201,8 +201,8 @@ class VolumeTestCase(test.TestCase):
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(mox.IgnoreArg()) \
.AndRaise(exception.VolumeIsBusy)
self.volume.driver.delete_volume(
mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy)
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertEqual(True, res)
@ -226,9 +226,9 @@ class VolumeTestCase(test.TestCase):
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id, db.volume_get(
context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.assertEqual(snapshot_id,
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_id)
@ -454,8 +454,8 @@ class VolumeTestCase(test.TestCase):
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(mox.IgnoreArg()) \
.AndRaise(exception.SnapshotIsBusy)
self.volume.driver.delete_snapshot(
mox.IgnoreArg()).AndRaise(exception.SnapshotIsBusy)
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
@ -486,13 +486,14 @@ class VolumeTestCase(test.TestCase):
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume_id = 1
# creating volume testdata
db.volume_create(self.context, {'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
try:
self.volume.create_volume(self.context,
volume_id,
@ -526,12 +527,13 @@ class VolumeTestCase(test.TestCase):
image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
# creating volume testdata
volume_id = 1
db.volume_create(self.context, {'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'host': 'dummy'})
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'host': 'dummy'})
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
@ -557,19 +559,20 @@ class VolumeTestCase(test.TestCase):
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
# creating volume testdata
volume_id = 1
db.volume_create(self.context, {'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'instance_uuid': None,
'host': 'dummy'})
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'instance_uuid': None,
'host': 'dummy'})
try:
# start test
self.volume.copy_volume_to_image(self.context,
volume_id,
image_id)
volume_id,
image_id)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], 'available')
@ -591,21 +594,21 @@ class VolumeTestCase(test.TestCase):
image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
volume_id = 1
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'instance_uuid':
'b21f957d-a72f-4b93-b5a5-45b1161abb02',
'host': 'dummy'})
db.volume_create(
self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'instance_uuid': 'b21f957d-a72f-4b93-b5a5-45b1161abb02',
'host': 'dummy'})
try:
# start test
self.volume.copy_volume_to_image(self.context,
volume_id,
image_id)
volume_id,
image_id)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], 'in-use')
@ -626,12 +629,13 @@ class VolumeTestCase(test.TestCase):
image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
# creating volume testdata
volume_id = 1
db.volume_create(self.context, {'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'in-use',
'host': 'dummy'})
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'in-use',
'host': 'dummy'})
try:
# start test
@ -663,7 +667,7 @@ class VolumeTestCase(test.TestCase):
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=_FakeImageService())
image_service=_FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=1)
volume_id = volume['id']

View File

@ -50,12 +50,13 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
'value1')
vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key1',
'value1')
vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key2',
vol_metadata = db.volume_glance_metadata_create(ctxt, 2,
'key2',
'value2')
expected_metadata_1 = {'volume_id': '1',
'key': 'key1',
'value': 'value1'}
'key': 'key1',
'value': 'value1'}
metadata = db.volume_glance_metadata_get(ctxt, 1)
self.assertEqual(len(metadata), 1)
@ -106,8 +107,8 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
db.volume_glance_metadata_copy_to_snapshot(ctxt, 100, 1)
expected_meta = {'snapshot_id': '100',
'key': 'key1',
'value': 'value1'}
'key': 'key1',
'value': 'value1'}
for meta in db.volume_snapshot_glance_metadata_get(ctxt, 100):
for (key, value) in expected_meta.items():

View File

@ -109,56 +109,56 @@ class VolumeRpcAPITestCase(test.TestCase):
def test_create_volume(self):
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
host='fake_host1',
snapshot_id='fake_snapshot_id',
image_id='fake_image_id')
rpc_method='cast',
volume=self.fake_volume,
host='fake_host1',
snapshot_id='fake_snapshot_id',
image_id='fake_image_id')
def test_delete_volume(self):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume)
rpc_method='cast',
volume=self.fake_volume)
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
rpc_method='cast',
volume=self.fake_volume,
snapshot=self.fake_snapshot)
rpc_method='cast',
volume=self.fake_volume,
snapshot=self.fake_snapshot)
def test_delete_snapshot(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host')
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host')
def test_attach_volume(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
mountpoint='fake_mountpoint')
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
mountpoint='fake_mountpoint')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
rpc_method='call',
volume=self.fake_volume)
rpc_method='call',
volume=self.fake_volume)
def test_copy_volume_to_image(self):
self._test_volume_api('copy_volume_to_image',
rpc_method='cast',
volume=self.fake_volume,
image_id='fake_image_id')
rpc_method='cast',
volume=self.fake_volume,
image_id='fake_image_id')
def test_initialize_connection(self):
self._test_volume_api('initialize_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector')
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector')
def test_terminate_connection(self):
self._test_volume_api('terminate_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
force=False)
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
force=False)

View File

@ -33,21 +33,20 @@ LOG = logging.getLogger(__name__)
class VolumeTypeTestCase(test.TestCase):
"""Test cases for volume type code"""
"""Test cases for volume type code."""
def setUp(self):
super(VolumeTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.vol_type1_name = str(int(time.time()))
self.vol_type1_specs = dict(
type="physical drive",
drive_type="SAS",
size="300",
rpm="7200",
visible="True")
self.vol_type1_specs = dict(type="physical drive",
drive_type="SAS",
size="300",
rpm="7200",
visible="True")
def test_volume_type_create_then_destroy(self):
"""Ensure volume types can be created and deleted"""
"""Ensure volume types can be created and deleted."""
prev_all_vtypes = volume_types.get_all_types(self.ctxt)
volume_types.create(self.ctxt,
@ -75,14 +74,14 @@ class VolumeTypeTestCase(test.TestCase):
'drive type was not deleted')
def test_get_all_volume_types(self):
"""Ensures that all volume types can be retrieved"""
"""Ensures that all volume types can be retrieved."""
session = sql_session.get_session()
total_volume_types = session.query(models.VolumeTypes).count()
vol_types = volume_types.get_all_types(self.ctxt)
self.assertEqual(total_volume_types, len(vol_types))
def test_get_default_volume_type(self):
""" Ensures default volume type can be retrieved """
"""Ensures default volume type can be retrieved."""
volume_types.create(self.ctxt,
fake_flags.def_vol_type,
{})
@ -91,26 +90,26 @@ class VolumeTypeTestCase(test.TestCase):
fake_flags.def_vol_type)
def test_default_volume_type_missing_in_db(self):
""" Ensures proper exception raised if default volume type
is not in database. """
"""Ensures proper exception raised if default volume type
is not in database."""
session = sql_session.get_session()
default_vol_type = volume_types.get_default_volume_type()
self.assertEqual(default_vol_type, {})
def test_non_existent_vol_type_shouldnt_delete(self):
"""Ensures that volume type creation fails with invalid args"""
"""Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.VolumeTypeNotFoundByName,
volume_types.destroy, self.ctxt, "sfsfsdfdfs")
def test_repeated_vol_types_shouldnt_raise(self):
"""Ensures that volume duplicates don't raise"""
"""Ensures that volume duplicates don't raise."""
new_name = self.vol_type1_name + "dup"
volume_types.create(self.ctxt, new_name)
volume_types.destroy(self.ctxt, new_name)
volume_types.create(self.ctxt, new_name)
def test_invalid_volume_types_params(self):
"""Ensures that volume type creation fails with invalid args"""
"""Ensures that volume type creation fails with invalid args."""
self.assertRaises(exception.InvalidVolumeType,
volume_types.destroy, self.ctxt, None)
self.assertRaises(exception.InvalidVolumeType,
@ -120,7 +119,7 @@ class VolumeTypeTestCase(test.TestCase):
self.ctxt, None)
def test_volume_type_get_by_id_and_name(self):
"""Ensure volume types get returns same entry"""
"""Ensure volume types get returns same entry."""
volume_types.create(self.ctxt,
self.vol_type1_name,
self.vol_type1_specs)
@ -131,7 +130,7 @@ class VolumeTypeTestCase(test.TestCase):
self.assertEqual(new, new2)
def test_volume_type_search_by_extra_spec(self):
"""Ensure volume types get by extra spec returns correct type"""
"""Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2"})
volume_types.create(self.ctxt, "type2", {"key2": "val2",
@ -139,29 +138,32 @@ class VolumeTypeTestCase(test.TestCase):
volume_types.create(self.ctxt, "type3", {"key3": "another_value",
"key4": "val4"})
vol_types = volume_types.get_all_types(self.ctxt,
search_opts={'extra_specs': {"key1": "val1"}})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key1": "val1"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 1)
self.assertTrue("type1" in vol_types.keys())
self.assertEqual(vol_types['type1']['extra_specs'],
{"key1": "val1", "key2": "val2"})
vol_types = volume_types.get_all_types(self.ctxt,
search_opts={'extra_specs': {"key2": "val2"}})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key2": "val2"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 2)
self.assertTrue("type1" in vol_types.keys())
self.assertTrue("type2" in vol_types.keys())
vol_types = volume_types.get_all_types(self.ctxt,
search_opts={'extra_specs': {"key3": "val3"}})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key3": "val3"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 1)
self.assertTrue("type2" in vol_types.keys())
def test_volume_type_search_by_extra_spec_multiple(self):
"""Ensure volume types get by extra spec returns correct type"""
"""Ensure volume types get by extra spec returns correct type."""
volume_types.create(self.ctxt, "type1", {"key1": "val1",
"key2": "val2",
"key3": "val3"})
@ -171,9 +173,10 @@ class VolumeTypeTestCase(test.TestCase):
"key3": "val3",
"key4": "val4"})
vol_types = volume_types.get_all_types(self.ctxt,
search_opts={'extra_specs': {"key1": "val1",
"key3": "val3"}})
vol_types = volume_types.get_all_types(
self.ctxt,
search_opts={'extra_specs': {"key1": "val1",
"key3": "val3"}})
LOG.info("vol_types: %s" % vol_types)
self.assertEqual(len(vol_types), 2)
self.assertTrue("type1" in vol_types.keys())

View File

@ -30,8 +30,8 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
self.context = context.get_admin_context()
self.vol_type1 = dict(name="TEST: Regular volume test")
self.vol_type1_specs = dict(vol_extra1="value1",
vol_extra2="value2",
vol_extra3=3)
vol_extra2="value2",
vol_extra3=3)
self.vol_type1['extra_specs'] = self.vol_type1_specs
ref = db.volume_type_create(self.context, self.vol_type1)
self.volume_type1_id = ref.id
@ -53,31 +53,31 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
def test_volume_type_specs_get(self):
expected_specs = self.vol_type1_specs.copy()
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
context.get_admin_context(),
self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_extra_specs_delete(self):
expected_specs = self.vol_type1_specs.copy()
del expected_specs['vol_extra2']
db.volume_type_extra_specs_delete(context.get_admin_context(),
self.volume_type1_id,
'vol_extra2')
self.volume_type1_id,
'vol_extra2')
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
context.get_admin_context(),
self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_extra_specs_update(self):
expected_specs = self.vol_type1_specs.copy()
expected_specs['vol_extra3'] = "4"
db.volume_type_extra_specs_update_or_create(
context.get_admin_context(),
self.volume_type1_id,
dict(vol_extra3=4))
context.get_admin_context(),
self.volume_type1_id,
dict(vol_extra3=4))
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
context.get_admin_context(),
self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_extra_specs_create(self):
@ -85,37 +85,37 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
expected_specs['vol_extra4'] = 'value4'
expected_specs['vol_extra5'] = 'value5'
db.volume_type_extra_specs_update_or_create(
context.get_admin_context(),
self.volume_type1_id,
dict(vol_extra4="value4",
vol_extra5="value5"))
context.get_admin_context(),
self.volume_type1_id,
dict(vol_extra4="value4",
vol_extra5="value5"))
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
context.get_admin_context(),
self.volume_type1_id)
self.assertEquals(expected_specs, actual_specs)
def test_volume_type_get_with_extra_specs(self):
volume_type = db.volume_type_get(
context.get_admin_context(),
self.volume_type1_id)
context.get_admin_context(),
self.volume_type1_id)
self.assertEquals(volume_type['extra_specs'],
self.vol_type1_specs)
volume_type = db.volume_type_get(
context.get_admin_context(),
self.vol_type2_id)
context.get_admin_context(),
self.vol_type2_id)
self.assertEquals(volume_type['extra_specs'], {})
def test_volume_type_get_by_name_with_extra_specs(self):
volume_type = db.volume_type_get_by_name(
context.get_admin_context(),
self.vol_type1['name'])
context.get_admin_context(),
self.vol_type1['name'])
self.assertEquals(volume_type['extra_specs'],
self.vol_type1_specs)
volume_type = db.volume_type_get_by_name(
context.get_admin_context(),
self.vol_type2_noextra['name'])
context.get_admin_context(),
self.vol_type2_noextra['name'])
self.assertEquals(volume_type['extra_specs'], {})
def test_volume_type_get_all(self):

View File

@ -52,7 +52,7 @@ class UsageInfoTestCase(test.TestCase):
super(UsageInfoTestCase, self).tearDown()
def _create_volume(self, params={}):
"""Create a test volume"""
"""Create a test volume."""
vol = {}
vol['snapshot_id'] = self.snapshot_id
vol['user_id'] = self.user_id

View File

@ -74,32 +74,34 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
def tearDown(self):
try:
if self._volume_data_2 and \
self._wutils.volume_exists(
self._volume_data_2['name']):
if (self._volume_data_2 and
self._wutils.volume_exists(self._volume_data_2['name'])):
self._wutils.delete_volume(self._volume_data_2['name'])
if self._volume_data and \
self._wutils.volume_exists(
self._volume_data['name']):
if (self._volume_data and
self._wutils.volume_exists(
self._volume_data['name'])):
self._wutils.delete_volume(self._volume_data['name'])
if self._snapshot_data and \
self._wutils.snapshot_exists(
self._snapshot_data['name']):
if (self._snapshot_data and
self._wutils.snapshot_exists(
self._snapshot_data['name'])):
self._wutils.delete_snapshot(self._snapshot_data['name'])
if self._connector_data and \
self._wutils.initiator_id_exists(
"%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name']),
self._connector_data['initiator']):
if (self._connector_data and
self._wutils.initiator_id_exists(
"%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name']),
self._connector_data['initiator'])):
target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name'])
initiator_name = self._connector_data['initiator']
self._wutils.delete_initiator_id(target_name, initiator_name)
if self._volume_data and \
self._wutils.export_exists("%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name'])):
self._wutils.delete_export("%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name']))
if (self._volume_data and
self._wutils.export_exists("%s%s" %
(FLAGS.iscsi_target_prefix,
self._volume_data['name']))):
self._wutils.delete_export(
"%s%s" % (FLAGS.iscsi_target_prefix,
self._volume_data['name']))
finally:
super(TestWindowsDriver, self).tearDown()
@ -178,9 +180,10 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
retval = self._drv.create_export({}, self._volume_data)
volume_name = self._volume_data['name']
self.assertEquals(retval,
{'provider_location':
"%s%s" % (FLAGS.iscsi_target_prefix, volume_name)})
self.assertEquals(
retval,
{'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
volume_name)})
def test_initialize_connection(self):
#Create a volume

View File

@ -83,9 +83,7 @@ class DriverTestCase(unittest.TestCase):
size=1, display_name='name', display_description='desc'))
mock.VerifyAll()
self.assertEquals(dict(
provider_location='sr_uuid/vdi_uuid'
), result)
self.assertEquals(dict(provider_location='sr_uuid/vdi_uuid'), result)
def test_delete_volume(self):
mock = mox.Mox()

View File

@ -29,15 +29,11 @@ from cinder.volume.drivers import xiv
FLAGS = flags.FLAGS
FAKE = "fake"
VOLUME = {
'size': 16,
'name': FAKE,
'id': 1
}
VOLUME = {'size': 16,
'name': FAKE,
'id': 1}
CONNECTOR = {
'initiator': "iqn.2012-07.org.fake:01:948f189c4695",
}
CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
class XIVFakeProxyDriver(object):
@ -82,21 +78,18 @@ class XIVFakeProxyDriver(object):
self.volumes[volume['name']]['attached'] = connector
return {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': True,
'target_portal': self.xiv_portal,
'target_iqn': self.xiv_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
# part of a patch to nova-compute to enable iscsi multipath
'provider_location': "%s,1 %s %s" % (
self.xiv_portal,
self.xiv_iqn,
lun_id),
},
return {'driver_volume_type': 'iscsi',
'data': {'target_discovered': True,
'target_discovered': True,
'target_portal': self.xiv_portal,
'target_iqn': self.xiv_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
'provider_location': "%s,1 %s %s" % (
self.xiv_portal,
self.xiv_iqn,
lun_id), },
}
def terminate_connection(self, volume, connector):
@ -110,8 +103,8 @@ class XIVFakeProxyDriver(object):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound()
return self.volumes[volume['name']].get('attached', None) \
== connector
return (self.volumes[volume['name']].get('attached', None)
== connector)
class XIVVolumeDriverTest(test.TestCase):
@ -126,18 +119,14 @@ class XIVVolumeDriverTest(test.TestCase):
def test_initialized_should_set_xiv_info(self):
"""Test that the san flags are passed to the XIV proxy."""
self.assertEquals(
self.driver.xiv_proxy.xiv_info['xiv_user'],
FLAGS.san_login)
self.assertEquals(
self.driver.xiv_proxy.xiv_info['xiv_pass'],
FLAGS.san_password)
self.assertEquals(
self.driver.xiv_proxy.xiv_info['xiv_address'],
FLAGS.san_ip)
self.assertEquals(
self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
FLAGS.san_clustername)
self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_user'],
FLAGS.san_login)
self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_pass'],
FLAGS.san_password)
self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_address'],
FLAGS.san_ip)
self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
FLAGS.san_clustername)
def test_setup_should_fail_if_credentials_are_invalid(self):
"""Test that the xiv_proxy validates credentials."""
@ -186,8 +175,10 @@ class XIVVolumeDriverTest(test.TestCase):
self.driver.do_setup(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
{'name': FAKE, 'id': 1, 'size': 12000})
self.driver.create_volume,
{'name': FAKE,
'id': 1,
'size': 12000})
def test_initialize_connection(self):
"""Test that inititialize connection attaches volume to host."""
@ -197,7 +188,7 @@ class XIVVolumeDriverTest(test.TestCase):
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.assertTrue(
self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver.delete_volume(VOLUME)
@ -207,7 +198,9 @@ class XIVVolumeDriverTest(test.TestCase):
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection, VOLUME, CONNECTOR)
self.driver.initialize_connection,
VOLUME,
CONNECTOR)
def test_terminate_connection(self):
"""Test terminating a connection."""
@ -217,10 +210,8 @@ class XIVVolumeDriverTest(test.TestCase):
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.assertFalse(
self.driver.xiv_proxy.is_volume_attached(
VOLUME,
CONNECTOR))
self.assertFalse(self.driver.xiv_proxy.is_volume_attached(VOLUME,
CONNECTOR))
self.driver.delete_volume(VOLUME)
@ -229,7 +220,9 @@ class XIVVolumeDriverTest(test.TestCase):
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection, VOLUME, CONNECTOR)
self.driver.terminate_connection,
VOLUME,
CONNECTOR)
def test_terminate_connection_should_fail_on_non_attached_volume(self):
"""Test that terminate won't work for volumes that are not attached."""
@ -238,6 +231,8 @@ class XIVVolumeDriverTest(test.TestCase):
self.driver.create_volume(VOLUME)
self.assertRaises(exception.VolumeNotFoundForInstance,
self.driver.terminate_connection, VOLUME, CONNECTOR)
self.driver.terminate_connection,
VOLUME,
CONNECTOR)
self.driver.delete_volume(VOLUME)

View File

@ -105,7 +105,7 @@ class FakeRequest(object):
('/api/vcontrollers.xml', self._list_controllers),
('/api/servers.xml', self._list_servers),
('/api/volumes/*/servers.xml',
self._list_vol_attachments)]
self._list_vol_attachments)]
}
ops_list = ops[self.method]
@ -139,8 +139,8 @@ class FakeRequest(object):
def _login(self):
params = self._get_parameters(self.body)
if params['user'] == RUNTIME_VARS['user'] and\
params['password'] == RUNTIME_VARS['password']:
if (params['user'] == RUNTIME_VARS['user'] and
params['password'] == RUNTIME_VARS['password']):
return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key']
else:
return RUNTIME_VARS['bad_login']
@ -246,8 +246,10 @@ class FakeRequest(object):
<created-at type='datetime'>2012-01-28...</created-at>
<modified-at type='datetime'>2012-01-28...</modified-at>
</volume>"""
return self._generate_list_resp(header, footer, body,
RUNTIME_VARS['volumes'])
return self._generate_list_resp(header,
footer,
body,
RUNTIME_VARS['volumes'])
def _list_controllers(self):
header = """<show-vcontrollers-response>
@ -267,8 +269,10 @@ class FakeRequest(object):
<chap-username>test_chap_user</chap-username>
<chap-target-secret>test_chap_secret</chap-target-secret>
</vcontroller>"""
return self._generate_list_resp(header, footer, body,
RUNTIME_VARS['controllers'])
return self._generate_list_resp(header,
footer,
body,
RUNTIME_VARS['controllers'])
def _list_servers(self):
header = """<show-servers-response>
@ -317,7 +321,8 @@ class FakeRequest(object):
for server in attachments:
srv_params = self._get_server_obj(server)
resp += body % (server,
srv_params['display_name'], srv_params['iqn'])
srv_params['display_name'],
srv_params['iqn'])
resp += footer
return resp
@ -353,7 +358,7 @@ class FakeHTTPSConnection(FakeHTTPConnection):
class ZadaraVPSADriverTestCase(test.TestCase):
"""Test case for Zadara VPSA volume driver"""
"""Test case for Zadara VPSA volume driver."""
def setUp(self):
LOG.debug('Enter: setUp')
@ -428,7 +433,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.driver.check_for_setup_error()
def test_volume_attach_detach(self):
"""Test volume attachment and detach"""
"""Test volume attachment and detach."""
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector = dict(initiator='test_iqn.1')
@ -450,7 +455,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.driver.delete_volume(volume)
def test_volume_attach_multiple_detach(self):
"""Test multiple volume attachment and detach"""
"""Test multiple volume attachment and detach."""
volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
connector1 = dict(initiator='test_iqn.1')
connector2 = dict(initiator='test_iqn.2')
@ -467,7 +472,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.driver.delete_volume(volume)
def test_wrong_attach_params(self):
"""Test different wrong attach scenarios"""
"""Test different wrong attach scenarios."""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}
@ -480,7 +485,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
volume1, connector1)
def test_wrong_detach_params(self):
"""Test different wrong detachment scenarios"""
"""Test different wrong detachment scenarios."""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
@ -505,7 +510,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
volume1, connector2)
def test_wrong_login_reply(self):
"""Test wrong login reply"""
"""Test wrong login reply."""
RUNTIME_VARS['login'] = """<hash>
<access-key>%s</access-key>
@ -530,13 +535,13 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.driver.do_setup, None)
def test_ssl_use(self):
"""Coverage test for SSL connection"""
"""Coverage test for SSL connection."""
self.flags(zadara_vpsa_use_ssl=True)
self.driver.do_setup(None)
self.flags(zadara_vpsa_use_ssl=False)
def test_bad_http_response(self):
"""Coverage test for non-good HTTP response"""
"""Coverage test for non-good HTTP response."""
RUNTIME_VARS['status'] = 400
volume = {'name': 'test_volume_01', 'size': 1}
@ -544,7 +549,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
self.driver.create_volume, volume)
def test_delete_without_detach(self):
"""Test volume deletion without detach"""
"""Test volume deletion without detach."""
volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
connector1 = dict(initiator='test_iqn.1')

View File

@ -43,9 +43,9 @@ class BaseTestCase(cinder.test.TestCase):
super(BaseTestCase, self).tearDown()
has_errors = len([test for (test, msgs) in self._currentResult.errors
if test.id() == self.id()]) > 0
if test.id() == self.id()]) > 0
failed = len([test for (test, msgs) in self._currentResult.failures
if test.id() == self.id()]) > 0
if test.id() == self.id()]) > 0
if not has_errors and not failed:
self._save_mock_proxies()
@ -61,7 +61,7 @@ class BaseTestCase(cinder.test.TestCase):
test_name = test_name[len(prefix):]
file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
return os.path.join(os.path.dirname(mockproxy.__file__),
"stubs", file_name)
"stubs", file_name)
def _load_mock(self, name):
path = self._get_stub_file_path(self.id(), name)
@ -72,9 +72,9 @@ class BaseTestCase(cinder.test.TestCase):
def _load_mock_or_create_proxy(self, module_name):
m = None
if not gen_test_mocks_key in os.environ or \
os.environ[gen_test_mocks_key].lower() \
not in ['true', 'yes', '1']:
if (not gen_test_mocks_key in os.environ or
os.environ[gen_test_mocks_key].lower()
not in ['true', 'yes', '1']):
m = self._load_mock(module_name)
else:
module = __import__(module_name)

View File

@ -20,23 +20,17 @@ Stubouts, mocks and fixtures for windows volume test suite
def get_fake_volume_info(name):
return {
'name': name,
'size': 1,
'provider_location': 'iqn.2010-10.org.openstack:' + name,
'id': 1,
'provider_auth': None
}
return {'name': name,
'size': 1,
'provider_location': 'iqn.2010-10.org.openstack:' + name,
'id': 1,
'provider_auth': None}
def get_fake_snapshot_info(volume_name, snapshot_name):
return {
'name': snapshot_name,
'volume_name': volume_name,
}
return {'name': snapshot_name,
'volume_name': volume_name, }
def get_fake_connector_info(initiator):
return {
'initiator': initiator,
}
return {'initiator': initiator, }

View File

@ -44,7 +44,7 @@ def serialize_obj(obj):
def serialize_args(*args, **kwargs):
"""Workaround for float string conversion issues in Python 2.6"""
"""Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))
@ -113,8 +113,10 @@ class MockProxy(object):
self._recorded_values = {}
def _get_proxy_object(self, obj):
if hasattr(obj, '__dict__') or isinstance(obj, tuple) or \
isinstance(obj, list) or isinstance(obj, dict):
if (hasattr(obj, '__dict__') or
isinstance(obj, tuple) or
isinstance(obj, list) or
isinstance(obj, dict)):
p = MockProxy(obj)
else:
p = obj
@ -125,8 +127,9 @@ class MockProxy(object):
return object.__getattribute__(self, name)
else:
attr = getattr(self._wrapped, name)
if inspect.isfunction(attr) or inspect.ismethod(attr) or \
inspect.isbuiltin(attr):
if (inspect.isfunction(attr) or
inspect.ismethod(attr) or
inspect.isbuiltin(attr)):
def newfunc(*args, **kwargs):
result = attr(*args, **kwargs)
p = self._get_proxy_object(result)
@ -134,8 +137,9 @@ class MockProxy(object):
self._add_recorded_ret_value(name, params, p)
return p
return newfunc
elif hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__')
and not (isinstance(attr, str) or isinstance(attr, unicode))):
elif (hasattr(attr, '__dict__') or
(hasattr(attr, '__getitem__') and not
(isinstance(attr, str) or isinstance(attr, unicode)))):
p = MockProxy(attr)
else:
p = attr

View File

@ -48,13 +48,13 @@ class WindowsUtils(object):
return self.__conn_wmi
def find_vhd_by_name(self, name):
''' Finds a volume by its name.'''
'''Finds a volume by its name.'''
wt_disks = self._conn_wmi.WT_Disk(Description=name)
return wt_disks
def volume_exists(self, name):
''' Checks if a volume exists.'''
'''Checks if a volume exists.'''
wt_disks = self.find_vhd_by_name(name)
if len(wt_disks) > 0:
@ -62,7 +62,7 @@ class WindowsUtils(object):
return False
def snapshot_exists(self, name):
''' Checks if a snapshot exists.'''
'''Checks if a snapshot exists.'''
wt_snapshots = self.find_snapshot_by_name(name)
if len(wt_snapshots) > 0:
@ -70,47 +70,47 @@ class WindowsUtils(object):
return False
def find_snapshot_by_name(self, name):
''' Finds a snapshot by its name.'''
'''Finds a snapshot by its name.'''
wt_snapshots = self._conn_wmi.WT_Snapshot(Description=name)
return wt_snapshots
def delete_volume(self, name):
''' Deletes a volume.'''
'''Deletes a volume.'''
wt_disk = self._conn_wmi.WT_Disk(Description=name)[0]
wt_disk.Delete_()
vhdfiles = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
self._get_vhd_path(name) + "'")
"Select * from CIM_DataFile where Name = '" +
self._get_vhd_path(name) + "'")
if len(vhdfiles) > 0:
vhdfiles[0].Delete()
def _get_vhd_path(self, volume_name):
''' Gets the path disk of the volume'''
'''Gets the path disk of the volume.'''
base_vhd_folder = FLAGS.windows_iscsi_lun_path
return os.path.join(base_vhd_folder, volume_name + ".vhd")
def delete_snapshot(self, name):
''' Deletes a snapshot.'''
'''Deletes a snapshot.'''
wt_snapshot = self._conn_wmi.WT_Snapshot(Description=name)[0]
wt_snapshot.Delete_()
vhdfile = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
self._get_vhd_path(name) + "'")[0]
"Select * from CIM_DataFile where Name = '" +
self._get_vhd_path(name) + "'")[0]
vhdfile.Delete()
def find_initiator_ids(self, target_name, initiator_name):
''' Finds a initiator id by its name.'''
'''Finds a initiator id by its name.'''
wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name,
Method=4,
Value=initiator_name)
return wt_idmethod
def initiator_id_exists(self, target_name, initiator_name):
''' Checks if a initiatorId exists.'''
'''Checks if a initiatorId exists.'''
wt_idmethod = self.find_initiator_ids(target_name, initiator_name)
if len(wt_idmethod) > 0:
@ -118,13 +118,13 @@ class WindowsUtils(object):
return False
def find_exports(self, target_name):
''' Finds a export id by its name.'''
'''Finds a export id by its name.'''
wt_host = self._conn_wmi.WT_Host(HostName=target_name)
return wt_host
def export_exists(self, target_name):
''' Checks if a export exists.'''
'''Checks if a export exists.'''
wt_host = self.find_exports(target_name)
if len(wt_host) > 0:
@ -132,13 +132,13 @@ class WindowsUtils(object):
return False
def delete_initiator_id(self, target_name, initiator_name):
''' Deletes a initiatorId.'''
'''Deletes a initiatorId.'''
wt_init_id = self.find_initiator_ids(target_name, initiator_name)[0]
wt_init_id.Delete_()
def delete_export(self, target_name):
''' Deletes an export.'''
'''Deletes an export.'''
wt_host = self.find_exports(target_name)[0]
wt_host.RemoveAllWTDisks()

Some files were not shown because too many files have changed in this diff Show More