Fix bad-indentation pylint issues

Fixed bad-indentation (pylint warning code W0311) issues in the cinder project.

Closes-Bug: 1462992
Change-Id: Idf3c036b1826ed5c0efa2e13ddf289e6880323a6
Rajesh Tailor 2015-06-08 04:52:49 -07:00
parent 57d0164194
commit ca2be0a73e
24 changed files with 189 additions and 190 deletions
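
For reference, W0311 is pylint's bad-indentation check: it fires whenever a line is indented by something other than the configured indent unit (four spaces, per PEP 8, in OpenStack projects), so every hunk below is a whitespace-only change. As a minimal illustration of the class of problem being fixed — this snippet is invented for this note, not taken from the diff below; locally, something like "pylint --disable=all --enable=W0311 cinder/" would surface these, though the exact gate invocation is an assumption:

# Before: the bodies are indented in three-space steps, so pylint
# reports W0311 (bad-indentation) on both indented lines.
def delete_quota(allocated):
   if allocated != 0:
      raise ValueError("non-zero quota")

# After: consistent four-space indentation, no W0311.
def delete_quota(allocated):
    if allocated != 0:
        raise ValueError("non-zero quota")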


@@ -390,11 +390,11 @@ class QuotaSetsController(wsgi.Controller):
        # If the project which is being deleted has allocated part of its quota
        # to its subprojects, then subprojects' quotas should be deleted first.
        for key, value in project_quotas.items():
            if 'allocated' in project_quotas[key].keys():
                if project_quotas[key]['allocated'] != 0:
                    msg = _("About to delete child projects having "
                            "non-zero quota. This should not be performed")
                    raise webob.exc.HTTPBadRequest(explanation=msg)

        if parent_id:
            # Get the children of the project which the token is scoped to in


@@ -281,14 +281,14 @@ class BackupManager(manager.SchedulerDependentManager):
            for attachment in attachments:
                if (attachment['attached_host'] == self.host and
                        attachment['instance_uuid'] is None):
                    try:
                        mgr.detach_volume(ctxt, volume['id'],
                                          attachment['id'])
                    except Exception:
                        LOG.exception(_LE("Detach attachment %(attach_id)s"
                                          " failed."),
                                      {'attach_id': attachment['id']},
                                      resource=volume)

    def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
        # NOTE(xyang): If the service crashes or gets restarted during the


@@ -531,11 +531,11 @@ class API(base.Base):
        if (not name and not description and not add_volumes_new and
                not remove_volumes_new):
            msg = (_("Cannot update consistency group %(group_id)s "
                     "because no valid name, description, add_volumes, "
                     "or remove_volumes were provided.") %
                   {'group_id': group.id})
            raise exception.InvalidConsistencyGroup(reason=msg)

        fields = {'updated_at': timeutils.utcnow()}


@@ -932,9 +932,9 @@ class ViolinBackendErrNotFound(CinderException):
# ZFSSA NFS driver exception.
class WebDAVClientError(CinderException):
    message = _("The WebDAV request failed. Reason: %(msg)s, "
                "Return code/reason: %(code)s, Source Volume: %(src)s, "
                "Destination Volume: %(dst)s, Method: %(method)s.")


# XtremIO Drivers


@@ -63,7 +63,7 @@ def _has_type_access(type_id, project_id):
    for access in ACCESS_LIST:
        if access['volume_type_id'] == type_id and \
                access['project_id'] == project_id:
            return True
    return False


@@ -874,7 +874,7 @@ class GlusterFsDriverTestCase(test.TestCase):
        drv = self._driver

        with mock.patch.object(drv, '_execute') as mock_execute,\
                mock.patch.object(drv, '_ensure_share_mounted') as \
                mock_ensure_share_mounted:
            volume = DumbVolume()
            volume['id'] = self.VOLUME_UUID
@@ -890,23 +890,23 @@ class GlusterFsDriverTestCase(test.TestCase):
        drv = self._driver

        with mock.patch.object(drv, '_read_file') as mock_read_file:
            hashed = drv._get_hash_str(self.TEST_EXPORT1)
            volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
                                               hashed,
                                               self.VOLUME_UUID)
            info_path = '%s%s' % (volume_path, '.info')

            mock_read_file.return_value = '{"%(id)s": "volume-%(id)s"}' %\
                {'id': self.VOLUME_UUID}

            volume = DumbVolume()
            volume['id'] = self.VOLUME_UUID
            volume['name'] = 'volume-%s' % self.VOLUME_UUID

            info = drv._read_info_file(info_path)

            self.assertEqual('volume-%s' % self.VOLUME_UUID,
                             info[self.VOLUME_UUID])

    def test_extend_volume(self):
        drv = self._driver


@@ -83,11 +83,11 @@ class GPFSDriverTestCase(test.TestCase):
        CONF.gpfs_images_dir = self.images_dir

    def _cleanup(self, images_dir, volumes_path):
        try:
            os.rmdir(images_dir)
            os.rmdir(volumes_path)
        except OSError:
            pass

    def test_different(self):
        self.assertTrue(gpfs._different((True, False)))


@@ -1236,27 +1236,27 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
                mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
                mock.patch.object(self.volume.driver, '_resize') \
                    as mock_resize:
            mock_is_cloneable.side_effect = cloneable_side_effect
            image_loc = ('rbd://bee/bi/bo/bum',
                         [{'url': 'rbd://bee/bi/bo/bum'},
                          {'url': 'rbd://fee/fi/fo/fum'}])
            volume = {'name': 'vol1'}
            image_meta = mock.sentinel.image_meta
            image_service = mock.sentinel.image_service

            actual = driver.clone_image(self.context,
                                        volume,
                                        image_loc,
                                        image_meta,
                                        image_service)

            self.assertEqual(expected, actual)
            self.assertEqual(2, mock_is_cloneable.call_count)
            mock_clone.assert_called_once_with(volume,
                                               'fi', 'fo', 'fum')
            mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
                                                 image_meta)
            mock_resize.assert_called_once_with(volume)

    def test_clone_multilocation_failure(self):
        expected = ({}, False)
@@ -1267,24 +1267,24 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
                mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
                mock.patch.object(self.volume.driver, '_resize') \
                    as mock_resize:
            image_loc = ('rbd://bee/bi/bo/bum',
                         [{'url': 'rbd://bee/bi/bo/bum'},
                          {'url': 'rbd://fee/fi/fo/fum'}])

            volume = {'name': 'vol1'}
            image_meta = mock.sentinel.image_meta
            image_service = mock.sentinel.image_service

            actual = driver.clone_image(self.context,
                                        volume,
                                        image_loc,
                                        image_meta,
                                        image_service)

            self.assertEqual(expected, actual)
            self.assertEqual(2, mock_is_cloneable.call_count)
            mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
                                              image_meta)
            mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
                                              image_meta)
            self.assertFalse(mock_clone.called)
            self.assertFalse(mock_resize.called)


@@ -1376,7 +1376,7 @@ class IsBlkDeviceTestCase(test.TestCase):
class WrongException(Exception):
    pass


class TestRetryDecorator(test.TestCase):


@@ -945,7 +945,7 @@ class VolumeOpsTestCase(test.TestCase):
                del obj.eagerlyScrub
            elif (type == "ns0:VirtualMachineRelocateSpec" and
                    delete_disk_attribute):
                del obj.disk
            else:
                pass
            return obj


@@ -59,25 +59,25 @@ CONF.register_opts(d_opts)
def _authenticated(func):
    """Ensure the driver is authenticated to make a request.

    In do_setup() we fetch an auth token and store it. If that expires when
    we do API request, we'll fetch a new one.
    """
    def func_wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except exception.NotAuthorized:
            # Prevent recursion loop. After the self arg is the
            # resource_type arg from _issue_api_request(). If attempt to
            # login failed, we should just give up.
            if args[0] == 'login':
                raise

            # Token might've expired, get a new one, try again.
            self._login()
            return func(self, *args, **kwargs)
    return func_wrapper

class DateraDriver(san.SanISCSIDriver):
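
The _authenticated decorator above is what makes each Datera API call self-healing on token expiry: one re-login, one retry, and a guard so a failed 'login' request cannot recurse. A rough, self-contained sketch of that control flow — the class and method names below are invented for illustration; only the 'login' guard and the _login() retry mirror the driver:

class NotAuthorized(Exception):
    pass


def authenticated(func):
    """Retry a call once after re-login when auth has expired."""
    def func_wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except NotAuthorized:
            if args[0] == 'login':
                raise  # a failed login must not trigger another login
            self._login()  # refresh the token, then retry once
            return func(self, *args, **kwargs)
    return func_wrapper


class FakeClient(object):
    def __init__(self):
        self.token_valid = False

    def _login(self):
        self.token_valid = True

    @authenticated
    def request(self, resource_type):
        if not self.token_valid:
            raise NotAuthorized()
        return 'ok: %s' % resource_type


print(FakeClient().request('volumes'))  # re-logins transparently: 'ok: volumes'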


@@ -3830,7 +3830,7 @@ class EMCVMAXCommon(object):
                    self.conn, storageConfigservice,
                    memberInstanceNames, None, extraSpecs)
                for volumeRef in volumes:
                    volumeRef['status'] = 'deleted'
            except Exception:
                for volumeRef in volumes:
                    volumeRef['status'] = 'error_deleting'


@@ -126,7 +126,7 @@ class HnasBackend(object):
        """
        if (self.drv_configs['ssh_enabled'] == 'True' and
                self.drv_configs['cluster_admin_ip0'] is not None):
            util = 'SMU ' + cmd
        else:
            out, err = utils.execute(cmd,
                                     "-version",


@@ -134,7 +134,7 @@ class SmartX(object):
        else:
            opts['LUNType'] = 1

        if opts['thick_provisioning_support'] == 'true':
            opts['LUNType'] = 0

        return opts


@@ -169,11 +169,11 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
                initiator_info.get_child_content('initiator-name'))
            if initiator_set == initiator_set_for_igroup:
                igroup = initiator_group_info.get_child_content(
                    'initiator-group-name')
                lun_id = initiator_group_info.get_child_content(
                    'lun-id')
                return igroup, lun_id

        return None, None


@@ -191,9 +191,9 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
                    " on this storage family and ontap version.")))
        volume_type = na_utils.get_volume_type_from_volume(volume)
        if volume_type and 'qos_spec_id' in volume_type:
            raise exception.ManageExistingVolumeTypeMismatch(
                reason=_("QoS specs are not supported"
                         " on this storage family and ONTAP version."))

    def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
        """Set QoS policy on backend from volume type information."""


@@ -138,35 +138,35 @@ def round_down(value, precision):
def log_extra_spec_warnings(extra_specs):
    for spec in (set(extra_specs.keys() if extra_specs else []) &
                 set(OBSOLETE_SSC_SPECS.keys())):
        LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s '
                        'instead.'), {'old': spec,
                                      'new': OBSOLETE_SSC_SPECS[spec]})
    for spec in (set(extra_specs.keys() if extra_specs else []) &
                 set(DEPRECATED_SSC_SPECS.keys())):
        LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s '
                        'instead.'), {'old': spec,
                                      'new': DEPRECATED_SSC_SPECS[spec]})


def get_iscsi_connection_properties(lun_id, volume, iqn,
                                    address, port):

    properties = {}
    properties['target_discovered'] = False
    properties['target_portal'] = '%s:%s' % (address, port)
    properties['target_iqn'] = iqn
    properties['target_lun'] = int(lun_id)
    properties['volume_id'] = volume['id']
    auth = volume['provider_auth']
    if auth:
        (auth_method, auth_username, auth_secret) = auth.split()
        properties['auth_method'] = auth_method
        properties['auth_username'] = auth_username
        properties['auth_password'] = auth_secret
    return {
        'driver_volume_type': 'iscsi',
        'data': properties,
    }


def validate_qos_spec(qos_spec):
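
For orientation, get_iscsi_connection_properties above assembles the standard iSCSI connection dict that cinder hands back to attach code. A sketch of calling it, with invented values (the volume dict here is a minimal stand-in, not a real cinder volume object):

props = get_iscsi_connection_properties(
    lun_id=0,
    volume={'id': 'vol-1', 'provider_auth': None},
    iqn='iqn.1992-08.com.netapp:sn.example',
    address='10.0.0.5',
    port=3260)
# props == {'driver_volume_type': 'iscsi',
#           'data': {'target_discovered': False,
#                    'target_portal': '10.0.0.5:3260',
#                    'target_iqn': 'iqn.1992-08.com.netapp:sn.example',
#                    'target_lun': 0,
#                    'volume_id': 'vol-1'}}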


@@ -729,17 +729,17 @@ class HP3PARCommon(object):
                    if (not _convert_to_base and
                            isinstance(ex, hpexceptions.HTTPForbidden) and
                            ex.get_code() == 150):
                        # Error code 150 means 'invalid operation: Cannot grow
                        # this type of volume'.
                        # Suppress raising this exception because we can
                        # resolve it by converting it into a base volume.
                        # Afterwards, extending the volume should succeed, or
                        # fail with a different exception/error code.
                        ex_ctxt.reraise = False
                        model_update = self._extend_volume(
                            volume, volume_name,
                            growth_size_mib,
                            _convert_to_base=True)
                    else:
                        LOG.error(_LE("Error extending volume: %(vol)s. "
                                      "Exception: %(ex)s"),


@@ -125,7 +125,7 @@ class ScalityDriver(remotefs_drv.RemoteFSSnapDriver):
            parts = mount.split()
            if (parts[0].endswith('fuse') and
                    parts[1].rstrip('/') == mount_path):
                return True
        return False

    @lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)


@@ -1634,13 +1634,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                   'backup_id': backup['id']})
        self._download_vmdk(context, volume, backing, tmp_file_path)
        with open(tmp_file_path, "rb") as tmp_file:
            LOG.debug("Calling backup service to backup file: %s.",
                      tmp_file_path)
            backup_service.backup(backup, tmp_file)
            LOG.debug("Created backup: %(backup_id)s for volume: "
                      "%(name)s.",
                      {'backup_id': backup['id'],
                       'name': volume['name']})

    def _create_backing_from_stream_optimized_file(
            self, context, name, volume, tmp_file_path, file_size_bytes):
@@ -1787,35 +1787,35 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
        tmp_vmdk_name = uuidutils.generate_uuid()
        with self._temporary_file(suffix=".vmdk",
                                  prefix=tmp_vmdk_name) as tmp_file_path:
            LOG.debug("Using temporary file: %(tmp_path)s for restoring "
                      "backup: %(backup_id)s.",
                      {'tmp_path': tmp_file_path,
                       'backup_id': backup['id']})
            with open(tmp_file_path, "wb") as tmp_file:
                LOG.debug("Calling backup service to restore backup: "
                          "%(backup_id)s to file: %(tmp_path)s.",
                          {'backup_id': backup['id'],
                           'tmp_path': tmp_file_path})
                backup_service.restore(backup, volume['id'], tmp_file)
                LOG.debug("Backup: %(backup_id)s restored to file: "
                          "%(tmp_path)s.",
                          {'backup_id': backup['id'],
                           'tmp_path': tmp_file_path})
            self._restore_backing(context, volume, backing, tmp_file_path,
                                  backup['size'] * units.Gi)

            if backup['size'] < volume['size']:
                # Current backing size is backup size.
                LOG.debug("Backup size: %(backup_size)d is less than "
                          "volume size: %(vol_size)d; extending volume.",
                          {'backup_size': backup['size'],
                           'vol_size': volume['size']})
                self.extend_volume(volume, volume['size'])

            LOG.debug("Backup: %(backup_id)s restored to volume: "
                      "%(name)s.",
                      {'backup_id': backup['id'],
                       'name': volume['name']})


class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):


@@ -324,7 +324,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver):
        if ('origin' in lun2del and
                lun2del['origin']['project'] == lcfg.zfssa_cache_project):
            self._check_origin(lun2del, volume['name'])

    def create_snapshot(self, snapshot):
        """Creates a snapshot of a volume.


@@ -772,9 +772,9 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                # Update the newly created volume db entry before we clone it
                # for the image-volume creation.
                if model_update:
                    volume_ref = self.db.volume_update(context,
                                                       volume_ref['id'],
                                                       model_update)
                self.manager._create_image_cache_volume_entry(internal_context,
                                                              volume_ref,
                                                              image_id,


@@ -3077,10 +3077,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                    metadata['key']: metadata['value']
                    for metadata in volume.get('volume_metadata')}
            elif key == 'admin_metadata':
                model_update_new[key] = {
                    metadata['key']: metadata['value']
                    for metadata in volume.get('volume_admin_metadata')}
            else:
                model_update_new[key] = volume[key]
        self.db.volume_update(ctxt.elevated(), new_volume['id'],


@@ -145,10 +145,10 @@ class TgtAdm(iscsi.ISCSITarget):
    @utils.retry(putils.ProcessExecutionError)
    def _do_tgt_update(self, name):
        (out, err) = utils.execute('tgt-admin', '--update', name,
                                   run_as_root=True)
        LOG.debug("StdOut from tgt-admin --update: %s", out)
        LOG.debug("StdErr from tgt-admin --update: %s", err)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):