Fixed indentation

Some places in the code used wrong indentation.
In this patchset, I corrected them.

TrivialFix

Change-Id: If08d2a1c4f183ac0b98b14e010571c2a12457d21
Gábor Antal 2016-09-05 20:00:36 +02:00
parent 89f369ce61
commit 27530c02fb
10 changed files with 76 additions and 80 deletions
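
A note on why changes like this land as a TrivialFix: Python accepts any block indentation as long as it is internally consistent, so most of the over-indented blocks in the hunks below ran correctly and are caught only by reviewers or by style checkers (OpenStack projects typically gate on flake8 via tox -e pep8). A minimal illustration, not taken from this commit:

def tidy(flag):
    if flag:
        return 1          # conventional 4-space step
    return 0


def sloppy(flag):
    if flag:
            return 1      # over-indented, yet still legal Python
    return 0


assert tidy(True) == sloppy(True)
assert tidy(False) == sloppy(False)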

View File

@@ -165,7 +165,7 @@ class VolumeTypeEncryptionController(wsgi.Controller):
             raise webob.exc.HTTPBadRequest(explanation=expl)
         else:
             # Not found exception will be handled at the wsgi level
-                db.volume_type_encryption_delete(context, type_id)
+            db.volume_type_encryption_delete(context, type_id)
 
         return webob.Response(status_int=202)

View File

@@ -63,7 +63,7 @@ class Controller(volume_meta_v2.Controller):
     def update(self, req, volume_id, id, body):
         self._ensure_min_version(req, METADATA_MICRO_VERSION)
         if not self._validate_etag(req, volume_id):
-                return webob.Response(status_int=412)
+            return webob.Response(status_int=412)
         return super(Controller, self).update(req, volume_id,
                                               id, body)
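
The hunk above guards a metadata update with an ETag check and short-circuits with HTTP 412 (Precondition Failed) when the check fails. For reference, webob builds the full status line from the integer code; a small standalone check, assuming the webob package is installed:

import webob

resp = webob.Response(status_int=412)
print(resp.status)  # '412 Precondition Failed'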

View File

@@ -1655,7 +1655,7 @@ class RestClient(object):
         qos_flag = 0
         extra_flag = False
         if 'LATENCY' not in qos and items['LATENCY'] != '0':
-                extra_flag = True
+            extra_flag = True
         else:
             for item in items:
                 if item in extra_qos:

View File

@@ -2139,9 +2139,9 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                     command,
                     check_exit_code=check_exit_code)
             except Exception as e:
-                    LOG.error(_LE('Error has occurred: %s'), e)
-                    last_exception = e
-                    greenthread.sleep(self.DEFAULT_GR_SLEEP)
+                LOG.error(_LE('Error has occurred: %s'), e)
+                last_exception = e
+                greenthread.sleep(self.DEFAULT_GR_SLEEP)
         try:
             raise processutils.ProcessExecutionError(
                 exit_code=last_exception.exit_code,
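
The Storwize hunk re-indents the body of an except handler inside a retry loop; whether a statement belongs to the handler is decided by indentation alone. A generic sketch of the same pattern (hypothetical helper, not the driver's code):

import time


def run_with_retries(op, attempts=3, delay=0.1):
    """Call op(), sleeping between failed attempts; re-raise the last error."""
    last_exception = None
    for _ in range(attempts):
        try:
            return op()
        except Exception as exc:
            last_exception = exc
            time.sleep(delay)  # inside the handler purely by indentation
    raise last_exception


print(run_with_retries(lambda: 42))  # succeeds on the first attempt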

View File

@@ -1313,12 +1313,12 @@ class Client(client_base.Client):
         for storage_disk_info in attributes_list.get_children():
-                disk_raid_info = storage_disk_info.get_child_by_name(
-                    'disk-raid-info') or netapp_api.NaElement('none')
-                disk_type = disk_raid_info.get_child_content(
-                    'effective-disk-type')
-                if disk_type:
-                    disk_types.add(disk_type)
+            disk_raid_info = storage_disk_info.get_child_by_name(
+                'disk-raid-info') or netapp_api.NaElement('none')
+            disk_type = disk_raid_info.get_child_content(
+                'effective-disk-type')
+            if disk_type:
+                disk_types.add(disk_type)
 
         return disk_types

View File

@@ -724,10 +724,10 @@ class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
         for vol in vol_entries:
             nfs_share = vol['provider_location']
             if ((nfs_share in self.shares) and
-                (self._get_nfs_server_version(nfs_share) < 4)):
-                    sub_share, mnt_path = self._get_subshare_mount_point(
-                        nfs_share, vol)
-                    self._ensure_share_mounted(sub_share, mnt_path)
+                    (self._get_nfs_server_version(nfs_share) < 4)):
+                sub_share, mnt_path = self._get_subshare_mount_point(
+                    nfs_share, vol)
+                self._ensure_share_mounted(sub_share, mnt_path)
 
     def _get_nfs_server_version(self, share):
         if not self.nfs_versions.get(share):
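
The Nexenta hunk untangles a multi-line if condition whose continuation sat at a depth that blurred into the statements it guards; pycodestyle flags that pattern as E129 ("visually indented line with same indent as next logical line"). A contrived before/after, unrelated to the driver:

def confusing(a, b):
    if (a and
        b):        # continuation lines up with the body below it
        return True
    return False


def clearer(a, b):
    if (a and
            b):    # extra indent keeps the condition visually distinct
        return True
    return False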

View File

@@ -1387,63 +1387,63 @@ class PureBaseVolumeDriver(san.SanDriver):
     @pure_driver_debug_trace
     def _setup_replicated_pgroups(self, primary, secondaries, pg_name,
                                   replication_interval, retention_policy):
-            self._create_protection_group_if_not_exist(
-                primary, pg_name)
+        self._create_protection_group_if_not_exist(
+            primary, pg_name)
 
-            # Apply retention policies to a protection group.
-            # These retention policies will be applied on the replicated
-            # snapshots on the target array.
-            primary.set_pgroup(pg_name, **retention_policy)
+        # Apply retention policies to a protection group.
+        # These retention policies will be applied on the replicated
+        # snapshots on the target array.
+        primary.set_pgroup(pg_name, **retention_policy)
 
-            # Configure replication propagation frequency on a
-            # protection group.
-            primary.set_pgroup(pg_name,
-                               replicate_frequency=replication_interval)
-            for target_array in secondaries:
-                try:
-                    # Configure PG to replicate to target_array.
-                    primary.set_pgroup(pg_name,
-                                       addtargetlist=[target_array.array_name])
-                except purestorage.PureHTTPError as err:
-                    with excutils.save_and_reraise_exception() as ctxt:
-                        if err.code == 400 and (
-                                ERR_MSG_ALREADY_INCLUDES
-                                in err.text):
-                            ctxt.reraise = False
-                            LOG.info(_LI("Skipping add target %(target_array)s"
-                                         " to protection group %(pgname)s"
-                                         " since it's already added."),
-                                     {"target_array": target_array.array_name,
-                                      "pgname": pg_name})
+        # Configure replication propagation frequency on a
+        # protection group.
+        primary.set_pgroup(pg_name,
+                           replicate_frequency=replication_interval)
+        for target_array in secondaries:
+            try:
+                # Configure PG to replicate to target_array.
+                primary.set_pgroup(pg_name,
+                                   addtargetlist=[target_array.array_name])
+            except purestorage.PureHTTPError as err:
+                with excutils.save_and_reraise_exception() as ctxt:
+                    if err.code == 400 and (
+                            ERR_MSG_ALREADY_INCLUDES
+                            in err.text):
+                        ctxt.reraise = False
+                        LOG.info(_LI("Skipping add target %(target_array)s"
+                                     " to protection group %(pgname)s"
+                                     " since it's already added."),
+                                 {"target_array": target_array.array_name,
+                                  "pgname": pg_name})
 
-            # Wait until "Target Group" setting propagates to target_array.
-            pgroup_name_on_target = self._get_pgroup_name_on_target(
-                primary.array_name, pg_name)
+        # Wait until "Target Group" setting propagates to target_array.
+        pgroup_name_on_target = self._get_pgroup_name_on_target(
+            primary.array_name, pg_name)
 
-            for target_array in secondaries:
-                self._wait_until_target_group_setting_propagates(
-                    target_array,
-                    pgroup_name_on_target)
-                try:
-                    # Configure the target_array to allow replication from the
-                    # PG on source_array.
-                    target_array.set_pgroup(pgroup_name_on_target,
-                                            allowed=True)
-                except purestorage.PureHTTPError as err:
-                    with excutils.save_and_reraise_exception() as ctxt:
-                        if (err.code == 400 and
-                                ERR_MSG_ALREADY_ALLOWED in err.text):
-                            ctxt.reraise = False
-                            LOG.info(_LI("Skipping allow pgroup %(pgname)s on "
-                                         "target array %(target_array)s since "
-                                         "it is already allowed."),
-                                     {"pgname": pg_name,
-                                      "target_array": target_array.array_name})
+        for target_array in secondaries:
+            self._wait_until_target_group_setting_propagates(
+                target_array,
+                pgroup_name_on_target)
+            try:
+                # Configure the target_array to allow replication from the
+                # PG on source_array.
+                target_array.set_pgroup(pgroup_name_on_target,
+                                        allowed=True)
+            except purestorage.PureHTTPError as err:
+                with excutils.save_and_reraise_exception() as ctxt:
+                    if (err.code == 400 and
+                            ERR_MSG_ALREADY_ALLOWED in err.text):
+                        ctxt.reraise = False
+                        LOG.info(_LI("Skipping allow pgroup %(pgname)s on "
+                                     "target array %(target_array)s since "
+                                     "it is already allowed."),
+                                 {"pgname": pg_name,
+                                  "target_array": target_array.array_name})
 
-            # Wait until source array acknowledges previous operation.
-            self._wait_until_source_array_allowed(primary, pg_name)
-            # Start replication on the PG.
-            primary.set_pgroup(pg_name, replicate_enabled=True)
+        # Wait until source array acknowledges previous operation.
+        self._wait_until_source_array_allowed(primary, pg_name)
+        # Start replication on the PG.
+        primary.set_pgroup(pg_name, replicate_enabled=True)
 
     @pure_driver_debug_trace
     def _generate_replication_retention(self):
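
In _setup_replicated_pgroups the final set_pgroup calls must run once, after the per-target loops, so an indentation fix here has to preserve depth exactly; four extra spaces would silently move work into the loop. A self-contained sketch of that hazard (hypothetical names):

calls = []


def allow(target):
    calls.append(('allow', target))


def enable_replication():
    calls.append(('enable',))


def setup(targets):
    for target in targets:
        allow(target)         # once per target
    enable_replication()      # once, after the loop; indenting this line
                              # four more spaces would run it per target


setup(['A', 'B'])
assert calls == [('allow', 'A'), ('allow', 'B'), ('enable',)]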

View File

@@ -493,12 +493,8 @@ class TegileISCSIDriver(TegileIntelliFlashVolumeDriver, san.SanISCSIDriver):
         """Driver entry point to attach a volume to an instance."""
         if getattr(self.configuration, 'use_chap_auth', False):
-            chap_username = getattr(self.configuration,
-                                    'chap_username',
-                                    '')
-            chap_password = getattr(self.configuration,
-                                    'chap_password',
-                                    '')
+            chap_username = getattr(self.configuration, 'chap_username', '')
+            chap_password = getattr(self.configuration, 'chap_password', '')
         else:
             chap_username = ''
             chap_password = ''
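
The Tegile hunk also collapses two wrapped getattr calls onto single lines; the three-argument form returns the default when the attribute is missing, which is what lets the driver read optional CHAP settings without try/except. A quick standalone check (FakeConfiguration is a stand-in, not driver code):

class FakeConfiguration(object):
    use_chap_auth = True


conf = FakeConfiguration()
assert getattr(conf, 'use_chap_auth', False) is True
assert getattr(conf, 'chap_username', '') == ''  # absent -> default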

View File

@@ -1085,6 +1085,6 @@ class V7000Common(object):
         copying the lun is not possible.
         """
         if lun_type != CONCERTO_LUN_TYPE_THICK:
-                msg = _('Lun copy currently only supported for thick luns')
-                LOG.error(msg)
-                raise exception.ViolinBackendErr(message=msg)
+            msg = _('Lun copy currently only supported for thick luns')
+            LOG.error(msg)
+            raise exception.ViolinBackendErr(message=msg)

View File

@@ -457,9 +457,9 @@ class VolumeManager(manager.SchedulerDependentManager):
                 volume.status = 'error'
                 volume.save()
             elif volume.status == 'uploading':
-                    # Set volume status to available or in-use.
-                    self.db.volume_update_status_based_on_attachment(
-                        ctxt, volume.id)
+                # Set volume status to available or in-use.
+                self.db.volume_update_status_based_on_attachment(
+                    ctxt, volume.id)
             else:
                 pass
             snapshots = objects.SnapshotList.get_by_host(