Fix incorrect reraising of exceptions
There are several places in the code where exception handling re-raises the exception variable ("raise ex") rather than just calling a bare "raise". This rebuilds the traceback from the point of the re-raise, so the original traceback is lost. I had considered adding a hacking check for this, but that becomes a little tricky: there are valid places where "raise ex" is used that would trip a simple check.

Change-Id: Ib2bd745c7ef600c514a94c3fd638d15d17a623a2
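For illustration, a minimal sketch of the difference (function names are hypothetical, not code from this patch); on Python 2, where this code runs, "raise ex" restarts the traceback at the raise statement, while a bare "raise" preserves the original one:

    def fail():
        raise ValueError("boom")   # the frame we want in the traceback

    def reraise_variable():
        try:
            fail()
        except ValueError as ex:
            raise ex   # traceback now starts here; the fail() frame is lost

    def reraise_bare():
        try:
            fail()
        except ValueError:
            raise      # original traceback, including fail(), is preserved

    # A legitimate use of "raise ex" that a naive hacking check would flag:
    # the exception is saved and re-raised outside the except block.
    def raise_later():
        saved = None
        try:
            fail()
        except ValueError as ex:
            saved = ex
        if saved is not None:
            raise saved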
@@ -452,7 +452,7 @@ class CommandLineHelper(object):
                         self.CLI_RESP_PATTERN_LUN_NOT_EXIST) >= 0:
                     return False
                 else:
-                    raise ex
+                    raise
             return _lun_state_validation(data)
 
         self._wait_for_a_condition(lun_is_ready,
@@ -2428,7 +2428,7 @@ class EMCVnxCliBase(object):
                 self._client.connect_host_to_storage_group(
                     hostname, storage_group)
             else:
-                raise ex
+                raise
         return hostname
 
     def get_lun_owner(self, volume):
@@ -2629,7 +2629,7 @@ class EMCVnxCliBase(object):
                 poll=False)
         except exception.EMCVnxCLICmdError as ex:
             if ex.kwargs["rc"] != 83:
-                raise ex
+                raise
             # Storage Group has not existed yet
             self.assure_storage_group(hostname)
             if self.itor_auto_reg:
@@ -332,7 +332,7 @@ class Client(client_base.Client):
         except netapp_api.NaApiError as e:
             if e.code != 'UnknownCloneId':
                 self._clear_clone(clone_id)
-            raise e
+            raise
 
     def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
         """Waits till a clone operation is complete or errored out."""
@@ -206,13 +206,13 @@ class NetAppNfsDriver(nfs.NfsDriver):
             if vol_size != src_vol_size:
                 try:
                     self.extend_volume(volume, vol_size)
-                except Exception as e:
+                except Exception:
                     LOG.error(
                         _LE("Resizing %s failed. Cleaning volume."),
                         volume.name)
                     self._execute('rm', path,
                                   run_as_root=self._execute_as_root)
-                    raise e
+                    raise
         else:
             raise exception.CinderException(
                 _("NFS file %s not discovered.") % volume['name'])
@@ -154,7 +154,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
                 msg = _('%s is not installed') % package
                 raise exception.NfsException(msg)
             else:
-                raise exc
+                raise
 
         # Now that all configuration data has been loaded (shares),
         # we can "set" our final NAS file security options.
@@ -117,7 +117,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
                 raise exception.VolumeDriverException(
                     'mount.quobyte is not installed')
             else:
-                raise exc
+                raise
 
     def set_nas_security_options(self, is_new_cinder_install):
         self.configuration.nas_secure_file_operations = 'true'
@@ -494,10 +494,10 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
             LOG.debug("creating snapshot='%s'", clone_snap)
             src_volume.create_snap(clone_snap)
             src_volume.protect_snap(clone_snap)
-        except Exception as exc:
+        except Exception:
             # Only close if exception since we still need it.
             src_volume.close()
-            raise exc
+            raise
 
         # Now clone source volume snapshot
         try:
@@ -508,10 +508,10 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
             self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
                                   client.ioctx, dest_name,
                                   features=client.features)
-        except Exception as exc:
+        except Exception:
             src_volume.unprotect_snap(clone_snap)
             src_volume.remove_snap(clone_snap)
-            raise exc
+            raise
         finally:
             src_volume.close()
 
@@ -1259,8 +1259,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
                 snapshot['volume_id'],
                 connection_info)
             LOG.debug('nova call result: %s', result)
-        except Exception as e:
-            LOG.error(_LE('Call to Nova to create snapshot failed %s'), e)
+        except Exception:
+            LOG.exception(_LE('Call to Nova to create snapshot failed'))
             raise
 
         # Loop and wait for result
@@ -1344,8 +1344,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
                 context,
                 snapshot['id'],
                 delete_info)
-        except Exception as e:
-            LOG.error(_LE('Call to Nova delete snapshot failed %s'), e)
+        except Exception:
+            LOG.exception(_LE('Call to Nova delete snapshot failed'))
             raise
 
         # Loop and wait for result
@@ -185,9 +185,9 @@ class RestClientURL(object):
                 message="REST Not Available: \
                         Please Upgrade")
 
-        except RestClientError as err:
+        except RestClientError:
             del self.headers['authorization']
-            raise err
+            raise
 
     def login(self, auth_str):
         """Login to an appliance using a user name and password.
@@ -97,7 +97,7 @@ class ZFSSANFSDriver(nfs.NfsDriver):
                 msg = _('%s is not installed') % package
                 raise exception.NfsException(msg)
             else:
-                raise exc
+                raise
 
         lcfg = self.configuration
         LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)