Fix bad indentation in netapp and san.hp volume drivers

Closes-Bug: #1356223

Change-Id: I4e9fb3c6b3e098998f41e266b2cfc51a4af1fc65
Signed-off-by: Danny Al-Gaaf <danny.al-gaaf@bisect.de>
This commit is contained in:
Danny Al-Gaaf 2014-08-13 09:49:06 +02:00
parent 03884b3d64
commit 58908e6d6c
7 changed files with 97 additions and 97 deletions

View File

@ -143,8 +143,8 @@ class NetAppDriverFactory(object):
def check_netapp_driver(location):
    """Checks if the driver requested is a netapp driver.

    :param location: dotted module path of the requested driver
    :raises exception.InvalidInput: if *location* is not a path inside
        the netapp driver package (i.e. does not contain ".netapp.")
    """
    # str.find returns -1 when the substring is absent.
    if location.find(".netapp.") == -1:
        raise exception.InvalidInput(
            reason=_("Only loading netapp drivers supported."))
class Deprecated(driver.VolumeDriver):

View File

@ -124,13 +124,13 @@ class RestClient(WebserviceClient):
" verify: %(v)s, kwargs: %(k)s." % (params))
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
data = json.dumps(data) if data else None
res = self.invoke_service(method, url, data=data,
headers=headers,
timeout=timeout, verify=verify)
return res.json() if res.text else None
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
data = json.dumps(data) if data else None
res = self.invoke_service(method, url, data=data,
headers=headers,
timeout=timeout, verify=verify)
return res.json() if res.text else None
else:
raise exception.NetAppDriverException(
_("Content type not supported."))

View File

@ -1223,7 +1223,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
if avl_vol['name'] in self.volume_list:
return avl_vol
elif self._get_vol_option(avl_vol['name'], 'root') != 'true':
return avl_vol
return avl_vol
return None
def _get_igroup_by_initiator(self, initiator):

View File

@ -272,13 +272,13 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _spawn_clean_cache_job(self):
    """Spawns a clean task if not running.

    Uses the instance attribute ``cleaning`` as a simple busy flag:
    if a cleanup is already in progress, return immediately;
    otherwise mark the driver as cleaning and fire the cache-clean
    task on a background Timer.
    """
    if getattr(self, 'cleaning', None):
        LOG.debug('Image cache cleaning in progress. Returning... ')
        return
    else:
        # set cleaning to True
        self.cleaning = True
        # Timer(0, ...) runs the cleanup asynchronously, right away.
        t = Timer(0, self._clean_image_cache)
        t.start()
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
@ -352,9 +352,9 @@ class NetAppNFSDriver(nfs.NfsDriver):
return True
return False
if _do_delete():
bytes_to_free = bytes_to_free - int(f[1])
if bytes_to_free <= 0:
return
bytes_to_free = bytes_to_free - int(f[1])
if bytes_to_free <= 0:
return
def _delete_file(self, path):
"""Delete file from disk and return result as boolean."""

View File

@ -418,44 +418,44 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
@utils.synchronized(lock_pr)
def refresh_stale_ssc():
    """Refresh ssc attributes for the backend's stale volumes.

    Runs under the per-backend lock. Volumes that still exist get
    their ssc attributes refreshed; volumes that no longer report
    ssc data are treated as expired and dropped from every bucket.
    """
    stale_vols = backend._update_stale_vols(reset=True)
    LOG.info(_('Running stale ssc refresh job for %(server)s'
               ' and vserver %(vs)s')
             % {'server': na_server, 'vs': vserver})
    # refreshing single volumes can create inconsistency
    # hence doing manipulations on copy
    ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
    refresh_vols = set()
    expired_vols = set()
    for vol in stale_vols:
        name = vol.id['name']
        res = get_cluster_vols_with_ssc(na_server, vserver, name)
        if res:
            refresh_vols.add(res.pop())
        else:
            expired_vols.add(vol)
    for vol in refresh_vols:
        for k in ssc_vols_copy:
            vol_set = ssc_vols_copy[k]
            # Remove the stale entry, then re-add the refreshed volume
            # to every bucket whose ssc attribute it still satisfies.
            vol_set.discard(vol)
            if k == "mirrored" and vol.mirror.get('mirrored'):
                vol_set.add(vol)
            if k == "dedup" and vol.sis.get('dedup'):
                vol_set.add(vol)
            if k == "compression" and vol.sis.get('compression'):
                vol_set.add(vol)
            if k == "thin" and vol.space.get('thin_provisioned'):
                vol_set.add(vol)
            if k == "all":
                vol_set.add(vol)
    for vol in expired_vols:
        # Expired volumes are removed from every bucket.
        for k in ssc_vols_copy:
            vol_set = ssc_vols_copy[k]
            vol_set.discard(vol)
    backend.refresh_ssc_vols(ssc_vols_copy)
    LOG.info(_('Successfully completed stale refresh job for'
               ' %(server)s and vserver %(vs)s')
             % {'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
@ -503,8 +503,8 @@ def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
raise exception.InvalidInput(reason=_("Backend server not NaServer."))
delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
if getattr(backend, 'ssc_job_running', None):
LOG.warn(_('ssc job in progress. Returning... '))
return
LOG.warn(_('ssc job in progress. Returning... '))
return
elif (getattr(backend, 'ssc_run_time', None) is None or
(backend.ssc_run_time and
timeutils.is_newer_than(backend.ssc_run_time, delta_secs))):
@ -515,8 +515,8 @@ def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
args=[backend, na_server, vserver])
t.start()
elif getattr(backend, 'refresh_stale_running', None):
LOG.warn(_('refresh stale ssc job in progress. Returning... '))
return
LOG.warn(_('refresh stale ssc job in progress. Returning... '))
return
else:
if backend.stale_vols:
if synchronous:

View File

@ -194,32 +194,32 @@ def invoke_api(na_server, api_name, api_family='cm', query=None,
def create_api_request(api_name, query=None, des_result=None,
                       additional_elems=None, is_iter=False,
                       record_step=50, tag=None):
    """Creates a NetApp api request.

    :param api_name: api name string
    :param query: api query as dict
    :param des_result: desired result as dict
    :param additional_elems: dict other than query and des_result
    :param is_iter: is iterator api
    :param record_step: records at a time for iter api
    :param tag: next tag for iter api
    """
    api_el = NaElement(api_name)
    if query:
        query_el = NaElement('query')
        query_el.translate_struct(query)
        api_el.add_child_elem(query_el)
    if des_result:
        res_el = NaElement('desired-attributes')
        res_el.translate_struct(des_result)
        api_el.add_child_elem(res_el)
    if additional_elems:
        api_el.translate_struct(additional_elems)
    if is_iter:
        api_el.add_new_child('max-records', str(record_step))
    # NOTE(review): with indentation stripped in the diff it is ambiguous
    # whether this guard nests under is_iter; kept at top level to match
    # the sequential guard style of the other branches — confirm upstream.
    if tag:
        api_el.add_new_child('tag', tag, True)
    return api_el
def to_bool(val):
@ -229,7 +229,7 @@ def to_bool(val):
if (strg == 'true' or strg == 'y'
or strg == 'yes' or strg == 'enabled'
or strg == '1'):
return True
return True
else:
return False
else:

View File

@ -412,16 +412,16 @@ class HP3PARCommon(object):
if (not _convert_to_base and
isinstance(ex, hpexceptions.HTTPForbidden) and
ex.get_code() == 150):
# Error code 150 means 'invalid operation: Cannot grow
# this type of volume'.
# Suppress raising this exception because we can
# resolve it by converting it into a base volume.
# Afterwards, extending the volume should succeed, or
# fail with a different exception/error code.
ex_ctxt.reraise = False
self._extend_volume(volume, volume_name,
growth_size_mib,
_convert_to_base=True)
# Error code 150 means 'invalid operation: Cannot grow
# this type of volume'.
# Suppress raising this exception because we can
# resolve it by converting it into a base volume.
# Afterwards, extending the volume should succeed, or
# fail with a different exception/error code.
ex_ctxt.reraise = False
self._extend_volume(volume, volume_name,
growth_size_mib,
_convert_to_base=True)
else:
LOG.error(_("Error extending volume: %(vol)s. "
"Exception: %(ex)s") %