Browse Source

Replace retrying with tenacity

We are replacing all usages of the 'retrying' package with
'tenacity' as the author of retrying is not actively maintaining
the project. Tenacity is a fork of retrying, but has improved the
interface and extensibility (see [1] for more details). Our end
goal here is removing the retrying package from our requirements.

Tenacity provides the same functionality as retrying, but has the
following major differences to account for:
- Tenacity uses seconds rather than ms as retrying did
  (the retry interface in manila exposed time in seconds as well)
- Tenacity has different kwargs for the decorator and
Retrying class itself.
- Tenacity has a different approach for retrying args by
using classes for its stop/wait/retry kwargs.
- By default tenacity raises a RetryError if a retried callable
exhausts its retry attempts; retrying raises the last exception from the callable.
Tenacity provides backwards compatibility here by offering
the 'reraise' kwarg - we are going to set this in the retry interface
by default.
- For retries that check a result, tenacity will raise if the
retried function raises, whereas retrying retried on all
exceptions - we haven't exposed this in the retry interface.

This patch updates all usages of retrying with tenacity.
Unit tests are added where applicable.

[1] https://github.com/jd/tenacity

Co-Authored-By: boden <bodenvmw@gmail.com>
Co-Authored-By: Goutham Pacha Ravi <gouthampravi@gmail.com>
Closes-Bug: #1635393
Change-Id: Ia0c3fa5cd82356a33becbf57444f3db5ffbb0dd0
Signed-off-by: Goutham Pacha Ravi <gouthampravi@gmail.com>
changes/11/801911/8
ashrod98 5 months ago
committed by Goutham Pacha Ravi
parent
commit
903aab1920
  1. 2
      lower-constraints.txt
  2. 10
      manila/data/helper.py
  3. 2
      manila/data/utils.py
  4. 2
      manila/network/neutron/neutron_network_plugin.py
  5. 3
      manila/share/drivers/container/security_service_helper.py
  6. 32
      manila/share/drivers/dell_emc/plugins/powermax/object_manager.py
  7. 32
      manila/share/drivers/dell_emc/plugins/vnx/object_manager.py
  8. 11
      manila/share/drivers/generic.py
  9. 8
      manila/share/drivers/hitachi/hnas/ssh.py
  10. 4
      manila/share/drivers/hitachi/hsp/rest.py
  11. 8
      manila/share/drivers/huawei/v3/connection.py
  12. 3
      manila/share/drivers/lvm.py
  13. 6
      manila/share/drivers/netapp/dataontap/client/client_cmode.py
  14. 30
      manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py
  15. 18
      manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
  16. 2
      manila/share/drivers/nexenta/ns4/jsonrpc.py
  17. 2
      manila/share/drivers/qnap/api.py
  18. 8
      manila/share/drivers/qnap/qnap.py
  19. 2
      manila/share/drivers/tegile/tegile.py
  20. 2
      manila/share/drivers/windows/winrm_helper.py
  21. 4
      manila/share/drivers/zfsonlinux/driver.py
  22. 2
      manila/share/drivers/zfsonlinux/utils.py
  23. 4
      manila/share/manager.py
  24. 2
      manila/share/migration.py
  25. 8
      manila/tests/share/test_manager.py
  26. 144
      manila/tests/test_utils.py
  27. 97
      manila/utils.py
  28. 2
      requirements.txt

2
lower-constraints.txt

@@ -120,7 +120,7 @@ statsd==3.2.2
stestr==3.0.1
stevedore==3.2.2
Tempita==0.5.2
tenacity==6.0.0
tenacity==6.3.1
testrepository==0.0.20
testresources==2.0.1
testscenarios==0.4

10
manila/data/helper.py

@@ -235,12 +235,18 @@ class DataServiceHelper(object):
return access_list
@utils.retry(exception.NotFound, 0.1, 10, 0.1)
@utils.retry(retry_param=exception.NotFound,
interval=1,
retries=10,
backoff_rate=1)
def _check_dir_exists(self, path):
if not os.path.exists(path):
raise exception.NotFound("Folder %s could not be found." % path)
@utils.retry(exception.Found, 0.1, 10, 0.1)
@utils.retry(retry_param=exception.Found,
interval=1,
retries=10,
backoff_rate=1)
def _check_dir_not_exists(self, path):
if os.path.exists(path):
raise exception.Found("Folder %s was found." % path)

2
manila/data/utils.py

@@ -145,7 +145,7 @@ class Copy(object):
self.current_size += int(size)
LOG.info(self.get_progress())
@utils.retry(exception.ShareDataCopyFailed, retries=2)
@utils.retry(retry_param=exception.ShareDataCopyFailed, retries=2)
def _copy_and_validate(self, src_item, dest_item):
utils.execute("cp", "-P", "--preserve=all", src_item,
dest_item, run_as_root=True)

2
manila/network/neutron/neutron_network_plugin.py

@@ -531,7 +531,7 @@ class NeutronBindNetworkPlugin(NeutronNetworkPlugin):
self._wait_for_ports_bind(ports, share_server)
return ports
@utils.retry(exception.NetworkBindException, retries=20)
@utils.retry(retry_param=exception.NetworkBindException, retries=20)
def _wait_for_ports_bind(self, ports, share_server):
inactive_ports = []
for port in ports:

3
manila/share/drivers/container/security_service_helper.py

@@ -80,7 +80,8 @@ class SecurityServiceHelper(driver.ExecuteMixin):
interval = 5
retries = int(timeout / interval) or 1
@manila_utils.retry(exception.ProcessExecutionError, interval=interval,
@manila_utils.retry(retry_param=exception.ProcessExecutionError,
interval=interval,
retries=retries, backoff_rate=1)
def try_ldap_operation():
try:

32
manila/share/drivers/dell_emc/plugins/powermax/object_manager.py

@@ -144,7 +144,7 @@ class StorageObject(object):
)
)
@utils.retry(exception.EMCPowerMaxLockRequiredException)
@utils.retry(retry_param=exception.EMCPowerMaxLockRequiredException)
def _send_request(self, req):
req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8')
@@ -161,7 +161,7 @@ class StorageObject(object):
return response
@utils.retry(exception.EMCPowerMaxLockRequiredException)
@utils.retry(retry_param=exception.EMCPowerMaxLockRequiredException)
def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False):
"""Execute NAS command via SSH.
@@ -218,7 +218,7 @@ class FileSystem(StorageObject):
super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager)
self.filesystem_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, name, size, pool_name, mover_name, is_vdm=True):
pool_id = self.get_context('StoragePool').get_id(pool_name)
@@ -548,7 +548,7 @@ class MountPoint(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, mount_path, fs_name, mover_name, is_vdm=True):
fs_id = self.get_context('FileSystem').get_id(fs_name)
@@ -588,7 +588,7 @@ class MountPoint(StorageObject):
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def get(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -619,7 +619,7 @@ class MountPoint(StorageObject):
else:
return constants.STATUS_OK, response['objects']
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, mount_path, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -855,7 +855,7 @@ class VDM(StorageObject):
super(VDM, self).__init__(conn, elt_maker, xml_parser, manager)
self.vdm_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, name, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@@ -1145,7 +1145,7 @@ class MoverInterface(StorageObject):
super(MoverInterface, self).__init__(conn, elt_maker, xml_parser,
manager)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, interface):
# Maximum of 32 characters for mover interface name
name = interface['name']
@@ -1226,7 +1226,7 @@ class MoverInterface(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, ip_addr, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@@ -1268,7 +1268,7 @@ class DNSDomain(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, mover_name, name, servers, protocol='udp'):
mover_id = self._get_mover_id(mover_name, False)
@@ -1298,7 +1298,7 @@ class DNSDomain(StorageObject):
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, mover_name, name):
mover_id = self._get_mover_id(mover_name, False)
@@ -1331,7 +1331,7 @@ class CIFSServer(StorageObject):
super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_server_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, server_args):
compName = server_args['name']
# Maximum of 14 characters for netBIOS name
@@ -1387,7 +1387,7 @@ class CIFSServer(StorageObject):
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def get_all(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -1439,7 +1439,7 @@ class CIFSServer(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def modify(self, server_args):
"""Make CIFS server join or un-join the domain.
@@ -1552,7 +1552,7 @@ class CIFSShare(StorageObject):
super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_share_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, name, server_name, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -1605,7 +1605,7 @@ class CIFSShare(StorageObject):
return constants.STATUS_OK, self.cifs_share_map[name]
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:

32
manila/share/drivers/dell_emc/plugins/vnx/object_manager.py

@@ -144,7 +144,7 @@ class StorageObject(object):
)
)
@utils.retry(exception.EMCVnxLockRequiredException)
@utils.retry(retry_param=exception.EMCVnxLockRequiredException)
def _send_request(self, req):
req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8')
@@ -161,7 +161,7 @@ class StorageObject(object):
return response
@utils.retry(exception.EMCVnxLockRequiredException)
@utils.retry(retry_param=exception.EMCVnxLockRequiredException)
def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False):
"""Execute NAS command via SSH.
@@ -218,7 +218,7 @@ class FileSystem(StorageObject):
super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager)
self.filesystem_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, name, size, pool_name, mover_name, is_vdm=True):
pool_id = self.get_context('StoragePool').get_id(pool_name)
@@ -547,7 +547,7 @@ class MountPoint(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, mount_path, fs_name, mover_name, is_vdm=True):
fs_id = self.get_context('FileSystem').get_id(fs_name)
@@ -587,7 +587,7 @@ class MountPoint(StorageObject):
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def get(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -618,7 +618,7 @@ class MountPoint(StorageObject):
else:
return constants.STATUS_OK, response['objects']
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, mount_path, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -854,7 +854,7 @@ class VDM(StorageObject):
super(VDM, self).__init__(conn, elt_maker, xml_parser, manager)
self.vdm_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, name, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@@ -1144,7 +1144,7 @@ class MoverInterface(StorageObject):
super(MoverInterface, self).__init__(conn, elt_maker, xml_parser,
manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, interface):
# Maximum of 32 characters for mover interface name
name = interface['name']
@@ -1228,7 +1228,7 @@ class MoverInterface(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, ip_addr, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@@ -1270,7 +1270,7 @@ class DNSDomain(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, mover_name, name, servers, protocol='udp'):
mover_id = self._get_mover_id(mover_name, False)
@@ -1300,7 +1300,7 @@ class DNSDomain(StorageObject):
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, mover_name, name):
mover_id = self._get_mover_id(mover_name, False)
@@ -1333,7 +1333,7 @@ class CIFSServer(StorageObject):
super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_server_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, server_args):
compName = server_args['name']
# Maximum of 14 characters for netBIOS name
@@ -1389,7 +1389,7 @@ class CIFSServer(StorageObject):
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def get_all(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -1441,7 +1441,7 @@ class CIFSServer(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def modify(self, server_args):
"""Make CIFS server join or un-join the domain.
@@ -1554,7 +1554,7 @@ class CIFSShare(StorageObject):
super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_share_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, name, server_name, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@@ -1607,7 +1607,7 @@ class CIFSShare(StorageObject):
return constants.STATUS_OK, self.cifs_share_map[name]
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:

11
manila/share/drivers/generic.py

@@ -23,7 +23,6 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
@@ -243,7 +242,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
server_details, share['name'])
return export_locations
@utils.retry(exception.ProcessExecutionError, backoff_rate=1)
@utils.retry(retry_param=exception.ProcessExecutionError, backoff_rate=1)
def _is_device_file_available(self, server_details, volume):
"""Checks whether the device file is available"""
command = ['sudo', 'test', '-b', volume['mountpoint']]
@@ -372,7 +371,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
raise exception.ShareBackendException(msg=six.text_type(e))
return _mount_device_with_lock()
@utils.retry(exception.ProcessExecutionError)
@utils.retry(retry_param=exception.ProcessExecutionError)
def _unmount_device(self, share, server_details):
"""Unmounts block device from directory on service vm."""
@@ -416,9 +415,9 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
_('Volume %s is already attached to another instance')
% volume['id'])
@retrying.retry(stop_max_attempt_number=3,
wait_fixed=2000,
retry_on_exception=lambda exc: True)
@utils.retry(retries=3,
interval=2,
backoff_rate=1)
def attach_volume():
self.compute_api.instance_volume_attach(
self.admin_context, instance_id, volume['id'])

8
manila/share/drivers/hitachi/hnas/ssh.py

@@ -353,7 +353,7 @@ class HNASSSHBackend(object):
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
@mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def create_directory(self, dest_path):
self._locked_selectfs('create', dest_path)
@@ -366,7 +366,7 @@ class HNASSSHBackend(object):
LOG.warning(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
@mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def delete_directory(self, path):
try:
@@ -383,7 +383,7 @@ class HNASSSHBackend(object):
LOG.debug(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCIsBusy, wait_random=True,
@mutils.retry(retry_param=exception.HNASSSCIsBusy, wait_random=True,
retries=5)
def check_directory(self, path):
command = ['path-to-object-number', '-f', self.fs_name, path]
@@ -621,7 +621,7 @@ class HNASSSHBackend(object):
export_list.append(Export(items[i]))
return export_list
@mutils.retry(exception=exception.HNASConnException, wait_random=True)
@mutils.retry(retry_param=exception.HNASConnException, wait_random=True)
def _execute(self, commands):
command = ['ssc', '127.0.0.1']
if self.admin_ip0 is not None:

4
manila/share/drivers/hitachi/hsp/rest.py

@@ -191,7 +191,9 @@ class HSPRestBackend(object):
msg = _("No cluster was found on HSP.")
raise exception.HSPBackendException(msg=msg)
@utils.retry(exception.HSPTimeoutException, retries=10, wait_random=True)
@utils.retry(retry_param=exception.HSPTimeoutException,
retries=10,
wait_random=True)
def _wait_job_status(self, job_url, target_status):
resp_json = self._send_get(job_url)

8
manila/share/drivers/huawei/v3/connection.py

@@ -1409,10 +1409,10 @@ class V3StorageConnection(driver.HuaweiBase):
interval = wait_interval
backoff_rate = 1
@utils.retry(exception.InvalidShare,
interval,
retries,
backoff_rate)
@utils.retry(retry_param=exception.InvalidShare,
interval=interval,
retries=retries,
backoff_rate=backoff_rate)
def _check_AD_status():
ad = self.helper.get_AD_config()
if ad['DOMAINSTATUS'] != expected_status:

3
manila/share/drivers/lvm.py

@@ -272,7 +272,8 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
retries = 10 if retry_busy_device else 1
@utils.retry(exception.ShareBusyException, retries=retries)
@utils.retry(retry_param=exception.ShareBusyException,
retries=retries)
def _unmount_device_with_retry():
try:
self._execute('umount', '-f', mount_path, run_as_root=True)

6
manila/share/drivers/netapp/dataontap/client/client_cmode.py

@@ -1570,8 +1570,10 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
api_args = {'client-config': client_config_name,
'client-enabled': 'true'}
@manila_utils.retry(exception.ShareBackendException, interval=interval,
retries=retries, backoff_rate=1)
@manila_utils.retry(retry_param=exception.ShareBackendException,
interval=interval,
retries=retries,
backoff_rate=1)
def try_enable_ldap_client():
try:
self.send_request('ldap-config-create', api_args)

30
manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py

@@ -282,8 +282,10 @@ class DataMotionSession(object):
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.ReplicationException,
interval=5,
retries=retries,
backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver,
@@ -318,8 +320,10 @@ class DataMotionSession(object):
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.ReplicationException,
interval=5,
retries=retries,
backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors(
source_vserver=src_vserver, dest_vserver=dest_vserver,
@@ -613,8 +617,10 @@ class DataMotionSession(object):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.VserverNotReady,
interval=interval,
retries=retries,
backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if vserver_info.get('subtype') != 'default':
@@ -686,8 +692,10 @@ class DataMotionSession(object):
if subtype:
expected['subtype'] = subtype
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.VserverNotReady,
interval=interval,
retries=retries,
backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if not all(item in vserver_info.items() for
@@ -705,8 +713,10 @@ class DataMotionSession(object):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.NetAppException, interval=interval,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.NetAppException,
interval=interval,
retries=retries,
backoff_rate=1)
def release_snapmirror():
snapmirrors = src_client.get_snapmirror_destinations_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver)

18
manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py

@@ -2652,8 +2652,10 @@ class NetAppCmodeFileStorageLibrary(object):
retries = (self.configuration.netapp_start_volume_move_timeout / 5
or 1)
@manila_utils.retry(exception.ShareBusyException, interval=5,
retries=retries, backoff_rate=1)
@manila_utils.retry(retry_param=exception.ShareBusyException,
interval=5,
retries=retries,
backoff_rate=1)
def try_move_volume():
try:
self._move_volume(source_share, destination_share,
@@ -2805,8 +2807,8 @@ class NetAppCmodeFileStorageLibrary(object):
"""Abort an ongoing migration."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
share_volume = self._get_backend_share_name(source_share['id'])
retries = (self.configuration.netapp_migration_cancel_timeout / 5 or
1)
retries = (math.ceil(
self.configuration.netapp_migration_cancel_timeout / 5) or 1)
try:
self._get_volume_move_status(source_share, share_server)
@@ -2816,7 +2818,7 @@ class NetAppCmodeFileStorageLibrary(object):
self._client.abort_volume_move(share_volume, vserver)
@manila_utils.retry(exception.InUse, interval=5,
@manila_utils.retry(retry_param=exception.InUse, interval=5,
retries=retries, backoff_rate=1)
def wait_for_migration_cancel_complete():
move_status = self._get_volume_move_status(source_share,
@@ -3028,8 +3030,10 @@ class NetAppCmodeFileStorageLibrary(object):
retries = (self.configuration.netapp_volume_move_cutover_timeout / 5
or 1)
@manila_utils.retry(exception.ShareBusyException, interval=5,
retries=retries, backoff_rate=1)
@manila_utils.retry(retry_param=exception.ShareBusyException,
interval=5,
retries=retries,
backoff_rate=1)
def check_move_completion():
status = self._get_volume_move_status(source_share, share_server)
if status['phase'].lower() != 'completed':

2
manila/share/drivers/nexenta/ns4/jsonrpc.py

@@ -69,7 +69,7 @@ class NexentaJSONProxy(object):
def __repr__(self):
return 'NMS proxy: %s' % self.url
@utils.retry(retry_exc_tuple, retries=6)
@utils.retry(retry_param=retry_exc_tuple, retries=6)
def __call__(self, *args):
data = jsonutils.dumps({
'object': self.obj,

2
manila/share/drivers/qnap/api.py

@@ -41,7 +41,7 @@ MSG_UNEXPECT_RESP = _("Unexpected response from QNAP API")
def _connection_checker(func):
"""Decorator to check session has expired or not."""
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
retries=5)
@functools.wraps(func)
def inner_connection_checker(self, *args, **kwargs):

8
manila/share/drivers/qnap/qnap.py

@@ -286,7 +286,7 @@ class QnapShareDriver(driver.ShareDriver):
}
super(QnapShareDriver, self)._update_share_stats(data)
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=3,
retries=5)
@utils.synchronized('qnap-create_share')
@@ -365,7 +365,7 @@ class QnapShareDriver(driver.ShareDriver):
self.configuration.qnap_share_ip,
volID)
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=5, retries=5, backoff_rate=1)
def _get_share_info(self, share_name):
share = self.api_executor.get_share_info(
@@ -441,7 +441,7 @@ class QnapShareDriver(driver.ShareDriver):
}
self.api_executor.edit_share(share_dict)
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=3,
retries=5)
@utils.synchronized('qnap-create_snapshot')
@@ -514,7 +514,7 @@ class QnapShareDriver(driver.ShareDriver):
self.api_executor.delete_snapshot_api(snapshot_id)
self.private_storage.delete(snapshot['id'])
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=3,
retries=5)
@utils.synchronized('qnap-create_share_from_snapshot')

2
manila/share/drivers/tegile/tegile.py

@@ -97,7 +97,7 @@ class TegileAPIExecutor(object):
return self._send_api_request(*args, **kwargs)
@debugger
@utils.retry(exception=(requests.ConnectionError, requests.Timeout),
@utils.retry(retry_param=(requests.ConnectionError, requests.Timeout),
interval=30,
retries=3,
backoff_rate=1)

2
manila/share/drivers/windows/winrm_helper.py

@@ -93,7 +93,7 @@ class WinRMHelper(object):
retries = self._config.winrm_retry_count if retry else 1
conn = self._get_conn(server)
@utils.retry(exception=Exception,
@utils.retry(retry_param=Exception,
interval=self._config.winrm_retry_interval,
retries=retries)
def _execute():

4
manila/share/drivers/zfsonlinux/driver.py

@@ -273,7 +273,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
msg=_("Could not destroy '%s' dataset, "
"because it had opened files.") % name)
@utils.retry(exception.ProcessExecutionError)
@utils.retry(retry_param=exception.ProcessExecutionError, retries=10)
def _zfs_destroy_with_retry():
"""Retry destroying dataset ten times with exponential backoff."""
# NOTE(bswartz): There appears to be a bug in ZFS when creating and
@@ -863,7 +863,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
"""Unmanage dataset snapshot."""
self.private_storage.delete(snapshot_instance["snapshot_id"])
@utils.retry(exception.ZFSonLinuxException)
@utils.retry(retry_param=exception.ZFSonLinuxException, retries=10)
def _unmount_share_with_retry(self, share_name):
out, err = self.execute("sudo", "mount")
if "%s " % share_name not in out:

2
manila/share/drivers/zfsonlinux/utils.py

@@ -96,7 +96,7 @@ class ExecuteMixin(driver.ExecuteMixin):
cmd = cmd[1:]
return executor(*cmd, **kwargs)
@utils.retry(exception.ProcessExecutionError,
@utils.retry(retry_param=exception.ProcessExecutionError,
interval=5, retries=36, backoff_rate=1)
def execute_with_retry(self, *cmd, **kwargs):
"""Retry wrapper over common shell interface."""

4
manila/share/manager.py

@@ -334,8 +334,8 @@ class ShareManager(manager.SchedulerDependentManager):
# we want to retry to setup the driver. In case of a multi-backend
# scenario, working backends are usable and the non-working ones (where
# do_setup() or check_for_setup_error() fail) retry.
@utils.retry(Exception, interval=2, backoff_rate=2,
backoff_sleep_max=600, retries=0)
@utils.retry(interval=2, backoff_rate=2,
infinite=True, backoff_sleep_max=600)
def _driver_setup():
self.driver.initialized = False
LOG.debug("Start initialization of driver: '%s'", driver_host_pair)

2
manila/share/migration.py

@@ -186,7 +186,7 @@ class ShareMigrationHelper(object):
else:
LOG.debug("No access rules to sync to destination share instance.")
@utils.retry(exception.ShareServerNotReady, retries=8)
@utils.retry(retry_param=exception.ShareServerNotReady, retries=8)
def wait_for_share_server(self, share_server_id):
share_server = self.db.share_server_get(self.context, share_server_id)
if share_server['status'] == constants.STATUS_ERROR:

8
manila/tests/share/test_manager.py

@@ -234,8 +234,8 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(self.share_manager.driver, 'do_setup',
mock.Mock(side_effect=Exception()))
# break the endless retry loop
with mock.patch("time.sleep",
side_effect=CustomTimeSleepException()):
with mock.patch('tenacity.nap.sleep') as sleep:
sleep.side_effect = CustomTimeSleepException()
self.assertRaises(CustomTimeSleepException,
self.share_manager.init_host)
self.assertRaises(
@@ -251,8 +251,8 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(manager.LOG, 'exception')
self.share_manager.driver.initialized = False
with mock.patch("time.sleep",
side_effect=CustomTimeSleepException()):
with mock.patch('time.sleep') as mock_sleep:
mock_sleep.side_effect = CustomTimeSleepException()
self.assertRaises(CustomTimeSleepException,
self.share_manager.init_host)

144
manila/tests/test_utils.py

@@ -25,6 +25,7 @@ from oslo_utils import encodeutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import paramiko
import tenacity
from webob import exc
import manila
@@ -557,12 +558,16 @@ class TestComparableMixin(test.TestCase):
self.one._compare(1, self.one._cmpkey))
class WrongException(Exception):
pass
class TestRetryDecorator(test.TestCase):
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2)
@@ -578,8 +583,8 @@ class TestRetryDecorator(test.TestCase):
def test_no_retry_required_random(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2,
@@ -593,22 +598,21 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_retries_once_random(self):
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate,
wait_random=True)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=interval,
retries=retries,
backoff_rate=backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(message='fake')
raise exception.ManilaException(data='fake')
else:
return 'success'
@ -616,19 +620,20 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
self.assertTrue(mock_sleep.called)
mock_sleep.assert_called_with(interval)
def test_retries_once(self):
def test_retries_once_random(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=interval,
retries=retries,
backoff_rate=backoff_rate,
wait_random=True)
def fails_once():
self.counter += 1
if self.counter < 2:
@ -640,7 +645,7 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
mock_sleep.assert_called_with(interval * backoff_rate)
self.assertTrue(mock_sleep.called)
def test_limit_is_reached(self):
self.counter = 0
@ -648,11 +653,11 @@ class TestRetryDecorator(test.TestCase):
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=interval,
retries=retries,
backoff_rate=backoff_rate)
def always_fails():
self.counter += 1
raise exception.ManilaException(data='fake')
@ -662,34 +667,94 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in range(retries):
if i > 0:
interval *= backoff_rate
interval *= (backoff_rate ** (i - 1))
expected_sleep_arg.append(float(interval))
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
mock_sleep.assert_has_calls(
list(map(mock.call, expected_sleep_arg)))
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException)
def raise_unexpected_error():
raise ValueError("value error")
raise WrongException("wrong exception")
self.assertRaises(ValueError, raise_unexpected_error)
self.assertRaises(WrongException, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
def test_wrong_retries_num(self):
self.assertRaises(ValueError, utils.retry, exception.ManilaException,
retries=-1)
@mock.patch('tenacity.nap.sleep')
def test_retry_exit_code(self, sleep_mock):
exit_code = 5
exception = utils.processutils.ProcessExecutionError
@utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code)
def raise_retriable_exit_code():
raise exception(exit_code=exit_code)
self.assertRaises(exception, raise_retriable_exit_code)
# we should be sleeping 1 less time than the number of retries,
# default (10)
self.assertEqual(9, sleep_mock.call_count)
sleep_mock.assert_has_calls([mock.call(1.0),
mock.call(2.0),
mock.call(4.0),
mock.call(8.0),
mock.call(16.0),
mock.call(32.0),
mock.call(64.0),
mock.call(128.0),
mock.call(256.0)])
@mock.patch('tenacity.nap.sleep')
def test_retry_exit_code_non_retriable(self, sleep_mock):
exit_code = 5
exception = utils.processutils.ProcessExecutionError
@utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code)
def raise_non_retriable_exit_code():
raise exception(exit_code=exit_code + 1)
self.assertRaises(exception, raise_non_retriable_exit_code)
sleep_mock.assert_not_called()
def test_infinite_retry(self):
retry_param = exception.ManilaException
class FakeTenacityRetry(tenacity.Retrying):
def __init__(*args, **kwargs):
pass
with mock.patch('tenacity.Retrying',
autospec=FakeTenacityRetry) as tenacity_retry:
@utils.retry(retry_param=retry_param,
wait_random=True,
infinite=True)
def some_retriable_function():
pass
some_retriable_function()
tenacity_retry.assert_called_once_with(
sleep=tenacity.nap.sleep,
before_sleep=mock.ANY,
after=mock.ANY,
stop=tenacity.stop.stop_never,
reraise=True,
retry=utils.IsAMatcher(tenacity.retry_if_exception_type),
wait=utils.IsAMatcher(tenacity.wait_random_exponential))
def test_max_backoff_sleep(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
retries=0,
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
infinite=True,
backoff_rate=2,
backoff_sleep_max=4)
def fails_then_passes():
@ -700,7 +765,8 @@ class TestRetryDecorator(test.TestCase):
return 'success'
self.assertEqual('success', fails_then_passes())
mock_sleep.assert_has_calls(map(mock.call, [2, 4, 4, 4]))
mock_sleep.assert_has_calls(
[mock.call(1), mock.call(2), mock.call(4), mock.call(4)])
@ddt.ddt

97
manila/utils.py

@ -22,11 +22,11 @@ import functools
import inspect
import os
import pyclbr
import random
import re
import shutil
import sys
import tempfile
import tenacity
import time
from eventlet import pools
@ -42,15 +42,16 @@ from oslo_utils import netutils
from oslo_utils import strutils
from oslo_utils import timeutils
import paramiko
import retrying
import six
from webob import exc
from manila.common import constants
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
if hasattr('CONF', 'debug') and CONF.debug:
@ -457,65 +458,59 @@ class ComparableMixin(object):
return self._compare(other, lambda s, o: s != o)
def retry(exception, interval=1, retries=10, backoff_rate=2,
wait_random=False, backoff_sleep_max=None):
"""A wrapper around retrying library.
This decorator allows to log and to check 'retries' input param.
Time interval between retries is calculated in the following way:
interval * backoff_rate ^ previous_attempt_number
:param exception: expected exception type. When wrapped function
raises an exception of this type, the function
execution is retried.
:param interval: param 'interval' is used to calculate time interval
between retries:
interval * backoff_rate ^ previous_attempt_number
:param retries: number of retries. Use 0 for an infinite retry loop.
:param backoff_rate: param 'backoff_rate' is used to calculate time
interval between retries:
interval * backoff_rate ^ previous_attempt_number
:param wait_random: boolean value to enable retry with random wait timer.
:param backoff_sleep_max: Maximum number of seconds for the calculated
backoff sleep. Use None if no maximum is needed.
"""
def _retry_on_exception(e):
return isinstance(e, exception)
class retry_if_exit_code(tenacity.retry_if_exception):
"""Retry on ProcessExecutionError specific exit codes."""
def __init__(self, codes):
self.codes = (codes,) if isinstance(codes, int) else codes
super(retry_if_exit_code, self).__init__(self._check_exit_code)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
def _check_exit_code(self, exc):
return (exc and isinstance(exc, processutils.ProcessExecutionError) and
exc.exit_code in self.codes)
if wait_random:
wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
else:
wait_val = wait_for * 1000.0
if backoff_sleep_max:
wait_val = min(backoff_sleep_max * 1000.0, wait_val)
def retry(retry_param=Exception,
interval=1,
retries=10,
backoff_rate=2,
backoff_sleep_max=None,
wait_random=False,
infinite=False,
retry=tenacity.retry_if_exception_type):
LOG.debug("Sleeping for %s seconds.", (wait_val / 1000.0))
return wait_val
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return retries > 0 and previous_attempt_number == retries
if wait_random:
kwargs = {'multiplier': interval}
if backoff_sleep_max is not None:
kwargs.update({'max': backoff_sleep_max})
wait = tenacity.wait_random_exponential(**kwargs)
else:
kwargs = {'multiplier': interval, 'min': 0, 'exp_base': backoff_rate}
if backoff_sleep_max is not None:
kwargs.update({'max': backoff_sleep_max})
wait = tenacity.wait_exponential(**kwargs)
if retries < 0:
raise ValueError(_('Retries must be greater than or '
'equal to 0 (received: %s).') % retries)
if infinite:
stop = tenacity.stop.stop_never
else:
stop = tenacity.stop_after_attempt(retries)
def _decorator(f):
@six.wraps(f)
@functools.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
r = tenacity.Retrying(
sleep=tenacity.nap.sleep,
before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG),
after=tenacity.after_log(LOG, logging.DEBUG),
stop=stop,
reraise=True,
retry=retry(retry_param),
wait=wait)
return r(f, *args, **kwargs)
return _wrapper

2
requirements.txt

@ -34,7 +34,7 @@ python-neutronclient>=6.7.0 # Apache-2.0
keystoneauth1>=4.2.1 # Apache-2.0
keystonemiddleware>=9.1.0 # Apache-2.0
requests>=2.23.0 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
tenacity>=6.3.1 # Apache-2.0
Routes>=2.4.1 # MIT
six>=1.15.0 # MIT
SQLAlchemy>=1.3.17 # MIT

Loading…
Cancel
Save