Merge "Replace retrying with tenacity"

Authored by Zuul on 2021-09-03 17:07:51 +00:00; committed by Gerrit Code Review
commit 45f98a7d0a
28 changed files with 283 additions and 197 deletions
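At its core, the commit rewrites manila.utils.retry on top of tenacity.Retrying and converts every call site from positional arguments to keyword arguments, with the retried exception type now passed as retry_param. A minimal before/after sketch of a caller (adapted from the first code hunk below; the standalone function is illustrative):

import os

from manila import exception
from manila import utils

# Before (retrying-backed helper, positional arguments):
#     @utils.retry(exception.NotFound, 0.1, 10, 0.1)
# After (tenacity-backed helper, keyword arguments):
@utils.retry(retry_param=exception.NotFound,
             interval=1,
             retries=10,
             backoff_rate=1)
def check_dir_exists(path):
    if not os.path.exists(path):
        raise exception.NotFound("Folder %s could not be found." % path)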

View File

@ -120,7 +120,7 @@ statsd==3.2.2
stestr==3.0.1
stevedore==3.2.2
Tempita==0.5.2
tenacity==6.0.0
tenacity==6.3.1
testrepository==0.0.20
testresources==2.0.1
testscenarios==0.4

View File

@ -235,12 +235,18 @@ class DataServiceHelper(object):
return access_list
@utils.retry(exception.NotFound, 0.1, 10, 0.1)
@utils.retry(retry_param=exception.NotFound,
interval=1,
retries=10,
backoff_rate=1)
def _check_dir_exists(self, path):
if not os.path.exists(path):
raise exception.NotFound("Folder %s could not be found." % path)
@utils.retry(exception.Found, 0.1, 10, 0.1)
@utils.retry(retry_param=exception.Found,
interval=1,
retries=10,
backoff_rate=1)
def _check_dir_not_exists(self, path):
if os.path.exists(path):
raise exception.Found("Folder %s was found." % path)

View File

@ -145,7 +145,7 @@ class Copy(object):
self.current_size += int(size)
LOG.info(self.get_progress())
@utils.retry(exception.ShareDataCopyFailed, retries=2)
@utils.retry(retry_param=exception.ShareDataCopyFailed, retries=2)
def _copy_and_validate(self, src_item, dest_item):
utils.execute("cp", "-P", "--preserve=all", src_item,
dest_item, run_as_root=True)

View File

@ -531,7 +531,7 @@ class NeutronBindNetworkPlugin(NeutronNetworkPlugin):
self._wait_for_ports_bind(ports, share_server)
return ports
@utils.retry(exception.NetworkBindException, retries=20)
@utils.retry(retry_param=exception.NetworkBindException, retries=20)
def _wait_for_ports_bind(self, ports, share_server):
inactive_ports = []
for port in ports:

View File

@ -80,7 +80,8 @@ class SecurityServiceHelper(driver.ExecuteMixin):
interval = 5
retries = int(timeout / interval) or 1
@manila_utils.retry(exception.ProcessExecutionError, interval=interval,
@manila_utils.retry(retry_param=exception.ProcessExecutionError,
interval=interval,
retries=retries, backoff_rate=1)
def try_ldap_operation():
try:

View File

@ -144,7 +144,7 @@ class StorageObject(object):
)
)
@utils.retry(exception.EMCPowerMaxLockRequiredException)
@utils.retry(retry_param=exception.EMCPowerMaxLockRequiredException)
def _send_request(self, req):
req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8')
@ -161,7 +161,7 @@ class StorageObject(object):
return response
@utils.retry(exception.EMCPowerMaxLockRequiredException)
@utils.retry(retry_param=exception.EMCPowerMaxLockRequiredException)
def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False):
"""Execute NAS command via SSH.
@ -218,7 +218,7 @@ class FileSystem(StorageObject):
super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager)
self.filesystem_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, name, size, pool_name, mover_name, is_vdm=True):
pool_id = self.get_context('StoragePool').get_id(pool_name)
@ -548,7 +548,7 @@ class MountPoint(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, mount_path, fs_name, mover_name, is_vdm=True):
fs_id = self.get_context('FileSystem').get_id(fs_name)
@ -588,7 +588,7 @@ class MountPoint(StorageObject):
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def get(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -619,7 +619,7 @@ class MountPoint(StorageObject):
else:
return constants.STATUS_OK, response['objects']
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, mount_path, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -855,7 +855,7 @@ class VDM(StorageObject):
super(VDM, self).__init__(conn, elt_maker, xml_parser, manager)
self.vdm_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, name, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@ -1145,7 +1145,7 @@ class MoverInterface(StorageObject):
super(MoverInterface, self).__init__(conn, elt_maker, xml_parser,
manager)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, interface):
# Maximum of 32 characters for mover interface name
name = interface['name']
@ -1226,7 +1226,7 @@ class MoverInterface(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, ip_addr, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@ -1268,7 +1268,7 @@ class DNSDomain(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, mover_name, name, servers, protocol='udp'):
mover_id = self._get_mover_id(mover_name, False)
@ -1298,7 +1298,7 @@ class DNSDomain(StorageObject):
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, mover_name, name):
mover_id = self._get_mover_id(mover_name, False)
@ -1331,7 +1331,7 @@ class CIFSServer(StorageObject):
super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_server_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, server_args):
compName = server_args['name']
# Maximum of 14 characters for netBIOS name
@ -1387,7 +1387,7 @@ class CIFSServer(StorageObject):
LOG.error(message)
raise exception.EMCPowerMaxXMLAPIError(err=message)
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def get_all(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -1439,7 +1439,7 @@ class CIFSServer(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def modify(self, server_args):
"""Make CIFS server join or un-join the domain.
@ -1552,7 +1552,7 @@ class CIFSShare(StorageObject):
super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_share_map = {}
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def create(self, name, server_name, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -1605,7 +1605,7 @@ class CIFSShare(StorageObject):
return constants.STATUS_OK, self.cifs_share_map[name]
@utils.retry(exception.EMCPowerMaxInvalidMoverID)
@utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID)
def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:

View File

@ -144,7 +144,7 @@ class StorageObject(object):
)
)
@utils.retry(exception.EMCVnxLockRequiredException)
@utils.retry(retry_param=exception.EMCVnxLockRequiredException)
def _send_request(self, req):
req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8')
@ -161,7 +161,7 @@ class StorageObject(object):
return response
@utils.retry(exception.EMCVnxLockRequiredException)
@utils.retry(retry_param=exception.EMCVnxLockRequiredException)
def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False):
"""Execute NAS command via SSH.
@ -218,7 +218,7 @@ class FileSystem(StorageObject):
super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager)
self.filesystem_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, name, size, pool_name, mover_name, is_vdm=True):
pool_id = self.get_context('StoragePool').get_id(pool_name)
@ -547,7 +547,7 @@ class MountPoint(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, mount_path, fs_name, mover_name, is_vdm=True):
fs_id = self.get_context('FileSystem').get_id(fs_name)
@ -587,7 +587,7 @@ class MountPoint(StorageObject):
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def get(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -618,7 +618,7 @@ class MountPoint(StorageObject):
else:
return constants.STATUS_OK, response['objects']
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, mount_path, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -854,7 +854,7 @@ class VDM(StorageObject):
super(VDM, self).__init__(conn, elt_maker, xml_parser, manager)
self.vdm_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, name, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@ -1144,7 +1144,7 @@ class MoverInterface(StorageObject):
super(MoverInterface, self).__init__(conn, elt_maker, xml_parser,
manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, interface):
# Maximum of 32 characters for mover interface name
name = interface['name']
@ -1228,7 +1228,7 @@ class MoverInterface(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, ip_addr, mover_name):
mover_id = self._get_mover_id(mover_name, False)
@ -1270,7 +1270,7 @@ class DNSDomain(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, mover_name, name, servers, protocol='udp'):
mover_id = self._get_mover_id(mover_name, False)
@ -1300,7 +1300,7 @@ class DNSDomain(StorageObject):
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, mover_name, name):
mover_id = self._get_mover_id(mover_name, False)
@ -1333,7 +1333,7 @@ class CIFSServer(StorageObject):
super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_server_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, server_args):
compName = server_args['name']
# Maximum of 14 characters for netBIOS name
@ -1389,7 +1389,7 @@ class CIFSServer(StorageObject):
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def get_all(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -1441,7 +1441,7 @@ class CIFSServer(StorageObject):
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def modify(self, server_args):
"""Make CIFS server join or un-join the domain.
@ -1554,7 +1554,7 @@ class CIFSShare(StorageObject):
super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_share_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def create(self, name, server_name, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
@ -1607,7 +1607,7 @@ class CIFSShare(StorageObject):
return constants.STATUS_OK, self.cifs_share_map[name]
@utils.retry(exception.EMCVnxInvalidMoverID)
@utils.retry(retry_param=exception.EMCVnxInvalidMoverID)
def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:

View File

@ -23,7 +23,6 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
@ -243,7 +242,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
server_details, share['name'])
return export_locations
@utils.retry(exception.ProcessExecutionError, backoff_rate=1)
@utils.retry(retry_param=exception.ProcessExecutionError, backoff_rate=1)
def _is_device_file_available(self, server_details, volume):
"""Checks whether the device file is available"""
command = ['sudo', 'test', '-b', volume['mountpoint']]
@ -372,7 +371,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
raise exception.ShareBackendException(msg=six.text_type(e))
return _mount_device_with_lock()
@utils.retry(exception.ProcessExecutionError)
@utils.retry(retry_param=exception.ProcessExecutionError)
def _unmount_device(self, share, server_details):
"""Unmounts block device from directory on service vm."""
@ -416,9 +415,9 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
_('Volume %s is already attached to another instance')
% volume['id'])
@retrying.retry(stop_max_attempt_number=3,
wait_fixed=2000,
retry_on_exception=lambda exc: True)
@utils.retry(retries=3,
interval=2,
backoff_rate=1)
def attach_volume():
self.compute_api.instance_volume_attach(
self.admin_context, instance_id, volume['id'])
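Here a direct retrying.retry usage is rewritten against the common helper. Under the new helper's defaults the two forms are roughly equivalent, as sketched below (the nested attach_volume helper is shown standalone for brevity):

from manila import utils

# Old: retrying.retry(stop_max_attempt_number=3, wait_fixed=2000,
#                     retry_on_exception=lambda exc: True)
# New: retry_param defaults to Exception (retry on any exception), and
#      backoff_rate=1 keeps every wait at `interval` seconds, i.e. a fixed
#      2 s pause that mirrors wait_fixed=2000 ms.
@utils.retry(retries=3, interval=2, backoff_rate=1)
def attach_volume():
    ...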

View File

@ -353,7 +353,7 @@ class HNASSSHBackend(object):
LOG.exception(msg)
raise exception.HNASBackendException(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
@mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def create_directory(self, dest_path):
self._locked_selectfs('create', dest_path)
@ -366,7 +366,7 @@ class HNASSSHBackend(object):
LOG.warning(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCContextChange, wait_random=True,
@mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True,
retries=5)
def delete_directory(self, path):
try:
@ -383,7 +383,7 @@ class HNASSSHBackend(object):
LOG.debug(msg)
raise exception.HNASSSCContextChange(msg=msg)
@mutils.retry(exception=exception.HNASSSCIsBusy, wait_random=True,
@mutils.retry(retry_param=exception.HNASSSCIsBusy, wait_random=True,
retries=5)
def check_directory(self, path):
command = ['path-to-object-number', '-f', self.fs_name, path]
@ -621,7 +621,7 @@ class HNASSSHBackend(object):
export_list.append(Export(items[i]))
return export_list
@mutils.retry(exception=exception.HNASConnException, wait_random=True)
@mutils.retry(retry_param=exception.HNASConnException, wait_random=True)
def _execute(self, commands):
command = ['ssc', '127.0.0.1']
if self.admin_ip0 is not None:

View File

@ -191,7 +191,9 @@ class HSPRestBackend(object):
msg = _("No cluster was found on HSP.")
raise exception.HSPBackendException(msg=msg)
@utils.retry(exception.HSPTimeoutException, retries=10, wait_random=True)
@utils.retry(retry_param=exception.HSPTimeoutException,
retries=10,
wait_random=True)
def _wait_job_status(self, job_url, target_status):
resp_json = self._send_get(job_url)

View File

@ -1410,10 +1410,10 @@ class V3StorageConnection(driver.HuaweiBase):
interval = wait_interval
backoff_rate = 1
@utils.retry(exception.InvalidShare,
interval,
retries,
backoff_rate)
@utils.retry(retry_param=exception.InvalidShare,
interval=interval,
retries=retries,
backoff_rate=backoff_rate)
def _check_AD_status():
ad = self.helper.get_AD_config()
if ad['DOMAINSTATUS'] != expected_status:

View File

@ -276,7 +276,8 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
retries = 10 if retry_busy_device else 1
@utils.retry(exception.ShareBusyException, retries=retries)
@utils.retry(retry_param=exception.ShareBusyException,
retries=retries)
def _unmount_device_with_retry():
try:
self._execute('umount', '-f', mount_path, run_as_root=True)

View File

@ -1570,8 +1570,10 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
api_args = {'client-config': client_config_name,
'client-enabled': 'true'}
@manila_utils.retry(exception.ShareBackendException, interval=interval,
retries=retries, backoff_rate=1)
@manila_utils.retry(retry_param=exception.ShareBackendException,
interval=interval,
retries=retries,
backoff_rate=1)
def try_enable_ldap_client():
try:
self.send_request('ldap-config-create', api_args)

View File

@ -282,8 +282,10 @@ class DataMotionSession(object):
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.ReplicationException,
interval=5,
retries=retries,
backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver,
@ -318,8 +320,10 @@ class DataMotionSession(object):
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.ReplicationException,
interval=5,
retries=retries,
backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors(
source_vserver=src_vserver, dest_vserver=dest_vserver,
@ -613,8 +617,10 @@ class DataMotionSession(object):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.VserverNotReady,
interval=interval,
retries=retries,
backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if vserver_info.get('subtype') != 'default':
@ -686,8 +692,10 @@ class DataMotionSession(object):
if subtype:
expected['subtype'] = subtype
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.VserverNotReady,
interval=interval,
retries=retries,
backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if not all(item in vserver_info.items() for
@ -705,8 +713,10 @@ class DataMotionSession(object):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.NetAppException, interval=interval,
retries=retries, backoff_rate=1)
@utils.retry(retry_param=exception.NetAppException,
interval=interval,
retries=retries,
backoff_rate=1)
def release_snapmirror():
snapmirrors = src_client.get_snapmirror_destinations_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver)

View File

@ -2656,8 +2656,10 @@ class NetAppCmodeFileStorageLibrary(object):
retries = (self.configuration.netapp_start_volume_move_timeout / 5
or 1)
@manila_utils.retry(exception.ShareBusyException, interval=5,
retries=retries, backoff_rate=1)
@manila_utils.retry(retry_param=exception.ShareBusyException,
interval=5,
retries=retries,
backoff_rate=1)
def try_move_volume():
try:
self._move_volume(source_share, destination_share,
@ -2809,8 +2811,8 @@ class NetAppCmodeFileStorageLibrary(object):
"""Abort an ongoing migration."""
vserver, vserver_client = self._get_vserver(share_server=share_server)
share_volume = self._get_backend_share_name(source_share['id'])
retries = (self.configuration.netapp_migration_cancel_timeout / 5 or
1)
retries = (math.ceil(
self.configuration.netapp_migration_cancel_timeout / 5) or 1)
try:
self._get_volume_move_status(source_share, share_server)
@ -2820,7 +2822,7 @@ class NetAppCmodeFileStorageLibrary(object):
self._client.abort_volume_move(share_volume, vserver)
@manila_utils.retry(exception.InUse, interval=5,
@manila_utils.retry(retry_param=exception.InUse, interval=5,
retries=retries, backoff_rate=1)
def wait_for_migration_cancel_complete():
move_status = self._get_volume_move_status(source_share,
@ -3032,8 +3034,10 @@ class NetAppCmodeFileStorageLibrary(object):
retries = (self.configuration.netapp_volume_move_cutover_timeout / 5
or 1)
@manila_utils.retry(exception.ShareBusyException, interval=5,
retries=retries, backoff_rate=1)
@manila_utils.retry(retry_param=exception.ShareBusyException,
interval=5,
retries=retries,
backoff_rate=1)
def check_move_completion():
status = self._get_volume_move_status(source_share, share_server)
if status['phase'].lower() != 'completed':

View File

@ -69,7 +69,7 @@ class NexentaJSONProxy(object):
def __repr__(self):
return 'NMS proxy: %s' % self.url
@utils.retry(retry_exc_tuple, retries=6)
@utils.retry(retry_param=retry_exc_tuple, retries=6)
def __call__(self, *args):
data = jsonutils.dumps({
'object': self.obj,

View File

@ -41,7 +41,7 @@ MSG_UNEXPECT_RESP = _("Unexpected response from QNAP API")
def _connection_checker(func):
"""Decorator to check session has expired or not."""
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
retries=5)
@functools.wraps(func)
def inner_connection_checker(self, *args, **kwargs):

View File

@ -290,7 +290,7 @@ class QnapShareDriver(driver.ShareDriver):
}
super(QnapShareDriver, self)._update_share_stats(data)
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=3,
retries=5)
@utils.synchronized('qnap-create_share')
@ -369,7 +369,7 @@ class QnapShareDriver(driver.ShareDriver):
self.configuration.qnap_share_ip,
volID)
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=5, retries=5, backoff_rate=1)
def _get_share_info(self, share_name):
share = self.api_executor.get_share_info(
@ -445,7 +445,7 @@ class QnapShareDriver(driver.ShareDriver):
}
self.api_executor.edit_share(share_dict)
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=3,
retries=5)
@utils.synchronized('qnap-create_snapshot')
@ -518,7 +518,7 @@ class QnapShareDriver(driver.ShareDriver):
self.api_executor.delete_snapshot_api(snapshot_id)
self.private_storage.delete(snapshot['id'])
@utils.retry(exception=exception.ShareBackendException,
@utils.retry(retry_param=exception.ShareBackendException,
interval=3,
retries=5)
@utils.synchronized('qnap-create_share_from_snapshot')

View File

@ -97,7 +97,7 @@ class TegileAPIExecutor(object):
return self._send_api_request(*args, **kwargs)
@debugger
@utils.retry(exception=(requests.ConnectionError, requests.Timeout),
@utils.retry(retry_param=(requests.ConnectionError, requests.Timeout),
interval=30,
retries=3,
backoff_rate=1)
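Note that retry_param also accepts a tuple of exception classes, since tenacity.retry_if_exception_type takes either a single type or a tuple. A small sketch under that assumption (the decorated function is a hypothetical stand-in for the Tegile API executor):

import requests

from manila import utils

# Retry only transient HTTP transport failures; anything else propagates.
@utils.retry(retry_param=(requests.ConnectionError, requests.Timeout),
             interval=30, retries=3, backoff_rate=1)
def send_api_request(url):
    return requests.post(url, timeout=10)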

View File

@ -93,7 +93,7 @@ class WinRMHelper(object):
retries = self._config.winrm_retry_count if retry else 1
conn = self._get_conn(server)
@utils.retry(exception=Exception,
@utils.retry(retry_param=Exception,
interval=self._config.winrm_retry_interval,
retries=retries)
def _execute():

View File

@ -273,7 +273,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
msg=_("Could not destroy '%s' dataset, "
"because it had opened files.") % name)
@utils.retry(exception.ProcessExecutionError)
@utils.retry(retry_param=exception.ProcessExecutionError, retries=10)
def _zfs_destroy_with_retry():
"""Retry destroying dataset ten times with exponential backoff."""
# NOTE(bswartz): There appears to be a bug in ZFS when creating and
@ -869,7 +869,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
"""Unmanage dataset snapshot."""
self.private_storage.delete(snapshot_instance["snapshot_id"])
@utils.retry(exception.ZFSonLinuxException)
@utils.retry(retry_param=exception.ZFSonLinuxException, retries=10)
def _unmount_share_with_retry(self, share_name):
out, err = self.execute("sudo", "mount")
if "%s " % share_name not in out:

View File

@ -96,7 +96,7 @@ class ExecuteMixin(driver.ExecuteMixin):
cmd = cmd[1:]
return executor(*cmd, **kwargs)
@utils.retry(exception.ProcessExecutionError,
@utils.retry(retry_param=exception.ProcessExecutionError,
interval=5, retries=36, backoff_rate=1)
def execute_with_retry(self, *cmd, **kwargs):
"""Retry wrapper over common shell interface."""

View File

@ -334,8 +334,8 @@ class ShareManager(manager.SchedulerDependentManager):
# we want to retry to setup the driver. In case of a multi-backend
# scenario, working backends are usable and the non-working ones (where
# do_setup() or check_for_setup_error() fail) retry.
@utils.retry(Exception, interval=2, backoff_rate=2,
backoff_sleep_max=600, retries=0)
@utils.retry(interval=2, backoff_rate=2,
infinite=True, backoff_sleep_max=600)
def _driver_setup():
self.driver.initialized = False
LOG.debug("Start initialization of driver: '%s'", driver_host_pair)
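The old helper treated retries=0 as an endless loop; the rewritten helper rejects retries < 1 and makes the endless case explicit with infinite=True, which maps to tenacity.stop.stop_never (see the utils.py hunk near the end of this diff). Roughly what the decorator above builds on each call, as a sketch assuming tenacity >= 6.3.1:

import tenacity

# utils.retry(interval=2, backoff_rate=2, backoff_sleep_max=600, infinite=True)
# constructs something close to:
retryer = tenacity.Retrying(
    stop=tenacity.stop.stop_never,                        # infinite=True
    wait=tenacity.wait_exponential(multiplier=2, min=0,
                                   exp_base=2, max=600),  # capped back-off
    retry=tenacity.retry_if_exception_type(Exception),    # default retry_param
    reraise=True)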

View File

@ -186,7 +186,7 @@ class ShareMigrationHelper(object):
else:
LOG.debug("No access rules to sync to destination share instance.")
@utils.retry(exception.ShareServerNotReady, retries=8)
@utils.retry(retry_param=exception.ShareServerNotReady, retries=8)
def wait_for_share_server(self, share_server_id):
share_server = self.db.share_server_get(self.context, share_server_id)
if share_server['status'] == constants.STATUS_ERROR:

View File

@ -234,8 +234,8 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(self.share_manager.driver, 'do_setup',
mock.Mock(side_effect=Exception()))
# break the endless retry loop
with mock.patch("time.sleep",
side_effect=CustomTimeSleepException()):
with mock.patch('tenacity.nap.sleep') as sleep:
sleep.side_effect = CustomTimeSleepException()
self.assertRaises(CustomTimeSleepException,
self.share_manager.init_host)
self.assertRaises(
@ -251,8 +251,8 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(manager.LOG, 'exception')
self.share_manager.driver.initialized = False
with mock.patch("time.sleep",
side_effect=CustomTimeSleepException()):
with mock.patch('time.sleep') as mock_sleep:
mock_sleep.side_effect = CustomTimeSleepException()
self.assertRaises(CustomTimeSleepException,
self.share_manager.init_host)
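Because the rewritten helper hands tenacity.nap.sleep to tenacity.Retrying as its sleep callable, the driver-setup test above now breaks the otherwise endless retry loop by patching that attribute instead of time.sleep. A minimal sketch of the pattern, assuming the standard library's unittest.mock and the CustomTimeSleepException defined by these tests:

from unittest import mock

with mock.patch('tenacity.nap.sleep') as mock_sleep:
    # The first back-off sleep raises, so the infinite retry loop ends
    # and the test can assert on the propagated exception.
    mock_sleep.side_effect = CustomTimeSleepException()
    ...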

View File

@ -25,6 +25,7 @@ from oslo_utils import encodeutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import paramiko
import tenacity
from webob import exc
import manila
@ -557,12 +558,16 @@ class TestComparableMixin(test.TestCase):
self.one._compare(1, self.one._cmpkey))
class WrongException(Exception):
pass
class TestRetryDecorator(test.TestCase):
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2)
@ -578,8 +583,8 @@ class TestRetryDecorator(test.TestCase):
def test_no_retry_required_random(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2,
@ -593,42 +598,17 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_retries_once_random(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate,
wait_random=True)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(message='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
self.assertTrue(mock_sleep.called)
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=interval,
retries=retries,
backoff_rate=backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
@ -640,7 +620,32 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
mock_sleep.assert_called_with(interval * backoff_rate)
mock_sleep.assert_called_with(interval)
def test_retries_once_random(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=interval,
retries=retries,
backoff_rate=backoff_rate,
wait_random=True)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
self.assertTrue(mock_sleep.called)
def test_limit_is_reached(self):
self.counter = 0
@ -648,11 +653,11 @@ class TestRetryDecorator(test.TestCase):
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
interval=interval,
retries=retries,
backoff_rate=backoff_rate)
def always_fails():
self.counter += 1
raise exception.ManilaException(data='fake')
@ -662,34 +667,94 @@ class TestRetryDecorator(test.TestCase):
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in range(retries):
if i > 0:
interval *= backoff_rate
interval *= (backoff_rate ** (i - 1))
expected_sleep_arg.append(float(interval))
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
mock_sleep.assert_has_calls(
list(map(mock.call, expected_sleep_arg)))
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException)
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException)
def raise_unexpected_error():
raise ValueError("value error")
raise WrongException("wrong exception")
self.assertRaises(ValueError, raise_unexpected_error)
self.assertRaises(WrongException, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
def test_wrong_retries_num(self):
self.assertRaises(ValueError, utils.retry, exception.ManilaException,
retries=-1)
@mock.patch('tenacity.nap.sleep')
def test_retry_exit_code(self, sleep_mock):
exit_code = 5
exception = utils.processutils.ProcessExecutionError
@utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code)
def raise_retriable_exit_code():
raise exception(exit_code=exit_code)
self.assertRaises(exception, raise_retriable_exit_code)
# we should be sleeping 1 less time than the number of retries,
# default (10)
self.assertEqual(9, sleep_mock.call_count)
sleep_mock.assert_has_calls([mock.call(1.0),
mock.call(2.0),
mock.call(4.0),
mock.call(8.0),
mock.call(16.0),
mock.call(32.0),
mock.call(64.0),
mock.call(128.0),
mock.call(256.0)])
@mock.patch('tenacity.nap.sleep')
def test_retry_exit_code_non_retriable(self, sleep_mock):
exit_code = 5
exception = utils.processutils.ProcessExecutionError
@utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code)
def raise_non_retriable_exit_code():
raise exception(exit_code=exit_code + 1)
self.assertRaises(exception, raise_non_retriable_exit_code)
sleep_mock.assert_not_called()
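The sleep values asserted in test_retry_exit_code follow from the exponential wait the helper builds: with the defaults interval=1 and backoff_rate=2, the n-th sleep lasts interval * backoff_rate ** (n - 1) seconds, and ten attempts yield nine sleeps of 1, 2, 4, ..., 256. A quick check of that arithmetic:

interval, backoff_rate, retries = 1, 2, 10

# One sleep between consecutive attempts: retries - 1 sleeps in total.
expected = [float(interval * backoff_rate ** n) for n in range(retries - 1)]
assert expected == [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0]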
def test_infinite_retry(self):
retry_param = exception.ManilaException
class FakeTenacityRetry(tenacity.Retrying):
def __init__(*args, **kwargs):
pass
with mock.patch('tenacity.Retrying',
autospec=FakeTenacityRetry) as tenacity_retry:
@utils.retry(retry_param=retry_param,
wait_random=True,
infinite=True)
def some_retriable_function():
pass
some_retriable_function()
tenacity_retry.assert_called_once_with(
sleep=tenacity.nap.sleep,
before_sleep=mock.ANY,
after=mock.ANY,
stop=tenacity.stop.stop_never,
reraise=True,
retry=utils.IsAMatcher(tenacity.retry_if_exception_type),
wait=utils.IsAMatcher(tenacity.wait_random_exponential))
def test_max_backoff_sleep(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
retries=0,
with mock.patch('tenacity.nap.sleep') as mock_sleep:
@utils.retry(retry_param=exception.ManilaException,
infinite=True,
backoff_rate=2,
backoff_sleep_max=4)
def fails_then_passes():
@ -700,7 +765,8 @@ class TestRetryDecorator(test.TestCase):
return 'success'
self.assertEqual('success', fails_then_passes())
mock_sleep.assert_has_calls(map(mock.call, [2, 4, 4, 4]))
mock_sleep.assert_has_calls(
[mock.call(1), mock.call(2), mock.call(4), mock.call(4)])
@ddt.ddt

View File

@ -22,11 +22,11 @@ import functools
import inspect
import os
import pyclbr
import random
import re
import shutil
import sys
import tempfile
import tenacity
import time
from eventlet import pools
@ -42,15 +42,16 @@ from oslo_utils import netutils
from oslo_utils import strutils
from oslo_utils import timeutils
import paramiko
import retrying
import six
from webob import exc
from manila.common import constants
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
if hasattr('CONF', 'debug') and CONF.debug:
@ -457,65 +458,59 @@ class ComparableMixin(object):
return self._compare(other, lambda s, o: s != o)
def retry(exception, interval=1, retries=10, backoff_rate=2,
wait_random=False, backoff_sleep_max=None):
"""A wrapper around retrying library.
class retry_if_exit_code(tenacity.retry_if_exception):
"""Retry on ProcessExecutionError specific exit codes."""
def __init__(self, codes):
self.codes = (codes,) if isinstance(codes, int) else codes
super(retry_if_exit_code, self).__init__(self._check_exit_code)
This decorator allows to log and to check 'retries' input param.
Time interval between retries is calculated in the following way:
interval * backoff_rate ^ previous_attempt_number
def _check_exit_code(self, exc):
return (exc and isinstance(exc, processutils.ProcessExecutionError) and
exc.exit_code in self.codes)
:param exception: expected exception type. When wrapped function
raises an exception of this type, the function
execution is retried.
:param interval: param 'interval' is used to calculate time interval
between retries:
interval * backoff_rate ^ previous_attempt_number
:param retries: number of retries. Use 0 for an infinite retry loop.
:param backoff_rate: param 'backoff_rate' is used to calculate time
interval between retries:
interval * backoff_rate ^ previous_attempt_number
:param wait_random: boolean value to enable retry with random wait timer.
:param backoff_sleep_max: Maximum number of seconds for the calculated
backoff sleep. Use None if no maximum is needed.
"""
def _retry_on_exception(e):
return isinstance(e, exception)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
def retry(retry_param=Exception,
interval=1,
retries=10,
backoff_rate=2,
backoff_sleep_max=None,
wait_random=False,
infinite=False,
retry=tenacity.retry_if_exception_type):
if wait_random:
wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
else:
wait_val = wait_for * 1000.0
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
if backoff_sleep_max:
wait_val = min(backoff_sleep_max * 1000.0, wait_val)
if wait_random:
kwargs = {'multiplier': interval}
if backoff_sleep_max is not None:
kwargs.update({'max': backoff_sleep_max})
wait = tenacity.wait_random_exponential(**kwargs)
else:
kwargs = {'multiplier': interval, 'min': 0, 'exp_base': backoff_rate}
if backoff_sleep_max is not None:
kwargs.update({'max': backoff_sleep_max})
wait = tenacity.wait_exponential(**kwargs)
LOG.debug("Sleeping for %s seconds.", (wait_val / 1000.0))
return wait_val
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return retries > 0 and previous_attempt_number == retries
if retries < 0:
raise ValueError(_('Retries must be greater than or '
'equal to 0 (received: %s).') % retries)
if infinite:
stop = tenacity.stop.stop_never
else:
stop = tenacity.stop_after_attempt(retries)
def _decorator(f):
@six.wraps(f)
@functools.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
r = tenacity.Retrying(
sleep=tenacity.nap.sleep,
before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG),
after=tenacity.after_log(LOG, logging.DEBUG),
stop=stop,
reraise=True,
retry=retry(retry_param),
wait=wait)
return r(f, *args, **kwargs)
return _wrapper
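For context, a brief usage sketch of the rewritten helper and the new retry_if_exit_code predicate (the decorated functions are illustrative, not part of the commit):

from manila import exception
from manila import utils

# Retry on an exception type: 10 attempts, back-off starting at 1 s and
# doubling, never sleeping more than 30 s between attempts.
@utils.retry(retry_param=exception.NotFound, interval=1, retries=10,
             backoff_rate=2, backoff_sleep_max=30)
def wait_for_folder():
    ...

# Retry only on specific ProcessExecutionError exit codes.
@utils.retry(retry=utils.retry_if_exit_code, retry_param=5, retries=3)
def run_flaky_command():
    ...

# Retry forever (the old retries=0 behaviour) with randomized back-off.
@utils.retry(retry_param=Exception, wait_random=True, infinite=True)
def initialize_driver():
    ...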

View File

@ -34,7 +34,7 @@ python-neutronclient>=6.7.0 # Apache-2.0
keystoneauth1>=4.2.1 # Apache-2.0
keystonemiddleware>=9.1.0 # Apache-2.0
requests>=2.23.0 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
tenacity>=6.3.1 # Apache-2.0
Routes>=2.4.1 # MIT
six>=1.15.0 # MIT
SQLAlchemy>=1.3.17 # MIT