diff --git a/doc/source/user/logging.rst b/doc/source/user/logging.rst
index 57bf99f61..e3239d3ff 100644
--- a/doc/source/user/logging.rst
+++ b/doc/source/user/logging.rst
@@ -57,10 +57,10 @@ openstack.cloud.exc
     exception to the `openstack.cloud.exc` logger. Wrapped exceptions are
     usually considered implementation details, but can be useful for debugging
     problems.
-openstack.cloud.iterate_timeout
+openstack.iterate_timeout
     When `shade` needs to poll a resource, it does so in a loop that
     waits between iterations and ultimately times out. The
-    `openstack.cloud.iterate_timeout` logger emits messages for each iteration
+    `openstack.iterate_timeout` logger emits messages for each iteration
     indicating it is waiting and for how long. These can be useful to see for
     long running tasks so that one can know things are not stuck, but can
     also be noisy.
diff --git a/openstack/cloud/_utils.py b/openstack/cloud/_utils.py
index 58356c766..b5eb032b4 100644
--- a/openstack/cloud/_utils.py
+++ b/openstack/cloud/_utils.py
@@ -22,7 +22,6 @@ import re
 import six
 import sre_constants
 import sys
-import time
 import uuid
 
 from decorator import decorator
@@ -40,42 +39,6 @@ def _exc_clear():
         sys.exc_clear()
 
 
-def _iterate_timeout(timeout, message, wait=2):
-    """Iterate and raise an exception on timeout.
-
-    This is a generator that will continually yield and sleep for
-    wait seconds, and if the timeout is reached, will raise an exception
-    with <message>.
-
-    """
-    log = _log.setup_logging('openstack.cloud.iterate_timeout')
-
-    try:
-        # None as a wait winds up flowing well in the per-resource cache
-        # flow. We could spread this logic around to all of the calling
-        # points, but just having this treat None as "I don't have a value"
-        # seems friendlier
-        if wait is None:
-            wait = 2
-        elif wait == 0:
-            # wait should be < timeout, unless timeout is None
-            wait = 0.1 if timeout is None else min(0.1, timeout)
-        wait = float(wait)
-    except ValueError:
-        raise exc.OpenStackCloudException(
-            "Wait value must be an int or float value. {wait} given"
-            " instead".format(wait=wait))
-
-    start = time.time()
-    count = 0
-    while (timeout is None) or (time.time() < start + timeout):
-        count += 1
-        yield count
-        log.debug('Waiting %s seconds', wait)
-        time.sleep(wait)
-    raise exc.OpenStackCloudTimeout(message)
-
-
 def _make_unicode(input):
     """Turn an input into unicode unconditionally
diff --git a/openstack/cloud/exc.py b/openstack/cloud/exc.py
index c02de2d77..d82d73e6c 100644
--- a/openstack/cloud/exc.py
+++ b/openstack/cloud/exc.py
@@ -15,6 +15,7 @@
 from openstack import exceptions
 
 OpenStackCloudException = exceptions.SDKException
+OpenStackCloudTimeout = exceptions.ResourceTimeout
 
 
 class OpenStackCloudCreateException(OpenStackCloudException):
@@ -27,10 +28,6 @@ class OpenStackCloudCreateException(OpenStackCloudException):
         self.resource_id = resource_id
 
 
-class OpenStackCloudTimeout(OpenStackCloudException):
-    pass
-
-
 class OpenStackCloudUnavailableExtension(OpenStackCloudException):
     pass
diff --git a/openstack/cloud/openstackcloud.py b/openstack/cloud/openstackcloud.py
index e82cabb00..90fa03d56 100644
--- a/openstack/cloud/openstackcloud.py
+++ b/openstack/cloud/openstackcloud.py
@@ -47,6 +47,7 @@ import openstack.config
 import openstack.config.defaults
 import openstack.connection
 from openstack import task_manager
+from openstack import utils
 
 # TODO(shade) shade keys were x-object-meta-x-sdk-md5 - we need to add those
 # to freshness checks so that a shade->sdk transition doesn't
@@ -4471,7 +4472,7 @@ class OpenStackCloud(_normalize.Normalizer):
 
     def wait_for_image(self, image, timeout=3600):
         image_id = image['id']
-        for count in _utils._iterate_timeout(
+        for count in utils.iterate_timeout(
                 timeout, "Timeout waiting for image to snapshot"):
             self.list_images.invalidate(self)
             image = self.get_image(image_id)
@@ -4510,7 +4511,7 @@ class OpenStackCloud(_normalize.Normalizer):
                     self.delete_object(container=container, name=objname)
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the image to be deleted."):
                 self._get_cache(None).invalidate()
@@ -4740,7 +4741,7 @@ class OpenStackCloud(_normalize.Normalizer):
         if not wait:
             return self.get_image(response['image_id'])
         try:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the image to finish."):
                 image_obj = self.get_image(response['image_id'])
@@ -4829,7 +4830,7 @@ class OpenStackCloud(_normalize.Normalizer):
         if not wait:
             return image
         try:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the image to finish."):
                 image_obj = self.get_image(image.id)
@@ -4869,7 +4870,7 @@ class OpenStackCloud(_normalize.Normalizer):
         if wait:
             start = time.time()
             image_id = None
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the image to import."):
                 try:
@@ -5032,7 +5033,7 @@ class OpenStackCloud(_normalize.Normalizer):
 
         if wait:
             vol_id = volume['id']
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the volume to be available."):
                 volume = self.get_volume(vol_id)
@@ -5118,7 +5119,7 @@ class OpenStackCloud(_normalize.Normalizer):
         self.list_volumes.invalidate(self)
 
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the volume to be deleted."):
 
@@ -5180,7 +5181,7 @@ class OpenStackCloud(_normalize.Normalizer):
                         volume=volume['id'], server=server['id'])))
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for volume %s to detach." % volume['id']):
                 try:
@@ -5249,7 +5250,7 @@ class OpenStackCloud(_normalize.Normalizer):
                     server_id=server['id']))
 
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for volume %s to attach." % volume['id']):
                 try:
@@ -5324,7 +5325,7 @@ class OpenStackCloud(_normalize.Normalizer):
         snapshot = self._get_and_munchify('snapshot', data)
         if wait:
             snapshot_id = snapshot['id']
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the volume snapshot to be available."
                     ):
@@ -5420,7 +5421,7 @@ class OpenStackCloud(_normalize.Normalizer):
             backup_id = backup['id']
             msg = ("Timeout waiting for the volume backup {} to be "
                    "available".format(backup_id))
-            for _ in _utils._iterate_timeout(timeout, msg):
+            for _ in utils.iterate_timeout(timeout, msg):
                 backup = self.get_volume_backup(backup_id)
 
                 if backup['status'] == 'available':
@@ -5511,7 +5512,7 @@ class OpenStackCloud(_normalize.Normalizer):
             error_message=msg)
         if wait:
             msg = "Timeout waiting for the volume backup to be deleted."
-            for count in _utils._iterate_timeout(timeout, msg):
+            for count in utils.iterate_timeout(timeout, msg):
                 if not self.get_volume_backup(volume_backup['id']):
                     break
 
@@ -5541,7 +5542,7 @@ class OpenStackCloud(_normalize.Normalizer):
             error_message="Error in deleting volume snapshot")
 
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the volume snapshot to be deleted."):
                 if not self.get_volume_snapshot(volumesnapshot['id']):
@@ -5842,7 +5843,7 @@ class OpenStackCloud(_normalize.Normalizer):
         # if we've provided a port as a parameter
         if wait:
             try:
-                for count in _utils._iterate_timeout(
+                for count in utils.iterate_timeout(
                         timeout,
                         "Timeout waiting for the floating IP"
                         " to be ACTIVE",
@@ -6050,7 +6051,7 @@ class OpenStackCloud(_normalize.Normalizer):
         if wait:
             # Wait for the address to be assigned to the server
             server_id = server['id']
-            for _ in _utils._iterate_timeout(
+            for _ in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for the floating IP to be attached.",
                     wait=self._SERVER_AGE):
@@ -6082,7 +6083,7 @@ class OpenStackCloud(_normalize.Normalizer):
                 timeout = self._PORT_AGE * 2
             else:
                 timeout = None
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for port to show up in list",
                     wait=self._PORT_AGE):
@@ -6869,7 +6870,7 @@ class OpenStackCloud(_normalize.Normalizer):
         start_time = time.time()
 
         # There is no point in iterating faster than the list_servers cache
-        for count in _utils._iterate_timeout(
+        for count in utils.iterate_timeout(
                 timeout,
                 timeout_message,
                 # if _SERVER_AGE is 0 we still want to wait a bit
@@ -6960,7 +6961,7 @@ class OpenStackCloud(_normalize.Normalizer):
                 self._normalize_server(server), bare=bare, detailed=detailed)
 
         admin_pass = server.get('adminPass') or admin_pass
-        for count in _utils._iterate_timeout(
+        for count in utils.iterate_timeout(
                 timeout,
                 "Timeout waiting for server {0} to "
                 "rebuild.".format(server_id),
@@ -7119,7 +7120,7 @@ class OpenStackCloud(_normalize.Normalizer):
                 and self.get_volumes(server)):
             reset_volume_cache = True
 
-        for count in _utils._iterate_timeout(
+        for count in utils.iterate_timeout(
                 timeout,
                 "Timed out waiting for server to get deleted.",
                 # if _SERVER_AGE is 0 we still want to wait a bit
diff --git a/openstack/cloud/operatorcloud.py b/openstack/cloud/operatorcloud.py
index 71146d967..ae5ffed39 100644
--- a/openstack/cloud/operatorcloud.py
+++ b/openstack/cloud/operatorcloud.py
@@ -19,6 +19,7 @@ from openstack import _adapter
 from openstack.cloud.exc import *  # noqa
 from openstack.cloud import openstackcloud
 from openstack.cloud import _utils
+from openstack import utils
 
 
 class OperatorCloud(openstackcloud.OpenStackCloud):
@@ -158,7 +159,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
         with _utils.shade_exceptions("Error inspecting machine"):
             machine = self.node_set_provision_state(machine['uuid'], 'inspect')
             if wait:
-                for count in _utils._iterate_timeout(
+                for count in utils.iterate_timeout(
                         timeout,
                         "Timeout waiting for node transition to "
                         "target state of 'inspect'"):
@@ -277,7 +278,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
             with _utils.shade_exceptions(
                     "Error transitioning node to available state"):
                 if wait:
-                    for count in _utils._iterate_timeout(
+                    for count in utils.iterate_timeout(
                             timeout,
                             "Timeout waiting for node transition to "
                             "available state"):
@@ -313,7 +314,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
                 # Note(TheJulia): We need to wait for the lock to clear
                 # before we attempt to set the machine into provide state
                 # which allows for the transition to available.
-                for count in _utils._iterate_timeout(
+                for count in utils.iterate_timeout(
                         lock_timeout,
                         "Timeout waiting for reservation to clear "
                         "before setting provide state"):
@@ -409,7 +410,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
                 microversion=version)
 
             if wait:
-                for count in _utils._iterate_timeout(
+                for count in utils.iterate_timeout(
                         timeout,
                         "Timeout waiting for machine to be deleted"):
                     if not self.get_machine(uuid):
@@ -635,7 +636,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
                 error_message=msg,
                 microversion=version)
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for node transition to "
                     "target state of '%s'" % state):
@@ -857,7 +858,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
         else:
             msg = 'Waiting for lock to be released for node {node}'.format(
                 node=node['uuid'])
-            for count in _utils._iterate_timeout(timeout, msg, 2):
+            for count in utils.iterate_timeout(timeout, msg, 2):
                 current_node = self.get_machine(node['uuid'])
                 if current_node['reservation'] is None:
                     return
@@ -2001,7 +2002,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
         self._identity_client.put(url, error_message=error_msg)
 
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for role to be granted"):
                 if self.list_role_assignments(filters=filters):
@@ -2080,7 +2081,7 @@ class OperatorCloud(openstackcloud.OpenStackCloud):
         self._identity_client.delete(url, error_message=error_msg)
 
         if wait:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     timeout,
                     "Timeout waiting for role to be revoked"):
                 if not self.list_role_assignments(filters=filters):
diff --git a/openstack/resource2.py b/openstack/resource2.py
index 0f411f25b..258e196cf 100644
--- a/openstack/resource2.py
+++ b/openstack/resource2.py
@@ -33,7 +33,7 @@ and then returned to the caller.
 import collections
 import itertools
-import time
 
 from openstack import exceptions
 from openstack import format
+from openstack import utils
@@ -841,7 +841,7 @@ def wait_for_status(session, resource, status, failures, interval, wait):
     :type resource: :class:`~openstack.resource.Resource`
     :param status: Desired status of the resource.
     :param list failures: Statuses that would indicate the transition
-                          failed such as 'ERROR'.
+                          failed such as 'ERROR'. Defaults to ['ERROR'].
     :param interval: Number of seconds to wait between checks.
     :param wait: Maximum number of seconds to wait for transition.
@@ -856,22 +856,32 @@ def wait_for_status(session, resource, status, failures, interval, wait):
     if resource.status == status:
         return resource
 
-    total_sleep = 0
     if failures is None:
-        failures = []
+        failures = ['ERROR']
 
-    while total_sleep < wait:
-        resource.get(session)
-        if resource.status == status:
+    failures = [f.lower() for f in failures]
+    name = "{res}:{id}".format(res=resource.__class__.__name__,
+                               id=resource.id)
+    msg = "Timeout waiting for {name} to transition to {status}".format(
+        name=name, status=status)
+
+    for count in utils.iterate_timeout(
+            timeout=wait,
+            message=msg,
+            wait=interval):
+        resource = resource.get(session)
+        if not resource:
+            raise exceptions.ResourceFailure(
+                "{name} went away while waiting for {status}".format(
+                    name=name, status=status))
+
+        new_status = resource.status
+        if new_status.lower() == status.lower():
             return resource
-        if resource.status in failures:
-            msg = ("Resource %s transitioned to failure state %s" %
-                   (resource.id, resource.status))
-            raise exceptions.ResourceFailure(msg)
-        time.sleep(interval)
-        total_sleep += interval
-    msg = "Timeout waiting for %s to transition to %s" % (resource.id, status)
-    raise exceptions.ResourceTimeout(msg)
+        if new_status.lower() in failures:
+            raise exceptions.ResourceFailure(
+                "{name} transitioned to failure state {status}".format(
+                    name=name, status=new_status))
 
 
 def wait_for_delete(session, resource, interval, wait):
@@ -888,13 +898,18 @@ def wait_for_delete(session, resource, interval, wait):
     :raises: :class:`~openstack.exceptions.ResourceTimeout` transition
              to status failed to occur in wait seconds.
""" - total_sleep = 0 - while total_sleep < wait: + orig_resource = resource + for count in utils.iterate_timeout( + timeout=wait, + message="Timeout waiting for {res}:{id} to delete".format( + res=resource.__class__.__name__, + id=resource.id), + wait=interval): try: - resource.get(session) + resource = resource.get(session) + if not resource: + return orig_resource + if resource.status.lower() == 'deleted': + return resource except exceptions.NotFoundException: - return resource - time.sleep(interval) - total_sleep += interval - msg = "Timeout waiting for %s delete" % (resource.id) - raise exceptions.ResourceTimeout(msg) + return orig_resource diff --git a/openstack/tests/functional/cloud/test_compute.py b/openstack/tests/functional/cloud/test_compute.py index 828f680f0..8e12cd3b2 100644 --- a/openstack/tests/functional/cloud/test_compute.py +++ b/openstack/tests/functional/cloud/test_compute.py @@ -25,7 +25,7 @@ import six from openstack.cloud import exc from openstack.tests.functional.cloud import base from openstack.tests.functional.cloud.util import pick_flavor -from openstack.cloud import _utils +from openstack import utils class TestCompute(base.BaseFunctionalTestCase): @@ -293,7 +293,7 @@ class TestCompute(base.BaseFunctionalTestCase): # Volumes do not show up as unattached for a bit immediately after # deleting a server that had had a volume attached. Yay for eventual # consistency! - for count in _utils._iterate_timeout( + for count in utils.iterate_timeout( 60, 'Timeout waiting for volume {volume_id} to detach'.format( volume_id=volume_id)): diff --git a/openstack/tests/functional/cloud/test_floating_ip.py b/openstack/tests/functional/cloud/test_floating_ip.py index aa4e478bc..04a7b418f 100644 --- a/openstack/tests/functional/cloud/test_floating_ip.py +++ b/openstack/tests/functional/cloud/test_floating_ip.py @@ -24,11 +24,11 @@ import pprint from testtools import content from openstack import _adapter -from openstack.cloud import _utils from openstack.cloud import meta from openstack.cloud.exc import OpenStackCloudException from openstack.tests.functional.cloud import base from openstack.tests.functional.cloud.util import pick_flavor +from openstack import utils class TestFloatingIP(base.BaseFunctionalTestCase): @@ -195,7 +195,7 @@ class TestFloatingIP(base.BaseFunctionalTestCase): # ToDo: remove the following iteration when create_server waits for # the IP to be attached ip = None - for _ in _utils._iterate_timeout( + for _ in utils.iterate_timeout( self.timeout, "Timeout waiting for IP address to be attached"): ip = meta.get_server_external_ipv4(self.user_cloud, new_server) if ip is not None: @@ -215,7 +215,7 @@ class TestFloatingIP(base.BaseFunctionalTestCase): # ToDo: remove the following iteration when create_server waits for # the IP to be attached ip = None - for _ in _utils._iterate_timeout( + for _ in utils.iterate_timeout( self.timeout, "Timeout waiting for IP address to be attached"): ip = meta.get_server_external_ipv4(self.user_cloud, new_server) if ip is not None: diff --git a/openstack/tests/functional/cloud/test_volume.py b/openstack/tests/functional/cloud/test_volume.py index 21a3ebb5e..ee695bbe8 100644 --- a/openstack/tests/functional/cloud/test_volume.py +++ b/openstack/tests/functional/cloud/test_volume.py @@ -20,9 +20,9 @@ Functional tests for `shade` block storage methods. 
 from fixtures import TimeoutException
 from testtools import content
 
-from openstack.cloud import _utils
 from openstack.cloud import exc
 from openstack.tests.functional.cloud import base
+from openstack import utils
 
 
 class TestVolume(base.BaseFunctionalTestCase):
@@ -107,7 +107,7 @@ class TestVolume(base.BaseFunctionalTestCase):
         for v in volume:
             self.user_cloud.delete_volume(v, wait=False)
         try:
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     180, "Timeout waiting for volume cleanup"):
                 found = False
                 for existing in self.user_cloud.list_volumes():
diff --git a/openstack/tests/unit/cloud/test_shade.py b/openstack/tests/unit/cloud/test_shade.py
index 6283cf13e..6b3be1387 100644
--- a/openstack/tests/unit/cloud/test_shade.py
+++ b/openstack/tests/unit/cloud/test_shade.py
@@ -16,10 +16,10 @@ import uuid
 import testtools
 
 import openstack.cloud
-from openstack.cloud import _utils
 from openstack.cloud import exc
 from openstack.tests import fakes
 from openstack.tests.unit import base
+from openstack import utils
 
 
 RANGE_DATA = [
@@ -193,13 +193,13 @@ class TestShade(base.RequestsMockTestCase):
         with testtools.ExpectedException(
                 exc.OpenStackCloudException,
                 "Wait value must be an int or float value."):
-            for count in _utils._iterate_timeout(
+            for count in utils.iterate_timeout(
                     1, "test_iterate_timeout_bad_wait", wait="timeishard"):
                 pass
 
     @mock.patch('time.sleep')
     def test_iterate_timeout_str_wait(self, mock_sleep):
-        iter = _utils._iterate_timeout(
+        iter = utils.iterate_timeout(
             10, "test_iterate_timeout_str_wait", wait="1.6")
         next(iter)
         next(iter)
@@ -207,7 +207,7 @@ class TestShade(base.RequestsMockTestCase):
 
     @mock.patch('time.sleep')
     def test_iterate_timeout_int_wait(self, mock_sleep):
-        iter = _utils._iterate_timeout(
+        iter = utils.iterate_timeout(
             10, "test_iterate_timeout_int_wait", wait=1)
         next(iter)
         next(iter)
@@ -219,7 +219,7 @@ class TestShade(base.RequestsMockTestCase):
         with testtools.ExpectedException(
                 exc.OpenStackCloudTimeout,
                 message):
-            for count in _utils._iterate_timeout(0.1, message, wait=1):
+            for count in utils.iterate_timeout(0.1, message, wait=1):
                 pass
         mock_sleep.assert_called_with(1.0)
diff --git a/openstack/tests/unit/test_resource2.py b/openstack/tests/unit/test_resource2.py
index 28e1c224a..fdfc449e9 100644
--- a/openstack/tests/unit/test_resource2.py
+++ b/openstack/tests/unit/test_resource2.py
@@ -1424,39 +1424,40 @@ class TestWaitForStatus(base.TestCase):
 
         self.assertEqual(result, resource)
 
-    @mock.patch("time.sleep", return_value=None)
-    def test_status_match(self, mock_sleep):
+    def _resources_from_statuses(self, *statuses):
+        resources = []
+        for status in statuses:
+            res = mock.Mock()
+            res.status = status
+            resources.append(res)
+        for index, res in enumerate(resources[:-1]):
+            res.get.return_value = resources[index + 1]
+        return resources
+
+    def test_status_match(self):
         status = "loling"
-        resource = mock.Mock()
 
         # other gets past the first check, two anothers gets through
         # the sleep loop, and the third matches
-        statuses = ["other", "another", "another", status]
-        type(resource).status = mock.PropertyMock(side_effect=statuses)
+        resources = self._resources_from_statuses(
+            "first", "other", "another", "another", status)
 
-        result = resource2.wait_for_status("session", resource, status,
-                                           None, 1, 5)
+        result = resource2.wait_for_status(
+            mock.Mock(), resources[0], status, None, 1, 5)
 
-        self.assertEqual(result, resource)
+        self.assertEqual(result, resources[-1])
 
-    @mock.patch("time.sleep", return_value=None)
-    def test_status_fails(self, mock_sleep):
-        status = "loling"
+    def test_status_fails(self):
         failure = "crying"
-        resource = mock.Mock()
-        # other gets past the first check, the first failure doesn't
-        # match the expected, the third matches the failure,
-        # the fourth is used in creating the exception message
-        statuses = ["other", failure, failure, failure]
-        type(resource).status = mock.PropertyMock(side_effect=statuses)
+        resources = self._resources_from_statuses("success", "other", failure)
 
-        self.assertRaises(exceptions.ResourceFailure,
-                          resource2.wait_for_status,
-                          "session", resource, status, [failure], 1, 5)
+        self.assertRaises(
+            exceptions.ResourceFailure,
+            resource2.wait_for_status,
+            mock.Mock(), resources[0], "loling", [failure], 1, 5)
 
-    @mock.patch("time.sleep", return_value=None)
-    def test_timeout(self, mock_sleep):
+    def test_timeout(self):
         status = "loling"
         resource = mock.Mock()
 
@@ -1483,8 +1484,7 @@
 
 class TestWaitForDelete(base.TestCase):
 
-    @mock.patch("time.sleep", return_value=None)
-    def test_success(self, mock_sleep):
+    def test_success(self):
         response = mock.Mock()
         response.headers = {}
         response.status_code = 404
@@ -1497,11 +1497,11 @@
 
         self.assertEqual(result, resource)
 
-    @mock.patch("time.sleep", return_value=None)
-    def test_timeout(self, mock_sleep):
+    def test_timeout(self):
         resource = mock.Mock()
-        resource.get.side_effect = [None, None, None]
+        resource.status = 'ACTIVE'
+        resource.get.return_value = resource
 
         self.assertRaises(exceptions.ResourceTimeout,
                           resource2.wait_for_delete,
-                          "session", resource, 1, 3)
+                          "session", resource, 0.1, 0.3)
diff --git a/openstack/utils.py b/openstack/utils.py
index f14c87e3b..606705531 100644
--- a/openstack/utils.py
+++ b/openstack/utils.py
@@ -12,9 +12,12 @@
 
 import functools
 import logging
+import time
 
 import deprecation
 
+from openstack import _log
+from openstack import exceptions
 from openstack import version
 
 
@@ -113,3 +116,39 @@ def urljoin(*args):
     link. We generally won't care about that in client.
     """
     return '/'.join(str(a or '').strip('/') for a in args)
+
+
+def iterate_timeout(timeout, message, wait=2):
+    """Iterate and raise an exception on timeout.
+
+    This is a generator that will continually yield and sleep for
+    wait seconds, and if the timeout is reached, will raise an exception
+    with <message>.
+
+    """
+    log = _log.setup_logging('openstack.iterate_timeout')
+
+    try:
+        # None as a wait winds up flowing well in the per-resource cache
+        # flow. We could spread this logic around to all of the calling
+        # points, but just having this treat None as "I don't have a value"
+        # seems friendlier
+        if wait is None:
+            wait = 2
+        elif wait == 0:
+            # wait should be < timeout, unless timeout is None
+            wait = 0.1 if timeout is None else min(0.1, timeout)
+        wait = float(wait)
+    except ValueError:
+        raise exceptions.SDKException(
+            "Wait value must be an int or float value. {wait} given"
+            " instead".format(wait=wait))
+
+    start = time.time()
+    count = 0
+    while (timeout is None) or (time.time() < start + timeout):
+        count += 1
+        yield count
+        log.debug('Waiting %s seconds', wait)
+        time.sleep(wait)
+    raise exceptions.ResourceTimeout(message)
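
Below is a minimal usage sketch of the relocated ``utils.iterate_timeout``
helper; the ``wait_until`` wrapper and ``predicate`` callable are
illustrative names, not part of this change::

    from openstack import utils

    def wait_until(predicate, timeout=60, wait=2):
        """Poll ``predicate`` until it returns True or ``timeout`` expires."""
        # iterate_timeout yields a growing iteration count (1, 2, 3, ...),
        # sleeping ``wait`` seconds between iterations, and raises
        # exceptions.ResourceTimeout with the given message once ``timeout``
        # seconds have elapsed.
        for count in utils.iterate_timeout(
                timeout, "Timeout waiting for the condition", wait=wait):
            if predicate():
                return count

Callers that still catch ``OpenStackCloudTimeout`` keep working, since
``openstack.cloud.exc`` now aliases it to ``exceptions.ResourceTimeout``.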