Replace retrying with tenacity

We are replacing all usages of the 'retrying' package with
'tenacity', as the author of retrying is no longer actively
maintaining the project.
Unit tests are added or removed where applicable.

Tenacity [1] is a fork of retrying that improves on its
interface and extensibility.
Our end goal here is removing the retrying package from our
requirements.

Tenacity provides the same functionality as retrying, but has the
following major differences to account for:
- Tenacity expects wait/stop values in seconds, whereas retrying
  used milliseconds.
- Tenacity uses different kwargs for the decorator and for the
  Retrying class itself.
- Tenacity takes a different approach to retry arguments, using
  composable classes for its stop/wait/retry kwargs.
- By default tenacity raises a RetryError when a retried callable
  exhausts its attempts, whereas retrying raises the last exception
  from the callable. Tenacity provides backwards compatibility here
  through the 'reraise' kwarg, which this change passes to every
  converted decorator.
- For retries that check a result, tenacity raises immediately if the
  retried function raises, whereas retrying retried on all exceptions;
  where that behaviour is needed, the result check is combined with
  retry_if_exception_type(Exception), as in the sketch below.

[1] https://github.com/jd/tenacity

Co-Authored-By: Dmitry Tantsur <dtantsur@protonmail.com>
Co-Authored-By: Riccardo Pittau <elfosardo@gmail.com>
Story: #1635390
Task: #10528
Change-Id: Ie5eb3ddc196505e8f58ed14de9952284598586fb


@@ -25,9 +25,9 @@ from glanceclient import client
from glanceclient import exc as glance_exc
from oslo_log import log
from oslo_utils import uuidutils
import retrying
import sendfile
from swiftclient import utils as swift_utils
import tenacity
from ironic.common import exception
from ironic.common.glance_service import service_utils
@@ -112,11 +112,12 @@ class GlanceImageService(object):
self.context = context
self.endpoint = None
@retrying.retry(
stop_max_attempt_number=CONF.glance.num_retries + 1,
retry_on_exception=lambda e: isinstance(
e, exception.GlanceConnectionFailed),
wait_fixed=1000
@tenacity.retry(
retry=tenacity.retry_if_exception_type(
exception.GlanceConnectionFailed),
stop=tenacity.stop_after_attempt(CONF.glance.num_retries + 1),
wait=tenacity.wait_fixed(1),
reraise=True
)
def call(self, method, *args, **kwargs):
"""Call a glance client method.
@@ -124,7 +125,6 @@
If we get a connection error,
retry the request according to CONF.num_retries.
:param context: The request context, for access checks.
:param method: The method requested to be called.
:param args: A list of positional arguments for the method called
:param kwargs: A dict of keyword arguments for the method called


@@ -16,7 +16,8 @@ import ipaddress
import openstack
from openstack.connection import exceptions as openstack_exc
from oslo_log import log
import retrying
import tenacity
from tenacity import retry
from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import context as ironic_context
@@ -865,11 +866,11 @@ def get_physnets_by_port_uuid(client, port_uuid):
if network.provider_physical_network else set())
@retrying.retry(
stop_max_attempt_number=CONF.agent.neutron_agent_max_attempts,
retry_on_exception=lambda e: isinstance(e, exception.NetworkError),
wait_fixed=CONF.agent.neutron_agent_status_retry_interval * 1000
)
@retry(
retry=tenacity.retry_if_exception_type(exception.NetworkError),
stop=tenacity.stop_after_attempt(CONF.agent.neutron_agent_max_attempts),
wait=tenacity.wait_fixed(CONF.agent.neutron_agent_status_retry_interval),
reraise=True)
def wait_for_host_agent(client, host_id, target_state='up'):
"""Wait for neutron agent to become target state
@@ -904,11 +905,11 @@ def wait_for_host_agent(client, host_id, target_state='up'):
'host': host_id, 'state': target_state})
@retrying.retry(
stop_max_attempt_number=CONF.agent.neutron_agent_max_attempts,
retry_on_exception=lambda e: isinstance(e, exception.NetworkError),
wait_fixed=CONF.agent.neutron_agent_status_retry_interval * 1000
)
@retry(
retry=tenacity.retry_if_exception_type(exception.NetworkError),
stop=tenacity.stop_after_attempt(CONF.agent.neutron_agent_max_attempts),
wait=tenacity.wait_fixed(CONF.agent.neutron_agent_status_retry_interval),
reraise=True)
def wait_for_port_status(client, port_id, status):
"""Wait for port status to be the desired status


@@ -18,7 +18,7 @@ from ironic_lib import metrics_utils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
import retrying
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
@@ -187,10 +187,13 @@ def _verify_node(node, allocation):
# node_locked_retry_attempt times, we try to allocate *any* node the same
# number of times. This avoids getting stuck on a node reserved e.g. for power
# sync periodic task.
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.AllocationFailed),
stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(exception.AllocationFailed),
stop=tenacity.stop_after_attempt(
CONF.conductor.node_locked_retry_attempts),
wait=tenacity.wait_fixed(
CONF.conductor.node_locked_retry_interval),
reraise=True)
def _allocate_node(context, allocation, nodes):
"""Go through the list of nodes and try to allocate one of them."""
retry_nodes = []


@@ -108,7 +108,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import retrying
import tenacity
from ironic.common import driver_factory
from ironic.common import exception
@@ -275,12 +275,13 @@ class TaskManager(object):
attempts = 1
# NodeLocked exceptions can be annoying. Let's try to alleviate
# some of that pain by retrying our lock attempts. The retrying
# module expects a wait_fixed value in milliseconds.
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
stop_max_attempt_number=attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
# some of that pain by retrying our lock attempts.
@tenacity.retry(
retry=tenacity.retry_if_exception_type(exception.NodeLocked),
stop=tenacity.stop_after_attempt(attempts),
wait=tenacity.wait_fixed(
CONF.conductor.node_locked_retry_interval),
reraise=True)
def reserve_node():
self.node = objects.Node.reserve(self.context, CONF.host,
self.node_id)
@@ -300,8 +301,8 @@ class TaskManager(object):
when provided with one.
:param purpose: optionally change the purpose of the lock
:param retry: whether to retry locking if it fails, the class-level
value is used by default
:param retry: whether to retry locking if it fails, the
class-level value is used by default
:raises: NodeLocked if an exclusive lock remains on the node after
"node_locked_retry_attempts"
"""


@@ -22,7 +22,7 @@ from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import timeutils
import retrying
import tenacity
from ironic.common import boot_devices
from ironic.common import dhcp_factory
@@ -1168,14 +1168,15 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
:param task: a TaskManager object containing the node
"""
wait = CONF.agent.post_deploy_get_power_state_retry_interval * 1000
wait = CONF.agent.post_deploy_get_power_state_retry_interval
attempts = CONF.agent.post_deploy_get_power_state_retries + 1
@retrying.retry(
stop_max_attempt_number=attempts,
retry_on_result=lambda state: state != states.POWER_OFF,
wait_fixed=wait
)
@tenacity.retry(stop=tenacity.stop_after_attempt(attempts),
retry=(tenacity.retry_if_result(
lambda state: state != states.POWER_OFF)
| tenacity.retry_if_exception_type(Exception)),
wait=tenacity.wait_fixed(wait),
reraise=True)
def _wait_until_powered_off(task):
return task.driver.power.get_power_state(task)
@@ -1219,7 +1220,7 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
'in at least %(timeout)d seconds. Forcing '
'hard power off and proceeding.',
{'node_uuid': node.uuid,
'timeout': (wait * (attempts - 1)) / 1000})
'timeout': (wait * (attempts - 1))})
manager_utils.node_power_action(task, states.POWER_OFF)
else:
# Flush the file system prior to hard rebooting the node


@@ -20,7 +20,7 @@ from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import strutils
import requests
import retrying
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
@@ -103,11 +103,12 @@ class AgentClient(object):
error=get_command_error(result))
@METRICS.timer('AgentClient._wait_for_command')
@retrying.retry(
retry_on_exception=(
lambda e: isinstance(e, exception.AgentCommandTimeout)),
stop_max_attempt_number=CONF.agent.command_wait_attempts,
wait_fixed=CONF.agent.command_wait_interval * 1000)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(
exception.AgentCommandTimeout),
stop=tenacity.stop_after_attempt(CONF.agent.command_wait_attempts),
wait=tenacity.wait_fixed(CONF.agent.command_wait_interval),
reraise=True)
def _wait_for_command(self, node, method):
"""Wait for a command to complete.
@@ -134,10 +135,11 @@
return result
@METRICS.timer('AgentClient._command')
@retrying.retry(
retry_on_exception=(
lambda e: isinstance(e, exception.AgentConnectionFailed)),
stop_max_attempt_number=CONF.agent.max_command_attempts)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(
exception.AgentConnectionFailed),
stop=tenacity.stop_after_attempt(CONF.agent.max_command_attempts),
reraise=True)
def _command(self, node, method, params, wait=False, poll=False):
"""Sends command to agent.
@@ -300,10 +302,12 @@
raise exception.AgentConnectionFailed(reason=msg)
if retry_connection:
_get = retrying.retry(
retry_on_exception=(
lambda e: isinstance(e, exception.AgentConnectionFailed)),
stop_max_attempt_number=CONF.agent.max_command_attempts)(_get)
_get = tenacity.retry(
retry=tenacity.retry_if_exception_type(
exception.AgentConnectionFailed),
stop=tenacity.stop_after_attempt(
CONF.agent.max_command_attempts),
reraise=True)(_get)
result = _get().json()['commands']
status = '; '.join('%(cmd)s: result "%(res)s", error "%(err)s"' %


@@ -18,7 +18,7 @@ import time
from oslo_config import cfg
from oslo_log import log
import retrying
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
@@ -179,13 +179,13 @@ class AgentPower(base.PowerInterface):
if not timeout:
timeout = CONF.agent.post_deploy_get_power_state_retries * wait
@retrying.retry(
stop_max_delay=timeout,
retry_on_result=lambda result: not result,
retry_on_exception=(
lambda e: isinstance(e, exception.AgentConnectionFailed)),
wait_fixed=wait * 1000
)
@tenacity.retry(
stop=tenacity.stop_after_delay(timeout),
retry=(tenacity.retry_if_result(lambda result: not result)
| tenacity.retry_if_exception_type(
exception.AgentConnectionFailed)),
wait=tenacity.wait_fixed(wait),
reraise=True)
def _wait_until_rebooted(task):
try:
status = self._client.get_commands_status(


@@ -26,7 +26,7 @@ from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import retrying
import tenacity
import yaml
from ironic.common import dhcp_factory
@@ -624,14 +624,16 @@ class AnsibleDeploy(agent_base.HeartbeatMixin,
:param task: a TaskManager object containing the node
"""
wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
wait = CONF.ansible.post_deploy_get_power_state_retry_interval
attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
@retrying.retry(
stop_max_attempt_number=attempts,
retry_on_result=lambda state: state != states.POWER_OFF,
wait_fixed=wait
)
@tenacity.retry(
stop=tenacity.stop_after_attempt(attempts),
retry=(tenacity.retry_if_result(
lambda state: state != states.POWER_OFF)
| tenacity.retry_if_exception_type(Exception)),
wait=tenacity.wait_fixed(wait),
reraise=True)
def _wait_until_powered_off(task):
return task.driver.power.get_power_state(task)


@@ -17,7 +17,7 @@ DRAC Lifecycle job specific methods
from oslo_log import log as logging
from oslo_utils import importutils
import retrying
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
@@ -91,10 +91,11 @@ def list_unfinished_jobs(node):
raise exception.DracOperationError(error=exc)
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.DracOperationError),
stop_max_attempt_number=CONF.drac.config_job_max_retries,
wait_fixed=WAIT_CLOCK * 1000)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(exception.DracOperationError),
stop=tenacity.stop_after_attempt(CONF.drac.config_job_max_retries),
wait=tenacity.wait_fixed(WAIT_CLOCK),
reraise=True)
def wait_for_job_completion(node,
retries=CONF.drac.config_job_max_retries):
"""Wait for job to complete


@@ -21,7 +21,7 @@ from oslo_log import log
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import strutils
import retrying
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
@@ -133,10 +133,11 @@ def handle_ibmc_exception(action):
{'retry_times': CONF.ibmc.connection_attempts})
return connect_error
@retrying.retry(
retry_on_exception=should_retry,
stop_max_attempt_number=CONF.ibmc.connection_attempts,
wait_fixed=CONF.ibmc.connection_retry_interval * 1000)
@tenacity.retry(
retry=tenacity.retry_if_exception(should_retry),
stop=tenacity.stop_after_attempt(CONF.ibmc.connection_attempts),
wait=tenacity.wait_fixed(CONF.ibmc.connection_retry_interval),
reraise=True)
@functools.wraps(f)
def wrapper(*args, **kwargs):
# NOTE(dtantsur): this code could be written simpler, but then unit


@@ -15,7 +15,7 @@
from oslo_log import log
from oslo_utils import importutils
import retrying
import tenacity
from ironic.common import boot_devices
from ironic.common import exception
@@ -155,8 +155,10 @@ def _test_retry(exception):
return False
@retrying.retry(wait_fixed=3000, stop_max_attempt_number=3,
retry_on_exception=_test_retry)
@tenacity.retry(retry=tenacity.retry_if_exception(_test_retry),
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_fixed(3),
reraise=True)
def _insert_vmedia(task, managers, boot_url, boot_device):
"""Insert bootable ISO image into virtual CD or DVD


@@ -21,8 +21,8 @@ from oslo_log import log
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
import retrying
import rfc3986
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
@@ -300,11 +300,12 @@ def _get_connection(node, lambda_fun, *args):
"""
driver_info = parse_driver_info(node)
@retrying.retry(
retry_on_exception=(
lambda e: isinstance(e, exception.RedfishConnectionError)),
stop_max_attempt_number=CONF.redfish.connection_attempts,
wait_fixed=CONF.redfish.connection_retry_interval * 1000)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(
exception.RedfishConnectionError),
stop=tenacity.stop_after_attempt(CONF.redfish.connection_attempts),
wait=tenacity.wait_fixed(CONF.redfish.connection_retry_interval),
reraise=True)
def _get_cached_connection(lambda_fun, *args):
try:
with SessionCache(driver_info) as conn:


@@ -16,7 +16,7 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
import retrying
import tenacity
from ironic.common import cinder
from ironic.common import exception
@@ -307,10 +307,11 @@ class CinderStorage(base.StorageInterface):
if not connector:
connector = self._generate_connector(task)
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.StorageError),
stop_max_attempt_number=CONF.cinder.action_retries + 1,
wait_fixed=CONF.cinder.action_retry_interval * 1000)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(exception.StorageError),
stop=tenacity.stop_after_attempt(CONF.cinder.action_retries + 1),
wait=tenacity.wait_fixed(CONF.cinder.action_retry_interval),
reraise=True)
def detach_volumes():
try:
# NOTE(TheJulia): If the node is in ACTIVE state, we can


@@ -24,7 +24,7 @@ from glanceclient import exc as glance_exc
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_utils import uuidutils
import retrying
import tenacity
import testtools
from ironic.common import context
@@ -163,8 +163,11 @@ class TestGlanceImageService(base.TestCase):
self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
def test_show_raises_when_no_authtoken_in_the_context(self):
@mock.patch.object(service_utils, 'is_image_active', autospec=True)
def test_show_raises_when_no_authtoken_in_the_context(self,
mock_is_active):
self.context.auth_token = False
mock_is_active.return_value = True
self.assertRaises(exception.ImageNotFound,
self.service.show,
uuidutils.generate_uuid())
@@ -177,8 +180,8 @@ class TestGlanceImageService(base.TestCase):
self.assertRaises(exception.ImageUnacceptable,
self.service.show, image_id)
@mock.patch.object(retrying.time, 'sleep', autospec=True)
def test_download_with_retries(self, mock_sleep):
@mock.patch.object(tenacity, 'retry', autospec=True)
def test_download_with_retries(self, mock_retry):
tries = [0]
class MyGlanceStubClient(stubs.StubGlanceClient):
@@ -196,6 +199,7 @@ class TestGlanceImageService(base.TestCase):
stub_context.project_id = 'fake'
stub_service = image_service.GlanceImageService(stub_client,
stub_context)
stub_service.call.retry.sleep = mock.Mock()
image_id = uuidutils.generate_uuid()
writer = NullWriter()
@@ -211,7 +215,7 @@
stub_context)
tries = [0]
stub_service.download(image_id, writer)
self.assertTrue(mock_sleep.called)
mock_retry.assert_called_once()
def test_download_no_data(self):
self.client.fake_wrapped = None


@@ -20,6 +20,7 @@ from keystoneauth1 import loading as ks_loading
import openstack
from openstack.connection import exceptions as openstack_exc
from oslo_utils import uuidutils
import tenacity
from ironic.common import context
from ironic.common import exception
@@ -886,12 +887,14 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
validate_agent_mock.return_value = True
self.assertTrue(neutron.wait_for_host_agent(
self.client_mock, 'hostname'))
sleep_mock.assert_not_called()
validate_agent_mock.assert_called_once()
@mock.patch.object(neutron, '_validate_agent', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_wait_for_host_agent_down_target_state_up(
self, sleep_mock, validate_agent_mock):
neutron.wait_for_host_agent.retry.stop = (
tenacity.stop_after_attempt(3))
validate_agent_mock.return_value = False
self.assertRaises(exception.NetworkError,
neutron.wait_for_host_agent,
@@ -901,27 +904,31 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
@mock.patch.object(time, 'sleep', autospec=True)
def test_wait_for_host_agent_up_target_state_down(
self, sleep_mock, validate_agent_mock):
neutron.wait_for_host_agent.retry.stop = (
tenacity.stop_after_attempt(3))
validate_agent_mock.return_value = True
self.assertRaises(exception.NetworkError,
neutron.wait_for_host_agent,
self.client_mock, 'hostname', target_state='down')
@mock.patch.object(neutron, '_validate_agent', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_wait_for_host_agent_down_target_state_down(
self, sleep_mock, validate_agent_mock):
self, validate_agent_mock):
validate_agent_mock.return_value = False
self.assertTrue(
neutron.wait_for_host_agent(self.client_mock, 'hostname',
target_state='down'))
sleep_mock = mock.Mock()
neutron.wait_for_host_agent.retry.sleep = sleep_mock
sleep_mock.assert_not_called()
validate_agent_mock.assert_called_once()
@mock.patch.object(neutron, '_get_port_by_uuid', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_wait_for_port_status_up(self, sleep_mock, get_port_mock):
get_port_mock.return_value = stubs.FakeNeutronPort(status='ACTIVE')
neutron.wait_for_port_status(self.client_mock, 'port_id', 'ACTIVE')
sleep_mock.assert_not_called()
get_port_mock.assert_called_once()
@mock.patch.object(neutron, '_get_port_by_uuid', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
@@ -929,12 +936,14 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
get_port_mock.side_effect = [stubs.FakeNeutronPort(status='DOWN'),
stubs.FakeNeutronPort(status='ACTIVE')]
neutron.wait_for_port_status(self.client_mock, 'port_id', 'ACTIVE')
sleep_mock.assert_called_once()
self.assertEqual(get_port_mock.call_count, 2)
@mock.patch.object(neutron, '_get_port_by_uuid', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_wait_for_port_status_active_max_retry(self, sleep_mock,
get_port_mock):
def test_wait_for_port_status_active_max_retry(
self, sleep_mock, get_port_mock):
neutron.wait_for_port_status.retry.stop = (
tenacity.stop_after_attempt(3))
get_port_mock.return_value = stubs.FakeNeutronPort(status='DOWN')
self.assertRaises(exception.NetworkError,
neutron.wait_for_port_status,
@@ -942,8 +951,10 @@
@mock.patch.object(neutron, '_get_port_by_uuid', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_wait_for_port_status_down_max_retry(self, sleep_mock,
get_port_mock):
def test_wait_for_port_status_down_max_retry(
self, sleep_mock, get_port_mock):
neutron.wait_for_port_status.retry.stop = (
tenacity.stop_after_attempt(3))
get_port_mock.return_value = stubs.FakeNeutronPort(status='ACTIVE')
self.assertRaises(exception.NetworkError,
neutron.wait_for_port_status,


@@ -31,6 +31,7 @@ import oslo_messaging as messaging
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields
import tenacity
from ironic.common import boot_devices
from ironic.common import components
@@ -1786,12 +1787,14 @@ class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
deployments.continue_node_deploy,
mock.ANY)
@mock.patch.object(tenacity, 'stop_after_attempt',
return_value=tenacity.stop_after_attempt(4),
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_deploy_locked(self, mock_spawn):
def test_continue_node_deploy_locked(self, mock_spawn, mock_stop):
"""Test that continuing a deploy via RPC cannot fail due to locks."""
max_attempts = 3
self.config(node_locked_retry_attempts=max_attempts, group='conductor')
prv_state = states.DEPLOYWAIT
tgt_prv_state = states.ACTIVE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -2611,9 +2614,12 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_continue_node_clean_manual_abort_last_clean_step(self):
self._continue_node_clean_abort_last_clean_step(manual=True)
@mock.patch.object(tenacity, 'stop_after_attempt',
return_value=tenacity.stop_after_attempt(4),
autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
def test_continue_node_clean_locked(self, mock_spawn):
def test_continue_node_clean_locked(self, mock_spawn, mock_stop):
"""Test that continuing a clean via RPC cannot fail due to locks."""
max_attempts = 3
self.config(node_locked_retry_attempts=max_attempts, group='conductor')


@@ -21,6 +21,7 @@ from unittest import mock
import futurist
from oslo_utils import uuidutils
import tenacity
from ironic.common import driver_factory
from ironic.common import exception
@@ -222,13 +223,13 @@ class TaskManagerTestCase(db_base.DbTestCase):
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
@mock.patch.object(tenacity, 'stop_after_attempt',
return_value=tenacity.stop_after_attempt(4),
autospec=True)
def test_excl_lock_exception_patient(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
self, stop_mock, get_voltgt_mock, get_volconn_mock,
get_portgroups_mock, get_ports_mock, build_driver_mock,
reserve_mock, release_mock, node_get_mock):
retry_attempts = 3
self.config(node_locked_retry_attempts=retry_attempts,
group='conductor')
# Fail on the first 3 attempts, succeed on the fourth.
reserve_mock.side_effect = (


@@ -16,6 +16,7 @@
import collections
import copy
import os
import time
from unittest import mock
from oslo_config import cfg
@@ -202,12 +203,11 @@ class RedfishUtilsTestCase(db_base.DbTestCase):
redfish_utils.get_system(self.node)
fake_conn.get_system.assert_called_once_with(None)
@mock.patch('time.sleep', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_get_system_resource_connection_error_retry(self, mock_sushy,
mock_sleep):
def test_get_system_resource_connection_error_retry(self, mock_sushy):
# Redfish specific configurations
self.config(connection_attempts=3, group='redfish')
@@ -223,8 +223,8 @@
mock.call(self.parsed_driver_info['system_id']),
]
fake_conn.get_system.assert_has_calls(expected_get_system_calls)
mock_sleep.assert_called_with(
redfish_utils.CONF.redfish.connection_retry_interval)
self.assertEqual(fake_conn.get_system.call_count,
redfish_utils.CONF.redfish.connection_attempts)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'


@@ -888,8 +888,8 @@ class AgentRescueTests(AgentDeployMixinBaseTest):
class AgentDeployMixinTest(AgentDeployMixinBaseTest):
@mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@@ -991,8 +991,8 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.assertFalse(mock_collect.called)
@mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@@ -1005,7 +1005,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.side_effect = RuntimeError("boom")
get_power_state_mock.return_value = RuntimeError("boom")
power_on_node_if_needed_mock.return_value = None
self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)


@@ -17,7 +17,6 @@ import json
from unittest import mock
import requests
import retrying
from ironic.common import exception
from ironic import conf
@@ -384,7 +383,6 @@ class TestAgentClient(base.TestCase):
self.assertRaises(exception.InvalidParameterValue,
self.client._command, self.node, method, params)
@mock.patch('time.sleep', lambda seconds: None)
def test__command_poll(self):
response_data = {'status': 'ok'}
final_status = MockCommandStatus('SUCCEEDED', name='run_image')
@@ -696,9 +694,7 @@ class TestAgentClientAttempts(base.TestCase):
self.client.session = mock.MagicMock(autospec=requests.Session)
self.node = MockNode()
@mock.patch.object(retrying.time, 'sleep', autospec=True)
def test__command_fail_all_attempts(self, mock_sleep):
mock_sleep.return_value = None
def test__command_fail_all_attempts(self):
error = 'Connection Timeout'
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
@@ -720,9 +716,7 @@ class TestAgentClientAttempts(base.TestCase):
'error': error}, str(e))
self.assertEqual(3, self.client.session.post.call_count)
@mock.patch.object(retrying.time, 'sleep', autospec=True)
def test__command_succeed_after_two_timeouts(self, mock_sleep):
mock_sleep.return_value = None
def test__command_succeed_after_two_timeouts(self):
error = 'Connection Timeout'
response_data = {'status': 'ok'}
method = 'standby.run_image'
@@ -742,9 +736,7 @@ class TestAgentClientAttempts(base.TestCase):
timeout=60,
verify=True)
@mock.patch.object(retrying.time, 'sleep', autospec=True)
def test__command_fail_agent_token_required(self, mock_sleep):
mock_sleep.return_value = None
def test__command_fail_agent_token_required(self):
error = 'Unknown Argument: "agent_token"'
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
@@ -769,9 +761,7 @@ class TestAgentClientAttempts(base.TestCase):
'meowmeowmeow',
self.node.driver_internal_info.get('agent_secret_token'))
@mock.patch.object(retrying.time, 'sleep', autospec=True)
def test__command_succeed_after_one_timeout(self, mock_sleep):
mock_sleep.return_value = None
def test__command_succeed_after_one_timeout(self):
error = 'Connection Timeout'
response_data = {'status': 'ok'}
method = 'standby.run_image'


@@ -36,7 +36,7 @@ jsonpatch!=1.20,>=1.16 # BSD
Jinja2>=2.10 # BSD License (3 clause)
keystonemiddleware>=4.17.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
tenacity>=6.2.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
jsonschema>=3.2.0 # MIT
psutil>=3.2.2 # BSD