Replace deprecated LOG.warn with warning
LOG.warn is deprecated, but it is still used in a few places. This updates those uses to the non-deprecated LOG.warning instead. Closes-Bug: 1508442 Change-Id: Id947e4c8ae9894f480192a5ab034c24c25125788
This commit is contained in:
parent
93f5a12a99
commit
5d22ccf016
|
@ -214,10 +214,10 @@ def list_partitions(device):
|
||||||
for line in lines:
|
for line in lines:
|
||||||
match = _PARTED_PRINT_RE.match(line)
|
match = _PARTED_PRINT_RE.match(line)
|
||||||
if match is None:
|
if match is None:
|
||||||
LOG.warn(_LW("Partition information from parted for device "
|
LOG.warning(_LW("Partition information from parted for device "
|
||||||
"%(device)s does not match "
|
"%(device)s does not match "
|
||||||
"expected format: %(line)s"),
|
"expected format: %(line)s"),
|
||||||
dict(device=device, line=line))
|
dict(device=device, line=line))
|
||||||
continue
|
continue
|
||||||
# Cast int fields to ints (some are floats and we round them down)
|
# Cast int fields to ints (some are floats and we round them down)
|
||||||
groups = [int(float(x)) if i < 4 else x
|
groups = [int(float(x)) if i < 4 else x
|
||||||
|
|
|
@ -476,8 +476,8 @@ def unlink_without_raise(path):
|
||||||
if e.errno == errno.ENOENT:
|
if e.errno == errno.ENOENT:
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Failed to unlink %(path)s, error: %(e)s"),
|
LOG.warning(_LW("Failed to unlink %(path)s, error: %(e)s"),
|
||||||
{'path': path, 'e': e})
|
{'path': path, 'e': e})
|
||||||
|
|
||||||
|
|
||||||
def rmtree_without_raise(path):
|
def rmtree_without_raise(path):
|
||||||
|
@ -485,8 +485,8 @@ def rmtree_without_raise(path):
|
||||||
if os.path.isdir(path):
|
if os.path.isdir(path):
|
||||||
shutil.rmtree(path)
|
shutil.rmtree(path)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
LOG.warn(_LW("Failed to remove dir %(path)s, error: %(e)s"),
|
LOG.warning(_LW("Failed to remove dir %(path)s, error: %(e)s"),
|
||||||
{'path': path, 'e': e})
|
{'path': path, 'e': e})
|
||||||
|
|
||||||
|
|
||||||
def write_to_file(path, contents):
|
def write_to_file(path, contents):
|
||||||
|
@ -501,9 +501,10 @@ def create_link_without_raise(source, link):
|
||||||
if e.errno == errno.EEXIST:
|
if e.errno == errno.EEXIST:
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Failed to create symlink from %(source)s to %(link)s"
|
LOG.warning(
|
||||||
", error: %(e)s"),
|
_LW("Failed to create symlink from %(source)s to %(link)s"
|
||||||
{'source': source, 'link': link, 'e': e})
|
", error: %(e)s"),
|
||||||
|
{'source': source, 'link': link, 'e': e})
|
||||||
|
|
||||||
|
|
||||||
def safe_rstrip(value, chars=None):
|
def safe_rstrip(value, chars=None):
|
||||||
|
@ -515,8 +516,9 @@ def safe_rstrip(value, chars=None):
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not isinstance(value, six.string_types):
|
if not isinstance(value, six.string_types):
|
||||||
LOG.warn(_LW("Failed to remove trailing character. Returning original "
|
LOG.warning(_LW("Failed to remove trailing character. Returning "
|
||||||
"object. Supplied object is not a string: %s,"), value)
|
"original object. Supplied object is not a string: "
|
||||||
|
"%s,"), value)
|
||||||
return value
|
return value
|
||||||
|
|
||||||
return value.rstrip(chars) or value
|
return value.rstrip(chars) or value
|
||||||
|
|
|
@ -286,9 +286,10 @@ class ConductorManager(periodic_task.PeriodicTasks):
|
||||||
except exception.ConductorAlreadyRegistered:
|
except exception.ConductorAlreadyRegistered:
|
||||||
# This conductor was already registered and did not shut down
|
# This conductor was already registered and did not shut down
|
||||||
# properly, so log a warning and update the record.
|
# properly, so log a warning and update the record.
|
||||||
LOG.warn(_LW("A conductor with hostname %(hostname)s "
|
LOG.warning(
|
||||||
"was previously registered. Updating registration"),
|
_LW("A conductor with hostname %(hostname)s "
|
||||||
{'hostname': self.host})
|
"was previously registered. Updating registration"),
|
||||||
|
{'hostname': self.host})
|
||||||
cdr = self.dbapi.register_conductor({'hostname': self.host,
|
cdr = self.dbapi.register_conductor({'hostname': self.host,
|
||||||
'drivers': self.drivers},
|
'drivers': self.drivers},
|
||||||
update_existing=True)
|
update_existing=True)
|
||||||
|
@ -1789,27 +1790,27 @@ class ConductorManager(periodic_task.PeriodicTasks):
|
||||||
sensors_data = task.driver.management.get_sensors_data(
|
sensors_data = task.driver.management.get_sensors_data(
|
||||||
task)
|
task)
|
||||||
except NotImplementedError:
|
except NotImplementedError:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
'get_sensors_data is not implemented for driver'
|
'get_sensors_data is not implemented for driver'
|
||||||
' %(driver)s, node_uuid is %(node)s'),
|
' %(driver)s, node_uuid is %(node)s'),
|
||||||
{'node': node_uuid, 'driver': driver})
|
{'node': node_uuid, 'driver': driver})
|
||||||
except exception.FailedToParseSensorData as fps:
|
except exception.FailedToParseSensorData as fps:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"During get_sensors_data, could not parse "
|
"During get_sensors_data, could not parse "
|
||||||
"sensor data for node %(node)s. Error: %(err)s."),
|
"sensor data for node %(node)s. Error: %(err)s."),
|
||||||
{'node': node_uuid, 'err': str(fps)})
|
{'node': node_uuid, 'err': str(fps)})
|
||||||
except exception.FailedToGetSensorData as fgs:
|
except exception.FailedToGetSensorData as fgs:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"During get_sensors_data, could not get "
|
"During get_sensors_data, could not get "
|
||||||
"sensor data for node %(node)s. Error: %(err)s."),
|
"sensor data for node %(node)s. Error: %(err)s."),
|
||||||
{'node': node_uuid, 'err': str(fgs)})
|
{'node': node_uuid, 'err': str(fgs)})
|
||||||
except exception.NodeNotFound:
|
except exception.NodeNotFound:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"During send_sensor_data, node %(node)s was not "
|
"During send_sensor_data, node %(node)s was not "
|
||||||
"found and presumed deleted by another process."),
|
"found and presumed deleted by another process."),
|
||||||
{'node': node_uuid})
|
{'node': node_uuid})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Failed to get sensor data for node %(node)s. "
|
"Failed to get sensor data for node %(node)s. "
|
||||||
"Error: %(error)s"), {'node': node_uuid, 'error': str(e)})
|
"Error: %(error)s"), {'node': node_uuid, 'error': str(e)})
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -89,15 +89,15 @@ def node_power_action(task, new_state):
|
||||||
node['power_state'] = new_state
|
node['power_state'] = new_state
|
||||||
node['target_power_state'] = states.NOSTATE
|
node['target_power_state'] = states.NOSTATE
|
||||||
node.save()
|
node.save()
|
||||||
LOG.warn(_LW("Not going to change node power state because "
|
LOG.warning(_LW("Not going to change node power state because "
|
||||||
"current state = requested state = '%(state)s'."),
|
"current state = requested state = '%(state)s'."),
|
||||||
{'state': curr_state})
|
{'state': curr_state})
|
||||||
return
|
return
|
||||||
|
|
||||||
if curr_state == states.ERROR:
|
if curr_state == states.ERROR:
|
||||||
# be optimistic and continue action
|
# be optimistic and continue action
|
||||||
LOG.warn(_LW("Driver returns ERROR power state for node %s."),
|
LOG.warning(_LW("Driver returns ERROR power state for node %s."),
|
||||||
node.uuid)
|
node.uuid)
|
||||||
|
|
||||||
# Set the target_power_state and clear any last_error, if we're
|
# Set the target_power_state and clear any last_error, if we're
|
||||||
# starting a new operation. This will expose to other processes
|
# starting a new operation. This will expose to other processes
|
||||||
|
|
|
@ -578,8 +578,9 @@ class Connection(api.Connection):
|
||||||
|
|
||||||
if nodes:
|
if nodes:
|
||||||
nodes = ', '.join(nodes)
|
nodes = ', '.join(nodes)
|
||||||
LOG.warn(_LW('Cleared reservations held by %(hostname)s: '
|
LOG.warning(
|
||||||
'%(nodes)s'), {'hostname': hostname, 'nodes': nodes})
|
_LW('Cleared reservations held by %(hostname)s: '
|
||||||
|
'%(nodes)s'), {'hostname': hostname, 'nodes': nodes})
|
||||||
|
|
||||||
def get_active_driver_dict(self, interval=None):
|
def get_active_driver_dict(self, interval=None):
|
||||||
if interval is None:
|
if interval is None:
|
||||||
|
|
|
@ -279,10 +279,10 @@ class NeutronDHCPApi(base.BaseDHCP):
|
||||||
failures.append(port.uuid)
|
failures.append(port.uuid)
|
||||||
|
|
||||||
if failures:
|
if failures:
|
||||||
LOG.warn(_LW("Some errors were encountered on node %(node)s"
|
LOG.warning(_LW("Some errors were encountered on node %(node)s"
|
||||||
" while retrieving IP address on the following"
|
" while retrieving IP address on the following"
|
||||||
" ports: %(ports)s."),
|
" ports: %(ports)s."),
|
||||||
{'node': task.node.uuid, 'ports': failures})
|
{'node': task.node.uuid, 'ports': failures})
|
||||||
|
|
||||||
return ip_addresses
|
return ip_addresses
|
||||||
|
|
||||||
|
|
|
@ -1055,7 +1055,7 @@ def driver_periodic_task(parallel=True, **other):
|
||||||
|
|
||||||
eventlet.greenthread.spawn_n(_internal)
|
eventlet.greenthread.spawn_n(_internal)
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
'Using periodic tasks with parallel=False is deprecated, '
|
'Using periodic tasks with parallel=False is deprecated, '
|
||||||
'"parallel" argument will be ignored starting with '
|
'"parallel" argument will be ignored starting with '
|
||||||
'the Mitaka release'))
|
'the Mitaka release'))
|
||||||
|
|
|
@ -356,8 +356,8 @@ def set_config(task, **kwargs):
|
||||||
attrib_names.append(k)
|
attrib_names.append(k)
|
||||||
|
|
||||||
if unchanged_attribs:
|
if unchanged_attribs:
|
||||||
LOG.warn(_LW('Ignoring unchanged BIOS settings %r'),
|
LOG.warning(_LW('Ignoring unchanged BIOS settings %r'),
|
||||||
unchanged_attribs)
|
unchanged_attribs)
|
||||||
|
|
||||||
if invalid_attribs_msgs or read_only_keys:
|
if invalid_attribs_msgs or read_only_keys:
|
||||||
raise exception.DracOperationFailed(
|
raise exception.DracOperationFailed(
|
||||||
|
|
|
@ -447,8 +447,8 @@ def _disable_secure_boot_if_supported(task):
|
||||||
# attempted deploy. Handling this exception here, will help the
|
# attempted deploy. Handling this exception here, will help the
|
||||||
# user to tear down such a Node.
|
# user to tear down such a Node.
|
||||||
except exception.IloOperationNotSupported:
|
except exception.IloOperationNotSupported:
|
||||||
LOG.warn(_LW('Secure boot mode is not supported for node %s'),
|
LOG.warning(_LW('Secure boot mode is not supported for node %s'),
|
||||||
task.node.uuid)
|
task.node.uuid)
|
||||||
|
|
||||||
|
|
||||||
class IloVirtualMediaIscsiDeploy(base.DeployInterface):
|
class IloVirtualMediaIscsiDeploy(base.DeployInterface):
|
||||||
|
|
|
@ -56,9 +56,9 @@ def _create_ports_if_not_exist(node, macs):
|
||||||
LOG.info(_LI("Port created for MAC address %(address)s for node "
|
LOG.info(_LI("Port created for MAC address %(address)s for node "
|
||||||
"%(node)s"), {'address': mac, 'node': node.uuid})
|
"%(node)s"), {'address': mac, 'node': node.uuid})
|
||||||
except exception.MACAlreadyExists:
|
except exception.MACAlreadyExists:
|
||||||
LOG.warn(_LW("Port already exists for MAC address %(address)s "
|
LOG.warning(_LW("Port already exists for MAC address %(address)s "
|
||||||
"for node %(node)s"),
|
"for node %(node)s"),
|
||||||
{'address': mac, 'node': node.uuid})
|
{'address': mac, 'node': node.uuid})
|
||||||
|
|
||||||
|
|
||||||
def _get_essential_properties(node, ilo_object):
|
def _get_essential_properties(node, ilo_object):
|
||||||
|
|
|
@ -98,9 +98,9 @@ def _execute_ilo_clean_step(node, step, *args, **kwargs):
|
||||||
except ilo_error.IloCommandNotSupportedError:
|
except ilo_error.IloCommandNotSupportedError:
|
||||||
# This clean step is not supported on Gen8 and below servers.
|
# This clean step is not supported on Gen8 and below servers.
|
||||||
# Log the failure and continue with cleaning.
|
# Log the failure and continue with cleaning.
|
||||||
LOG.warn(_LW("'%(step)s' clean step is not supported on node "
|
LOG.warning(_LW("'%(step)s' clean step is not supported on node "
|
||||||
"%(uuid)s. Skipping the clean step."),
|
"%(uuid)s. Skipping the clean step."),
|
||||||
{'step': step, 'uuid': node.uuid})
|
{'step': step, 'uuid': node.uuid})
|
||||||
except ilo_error.IloError as ilo_exception:
|
except ilo_error.IloError as ilo_exception:
|
||||||
raise exception.NodeCleaningFailure(_(
|
raise exception.NodeCleaningFailure(_(
|
||||||
"Clean step %(step)s failed "
|
"Clean step %(step)s failed "
|
||||||
|
|
|
@ -201,10 +201,11 @@ class ImageCache(object):
|
||||||
return
|
return
|
||||||
amount = self._clean_up_ensure_cache_size(survived, amount)
|
amount = self._clean_up_ensure_cache_size(survived, amount)
|
||||||
if amount is not None and amount > 0:
|
if amount is not None and amount > 0:
|
||||||
LOG.warn(_LW("Cache clean up was unable to reclaim %(required)d "
|
LOG.warning(
|
||||||
"MiB of disk space, still %(left)d MiB required"),
|
_LW("Cache clean up was unable to reclaim %(required)d "
|
||||||
{'required': amount_copy / 1024 / 1024,
|
"MiB of disk space, still %(left)d MiB required"),
|
||||||
'left': amount / 1024 / 1024})
|
{'required': amount_copy / 1024 / 1024,
|
||||||
|
'left': amount / 1024 / 1024})
|
||||||
|
|
||||||
def _clean_up_too_old(self, listing, amount):
|
def _clean_up_too_old(self, listing, amount):
|
||||||
"""Clean up stage 1: drop images that are older than TTL.
|
"""Clean up stage 1: drop images that are older than TTL.
|
||||||
|
@ -228,9 +229,9 @@ class ImageCache(object):
|
||||||
try:
|
try:
|
||||||
os.unlink(file_name)
|
os.unlink(file_name)
|
||||||
except EnvironmentError as exc:
|
except EnvironmentError as exc:
|
||||||
LOG.warn(_LW("Unable to delete file %(name)s from "
|
LOG.warning(_LW("Unable to delete file %(name)s from "
|
||||||
"master image cache: %(exc)s"),
|
"master image cache: %(exc)s"),
|
||||||
{'name': file_name, 'exc': exc})
|
{'name': file_name, 'exc': exc})
|
||||||
else:
|
else:
|
||||||
if amount is not None:
|
if amount is not None:
|
||||||
amount -= stat.st_size
|
amount -= stat.st_size
|
||||||
|
@ -267,9 +268,9 @@ class ImageCache(object):
|
||||||
try:
|
try:
|
||||||
os.unlink(file_name)
|
os.unlink(file_name)
|
||||||
except EnvironmentError as exc:
|
except EnvironmentError as exc:
|
||||||
LOG.warn(_LW("Unable to delete file %(name)s from "
|
LOG.warning(_LW("Unable to delete file %(name)s from "
|
||||||
"master image cache: %(exc)s"),
|
"master image cache: %(exc)s"),
|
||||||
{'name': file_name, 'exc': exc})
|
{'name': file_name, 'exc': exc})
|
||||||
else:
|
else:
|
||||||
total_size -= stat.st_size
|
total_size -= stat.st_size
|
||||||
if amount is not None:
|
if amount is not None:
|
||||||
|
@ -402,9 +403,9 @@ def _delete_master_path_if_stale(master_path, href, ctx):
|
||||||
if not img_mtime:
|
if not img_mtime:
|
||||||
# This means that href is not a glance image and doesn't have an
|
# This means that href is not a glance image and doesn't have an
|
||||||
# updated_at attribute
|
# updated_at attribute
|
||||||
LOG.warn(_LW("Image service couldn't determine last "
|
LOG.warning(_LW("Image service couldn't determine last "
|
||||||
"modification time of %(href)s, considering "
|
"modification time of %(href)s, considering "
|
||||||
"cached image up to date."), {'href': href})
|
"cached image up to date."), {'href': href})
|
||||||
return True
|
return True
|
||||||
master_mtime = utils.unix_file_modification_datetime(master_path)
|
master_mtime = utils.unix_file_modification_datetime(master_path)
|
||||||
if img_mtime <= master_mtime:
|
if img_mtime <= master_mtime:
|
||||||
|
|
|
@ -134,10 +134,11 @@ def _get_pxe_conf_option(task, opt_name):
|
||||||
CONF.pybasedir)
|
CONF.pybasedir)
|
||||||
|
|
||||||
if current_value != default_value:
|
if current_value != default_value:
|
||||||
LOG.warn(_LW("The CONF option [agent]agent_%(opt_name)s is "
|
LOG.warning(
|
||||||
"deprecated and will be removed in Mitaka release of "
|
_LW("The CONF option [agent]agent_%(opt_name)s is "
|
||||||
"Ironic. Please use [pxe]%(opt_name)s instead."),
|
"deprecated and will be removed in Mitaka release of "
|
||||||
{'opt_name': opt_name})
|
"Ironic. Please use [pxe]%(opt_name)s instead."),
|
||||||
|
{'opt_name': opt_name})
|
||||||
return current_value
|
return current_value
|
||||||
|
|
||||||
# Either task.driver.deploy is ISCSIDeploy() or the default value hasn't
|
# Either task.driver.deploy is ISCSIDeploy() or the default value hasn't
|
||||||
|
@ -541,16 +542,18 @@ class PXEBoot(base.BootInterface):
|
||||||
]
|
]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
if not iwdi:
|
if not iwdi:
|
||||||
LOG.warn(_LW("The UUID for the root partition can't be "
|
LOG.warning(
|
||||||
"found, unable to switch the pxe config from "
|
_LW("The UUID for the root partition can't be "
|
||||||
"deployment mode to service (boot) mode for "
|
"found, unable to switch the pxe config from "
|
||||||
"node %(node)s"), {"node": task.node.uuid})
|
"deployment mode to service (boot) mode for "
|
||||||
|
"node %(node)s"), {"node": task.node.uuid})
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("The disk id for the whole disk image can't "
|
LOG.warning(
|
||||||
"be found, unable to switch the pxe config "
|
_LW("The disk id for the whole disk image can't "
|
||||||
"from deployment mode to service (boot) mode "
|
"be found, unable to switch the pxe config "
|
||||||
"for node %(node)s"),
|
"from deployment mode to service (boot) mode "
|
||||||
{"node": task.node.uuid})
|
"for node %(node)s"),
|
||||||
|
{"node": task.node.uuid})
|
||||||
else:
|
else:
|
||||||
pxe_config_path = pxe_utils.get_pxe_config_file_path(
|
pxe_config_path = pxe_utils.get_pxe_config_file_path(
|
||||||
task.node.uuid)
|
task.node.uuid)
|
||||||
|
|
|
@ -143,8 +143,8 @@ def get_node_capability(node, capability):
|
||||||
if parts[0].strip() == capability:
|
if parts[0].strip() == capability:
|
||||||
return parts[1].strip()
|
return parts[1].strip()
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Ignoring malformed capability '%s'. "
|
LOG.warning(_LW("Ignoring malformed capability '%s'. "
|
||||||
"Format should be 'key:val'."), node_capability)
|
"Format should be 'key:val'."), node_capability)
|
||||||
|
|
||||||
|
|
||||||
def add_node_capability(task, capability, value):
|
def add_node_capability(task, capability, value):
|
||||||
|
|
|
@ -186,7 +186,7 @@ BYT;
|
||||||
'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
|
'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
|
||||||
use_standard_locale=True, run_as_root=True)
|
use_standard_locale=True, run_as_root=True)
|
||||||
|
|
||||||
@mock.patch.object(disk_partitioner.LOG, 'warn', autospec=True)
|
@mock.patch.object(disk_partitioner.LOG, 'warning', autospec=True)
|
||||||
def test_incorrect(self, log_mock, execute_mock):
|
def test_incorrect(self, log_mock, execute_mock):
|
||||||
output = """
|
output = """
|
||||||
BYT;
|
BYT;
|
||||||
|
|
|
@ -843,7 +843,8 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
|
||||||
self.assertNotIn('boot_iso_created_in_web_server', dinfo)
|
self.assertNotIn('boot_iso_created_in_web_server', dinfo)
|
||||||
self.assertNotIn('root_uuid_or_disk_id', dinfo)
|
self.assertNotIn('root_uuid_or_disk_id', dinfo)
|
||||||
|
|
||||||
@mock.patch.object(ilo_deploy.LOG, 'warn', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_deploy.LOG, 'warning',
|
||||||
|
spec_set=True, autospec=True)
|
||||||
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
|
||||||
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
|
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
|
||||||
autospec=True)
|
autospec=True)
|
||||||
|
@ -987,7 +988,8 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
|
||||||
update_secure_boot_mode_mock.assert_called_once_with(task, False)
|
update_secure_boot_mode_mock.assert_called_once_with(task, False)
|
||||||
self.assertEqual(states.DELETED, returned_state)
|
self.assertEqual(states.DELETED, returned_state)
|
||||||
|
|
||||||
@mock.patch.object(ilo_deploy.LOG, 'warn', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_deploy.LOG, 'warning',
|
||||||
|
spec_set=True, autospec=True)
|
||||||
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
|
||||||
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
|
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', spec_set=True,
|
||||||
autospec=True)
|
autospec=True)
|
||||||
|
@ -1730,7 +1732,8 @@ class IloPXEDeployTestCase(db_base.DbTestCase):
|
||||||
pxe_tear_down_mock.assert_called_once_with(mock.ANY, task)
|
pxe_tear_down_mock.assert_called_once_with(mock.ANY, task)
|
||||||
self.assertEqual(states.DELETED, returned_state)
|
self.assertEqual(states.DELETED, returned_state)
|
||||||
|
|
||||||
@mock.patch.object(ilo_deploy.LOG, 'warn', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_deploy.LOG, 'warning',
|
||||||
|
spec_set=True, autospec=True)
|
||||||
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down',
|
@mock.patch.object(iscsi_deploy.ISCSIDeploy, 'tear_down',
|
||||||
spec_set=True, autospec=True)
|
spec_set=True, autospec=True)
|
||||||
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_deploy, 'exception', spec_set=True, autospec=True)
|
||||||
|
|
|
@ -243,7 +243,8 @@ class TestInspectPrivateMethods(db_base.DbTestCase):
|
||||||
db_obj.create_port.assert_any_call(port_dict1)
|
db_obj.create_port.assert_any_call(port_dict1)
|
||||||
db_obj.create_port.assert_any_call(port_dict2)
|
db_obj.create_port.assert_any_call(port_dict2)
|
||||||
|
|
||||||
@mock.patch.object(ilo_inspect.LOG, 'warn', spec_set=True, autospec=True)
|
@mock.patch.object(ilo_inspect.LOG, 'warning',
|
||||||
|
spec_set=True, autospec=True)
|
||||||
@mock.patch.object(dbapi, 'get_instance', spec_set=True, autospec=True)
|
@mock.patch.object(dbapi, 'get_instance', spec_set=True, autospec=True)
|
||||||
def test__create_ports_if_not_exist_mac_exception(self,
|
def test__create_ports_if_not_exist_mac_exception(self,
|
||||||
instance_mock,
|
instance_mock,
|
||||||
|
|
|
@ -220,7 +220,7 @@ class IloManagementTestCase(db_base.DbTestCase):
|
||||||
ilo_management._execute_ilo_clean_step(
|
ilo_management._execute_ilo_clean_step(
|
||||||
self.node, 'fake-step', 'args', kwarg='kwarg')
|
self.node, 'fake-step', 'args', kwarg='kwarg')
|
||||||
clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
|
clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
|
||||||
self.assertTrue(log_mock.warn.called)
|
self.assertTrue(log_mock.warning.called)
|
||||||
|
|
||||||
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
|
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
|
||||||
autospec=True)
|
autospec=True)
|
||||||
|
|
|
@ -469,7 +469,7 @@ class TestImageCacheCleanUp(base.TestCase):
|
||||||
'uuid', 'fake', 'fake')
|
'uuid', 'fake', 'fake')
|
||||||
self.assertTrue(mock_rmtree.called)
|
self.assertTrue(mock_rmtree.called)
|
||||||
|
|
||||||
@mock.patch.object(image_cache.LOG, 'warn', autospec=True)
|
@mock.patch.object(image_cache.LOG, 'warning', autospec=True)
|
||||||
@mock.patch.object(image_cache.ImageCache, '_clean_up_too_old',
|
@mock.patch.object(image_cache.ImageCache, '_clean_up_too_old',
|
||||||
autospec=True)
|
autospec=True)
|
||||||
@mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size',
|
@mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size',
|
||||||
|
|
Loading…
Reference in New Issue