Replace deprecated LOG.warn with LOG.warning
LOG.warn is deprecated. It is still used in a few places. This updates those call sites to the non-deprecated LOG.warning. Change-Id: I42c7b6a172b64a3c1ac6e0703df48ac2ddf8390a Partial-Bug:#1508442
This commit is contained in:
parent
859ff4893f
commit
a023c32c70
@ -22,7 +22,7 @@ For example::
|
||||
|
||||
or::
|
||||
|
||||
LOG.warn(_LW('Unknown base file %(img)s'), {'img': img})
|
||||
LOG.warning(_LW('Unknown base file %(img)s'), {'img': img})
|
||||
|
||||
You should use the basic wrapper ``_()`` for strings which are not log
|
||||
messages::
|
||||
|
@ -211,7 +211,7 @@ class ExtensionManager(object):
|
||||
|
||||
if isinstance(ext_factory, six.string_types):
|
||||
if ext_factory.startswith('nova.api.openstack.compute.contrib'):
|
||||
LOG.warn(_LW("The legacy v2 API module already moved into"
|
||||
LOG.warning(_LW("The legacy v2 API module already moved into"
|
||||
"'nova.api.openstack.compute.legacy_v2.contrib'. "
|
||||
"Use new path instead of old path %s"),
|
||||
ext_factory)
|
||||
|
@ -319,9 +319,10 @@ class MoveClaim(Claim):
|
||||
|
||||
def create_migration_context(self):
|
||||
if not self.migration:
|
||||
LOG.warn(_LW("Can't create a migration_context record without a "
|
||||
"migration object specified."),
|
||||
instance=self.instance)
|
||||
LOG.warning(
|
||||
_LW("Can't create a migration_context record without a "
|
||||
"migration object specified."),
|
||||
instance=self.instance)
|
||||
return
|
||||
|
||||
mig_context = objects.MigrationContext(
|
||||
|
@ -3202,7 +3202,7 @@ class ComputeManager(manager.Manager):
|
||||
instance.task_state = None
|
||||
instance.save()
|
||||
msg = _LW("Image not found during snapshot")
|
||||
LOG.warn(msg, instance=instance)
|
||||
LOG.warning(msg, instance=instance)
|
||||
|
||||
def _post_interrupted_snapshot_cleanup(self, context, instance):
|
||||
self.driver.post_interrupted_snapshot_cleanup(context, instance)
|
||||
@ -5040,7 +5040,7 @@ class ComputeManager(manager.Manager):
|
||||
self.driver.attach_interface(instance, image_meta, network_info[0])
|
||||
except exception.NovaException as ex:
|
||||
port_id = network_info[0].get('id')
|
||||
LOG.warn(_LW("attach interface failed , try to deallocate "
|
||||
LOG.warning(_LW("attach interface failed , try to deallocate "
|
||||
"port %(port_id)s, reason: %(msg)s"),
|
||||
{'port_id': port_id, 'msg': ex},
|
||||
instance=instance)
|
||||
@ -5048,7 +5048,7 @@ class ComputeManager(manager.Manager):
|
||||
self.network_api.deallocate_port_for_instance(
|
||||
context, instance, port_id)
|
||||
except Exception:
|
||||
LOG.warn(_LW("deallocate port %(port_id)s failed"),
|
||||
LOG.warning(_LW("deallocate port %(port_id)s failed"),
|
||||
{'port_id': port_id}, instance=instance)
|
||||
raise exception.InterfaceAttachFailed(
|
||||
instance_uuid=instance.uuid)
|
||||
@ -6534,7 +6534,7 @@ class ComputeManager(manager.Manager):
|
||||
self.driver.power_off(instance)
|
||||
except Exception:
|
||||
msg = _LW("Failed to power off instance")
|
||||
LOG.warn(msg, instance=instance, exc_info=True)
|
||||
LOG.warning(msg, instance=instance, exc_info=True)
|
||||
|
||||
elif action == 'reap':
|
||||
LOG.info(_LI("Destroying instance with name label "
|
||||
|
@ -193,7 +193,7 @@ def get_api_servers():
|
||||
if '//' not in api_server:
|
||||
api_server = 'http://' + api_server
|
||||
# NOTE(sdague): remove in N.
|
||||
LOG.warn(
|
||||
LOG.warning(
|
||||
_LW("No protocol specified in for api_server '%s', "
|
||||
"please update [glance] api_servers with fully "
|
||||
"qualified url including scheme (http / https)"),
|
||||
|
@ -814,7 +814,7 @@ def ensure_floating_forward(floating_ip, fixed_ip, device, network):
|
||||
num_rules = iptables_manager.ipv4['nat'].remove_rules_regex(regex)
|
||||
if num_rules:
|
||||
msg = _LW('Removed %(num)d duplicate rules for floating IP %(float)s')
|
||||
LOG.warn(msg, {'num': num_rules, 'float': floating_ip})
|
||||
LOG.warning(msg, {'num': num_rules, 'float': floating_ip})
|
||||
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
|
||||
iptables_manager.ipv4['nat'].add_rule(chain, rule)
|
||||
iptables_manager.apply()
|
||||
|
@ -99,7 +99,7 @@ class DbDriver(base.Driver):
|
||||
# as nova-conductor is restarted, so only log this error once.
|
||||
if not getattr(service, 'model_disconnected', False):
|
||||
service.model_disconnected = True
|
||||
LOG.warn(_LW('Lost connection to nova-conductor '
|
||||
LOG.warning(_LW('Lost connection to nova-conductor '
|
||||
'for reporting service status.'))
|
||||
except Exception:
|
||||
# NOTE(rpodolyaka): we'd like to avoid catching of all possible
|
||||
|
@ -89,5 +89,5 @@ class MemcachedDriver(base.Driver):
|
||||
except Exception:
|
||||
if not getattr(service, 'model_disconnected', False):
|
||||
service.model_disconnected = True
|
||||
LOG.warn(_LW('Lost connection to memcache server '
|
||||
LOG.warning(_LW('Lost connection to memcache server '
|
||||
'for reporting service status.'))
|
||||
|
@ -6192,11 +6192,11 @@ class ComputeTestCase(BaseTestCase):
|
||||
|
||||
self.compute.driver.set_bootable(inst1, False).AndRaise(
|
||||
NotImplementedError)
|
||||
compute_manager.LOG.warn(mox.IgnoreArg())
|
||||
compute_manager.LOG.warning(mox.IgnoreArg())
|
||||
self.compute.driver.power_off(inst1)
|
||||
self.compute.driver.set_bootable(inst2, False).AndRaise(
|
||||
NotImplementedError)
|
||||
compute_manager.LOG.warn(mox.IgnoreArg())
|
||||
compute_manager.LOG.warning(mox.IgnoreArg())
|
||||
self.compute.driver.power_off(inst2)
|
||||
|
||||
self.mox.ReplayAll()
|
||||
@ -6213,11 +6213,11 @@ class ComputeTestCase(BaseTestCase):
|
||||
|
||||
self.compute.driver.set_bootable(inst1, False)
|
||||
self.compute.driver.power_off(inst1).AndRaise(e)
|
||||
compute_manager.LOG.warn(mox.IgnoreArg())
|
||||
compute_manager.LOG.warning(mox.IgnoreArg())
|
||||
|
||||
self.compute.driver.set_bootable(inst2, False)
|
||||
self.compute.driver.power_off(inst2).AndRaise(e)
|
||||
compute_manager.LOG.warn(mox.IgnoreArg())
|
||||
compute_manager.LOG.warning(mox.IgnoreArg())
|
||||
|
||||
self.mox.ReplayAll()
|
||||
self.compute._cleanup_running_deleted_instances(ctxt)
|
||||
|
@ -132,7 +132,7 @@ class ContextTestCase(test.NoDBTestCase):
|
||||
a = a[0]
|
||||
warns.append(str(msg) % a)
|
||||
|
||||
self.stub_out('nova.context.LOG.warn', stub_warn)
|
||||
self.stub_out('nova.context.LOG.warning', stub_warn)
|
||||
|
||||
ctxt = context.RequestContext('111',
|
||||
'222',
|
||||
|
@ -118,7 +118,7 @@ class LookupTorrentURLTestCase(test.NoDBTestCase):
|
||||
def fake_warn(msg):
|
||||
warnings.append(msg)
|
||||
|
||||
self.stubs.Set(bittorrent.LOG, 'warn', fake_warn)
|
||||
self.stubs.Set(bittorrent.LOG, 'warning', fake_warn)
|
||||
|
||||
lookup_fn = self.store._lookup_torrent_url_fn()
|
||||
self.assertEqual('fakeimageid.torrent',
|
||||
|
@ -311,7 +311,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
|
||||
self['mount_device'],
|
||||
encryption=encryption)
|
||||
except Exception:
|
||||
LOG.warn(_LW("Driver failed to detach volume "
|
||||
LOG.warning(_LW("Driver failed to detach volume "
|
||||
"%(volume_id)s at %(mount_point)s."),
|
||||
{'volume_id': volume_id,
|
||||
'mount_point': self['mount_device']},
|
||||
@ -362,9 +362,10 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
|
||||
try:
|
||||
volume_api.delete(context, volume_id)
|
||||
except Exception as exc:
|
||||
LOG.warn(_LW('Failed to delete volume: %(volume_id)s '
|
||||
'due to %(exc)s'),
|
||||
{'volume_id': volume_id, 'exc': exc})
|
||||
LOG.warning(
|
||||
_LW('Failed to delete volume: %(volume_id)s '
|
||||
'due to %(exc)s'),
|
||||
{'volume_id': volume_id, 'exc': exc})
|
||||
|
||||
|
||||
class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
|
||||
|
@ -112,7 +112,7 @@ class InstanceEventHandler(object):
|
||||
try:
|
||||
instance_uuid = self._vmutils.get_instance_uuid(instance_name)
|
||||
if not instance_uuid:
|
||||
LOG.warn(_LW("Instance uuid could not be retrieved for "
|
||||
LOG.warning(_LW("Instance uuid could not be retrieved for "
|
||||
"instance %s. Instance state change event "
|
||||
"will be ignored."),
|
||||
instance_name)
|
||||
|
@ -32,7 +32,7 @@ def get_domain_info(libvirt, host, virt_dom):
|
||||
return virt_dom.info()
|
||||
except libvirt.libvirtError as e:
|
||||
if not host.has_min_version((1, 2, 11)) and is_race(e):
|
||||
LOG.warn(_LW('Race detected in libvirt.virDomain.info, '
|
||||
LOG.warning(_LW('Race detected in libvirt.virDomain.info, '
|
||||
'trying one more time'))
|
||||
return virt_dom.info()
|
||||
raise
|
||||
|
@ -57,7 +57,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
|
||||
try:
|
||||
libvirt = importutils.import_module('libvirt')
|
||||
except ImportError:
|
||||
LOG.warn(_LW("Libvirt module could not be loaded. "
|
||||
LOG.warning(_LW("Libvirt module could not be loaded. "
|
||||
"NWFilterFirewall will not work correctly."))
|
||||
self._host = host
|
||||
self.static_filters_configured = False
|
||||
|
@ -485,7 +485,7 @@ class Host(object):
|
||||
self._event_lifecycle_callback,
|
||||
self)
|
||||
except Exception as e:
|
||||
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
|
||||
LOG.warning(_LW("URI %(uri)s does not support events: %(error)s"),
|
||||
{'uri': self._uri, 'error': e})
|
||||
|
||||
try:
|
||||
@ -501,7 +501,7 @@ class Host(object):
|
||||
LOG.debug("The version of python-libvirt does not support "
|
||||
"registerCloseCallback or is too old: %s", e)
|
||||
except libvirt.libvirtError as e:
|
||||
LOG.warn(_LW("URI %(uri)s does not support connection"
|
||||
LOG.warning(_LW("URI %(uri)s does not support connection"
|
||||
" events: %(error)s"),
|
||||
{'uri': self._uri, 'error': e})
|
||||
|
||||
@ -799,7 +799,7 @@ class Host(object):
|
||||
except libvirt.libvirtError as ex:
|
||||
error_code = ex.get_error_code()
|
||||
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
|
||||
LOG.warn(_LW("URI %(uri)s does not support full set"
|
||||
LOG.warning(_LW("URI %(uri)s does not support full set"
|
||||
" of host capabilities: %(error)s"),
|
||||
{'uri': self._uri, 'error': ex})
|
||||
else:
|
||||
@ -941,9 +941,9 @@ class Host(object):
|
||||
# TODO(sahid): Use get_info...
|
||||
dom_mem = int(guest._get_domain_info(self)[2])
|
||||
except libvirt.libvirtError as e:
|
||||
LOG.warn(_LW("couldn't obtain the memory from domain:"
|
||||
" %(uuid)s, exception: %(ex)s"),
|
||||
{"uuid": guest.uuid, "ex": e})
|
||||
LOG.warning(_LW("couldn't obtain the memory from domain:"
|
||||
" %(uuid)s, exception: %(ex)s"),
|
||||
{"uuid": guest.uuid, "ex": e})
|
||||
continue
|
||||
# skip dom0
|
||||
if guest.id != 0:
|
||||
|
@ -329,7 +329,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
|
||||
inuse_images.append(backing_path)
|
||||
|
||||
if backing_path in self.unexplained_images:
|
||||
LOG.warn(_LW('Instance %(instance)s is using a '
|
||||
LOG.warning(_LW('Instance %(instance)s is using a '
|
||||
'backing file %(backing)s which '
|
||||
'does not appear in the image '
|
||||
'service'),
|
||||
@ -550,7 +550,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
|
||||
self.active_base_files.append(base_file)
|
||||
|
||||
if not base_file:
|
||||
LOG.warn(_LW('image %(id)s at (%(base_file)s): warning '
|
||||
LOG.warning(_LW('image %(id)s at (%(base_file)s): warning '
|
||||
'-- an absent base file is in use! '
|
||||
'instances: %(instance_list)s'),
|
||||
{'id': img_id,
|
||||
@ -588,7 +588,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
|
||||
|
||||
error_images = self.used_swap_images - self.back_swap_images
|
||||
for error_image in error_images:
|
||||
LOG.warn(_LW('%s swap image was used by instance'
|
||||
LOG.warning(_LW('%s swap image was used by instance'
|
||||
' but no back files existing!'), error_image)
|
||||
|
||||
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
|
||||
@ -614,7 +614,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
|
||||
|
||||
# Anything left is an unknown base image
|
||||
for img in self.unexplained_images:
|
||||
LOG.warn(_LW('Unknown base file: %s'), img)
|
||||
LOG.warning(_LW('Unknown base file: %s'), img)
|
||||
self.removable_base_files.append(img)
|
||||
|
||||
# Dump these lists
|
||||
|
@ -90,7 +90,7 @@ class InstanceJobTracker(object):
|
||||
instance=instance)
|
||||
else:
|
||||
# The process is still around
|
||||
LOG.warn(_LW("Failed to kill a long running process "
|
||||
LOG.warning(_LW("Failed to kill a long running process "
|
||||
"%(pid)s related to the instance when "
|
||||
"deleting it."), {'pid': pid},
|
||||
instance=instance)
|
||||
|
@ -75,7 +75,7 @@ def create_volume(vg, lv, size, sparse=False):
|
||||
preallocated_space = 64 * units.Mi
|
||||
check_size(vg, lv, preallocated_space)
|
||||
if free_space < size:
|
||||
LOG.warn(_LW('Volume group %(vg)s will not be able'
|
||||
LOG.warning(_LW('Volume group %(vg)s will not be able'
|
||||
' to hold sparse volume %(lv)s.'
|
||||
' Virtual volume size is %(size)d bytes,'
|
||||
' but free space on volume group is'
|
||||
@ -223,7 +223,7 @@ def clear_volume(path):
|
||||
try:
|
||||
volume_size = get_volume_size(path)
|
||||
except exception.VolumeBDMPathNotFound:
|
||||
LOG.warn(_LW('ignoring missing logical volume %(path)s'),
|
||||
LOG.warning(_LW('ignoring missing logical volume %(path)s'),
|
||||
{'path': path})
|
||||
return
|
||||
|
||||
|
@ -287,7 +287,7 @@ class RBDDriver(object):
|
||||
try:
|
||||
rbd.RBD().remove(client.ioctx, name)
|
||||
except rbd.ImageNotFound:
|
||||
LOG.warn(_LW('image %(volume)s in pool %(pool)s can not be '
|
||||
LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '
|
||||
'found, failed to remove'),
|
||||
{'volume': name, 'pool': self.pool})
|
||||
except rbd.ImageHasSnapshots:
|
||||
@ -323,7 +323,7 @@ class RBDDriver(object):
|
||||
self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
|
||||
ignore_errors=True)
|
||||
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
|
||||
LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
|
||||
LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '
|
||||
'failed'),
|
||||
{'volume': volume, 'pool': self.pool})
|
||||
retryctx['retries'] -= 1
|
||||
|
@ -118,6 +118,6 @@ class LibvirtGlusterfsVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
|
||||
utils.execute(*gluster_cmd, run_as_root=True)
|
||||
except processutils.ProcessExecutionError as exc:
|
||||
if ensure and 'already mounted' in six.text_type(exc):
|
||||
LOG.warn(_LW("%s is already mounted"), glusterfs_share)
|
||||
LOG.warning(_LW("%s is already mounted"), glusterfs_share)
|
||||
else:
|
||||
raise
|
||||
|
@ -106,6 +106,6 @@ class LibvirtNFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
|
||||
utils.execute(*nfs_cmd, run_as_root=True)
|
||||
except processutils.ProcessExecutionError as exc:
|
||||
if ensure and 'already mounted' in six.text_type(exc):
|
||||
LOG.warn(_LW("%s is already mounted"), nfs_share)
|
||||
LOG.warning(_LW("%s is already mounted"), nfs_share)
|
||||
else:
|
||||
raise
|
||||
|
@ -61,7 +61,7 @@ def mount_share(mount_path, export_path,
|
||||
utils.execute(*mount_cmd, run_as_root=True)
|
||||
except processutils.ProcessExecutionError as exc:
|
||||
if 'Device or resource busy' in six.text_type(exc):
|
||||
LOG.warn(_LW("%s is already mounted"), export_path)
|
||||
LOG.warning(_LW("%s is already mounted"), export_path)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
@ -99,7 +99,7 @@ class LibvirtScalityVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
|
||||
config = CONF.libvirt.scality_sofs_config
|
||||
if not config:
|
||||
msg = _("Value required for 'scality_sofs_config'")
|
||||
LOG.warn(msg)
|
||||
LOG.warning(msg)
|
||||
raise exception.NovaException(msg)
|
||||
|
||||
# config can be a file path or a URL, check it
|
||||
@ -110,13 +110,13 @@ class LibvirtScalityVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
|
||||
urllib.request.urlopen(config, timeout=5).close()
|
||||
except urllib.error.URLError as e:
|
||||
msg = _("Cannot access 'scality_sofs_config': %s") % e
|
||||
LOG.warn(msg)
|
||||
LOG.warning(msg)
|
||||
raise exception.NovaException(msg)
|
||||
|
||||
# mount.sofs must be installed
|
||||
if not os.access('/sbin/mount.sofs', os.X_OK):
|
||||
msg = _("Cannot execute /sbin/mount.sofs")
|
||||
LOG.warn(msg)
|
||||
LOG.warning(msg)
|
||||
raise exception.NovaException(msg)
|
||||
|
||||
def _sofs_is_mounted(self):
|
||||
@ -141,5 +141,5 @@ class LibvirtScalityVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
|
||||
run_as_root=True)
|
||||
if not self._sofs_is_mounted():
|
||||
msg = _("Cannot mount Scality SOFS, check syslog for errors")
|
||||
LOG.warn(msg)
|
||||
LOG.warning(msg)
|
||||
raise exception.NovaException(msg)
|
||||
|
@ -85,7 +85,7 @@ class LibvirtBaseVolumeDriver(object):
|
||||
new_key = 'disk_' + k
|
||||
setattr(conf, new_key, v)
|
||||
else:
|
||||
LOG.warn(_LW('Unknown content in connection_info/'
|
||||
LOG.warning(_LW('Unknown content in connection_info/'
|
||||
'qos_specs: %s'), specs)
|
||||
|
||||
# Extract access_mode control parameters
|
||||
|
@ -479,7 +479,7 @@ class VMwareVolumeOps(object):
|
||||
except oslo_vmw_exceptions.FileNotFoundException:
|
||||
# Volume's vmdk was moved; remove the device so that we can
|
||||
# relocate the volume.
|
||||
LOG.warn(_LW("Virtual disk: %s of volume's backing not found."),
|
||||
LOG.warning(_LW("Virtual disk: %s of volume's backing not found."),
|
||||
original_device_path, exc_info=True)
|
||||
LOG.debug("Removing disk device of volume's backing and "
|
||||
"reattempting relocate.")
|
||||
|
@ -70,7 +70,7 @@ class BittorrentStore(object):
|
||||
|
||||
if CONF.xenserver.torrent_base_url:
|
||||
if '/' not in CONF.xenserver.torrent_base_url:
|
||||
LOG.warn(_LW('Value specified in conf file for'
|
||||
LOG.warning(_LW('Value specified in conf file for'
|
||||
' xenserver.torrent_base_url does not contain a'
|
||||
' slash character, therefore it will not be used'
|
||||
' as part of the torrent URL. Specify a valid'
|
||||
|
@ -983,7 +983,7 @@ def try_auto_configure_disk(session, vdi_ref, new_gb):
|
||||
_auto_configure_disk(session, vdi_ref, new_gb)
|
||||
except exception.CannotResizeDisk as e:
|
||||
msg = _LW('Attempted auto_configure_disk failed because: %s')
|
||||
LOG.warn(msg % e)
|
||||
LOG.warning(msg % e)
|
||||
|
||||
|
||||
def _make_partition(session, dev, partition_start, partition_end):
|
||||
|
@ -136,7 +136,7 @@ def cinderclient(context):
|
||||
'release, and Nova is still configured to use it. '
|
||||
'Enable the V2 API in Cinder and set '
|
||||
'cinder.catalog_info in nova.conf to use it.')
|
||||
LOG.warn(msg)
|
||||
LOG.warning(msg)
|
||||
_V1_ERROR_RAISED = True
|
||||
|
||||
return cinder_client.Client(version,
|
||||
|
@ -67,8 +67,9 @@ def get_encryption_metadata(context, volume_api, volume_id, connection_info):
|
||||
metadata = volume_api.get_volume_encryption_metadata(context,
|
||||
volume_id)
|
||||
if not metadata:
|
||||
LOG.warn(_LW('Volume %s should be encrypted but there is no '
|
||||
'encryption metadata.'), volume_id)
|
||||
LOG.warning(_LW(
|
||||
'Volume %s should be encrypted but there is no '
|
||||
'encryption metadata.'), volume_id)
|
||||
except Exception as e:
|
||||
LOG.error(_LE("Failed to retrieve encryption metadata for "
|
||||
"volume %(volume_id)s: %(exception)s"),
|
||||
|
Loading…
x
Reference in New Issue
Block a user