Remove translation of log messages

The i18n team has decided not to translate the logs because it
seems they are not very useful; operators prefer to have them in
English so that they can search for those strings on the internet.

This partially fixes nova/virt; other paths will be fixed in subsequent
commits.

Change-Id: Ie7821aa4a5147cdb0616741bd1a1b1fc22080440
Ngo Quoc Cuong 2017-05-23 10:18:45 +07:00
parent 3a5d592e60
commit 6c3520ac5b
61 changed files with 868 additions and 1001 deletions
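
For readers skimming the diff: the same mechanical change repeats in every
hunk. The oslo.i18n log-marker wrappers (_LE, _LI, _LW) and their imports are
dropped and the bare format string is passed straight to the logger, keeping
lazy %-style argument passing so messages are only formatted when the level
is enabled; _() is kept where strings remain translated, such as exception
messages. A minimal sketch of the before/after pattern, using the stdlib
logger in place of oslo.log and illustrative names not taken from this diff:

    import logging

    LOG = logging.getLogger(__name__)

    def detach_volume(volume_id, mountpoint):
        # Old style, removed by this commit (marker imported from nova.i18n):
        #     LOG.info(_LI('Detaching volume %(volume_id)s from %(mp)s'),
        #              {'volume_id': volume_id, 'mp': mountpoint})
        # New style: plain string, same lazy %-style argument passing.
        LOG.info('Detaching volume %(volume_id)s from %(mp)s',
                 {'volume_id': volume_id, 'mp': mountpoint})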

View File

@@ -24,9 +24,6 @@ from oslo_utils import excutils
 from nova import block_device
 import nova.conf
 from nova import exception
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW

 CONF = nova.conf.CONF
@@ -258,7 +255,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         if 'multipath_id' in self['connection_info']['data']:
             connection_info['data']['multipath_id'] =\
                 self['connection_info']['data']['multipath_id']
-            LOG.info(_LI('preserve multipath_id %s'),
+            LOG.info('preserve multipath_id %s',
                      connection_info['data']['multipath_id'])

     def driver_detach(self, context, instance, volume_api, virt_driver):
@@ -266,12 +263,12 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         mp = self['mount_device']
         volume_id = self.volume_id
-        LOG.info(_LI('Attempting to driver detach volume %(volume_id)s from '
-                     'mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp},
+        LOG.info('Attempting to driver detach volume %(volume_id)s from '
+                 'mountpoint %(mp)s', {'volume_id': volume_id, 'mp': mp},
                  instance=instance)
         try:
             if not virt_driver.instance_exists(instance):
-                LOG.warning(_LW('Detaching volume from unknown instance'),
+                LOG.warning('Detaching volume from unknown instance',
                             instance=instance)
             encryption = encryptors.get_encryption_metadata(context,
@@ -279,15 +276,15 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
             virt_driver.detach_volume(connection_info, instance, mp,
                                       encryption=encryption)
         except exception.DiskNotFound as err:
-            LOG.warning(_LW('Ignoring DiskNotFound exception while '
-                            'detaching volume %(volume_id)s from '
-                            '%(mp)s : %(err)s'),
+            LOG.warning('Ignoring DiskNotFound exception while '
+                        'detaching volume %(volume_id)s from '
+                        '%(mp)s : %(err)s',
                         {'volume_id': volume_id, 'mp': mp,
                          'err': err}, instance=instance)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to detach volume '
-                                  '%(volume_id)s from %(mp)s'),
+                LOG.exception('Failed to detach volume '
+                              '%(volume_id)s from %(mp)s',
                               {'volume_id': volume_id, 'mp': mp},
                               instance=instance)
                 volume_api.roll_detaching(context, volume_id)
@@ -307,9 +304,9 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
             LOG.debug("Skipping driver_detach during remote rebuild.",
                       instance=instance)
         elif destroy_bdm:
-            LOG.error(_LE("Unable to call for a driver detach of volume "
-                          "%(vol_id)s due to the instance being "
-                          "registered to the remote host %(inst_host)s."),
+            LOG.error("Unable to call for a driver detach of volume "
+                      "%(vol_id)s due to the instance being "
+                      "registered to the remote host %(inst_host)s.",
                       {'vol_id': volume_id,
                        'inst_host': instance.host}, instance=instance)
@@ -322,20 +319,20 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         stashed_connector = connection_info.get('connector')
         if not stashed_connector:
             # Volume was attached before we began stashing connectors
-            LOG.warning(_LW("Host mismatch detected, but stashed "
-                            "volume connector not found. Instance host is "
-                            "%(ihost)s, but volume connector host is "
-                            "%(chost)s."),
+            LOG.warning("Host mismatch detected, but stashed "
+                        "volume connector not found. Instance host is "
+                        "%(ihost)s, but volume connector host is "
+                        "%(chost)s.",
                         {'ihost': instance.host,
                          'chost': connector.get('host')})
         elif stashed_connector.get('host') != instance.host:
             # Unexpected error. The stashed connector is also not matching
             # the needed instance host.
-            LOG.error(_LE("Host mismatch detected in stashed volume "
-                          "connector. Will use local volume connector. "
-                          "Instance host is %(ihost)s. Local volume "
-                          "connector host is %(chost)s. Stashed volume "
-                          "connector host is %(schost)s."),
+            LOG.error("Host mismatch detected in stashed volume "
+                      "connector. Will use local volume connector. "
+                      "Instance host is %(ihost)s. Local volume "
+                      "connector host is %(chost)s. Stashed volume "
+                      "connector host is %(schost)s.",
                       {'ihost': instance.host,
                        'chost': connector.get('host'),
                        'schost': stashed_connector.get('host')})
@@ -392,8 +389,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                 device_type=self['device_type'], encryption=encryption)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Driver failed to attach volume "
-                                  "%(volume_id)s at %(mountpoint)s"),
+                LOG.exception("Driver failed to attach volume "
+                              "%(volume_id)s at %(mountpoint)s",
                               {'volume_id': volume_id,
                                'mountpoint': self['mount_device']},
                               instance=instance)
@@ -424,11 +421,11 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                                          self['mount_device'],
                                          encryption=encryption)
         except Exception:
-            LOG.warning(_LW("Driver failed to detach volume "
-                            "%(volume_id)s at %(mount_point)s."),
+            LOG.warning("Driver failed to detach volume "
+                        "%(volume_id)s at %(mount_point)s.",
                         {'volume_id': volume_id,
                          'mount_point': self['mount_device']},
                         exc_info=True, instance=instance)
         volume_api.terminate_connection(context, volume_id,
                                         connector)
@@ -475,8 +472,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                 volume_api.delete(context, volume_id)
             except Exception as exc:
                 LOG.warning(
-                    _LW('Failed to delete volume: %(volume_id)s '
-                        'due to %(exc)s'),
+                    'Failed to delete volume: %(volume_id)s '
+                    'due to %(exc)s',
                     {'volume_id': volume_id, 'exc': exc})
@@ -600,25 +597,25 @@ def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
     def _log_and_attach(bdm):
         instance = attach_args[1]
         if bdm.get('volume_id'):
-            LOG.info(_LI('Booting with volume %(volume_id)s at '
-                         '%(mountpoint)s'),
+            LOG.info('Booting with volume %(volume_id)s at '
+                     '%(mountpoint)s',
                      {'volume_id': bdm.volume_id,
                       'mountpoint': bdm['mount_device']},
                      instance=instance)
         elif bdm.get('snapshot_id'):
-            LOG.info(_LI('Booting with volume snapshot %(snapshot_id)s at '
-                         '%(mountpoint)s'),
+            LOG.info('Booting with volume snapshot %(snapshot_id)s at '
+                     '%(mountpoint)s',
                      {'snapshot_id': bdm.snapshot_id,
                       'mountpoint': bdm['mount_device']},
                      instance=instance)
         elif bdm.get('image_id'):
-            LOG.info(_LI('Booting with volume-backed-image %(image_id)s at '
-                         '%(mountpoint)s'),
+            LOG.info('Booting with volume-backed-image %(image_id)s at '
+                     '%(mountpoint)s',
                      {'image_id': bdm.image_id,
                       'mountpoint': bdm['mount_device']},
                      instance=instance)
         else:
-            LOG.info(_LI('Booting with blank volume at %(mountpoint)s'),
+            LOG.info('Booting with blank volume at %(mountpoint)s',
                      {'mountpoint': bdm['mount_device']},
                      instance=instance)

View File

@@ -38,8 +38,6 @@ from oslo_utils import units
 import nova.conf
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LW
 from nova import utils
 from nova.virt.disk.mount import api as mount
 from nova.virt.disk.vfs import api as vfs
@@ -243,8 +241,8 @@ def is_image_extendable(image):
         # provides a bad configuration for libguestfs reported in
         # the bug lp#1413142. When resolved we should remove this
         # except to let the error to be propagated.
-        LOG.warning(_LW('Unable to mount image %(image)s with '
-                        'error %(error)s. Cannot resize.'),
+        LOG.warning('Unable to mount image %(image)s with '
+                    'error %(error)s. Cannot resize.',
                     {'image': image, 'error': e})
     finally:
         if fs is not None:
@@ -407,8 +405,8 @@ def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
            inject_val = items[inject]
            if inject_val:
                raise
-        LOG.warning(_LW('Ignoring error injecting data into image %(image)s '
-                        '(%(e)s)'), {'image': image, 'e': e})
+        LOG.warning('Ignoring error injecting data into image %(image)s '
+                    '(%(e)s)', {'image': image, 'e': e})
         return False

     try:
@@ -432,8 +430,8 @@ def setup_container(image, container_dir):
     img = _DiskImage(image=image, mount_dir=container_dir)
     dev = img.mount()
     if dev is None:
-        LOG.error(_LE("Failed to mount container filesystem '%(image)s' "
-                      "on '%(target)s': %(errors)s"),
+        LOG.error("Failed to mount container filesystem '%(image)s' "
+                  "on '%(target)s': %(errors)s",
                   {"image": img, "target": container_dir,
                    "errors": img.errors})
         raise exception.NovaException(img.errors)
@@ -465,7 +463,7 @@ def teardown_container(container_dir, container_root_device=None):
             LOG.debug('No release necessary for block device %s',
                       container_root_device)
     except Exception:
-        LOG.exception(_LE('Failed to teardown container filesystem'))
+        LOG.exception(_('Failed to teardown container filesystem'))


 def clean_lxc_namespace(container_dir):
@@ -478,7 +476,7 @@ def clean_lxc_namespace(container_dir):
         img = _DiskImage(image=None, mount_dir=container_dir)
         img.umount()
     except Exception:
-        LOG.exception(_LE('Failed to umount container filesystem'))
+        LOG.exception(_('Failed to umount container filesystem'))


 def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
@@ -511,8 +509,8 @@ def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
         except Exception as e:
             if inject in mandatory:
                 raise
-            LOG.warning(_LW('Ignoring error injecting %(inject)s into '
-                            'image (%(e)s)'), {'inject': inject, 'e': e})
+            LOG.warning('Ignoring error injecting %(inject)s into '
+                        'image (%(e)s)', {'inject': inject, 'e': e})
             status = False
     return status

View File

@@ -21,7 +21,7 @@ from oslo_service import loopingcall
 from oslo_utils import importutils

 from nova import exception
-from nova.i18n import _, _LI, _LW
+from nova.i18n import _
 from nova import utils
 from nova.virt.image import model as imgmodel
@@ -167,11 +167,10 @@ class Mount(object):
         start_time = time.time()
         device = self._inner_get_dev()
         while not device:
-            LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
+            LOG.info('Device allocation failed. Will retry in 2 seconds.')
             time.sleep(2)
             if time.time() - start_time > MAX_DEVICE_WAIT:
-                LOG.warning(_LW('Device allocation failed after repeated '
-                                'retries.'))
+                LOG.warning('Device allocation failed after repeated retries.')
                 return False
             device = self._inner_get_dev()
         return True

View File

@@ -15,7 +15,7 @@

 from oslo_log import log as logging

-from nova.i18n import _, _LI
+from nova.i18n import _
 from nova import utils
 from nova.virt.disk.mount import api
@@ -32,7 +32,7 @@ class LoopMount(api.Mount):
                                   run_as_root=True)
         if err:
             self.error = _('Could not attach image to loopback: %s') % err
-            LOG.info(_LI('Loop mount error: %s'), self.error)
+            LOG.info('Loop mount error: %s', self.error)
             self.linked = False
             self.device = None
             return False

View File

@@ -21,7 +21,7 @@ import time
 from oslo_log import log as logging

 import nova.conf
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova import utils
 from nova.virt.disk.mount import api
@@ -46,14 +46,14 @@ class NbdMount(api.Mount):
             if not os.path.exists('/var/lock/qemu-nbd-%s' % device):
                 return device
             else:
-                LOG.error(_LE('NBD error - previous umount did not '
-                              'cleanup /var/lock/qemu-nbd-%s.'), device)
-        LOG.warning(_LW('No free nbd devices'))
+                LOG.error('NBD error - previous umount did not '
+                          'cleanup /var/lock/qemu-nbd-%s.', device)
+        LOG.warning('No free nbd devices')
         return None

     def _allocate_nbd(self):
         if not os.path.exists('/sys/block/nbd0'):
-            LOG.error(_LE('nbd module not loaded'))
+            LOG.error('nbd module not loaded')
             self.error = _('nbd unavailable: module not loaded')
             return None
@@ -81,7 +81,7 @@ class NbdMount(api.Mount):
                                  run_as_root=True)
         if err:
             self.error = _('qemu-nbd error: %s') % err
-            LOG.info(_LI('NBD mount error: %s'), self.error)
+            LOG.info('NBD mount error: %s', self.error)
             return False

         # NOTE(vish): this forks into another process, so give it a chance
@@ -94,14 +94,14 @@ class NbdMount(api.Mount):
                 time.sleep(1)
         else:
             self.error = _('nbd device %s did not show up') % device
-            LOG.info(_LI('NBD mount error: %s'), self.error)
+            LOG.info('NBD mount error: %s', self.error)

             # Cleanup
             _out, err = utils.trycmd('qemu-nbd', '-d', device,
                                      run_as_root=True)
             if err:
-                LOG.warning(_LW('Detaching from erroneous nbd device returned '
-                                'error: %s'), err)
+                LOG.warning('Detaching from erroneous nbd device returned '
+                            'error: %s', err)
             return False

         self.error = ''

View File

@@ -16,7 +16,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils

 from nova import exception
-from nova.i18n import _LI

 LOG = logging.getLogger(__name__)
@@ -69,8 +68,8 @@ class VFS(object):
                 # check for capabilities.
                 raise
             else:
-                LOG.info(_LI("Unable to import guestfs, "
-                             "falling back to VFSLocalFS"))
+                LOG.info("Unable to import guestfs, "
+                         "falling back to VFSLocalFS")

         return importutils.import_object(
             "nova.virt.disk.vfs.localfs.VFSLocalFS",

View File

@@ -22,7 +22,6 @@ import six
 import nova.conf
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LW
 from nova.virt.disk.vfs import api as vfs
 from nova.virt.image import model as imgmodel
@@ -93,8 +92,8 @@ class VFSGuestFS(vfs.VFS):
     def configure_debug(self):
         """Configures guestfs to be verbose."""
         if not self.handle:
-            LOG.warning(_LW("Please consider to execute setup before trying "
-                            "to configure debug log message."))
+            LOG.warning("Please consider to execute setup before trying "
+                        "to configure debug log message.")
         else:
             def log_callback(ev, eh, buf, array):
                 if ev == guestfs.EVENT_APPLIANCE:
@@ -197,8 +196,8 @@ class VFSGuestFS(vfs.VFS):
             except AttributeError as ex:
                 # set_backend_settings method doesn't exist in older
                 # libguestfs versions, so nothing we can do but ignore
-                LOG.warning(_LW("Unable to force TCG mode, "
-                                "libguestfs too old? %s"), ex)
+                LOG.warning("Unable to force TCG mode, "
+                            "libguestfs too old? %s", ex)
                 pass

         try:
@@ -246,7 +245,7 @@ class VFSGuestFS(vfs.VFS):
             if self.mount:
                 self.handle.aug_close()
         except RuntimeError as e:
-            LOG.warning(_LW("Failed to close augeas %s"), e)
+            LOG.warning("Failed to close augeas %s", e)

         try:
             self.handle.shutdown()
@@ -254,7 +253,7 @@ class VFSGuestFS(vfs.VFS):
             # Older libguestfs versions haven't an explicit shutdown
             pass
         except RuntimeError as e:
-            LOG.warning(_LW("Failed to shutdown appliance %s"), e)
+            LOG.warning("Failed to shutdown appliance %s", e)

         try:
             self.handle.close()
@@ -262,7 +261,7 @@ class VFSGuestFS(vfs.VFS):
             # Older libguestfs versions haven't an explicit close
             pass
         except RuntimeError as e:
-            LOG.warning(_LW("Failed to close guest handle %s"), e)
+            LOG.warning("Failed to close guest handle %s", e)
         finally:
             # dereference object and implicitly close()
             self.handle = None

View File

@@ -27,7 +27,7 @@ from oslo_utils import importutils
 import six

 import nova.conf
-from nova.i18n import _, _LE, _LI
+from nova.i18n import _
 from nova.virt import event as virtevent

 CONF = nova.conf.CONF
@@ -1444,7 +1444,7 @@ class ComputeDriver(object):
                 LOG.debug("Emitting event %s", six.text_type(event))
                 self._compute_event_callback(event)
             except Exception as ex:
-                LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
+                LOG.error("Exception dispatching event %(event)s: %(ex)s",
                           {'event': event, 'ex': ex})

     def delete_instance_files(self, instance):
@@ -1613,10 +1613,10 @@ def load_compute_driver(virtapi, compute_driver=None):
         compute_driver = CONF.compute_driver

     if not compute_driver:
-        LOG.error(_LE("Compute driver option required, but not specified"))
+        LOG.error("Compute driver option required, but not specified")
         sys.exit(1)

-    LOG.info(_LI("Loading compute driver '%s'"), compute_driver)
+    LOG.info("Loading compute driver '%s'", compute_driver)
     try:
         driver = importutils.import_object(
             'nova.virt.%s' % compute_driver,
@@ -1625,7 +1625,7 @@ def load_compute_driver(virtapi, compute_driver=None):
             return driver
         raise ValueError()
     except ImportError:
-        LOG.exception(_LE("Unable to load the virtualization driver"))
+        LOG.exception(_("Unable to load the virtualization driver"))
         sys.exit(1)
     except ValueError:
         LOG.exception("Compute driver '%s' from 'nova.virt' is not of type"

View File

@@ -35,7 +35,6 @@ from nova.compute import task_states
 import nova.conf
 from nova.console import type as ctype
 from nova import exception
-from nova.i18n import _LW
 from nova.objects import diagnostics as diagnostics_obj
 from nova.objects import fields as obj_fields
 from nova.virt import driver
@@ -274,7 +273,7 @@ class FakeDriver(driver.ComputeDriver):
                                disk=flavor.root_gb)
             del self.instances[key]
         else:
-            LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
+            LOG.warning("Key '%(key)s' not in instances '%(inst)s'",
                         {'key': key,
                          'inst': self.instances}, instance=instance)

View File

@@ -21,7 +21,6 @@ from oslo_utils import importutils
 from nova.compute import utils as compute_utils
 import nova.conf
 from nova import context
-from nova.i18n import _LI
 from nova.network import linux_net
 from nova import objects
 from nova import utils
@@ -137,8 +136,8 @@ class IptablesFirewallDriver(FirewallDriver):
             self.remove_filters_for_instance(instance)
             self.iptables.apply()
         else:
-            LOG.info(_LI('Attempted to unfilter instance which is not '
-                         'filtered'), instance=instance)
+            LOG.info('Attempted to unfilter instance which is not filtered',
+                     instance=instance)

     def prepare_instance_filter(self, instance, network_info):
         self.instance_info[instance.id] = (instance, network_info)
@@ -389,10 +388,8 @@ class IptablesFirewallDriver(FirewallDriver):
                                       ipv6_rules):
         chain_name = self._instance_chain_name(instance)
         if not self.iptables.ipv4['filter'].has_chain(chain_name):
-            LOG.info(
-                _LI('instance chain %s disappeared during refresh, '
-                    'skipping'), chain_name,
-                instance=instance)
+            LOG.info('instance chain %s disappeared during refresh, skipping',
+                     chain_name, instance=instance)
             return
         self.remove_filters_for_instance(instance)
         self.add_filters_for_instance(instance, network_info, ipv4_rules,

View File

@@ -25,7 +25,7 @@ import six
 import nova.conf
 from nova import context
 from nova import exception
-from nova.i18n import _, _LI
+from nova.i18n import _
 from nova import objects
 from nova.objects import fields
 from nova.objects import instance as obj_instance
@@ -784,8 +784,8 @@ def _pack_instance_onto_cores(available_siblings,
         # vcpus_pinning=[(2, 0), (3, 4)]
         vcpus_pinning = list(zip(sorted(instance_cores),
                                  itertools.chain(*usable_cores)))
-        msg = _LI("Computed NUMA topology CPU pinning: usable pCPUs: "
-                  "%(usable_cores)s, vCPUs mapping: %(vcpus_pinning)s")
+        msg = ("Computed NUMA topology CPU pinning: usable pCPUs: "
+               "%(usable_cores)s, vCPUs mapping: %(vcpus_pinning)s")
         msg_args = {
             'usable_cores': usable_cores,
             'vcpus_pinning': vcpus_pinning,
@@ -809,8 +809,8 @@ def _pack_instance_onto_cores(available_siblings,
             # cpuset_reserved=[4]
             cpuset_reserved = set(list(
                 itertools.chain(*usable_cores))[:num_cpu_reserved])
-            msg = _LI("Computed NUMA topology reserved pCPUs: usable pCPUs: "
-                      "%(usable_cores)s, reserved pCPUs: %(cpuset_reserved)s")
+            msg = ("Computed NUMA topology reserved pCPUs: usable pCPUs: "
+                   "%(usable_cores)s, reserved pCPUs: %(cpuset_reserved)s")
             msg_args = {
                 'usable_cores': usable_cores,
                 'cpuset_reserved': cpuset_reserved,
@@ -943,9 +943,9 @@ def _numa_fit_instance_cell_with_pinning(host_cell, instance_cell,
     else:
         if (instance_cell.cpu_thread_policy ==
                 fields.CPUThreadAllocationPolicy.REQUIRE):
-            LOG.info(_LI("Host does not support hyperthreading or "
-                         "hyperthreading is disabled, but 'require' "
-                         "threads policy was requested."))
+            LOG.info("Host does not support hyperthreading or "
+                     "hyperthreading is disabled, but 'require' "
+                     "threads policy was requested.")
             return

         # Straightforward to pin to available cpus when there is no
# Straightforward to pin to available cpus when there is no # Straightforward to pin to available cpus when there is no

View File

@@ -27,7 +27,6 @@ from oslo_log import log as logging
 import six

 from nova import exception
-from nova.i18n import _LE
 from nova.virt import driver
 from nova.virt.hyperv import eventhandler
 from nova.virt.hyperv import hostops
@@ -123,10 +122,10 @@ class HyperVDriver(driver.ComputeDriver):
             # the version is of Windows is older than Windows Server 2012 R2.
             # Log an error, letting users know that this version is not
             # supported any longer.
-            LOG.error(_LE('You are running nova-compute on an unsupported '
-                          'version of Windows (older than Windows / Hyper-V '
-                          'Server 2012). The support for this version of '
-                          'Windows has been removed in Mitaka.'))
+            LOG.error('You are running nova-compute on an unsupported '
+                      'version of Windows (older than Windows / Hyper-V '
+                      'Server 2012). The support for this version of '
+                      'Windows has been removed in Mitaka.')
             raise exception.HypervisorTooOld(version='6.2')

     @property

View File

@@ -19,7 +19,6 @@ from os_win import utilsfactory
 from oslo_log import log as logging

 import nova.conf
-from nova.i18n import _LW
 from nova import utils
 from nova.virt import event as virtevent
 from nova.virt.hyperv import serialconsoleops
@@ -83,10 +82,9 @@ class InstanceEventHandler(object):
         try:
             instance_uuid = self._vmutils.get_instance_uuid(instance_name)
             if not instance_uuid:
-                LOG.warning(_LW("Instance uuid could not be retrieved for "
-                                "instance %s. Instance state change event "
-                                "will be ignored."),
-                            instance_name)
+                LOG.warning("Instance uuid could not be retrieved for "
+                            "instance %s. Instance state change event "
+                            "will be ignored.", instance_name)
             return instance_uuid
         except os_win_exc.HyperVVMNotFoundException:
             # The instance has been deleted.

View File

@@ -26,7 +26,7 @@ from oslo_utils import uuidutils

 import nova.conf
 from nova import exception
-from nova.i18n import _, _LI
+from nova.i18n import _
 from nova import utils
 from nova.virt.hyperv import pathutils
 from nova.virt import imagecache
@@ -201,7 +201,7 @@ class ImageCache(imagecache.ImageCacheManager):
         for img in backing_files:
             age_seconds = self._pathutils.get_age_of_file(img)
             if age_seconds > max_age_seconds:
-                LOG.info(_LI("Removing old, unused image: %s"), img)
+                LOG.info("Removing old, unused image: %s", img)
                 self._remove_old_image(img)

     def _remove_old_image(self, image_path):

View File

@@ -24,7 +24,7 @@ from oslo_utils import excutils
 from oslo_utils import units

 from nova import exception
-from nova.i18n import _, _LW, _LE
+from nova.i18n import _
 from nova import objects
 from nova.virt import configdrive
 from nova.virt.hyperv import block_device_manager
@@ -98,7 +98,7 @@ class MigrationOps(object):
         except Exception as ex:
             # Log and ignore this exception
             LOG.exception(ex)
-            LOG.error(_LE("Cannot cleanup migration files"))
+            LOG.error("Cannot cleanup migration files")

     def _check_target_flavor(self, instance, flavor):
         new_root_gb = flavor.root_gb
@@ -314,8 +314,8 @@ class MigrationOps(object):
         elif sum(eph['size'] for eph in ephemerals) != new_eph_gb:
             # New ephemeral size is different from the original ephemeral size
             # and there are multiple ephemerals.
-            LOG.warning(_LW("Cannot resize multiple ephemeral disks for "
-                            "instance."), instance=instance)
+            LOG.warning("Cannot resize multiple ephemeral disks for instance.",
+                        instance=instance)

         for index, eph in enumerate(ephemerals):
             eph_name = "eph%s" % index

View File

@@ -22,7 +22,7 @@ from oslo_log import log as logging
 from nova.console import serial as serial_console
 from nova.console import type as ctype
 from nova import exception
-from nova.i18n import _, _LI
+from nova.i18n import _
 from nova.virt.hyperv import constants
 from nova.virt.hyperv import pathutils
 from nova.virt.hyperv import serialproxy
@@ -76,12 +76,12 @@ class SerialConsoleHandler(object):
             self._listen_port = serial_console.acquire_port(
                 self._listen_host)

-            LOG.info(_LI('Initializing serial proxy on '
-                         '%(addr)s:%(port)s, handling connections '
-                         'to instance %(instance_name)s.'),
+            LOG.info('Initializing serial proxy on '
+                     '%(addr)s:%(port)s, handling connections '
+                     'to instance %(instance_name)s.',
                      {'addr': self._listen_host,
                       'port': self._listen_port,
                       'instance_name': self._instance_name})

             # Use this event in order to manage
             # pending queue operations.

View File

@@ -21,7 +21,6 @@ from oslo_log import log as logging
 import six

 from nova import exception
-from nova.i18n import _LI, _LE
 from nova import utils
 from nova.virt.hyperv import pathutils
 from nova.virt.hyperv import serialconsolehandler
@@ -58,8 +57,8 @@ class SerialConsoleOps(object):
                 handler.start()
                 _console_handlers[instance_name] = handler
             except Exception as exc:
-                LOG.error(_LE('Instance %(instance_name)s serial console handler '
-                              'could not start. Exception %(exc)s'),
+                LOG.error('Instance %(instance_name)s serial console handler '
+                          'could not start. Exception %(exc)s',
                           {'instance_name': instance_name,
                            'exc': exc})
                 if handler:
@@ -72,8 +71,8 @@ class SerialConsoleOps(object):
     def stop_console_handler_unsync(self, instance_name):
         handler = _console_handlers.get(instance_name)
         if handler:
-            LOG.info(_LI("Stopping instance %(instance_name)s "
-                         "serial console handler."),
+            LOG.info("Stopping instance %(instance_name)s "
+                     "serial console handler.",
                      {'instance_name': instance_name})
             handler.stop()
             del _console_handlers[instance_name]

View File

@@ -22,7 +22,7 @@ from os_win import utilsfactory
 from oslo_log import log as logging

 from nova.compute import task_states
-from nova.i18n import _LE
+from nova.i18n import _
 from nova.image import glance
 from nova.virt.hyperv import pathutils
@@ -112,7 +112,7 @@ class SnapshotOps(object):
                 LOG.debug("Removing snapshot %s", image_id)
                 self._vmutils.remove_vm_snapshot(snapshot_path)
             except Exception:
-                LOG.exception(_LE('Failed to remove snapshot for VM %s'),
+                LOG.exception(_('Failed to remove snapshot for VM %s'),
                               instance_name, instance=instance)
             if export_dir:
                 LOG.debug('Removing directory: %s', export_dir)

View File

@@ -38,7 +38,7 @@ from nova.api.metadata import base as instance_metadata
 from nova.compute import vm_states
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LI, _LE, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import fields
 from nova import utils
@@ -268,7 +268,7 @@ class VMOps(object):
     def spawn(self, context, instance, image_meta, injected_files,
               admin_password, network_info, block_device_info=None):
         """Create a new VM and start it."""
-        LOG.info(_LI("Spawning new instance"), instance=instance)
+        LOG.info("Spawning new instance", instance=instance)

         instance_name = instance.name
         if self._vmutils.vm_exists(instance_name):
@@ -319,13 +319,13 @@ class VMOps(object):
                 yield
         except etimeout.Timeout:
             # We never heard from Neutron
-            LOG.warning(_LW('Timeout waiting for vif plugging callback for '
-                            'instance.'), instance=instance)
+            LOG.warning('Timeout waiting for vif plugging callback for '
+                        'instance.', instance=instance)
             if CONF.vif_plugging_is_fatal:
                 raise exception.VirtualInterfaceCreateException()

     def _neutron_failed_callback(self, event_name, instance):
-        LOG.error(_LE('Neutron Reported failure on event %s'),
+        LOG.error('Neutron Reported failure on event %s',
                   event_name, instance=instance)
         if CONF.vif_plugging_is_fatal:
             raise exception.VirtualInterfaceCreateException()
@@ -357,10 +357,10 @@ class VMOps(object):
                           "has to be disabled in order for the instance to "
                           "benefit from it.", instance=instance)
             if CONF.hyperv.dynamic_memory_ratio > 1.0:
-                LOG.warning(_LW(
+                LOG.warning(
                     "Instance vNUMA topology requested, but dynamic memory "
                     "ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
-                    "memory ratio option."), instance=instance)
+                    "memory ratio option.", instance=instance)
                 dynamic_memory_ratio = 1.0
             vnuma_enabled = True
         else:
@@ -549,8 +549,8 @@ class VMOps(object):
         image_prop_vm = image_meta.properties.get('hw_machine_type',
                                                   default_vm_gen)
         if image_prop_vm not in self._hostutils.get_supported_vm_types():
-            reason = _LE('Requested VM Generation %s is not supported on '
-                         'this OS.') % image_prop_vm
+            reason = _('Requested VM Generation %s is not supported on '
+                       'this OS.') % image_prop_vm
             raise exception.InstanceUnacceptable(instance_id=instance_id,
                                                  reason=reason)
@@ -560,8 +560,8 @@ class VMOps(object):
         if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
                 self._vhdutils.get_vhd_format(
                     root_vhd_path) == constants.DISK_FORMAT_VHD):
-            reason = _LE('Requested VM Generation %s, but provided VHD '
-                         'instead of VHDX.') % vm_gen
+            reason = _('Requested VM Generation %s, but provided VHD '
+                       'instead of VHDX.') % vm_gen
             raise exception.InstanceUnacceptable(instance_id=instance_id,
                                                  reason=reason)
@@ -628,7 +628,7 @@ class VMOps(object):
             raise exception.ConfigDriveUnsupportedFormat(
                 format=CONF.config_drive_format)

-        LOG.info(_LI('Using config drive for instance'), instance=instance)
+        LOG.info('Using config drive for instance', instance=instance)

         extra_md = {}
         if admin_password and CONF.hyperv.config_drive_inject_password:
@@ -640,7 +640,7 @@ class VMOps(object):
         configdrive_path_iso = self._pathutils.get_configdrive_path(
             instance.name, constants.DVD_FORMAT, rescue=rescue)
-        LOG.info(_LI('Creating config drive at %(path)s'),
+        LOG.info('Creating config drive at %(path)s',
                  {'path': configdrive_path_iso}, instance=instance)

         with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
@@ -648,9 +648,8 @@ class VMOps(object):
                 cdb.make_drive(configdrive_path_iso)
             except processutils.ProcessExecutionError as e:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE('Creating config drive failed with '
-                                  'error: %s'),
-                              e, instance=instance)
+                    LOG.error('Creating config drive failed with '
+                              'error: %s', e, instance=instance)

         if not CONF.hyperv.config_drive_cdrom:
             configdrive_path = self._pathutils.get_configdrive_path(
@@ -701,7 +700,7 @@ class VMOps(object):
     def destroy(self, instance, network_info=None, block_device_info=None,
                 destroy_disks=True):
         instance_name = instance.name
-        LOG.info(_LI("Got request to destroy instance"), instance=instance)
+        LOG.info("Got request to destroy instance", instance=instance)
         try:
             if self._vmutils.vm_exists(instance_name):
@@ -718,7 +717,7 @@ class VMOps(object):
                 self._delete_disk_files(instance_name)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to destroy instance: %s'),
+                LOG.exception(_('Failed to destroy instance: %s'),
                               instance_name)

     def reboot(self, instance, network_info, reboot_type):
@@ -754,7 +753,7 @@ class VMOps(object):
                           timeout, instance=instance)
                 self._vmutils.soft_shutdown_vm(instance.name)
                 if self._wait_for_power_off(instance.name, wait_time):
-                    LOG.info(_LI("Soft shutdown succeeded."),
+                    LOG.info("Soft shutdown succeeded.",
                              instance=instance)
                     return True
             except os_win_exc.HyperVException as e:
@@ -765,7 +764,7 @@ class VMOps(object):
             timeout -= retry_interval

-        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
+        LOG.warning("Timed out while waiting for soft shutdown.",
                     instance=instance)
         return False
@@ -842,8 +841,8 @@ class VMOps(object):
                        'req_state': req_state})
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Failed to change vm state of %(instance_name)s"
-                              " to %(req_state)s"),
+                LOG.error("Failed to change vm state of %(instance_name)s"
+                          " to %(req_state)s",
                           {'instance_name': instance_name,
                            'req_state': req_state})
@@ -966,9 +965,9 @@ class VMOps(object):
                                   image_meta, rescue_password)
         except Exception as exc:
             with excutils.save_and_reraise_exception():
-                err_msg = _LE("Instance rescue failed. Exception: %(exc)s. "
-                              "Attempting to unrescue the instance.")
-                LOG.error(err_msg, {'exc': exc}, instance=instance)
+                LOG.error("Instance rescue failed. Exception: %(exc)s. "
+                          "Attempting to unrescue the instance.",
+                          {'exc': exc}, instance=instance)
                 self.unrescue_instance(instance)

     def _rescue_instance(self, context, instance, network_info, image_meta,

View File

@@ -26,7 +26,7 @@ from oslo_utils import strutils

 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova import utils
 from nova.virt import driver
 from nova.virt.hyperv import constants
@@ -77,10 +77,10 @@ class VolumeOps(object):
                 tries_left -= 1
                 if not tries_left:
                     LOG.exception(
-                        _LE("Failed to attach volume %(connection_info)s "
-                            "to instance %(instance_name)s. "),
-                        {'connection_info': strutils.mask_dict_password(
-                            connection_info),
+                        _("Failed to attach volume %(connection_info)s "
+                          "to instance %(instance_name)s. "),
+                        {'connection_info':
+                            strutils.mask_dict_password(connection_info),
                          'instance_name': instance_name})

                     self.disconnect_volume(connection_info)
@@ -89,9 +89,9 @@ class VolumeOps(object):
                         reason=ex)
                 else:
                     LOG.warning(
-                        _LW("Failed to attach volume %(connection_info)s "
-                            "to instance %(instance_name)s. "
-                            "Tries left: %(tries_left)s."),
+                        "Failed to attach volume %(connection_info)s "
+                        "to instance %(instance_name)s. "
+                        "Tries left: %(tries_left)s.",
                         {'connection_info': strutils.mask_dict_password(
                             connection_info),
                          'instance_name': instance_name,
@@ -194,12 +194,11 @@ class VolumeOps(object):
         unsupported_specs = set(qos_specs.keys()).difference(
             supported_qos_specs)
         if unsupported_specs:
-            msg = (_LW('Got unsupported QoS specs: '
-                       '%(unsupported_specs)s. '
-                       'Supported qos specs: %(supported_qos_specs)s') %
-                   {'unsupported_specs': unsupported_specs,
-                    'supported_qos_specs': supported_qos_specs})
-            LOG.warning(msg)
+            LOG.warning('Got unsupported QoS specs: '
+                        '%(unsupported_specs)s. '
+                        'Supported qos specs: %(supported_qos_specs)s',
+                        {'unsupported_specs': unsupported_specs,
+                         'supported_qos_specs': supported_qos_specs})
@@ -302,8 +301,8 @@ class BaseVolumeDriver(object):
         return ctrller_path, slot

     def set_disk_qos_specs(self, connection_info, disk_qos_specs):
-        LOG.info(_LI("The %(protocol)s Hyper-V volume driver "
-                     "does not support QoS. Ignoring QoS specs."),
+        LOG.info("The %(protocol)s Hyper-V volume driver "
+                 "does not support QoS. Ignoring QoS specs.",
                  dict(protocol=self._protocol))

View File

@@ -41,9 +41,6 @@ from nova.console import type as console_type
 from nova import context as nova_context
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW
 from nova import objects
 from nova.objects import fields as obj_fields
 from nova import servicegroup
@@ -90,7 +87,7 @@ def map_power_state(state):
     try:
         return _POWER_STATE_MAP[state]
     except KeyError:
-        LOG.warning(_LW("Power state %s not found."), state)
+        LOG.warning("Power state %s not found.", state)
         return power_state.NOSTATE
@@ -217,8 +214,8 @@ class IronicDriver(virt_driver.ComputeDriver):
             try:
                 properties[prop] = int(node.properties.get(prop, 0))
             except (TypeError, ValueError):
-                LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
-                                'It should be an integer.'),
+                LOG.warning('Node %(uuid)s has a malformed "%(prop)s". '
+                            'It should be an integer.',
                             {'uuid': node.uuid, 'prop': prop})
                 properties[prop] = 0
@@ -228,7 +225,7 @@ class IronicDriver(virt_driver.ComputeDriver):
         except exception.InvalidArchitectureName:
             cpu_arch = None
         if not cpu_arch:
-            LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)
+            LOG.warning("cpu_arch not defined for node '%s'", node.uuid)

         properties['cpu_arch'] = cpu_arch
         properties['raw_cpu_arch'] = raw_cpu_arch
@@ -253,9 +250,9 @@ class IronicDriver(virt_driver.ComputeDriver):
                 instance_info[prop] = int(node.instance_info.get(prop,
                                                                  original))
             except (TypeError, ValueError):
-                LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
-                                'It should be an integer but its value '
-                                'is "%(value)s".'),
+                LOG.warning('Node %(uuid)s has a malformed "%(prop)s". '
+                            'It should be an integer but its value '
+                            'is "%(value)s".',
                             {'uuid': node.uuid, 'prop': prop,
                              'value': node.instance_info.get(prop)})
                 instance_info[prop] = original
@@ -298,8 +295,8 @@ class IronicDriver(virt_driver.ComputeDriver):
             if len(parts) == 2 and parts[0] and parts[1]:
                 nodes_extra_specs[parts[0].strip()] = parts[1]
             else:
-                LOG.warning(_LW("Ignoring malformed capability '%s'. "
-                                "Format should be 'key:val'."), capability)
+                LOG.warning("Ignoring malformed capability '%s'. "
+                            "Format should be 'key:val'.", capability)

         vcpus_used = 0
         memory_mb_used = 0
@@ -389,9 +386,9 @@ class IronicDriver(virt_driver.ComputeDriver):
         try:
             self.ironicclient.call('node.update', node.uuid, patch)
         except ironic.exc.BadRequest as e:
-            LOG.warning(_LW("Failed to remove deploy parameters from node "
-                            "%(node)s when unprovisioning the instance "
-                            "%(instance)s: %(reason)s"),
+            LOG.warning("Failed to remove deploy parameters from node "
+                        "%(node)s when unprovisioning the instance "
+                        "%(instance)s: %(reason)s",
                         {'node': node.uuid, 'instance': instance.uuid,
                          'reason': six.text_type(e)})
@@ -730,15 +727,15 @@ class IronicDriver(virt_driver.ComputeDriver):
         properties = self._parse_node_properties(node)
         memory_kib = properties['memory_mb'] * 1024
         if memory_kib == 0:
-            LOG.warning(_LW("Warning, memory usage is 0 for "
-                            "%(instance)s on baremetal node %(node)s."),
+            LOG.warning("Warning, memory usage is 0 for "
+                        "%(instance)s on baremetal node %(node)s.",
                         {'instance': instance.uuid,
                          'node': instance.node})

         num_cpu = properties['cpus']
         if num_cpu == 0:
-            LOG.warning(_LW("Warning, number of cpus is 0 for "
-                            "%(instance)s on baremetal node %(node)s."),
+            LOG.warning("Warning, number of cpus is 0 for "
+                        "%(instance)s on baremetal node %(node)s.",
                         {'instance': instance.uuid,
                          'node': instance.node})
@@ -912,8 +909,8 @@ class IronicDriver(virt_driver.ComputeDriver):
             self._start_firewall(instance, network_info)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error preparing deploy for instance "
-                              "%(instance)s on baremetal node %(node)s."),
+                LOG.error("Error preparing deploy for instance "
+                          "%(instance)s on baremetal node %(node)s.",
                           {'instance': instance.uuid,
                            'node': node_uuid})
                 self._cleanup_deploy(node, instance, network_info)
@@ -931,14 +928,14 @@ class IronicDriver(virt_driver.ComputeDriver):
                     files=injected_files)
             except Exception as e:
                 with excutils.save_and_reraise_exception():
-                    msg = (_LE("Failed to build configdrive: %s") %
+                    msg = ("Failed to build configdrive: %s" %
                            six.text_type(e))
                     LOG.error(msg, instance=instance)
                     self._cleanup_deploy(node, instance, network_info)

-            LOG.info(_LI("Config drive for instance %(instance)s on "
-                         "baremetal node %(node)s created."),
+            LOG.info("Config drive for instance %(instance)s on "
+                     "baremetal node %(node)s created.",
                      {'instance': instance['uuid'], 'node': node_uuid})

         # trigger the node deploy
         try:
@@ -947,25 +944,24 @@ class IronicDriver(virt_driver.ComputeDriver):
                                    configdrive=configdrive_value)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                msg = (_LE("Failed to request Ironic to provision instance "
-                           "%(inst)s: %(reason)s"),
-                       {'inst': instance.uuid,
-                        'reason': six.text_type(e)})
-                LOG.error(msg)
+                LOG.error("Failed to request Ironic to provision instance "
+                          "%(inst)s: %(reason)s",
+                          {'inst': instance.uuid,
+                           'reason': six.text_type(e)})
                 self._cleanup_deploy(node, instance, network_info)

         timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                      instance)
         try:
             timer.start(interval=CONF.ironic.api_retry_interval).wait()
-            LOG.info(_LI('Successfully provisioned Ironic node %s'),
+            LOG.info('Successfully provisioned Ironic node %s',
                      node.uuid, instance=instance)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Error deploying instance %(instance)s on "
-                              "baremetal node %(node)s."),
+                LOG.error("Error deploying instance %(instance)s on "
+                          "baremetal node %(node)s.",
                           {'instance': instance.uuid,
                            'node': node_uuid})

     def _unprovision(self, instance, node):
         """This method is called from destroy() to unprovision
@@ -1039,7 +1035,7 @@ class IronicDriver(virt_driver.ComputeDriver):
         try:
             node = self._validate_instance_and_node(instance)
         except exception.InstanceNotFound:
-            LOG.warning(_LW("Destroy called on non-existing instance %s."),
+            LOG.warning("Destroy called on non-existing instance %s.",
                         instance.uuid)
             # NOTE(deva): if nova.compute.ComputeManager._delete_instance()
             #             is called on a non-existing instance, the only way
@@ -1056,7 +1052,7 @@ class IronicDriver(virt_driver.ComputeDriver):
             self._remove_instance_info_from_node(node, instance)
         self._cleanup_deploy(node, instance, network_info)

-        LOG.info(_LI('Successfully unprovisioned Ironic node %s'),
+        LOG.info('Successfully unprovisioned Ironic node %s',
                  node.uuid, instance=instance)

     def reboot(self, context, instance, network_info, reboot_type,
@@ -1088,8 +1084,8 @@ class IronicDriver(virt_driver.ComputeDriver):
                                        'reboot', soft=True)
                 hard = False
             except ironic.exc.BadRequest as exc:
-                LOG.info(_LI('Soft reboot is not supported by ironic hardware '
-                             'driver. Falling back to hard reboot: %s'),
+                LOG.info('Soft reboot is not supported by ironic hardware '
+                         'driver. Falling back to hard reboot: %s',
                          exc,
                          instance=instance)
@@ -1099,8 +1095,7 @@ class IronicDriver(virt_driver.ComputeDriver):
             timer = loopingcall.FixedIntervalLoopingCall(
                 self._wait_for_power_state, instance, 'reboot')
             timer.start(interval=CONF.ironic.api_retry_interval).wait()
-            LOG.info(_LI('Successfully rebooted(type %(type)s) Ironic node '
-                         '%(node)s'),
+            LOG.info('Successfully rebooted(type %(type)s) Ironic node %(node)s',
                      {'type': ('HARD' if hard else 'SOFT'),
                       'node': node.uuid},
                      instance=instance)
@@ -1131,25 +1126,24 @@ class IronicDriver(virt_driver.ComputeDriver):
                 timer.start(interval=CONF.ironic.api_retry_interval).wait()
                 node = self._validate_instance_and_node(instance)
                 if node.power_state == ironic_states.POWER_OFF:
-                    LOG.info(_LI('Successfully soft powered off Ironic node '
-                                 '%s'),
+                    LOG.info('Successfully soft powered off Ironic node %s',
                              node.uuid, instance=instance)
                     return
-                LOG.info(_LI("Failed to soft power off instance "
-                             "%(instance)s on baremetal node %(node)s "
-                             "within the required timeout %(timeout)d "
-                             "seconds due to error: %(reason)s. "
-                             "Attempting hard power off."),
+                LOG.info("Failed to soft power off instance "
+                         "%(instance)s on baremetal node %(node)s "
+                         "within the required timeout %(timeout)d "
+                         "seconds due to error: %(reason)s. "
+                         "Attempting hard power off.",
                          {'instance': instance.uuid,
                           'timeout': timeout,
                           'node': node.uuid,
                           'reason': node.last_error},
                          instance=instance)
             except ironic.exc.ClientException as e:
-                LOG.info(_LI("Failed to soft power off instance "
-                             "%(instance)s on baremetal node %(node)s "
-                             "due to error: %(reason)s. "
-                             "Attempting hard power off."),
+                LOG.info("Failed to soft power off instance "
+                         "%(instance)s on baremetal node %(node)s "
+                         "due to error: %(reason)s. "
+                         "Attempting hard power off.",
                          {'instance': instance.uuid,
                           'node': node.uuid,
                           'reason': e},
@@ -1159,7 +1153,7 @@ class IronicDriver(virt_driver.ComputeDriver):
         timer = loopingcall.FixedIntervalLoopingCall(
             self._wait_for_power_state, instance, 'power off')
         timer.start(interval=CONF.ironic.api_retry_interval).wait()
-        LOG.info(_LI('Successfully hard powered off Ironic node %s'),
+        LOG.info('Successfully hard powered off Ironic node %s',
                  node.uuid, instance=instance)

     def power_on(self, context, instance, network_info,
@@ -1184,7 +1178,7 @@ class IronicDriver(virt_driver.ComputeDriver):
         timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'power on') self._wait_for_power_state, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait() timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully powered on Ironic node %s'), LOG.info('Successfully powered on Ironic node %s',
node.uuid, instance=instance) node.uuid, instance=instance)
def trigger_crash_dump(self, instance): def trigger_crash_dump(self, instance):
@ -1202,7 +1196,7 @@ class IronicDriver(virt_driver.ComputeDriver):
self.ironicclient.call("node.inject_nmi", node.uuid) self.ironicclient.call("node.inject_nmi", node.uuid)
LOG.info(_LI('Successfully triggered crash dump into Ironic node %s'), LOG.info('Successfully triggered crash dump into Ironic node %s',
node.uuid, instance=instance) node.uuid, instance=instance)
def refresh_security_group_rules(self, security_group_id): def refresh_security_group_rules(self, security_group_id):
@ -1379,7 +1373,7 @@ class IronicDriver(virt_driver.ComputeDriver):
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active, timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance) instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait() timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Instance was successfully rebuilt'), instance=instance) LOG.info('Instance was successfully rebuilt', instance=instance)
def network_binding_host_id(self, context, instance): def network_binding_host_id(self, context, instance):
"""Get host ID to associate with network ports. """Get host ID to associate with network ports.
@ -1434,10 +1428,9 @@ class IronicDriver(virt_driver.ComputeDriver):
except (exception.NovaException, # Retry failed except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance ironic.exc.BadRequest) as e: # Maintenance
LOG.error(_LE('Failed to acquire console information for ' LOG.error('Failed to acquire console information for '
'instance %(inst)s: %(reason)s'), 'instance %(inst)s: %(reason)s',
{'inst': instance.uuid, {'inst': instance.uuid, 'reason': e})
'reason': e})
raise exception.ConsoleNotAvailable() raise exception.ConsoleNotAvailable()
def _wait_state(state): def _wait_state(state):
@ -1459,8 +1452,8 @@ class IronicDriver(virt_driver.ComputeDriver):
except (exception.NovaException, # Retry failed except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance ironic.exc.BadRequest) as e: # Maintenance
LOG.error(_LE('Failed to set console mode to "%(mode)s" ' LOG.error('Failed to set console mode to "%(mode)s" '
'for instance %(inst)s: %(reason)s'), 'for instance %(inst)s: %(reason)s',
{'mode': mode, {'mode': mode,
'inst': instance.uuid, 'inst': instance.uuid,
'reason': e}) 'reason': e})
@ -1474,8 +1467,8 @@ class IronicDriver(virt_driver.ComputeDriver):
timeout=CONF.ironic.serial_console_state_timeout, timeout=CONF.ironic.serial_console_state_timeout,
jitter=0.5).wait() jitter=0.5).wait()
except loopingcall.LoopingCallTimeOut: except loopingcall.LoopingCallTimeOut:
LOG.error(_LE('Timeout while waiting for console mode to be ' LOG.error('Timeout while waiting for console mode to be '
'set to "%(mode)s" on node %(node)s'), 'set to "%(mode)s" on node %(node)s',
{'mode': mode, {'mode': mode,
'node': node_uuid}) 'node': node_uuid})
raise exception.ConsoleNotAvailable() raise exception.ConsoleNotAvailable()
@ -1528,8 +1521,8 @@ class IronicDriver(virt_driver.ComputeDriver):
console_info = result['console_info'] console_info = result['console_info']
if console_info["type"] != "socat": if console_info["type"] != "socat":
LOG.warning(_LW('Console type "%(type)s" (of ironic node ' LOG.warning('Console type "%(type)s" (of ironic node '
'%(node)s) does not support Nova serial console'), '%(node)s) does not support Nova serial console',
{'type': console_info["type"], {'type': console_info["type"],
'node': node.uuid}, 'node': node.uuid},
instance=instance) instance=instance)
@ -1544,8 +1537,8 @@ class IronicDriver(virt_driver.ComputeDriver):
if not (scheme and hostname and port): if not (scheme and hostname and port):
raise AssertionError() raise AssertionError()
except (ValueError, AssertionError): except (ValueError, AssertionError):
LOG.error(_LE('Invalid Socat console URL "%(url)s" ' LOG.error('Invalid Socat console URL "%(url)s" '
'(ironic node %(node)s)'), '(ironic node %(node)s)',
{'url': console_info["url"], {'url': console_info["url"],
'node': node.uuid}, 'node': node.uuid},
instance=instance) instance=instance)
@ -1555,8 +1548,8 @@ class IronicDriver(virt_driver.ComputeDriver):
return console_type.ConsoleSerial(host=hostname, return console_type.ConsoleSerial(host=hostname,
port=port) port=port)
else: else:
LOG.warning(_LW('Socat serial console only supports "tcp". ' LOG.warning('Socat serial console only supports "tcp". '
'This URL is "%(url)s" (ironic node %(node)s).'), 'This URL is "%(url)s" (ironic node %(node)s).',
{'url': console_info["url"], {'url': console_info["url"],
'node': node.uuid}, 'node': node.uuid},
instance=instance) instance=instance)
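
Note: the whole commit repeats one mechanical pattern — drop the _LE/_LI/_LW
marker while keeping oslo.log's lazy %-style interpolation. A minimal sketch
(not part of this diff; the function and node_uuid are illustrative only):

    import logging

    LOG = logging.getLogger(__name__)

    def provision(node_uuid):
        # Before: LOG.info(_LI('Successfully provisioned Ironic node %s'),
        #                  node_uuid)
        # After: a plain English string. The argument is still passed
        # separately, so formatting only happens if INFO is enabled.
        LOG.info('Successfully provisioned Ironic node %s', node_uuid)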

View File

@ -12,8 +12,6 @@
 from oslo_log import log as logging

-from nova.i18n import _LW
-
 LOG = logging.getLogger(__name__)
@ -32,7 +30,7 @@ def get_domain_info(libvirt, host, virt_dom):
         return virt_dom.info()
     except libvirt.libvirtError as e:
         if not host.has_min_version((1, 2, 11)) and is_race(e):
-            LOG.warning(_LW('Race detected in libvirt.virDomain.info, '
-                            'trying one more time'))
+            LOG.warning('Race detected in libvirt.virDomain.info, '
+                        'trying one more time')
             return virt_dom.info()
         raise
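
Note: the compat helper above retries exactly once when an old libvirt hits a
known race. A generic sketch of that retry-once shape (illustrative only;
is_transient stands in for is_race plus the version check):

    def call_with_single_retry(fn, is_transient):
        # Retry a callable exactly once when the failure is a known
        # transient race; any other error propagates unchanged.
        try:
            return fn()
        except Exception as e:
            if is_transient(e):
                return fn()
            raise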

File diff suppressed because it is too large

View File

@ -24,8 +24,6 @@ from oslo_utils import excutils
 from oslo_utils import importutils

 import nova.conf
-from nova.i18n import _LI
-from nova.i18n import _LW
 import nova.virt.firewall as base_firewall
 from nova.virt import netutils
@ -55,8 +53,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
         try:
             libvirt = importutils.import_module('libvirt')
         except ImportError:
-            LOG.warning(_LW("Libvirt module could not be loaded. "
-                            "NWFilterFirewall will not work correctly."))
+            LOG.warning("Libvirt module could not be loaded. "
+                        "NWFilterFirewall will not work correctly.")
         self._host = host
         self.static_filters_configured = False
@ -109,10 +107,10 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
     def setup_basic_filtering(self, instance, network_info):
         """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
-        LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
+        LOG.info('Called setup_basic_filtering in nwfilter',
                  instance=instance)

-        LOG.info(_LI('Ensuring static filters'), instance=instance)
+        LOG.info('Ensuring static filters', instance=instance)
         self._ensure_static_filters()

         nodhcp_base_filter = self.get_base_filter_list(instance, False)
@ -281,9 +279,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
                 if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                     # This happens when the instance filter is still in use
                     # (ie. when the instance has not terminated properly)
-                    LOG.info(_LI('Failed to undefine network filter '
-                                 '%(name)s. Try %(cnt)d of '
-                                 '%(max_retry)d.'),
+                    LOG.info('Failed to undefine network filter '
+                             '%(name)s. Try %(cnt)d of %(max_retry)d.',
                              {'name': instance_filter_name,
                               'cnt': cnt + 1,
                               'max_retry': max_retry},
@ -349,8 +346,8 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
             self.iptables.apply()
             self.nwfilter.unfilter_instance(instance, network_info)
         else:
-            LOG.info(_LI('Attempted to unfilter instance which is not '
-                         'filtered'), instance=instance)
+            LOG.info('Attempted to unfilter instance which is not filtered',
+                     instance=instance)

     def instance_filter_exists(self, instance, network_info):
         """Check nova-instance-instance-xxx exists."""

View File

@ -40,8 +40,6 @@ import six
 from nova.compute import power_state
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LW
 from nova import utils
 from nova.virt import hardware
 from nova.virt.libvirt import compat
@ -127,7 +125,7 @@ class Guest(object):
             guest = host.write_instance_config(xml)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error defining a guest with XML: %s'),
+                LOG.error('Error defining a guest with XML: %s',
                           encodeutils.safe_decode(xml))
         return guest
@ -141,8 +139,8 @@ class Guest(object):
             return self._domain.createWithFlags(flags)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error launching a defined domain '
-                              'with XML: %s'),
+                LOG.error('Error launching a defined domain '
+                          'with XML: %s',
                           self._encoded_xml, errors='ignore')

     def poweroff(self):
@ -177,7 +175,7 @@ class Guest(object):
                 LOG.debug('Failed to set time: agent not configured',
                           instance_uuid=self.uuid)
             else:
-                LOG.warning(_LW('Failed to set time: %(reason)s'),
+                LOG.warning('Failed to set time: %(reason)s',
                             {'reason': e}, instance_uuid=self.uuid)
         except Exception as ex:
             # The highest priority is not to let this method crash and thus
@ -210,7 +208,7 @@ class Guest(object):
                           check_exit_code=[0, 1])
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error enabling hairpin mode with XML: %s'),
+                LOG.error('Error enabling hairpin mode with XML: %s',
                           self._encoded_xml, errors='ignore')

     def get_interfaces(self):
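
Note: the log-and-reraise blocks above lean on oslo.utils. A minimal sketch of
the idiom (assumes oslo.utils is installed; define_guest and host are
illustrative stand-ins for the code above):

    import logging

    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def define_guest(host, xml):
        try:
            return host.write_instance_config(xml)
        except Exception:
            # The context manager captures the in-flight exception and
            # re-raises it, traceback intact, after the log call runs.
            with excutils.save_and_reraise_exception():
                LOG.error('Error defining a guest with XML: %s', xml)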

View File

@ -48,9 +48,6 @@ import nova.conf
 from nova import context as nova_context
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW
 from nova import rpc
 from nova import utils
 from nova.virt import event as virtevent
@ -149,7 +146,7 @@ class Host(object):
                 try:
                     handler()
                 except Exception:
-                    LOG.exception(_LE('Exception handling connection event'))
+                    LOG.exception(_('Exception handling connection event'))
                 finally:
                     self._conn_event_handler_queue.task_done()
@ -378,8 +375,8 @@ class Host(object):
                 self._event_lifecycle_callback,
                 self)
         except Exception as e:
-            LOG.warning(_LW("URI %(uri)s does not support events: %(error)s"),
+            LOG.warning("URI %(uri)s does not support events: %(error)s",
                         {'uri': self._uri, 'error': e})

         try:
             LOG.debug("Registering for connection events: %s", str(self))
@ -394,9 +391,9 @@ class Host(object):
                 LOG.debug("The version of python-libvirt does not support "
                           "registerCloseCallback or is too old: %s", e)
         except libvirt.libvirtError as e:
-            LOG.warning(_LW("URI %(uri)s does not support connection"
-                            " events: %(error)s"),
+            LOG.warning("URI %(uri)s does not support connection"
+                        " events: %(error)s",
                         {'uri': self._uri, 'error': e})

         return wrapped_conn
@ -453,7 +450,7 @@ class Host(object):
         try:
             conn = self._get_connection()
         except libvirt.libvirtError as ex:
-            LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
+            LOG.exception(_("Connection to libvirt failed: %s"), ex)
             payload = dict(ip=CONF.my_ip,
                            method='_connect',
                            reason=ex)
@ -637,7 +634,7 @@ class Host(object):
         """
         if not self._caps:
             xmlstr = self.get_connection().getCapabilities()
-            LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
+            LOG.info("Libvirt host capabilities %s", xmlstr)
             self._caps = vconfig.LibvirtConfigCaps()
             self._caps.parse_str(xmlstr)
             # NOTE(mriedem): Don't attempt to get baseline CPU features
@ -658,8 +655,8 @@ class Host(object):
                 except libvirt.libvirtError as ex:
                     error_code = ex.get_error_code()
                     if error_code == libvirt.VIR_ERR_NO_SUPPORT:
-                        LOG.warning(_LW("URI %(uri)s does not support full set"
-                                        " of host capabilities: %(error)s"),
+                        LOG.warning("URI %(uri)s does not support full set"
+                                    " of host capabilities: %(error)s",
                                     {'uri': self._uri, 'error': ex})
                     else:
                         raise
@ -689,10 +686,9 @@ class Host(object):
         if self._hostname is None:
             self._hostname = hostname
         elif hostname != self._hostname:
-            LOG.error(_LE('Hostname has changed from %(old)s '
-                          'to %(new)s. A restart is required to take effect.'),
-                      {'old': self._hostname,
-                       'new': hostname})
+            LOG.error('Hostname has changed from %(old)s '
+                      'to %(new)s. A restart is required to take effect.',
+                      {'old': self._hostname, 'new': hostname})
         return self._hostname

     def find_secret(self, usage_type, usage_id):
@ -750,7 +746,7 @@ class Host(object):
             return secret
         except libvirt.libvirtError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error defining a secret with XML: %s'), xml)
+                LOG.error('Error defining a secret with XML: %s', xml)

     def delete_secret(self, usage_type, usage_id):
         """Delete a secret.
@ -800,8 +796,8 @@ class Host(object):
                 # TODO(sahid): Use get_info...
                 dom_mem = int(guest._get_domain_info(self)[2])
             except libvirt.libvirtError as e:
-                LOG.warning(_LW("couldn't obtain the memory from domain:"
-                                " %(uuid)s, exception: %(ex)s"),
+                LOG.warning("couldn't obtain the memory from domain:"
+                            " %(uuid)s, exception: %(ex)s",
                             {"uuid": guest.uuid, "ex": e})
                 continue
             # skip dom0

View File

@ -31,7 +31,6 @@ import six
 import nova.conf
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE, _LI, _LW
 from nova import image
 from nova import keymgr
 from nova import utils
@ -248,8 +247,8 @@ class Image(object):
             can_fallocate = not err
             self.__class__.can_fallocate = can_fallocate
             if not can_fallocate:
-                LOG.warning(_LW('Unable to preallocate image at path: '
-                                '%(path)s'), {'path': self.path})
+                LOG.warning('Unable to preallocate image at path: %(path)s',
+                            {'path': self.path})
         return can_fallocate

     def verify_base_size(self, base, size, base_size=0):
@ -274,11 +273,11 @@ class Image(object):
             base_size = self.get_disk_size(base)

         if size < base_size:
-            msg = _LE('%(base)s virtual size %(base_size)s '
-                      'larger than flavor root disk size %(size)s')
-            LOG.error(msg, {'base': base,
-                            'base_size': base_size,
-                            'size': size})
+            LOG.error('%(base)s virtual size %(base_size)s '
+                      'larger than flavor root disk size %(size)s',
+                      {'base': base,
+                       'base_size': base_size,
+                       'size': size})
             raise exception.FlavorDiskSmallerThanImage(
                 flavor_size=size, image_size=base_size)
@ -483,10 +482,9 @@ class Flat(Image):
             data = images.qemu_img_info(self.path)
             return data.file_format
         except exception.InvalidDiskInfo as e:
-            LOG.info(_LI('Failed to get image info from path %(path)s; '
-                         'error: %(error)s'),
-                     {'path': self.path,
-                      'error': e})
+            LOG.info('Failed to get image info from path %(path)s; '
+                     'error: %(error)s',
+                     {'path': self.path, 'error': e})
             return 'raw'

     def _supports_encryption(self):
@ -728,8 +726,8 @@ class Lvm(Image):
                         self.ephemeral_key_uuid).get_encoded()
                 except Exception:
                     with excutils.save_and_reraise_exception():
-                        LOG.error(_LE("Failed to retrieve ephemeral encryption"
-                                      " key"))
+                        LOG.error("Failed to retrieve ephemeral "
+                                  "encryption key")
             else:
                 raise exception.InternalError(
                     _("Instance disk to be encrypted but no context provided"))

View File

@ -32,9 +32,6 @@ from oslo_utils import encodeutils
 import six

 import nova.conf
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW
 from nova import utils
 from nova.virt import imagecache
 from nova.virt.libvirt import utils as libvirt_utils
@ -197,10 +194,9 @@ class ImageCacheManager(imagecache.ImageCacheManager):
                     inuse_images.append(backing_path)

                 if backing_path in self.unexplained_images:
-                    LOG.warning(_LW('Instance %(instance)s is using a '
-                                    'backing file %(backing)s which '
-                                    'does not appear in the image '
-                                    'service'),
+                    LOG.warning('Instance %(instance)s is using a '
+                                'backing file %(backing)s which '
+                                'does not appear in the image service',
                                 {'instance': ent,
                                  'backing': backing_file})
                     self.unexplained_images.remove(backing_path)
@ -261,7 +257,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
             if not exists or age < maxage:
                 return

-            LOG.info(_LI('Removing base or swap file: %s'), base_file)
+            LOG.info('Removing base or swap file: %s', base_file)
             try:
                 os.remove(base_file)
@ -279,14 +275,13 @@ class ImageCacheManager(imagecache.ImageCacheManager):
                 if os.path.exists(signature):
                     os.remove(signature)
             except OSError as e:
-                LOG.error(_LE('Failed to remove %(base_file)s, '
-                              'error was %(error)s'),
+                LOG.error('Failed to remove %(base_file)s, '
+                          'error was %(error)s',
                           {'base_file': base_file,
                            'error': e})

         if age < maxage:
-            LOG.info(_LI('Base or swap file too young to remove: %s'),
-                     base_file)
+            LOG.info('Base or swap file too young to remove: %s', base_file)
         else:
             _inner_remove_old_enough_file()
             if remove_lock:
@ -321,7 +316,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
     def _mark_in_use(self, img_id, base_file):
         """Mark a single base image as in use."""

-        LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
+        LOG.info('image %(id)s at (%(base_file)s): checking',
                  {'id': img_id, 'base_file': base_file})

         if base_file in self.unexplained_images:
@ -345,8 +340,8 @@ class ImageCacheManager(imagecache.ImageCacheManager):
         error_images = self.used_swap_images - self.back_swap_images
         for error_image in error_images:
-            LOG.warning(_LW('%s swap image was used by instance'
-                            ' but no back files existing!'), error_image)
+            LOG.warning('%s swap image was used by instance'
+                        ' but no back files existing!', error_image)

     def _age_and_verify_cached_images(self, context, all_instances, base_dir):
         LOG.debug('Verify base images')
@ -368,16 +363,16 @@ class ImageCacheManager(imagecache.ImageCacheManager):
         # Anything left is an unknown base image
         for img in self.unexplained_images:
-            LOG.warning(_LW('Unknown base file: %s'), img)
+            LOG.warning('Unknown base file: %s', img)
             self.removable_base_files.append(img)

         # Dump these lists
         if self.active_base_files:
-            LOG.info(_LI('Active base files: %s'),
+            LOG.info('Active base files: %s',
                      ' '.join(self.active_base_files))
         if self.removable_base_files:
-            LOG.info(_LI('Removable base files: %s'),
+            LOG.info('Removable base files: %s',
                      ' '.join(self.removable_base_files))

         if self.remove_unused_base_images:
View File

@ -20,9 +20,6 @@ import signal
from oslo_log import log as logging from oslo_log import log as logging
from nova.i18n import _LE
from nova.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -75,9 +72,9 @@ class InstanceJobTracker(object):
os.kill(pid, signal.SIGKILL) os.kill(pid, signal.SIGKILL)
except OSError as exc: except OSError as exc:
if exc.errno != errno.ESRCH: if exc.errno != errno.ESRCH:
LOG.error(_LE('Failed to kill process %(pid)s ' LOG.error('Failed to kill process %(pid)s '
'due to %(reason)s, while deleting the ' 'due to %(reason)s, while deleting the '
'instance.'), {'pid': pid, 'reason': exc}, 'instance.', {'pid': pid, 'reason': exc},
instance=instance) instance=instance)
try: try:
@ -85,14 +82,12 @@ class InstanceJobTracker(object):
os.kill(pid, 0) os.kill(pid, 0)
except OSError as exc: except OSError as exc:
if exc.errno != errno.ESRCH: if exc.errno != errno.ESRCH:
LOG.error(_LE('Unexpected error while checking process ' LOG.error('Unexpected error while checking process '
'%(pid)s.'), {'pid': pid}, '%(pid)s.', {'pid': pid}, instance=instance)
instance=instance)
else: else:
# The process is still around # The process is still around
LOG.warning(_LW("Failed to kill a long running process " LOG.warning("Failed to kill a long running process "
"%(pid)s related to the instance when " "%(pid)s related to the instance when "
"deleting it."), {'pid': pid}, "deleting it.", {'pid': pid}, instance=instance)
instance=instance)
self.remove_job(instance, pid) self.remove_job(instance, pid)
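
Note: the job tracker above uses the classic kill-then-probe idiom. A
self-contained sketch (the helper name is ours, not nova's):

    import errno
    import os
    import signal

    def kill_and_confirm(pid):
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError as exc:
            if exc.errno != errno.ESRCH:  # ESRCH: already gone, fine
                raise
        # Signal 0 delivers nothing; it only tests whether pid exists.
        try:
            os.kill(pid, 0)
        except OSError as exc:
            if exc.errno == errno.ESRCH:
                return True   # confirmed dead
            raise
        return False          # still around, as the warning above reports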

View File

@ -24,8 +24,6 @@ from oslo_log import log as logging

 from nova.compute import power_state
 import nova.conf
-from nova.i18n import _LI
-from nova.i18n import _LW

 LOG = logging.getLogger(__name__)
@ -240,7 +238,7 @@ def find_job_type(guest, instance):
                       instance=instance)
             return libvirt.VIR_DOMAIN_JOB_COMPLETED
         else:
-            LOG.info(_LI("Error %(ex)s, migration failed"),
+            LOG.info("Error %(ex)s, migration failed",
                      {"ex": ex}, instance=instance)
             return libvirt.VIR_DOMAIN_JOB_FAILED
@ -271,15 +269,14 @@ def should_abort(instance, now,
     if (progress_timeout != 0 and
             (now - progress_time) > progress_timeout):
-        LOG.warning(_LW("Live migration stuck for %d sec"),
+        LOG.warning("Live migration stuck for %d sec",
                     (now - progress_time), instance=instance)
         return True

     if (completion_timeout != 0 and
             elapsed > completion_timeout):
-        LOG.warning(
-            _LW("Live migration not completed after %d sec"),
-            completion_timeout, instance=instance)
+        LOG.warning("Live migration not completed after %d sec",
+                    completion_timeout, instance=instance)
         return True

     return False
@ -359,8 +356,8 @@ def update_downtime(guest, instance,
                   instance=instance)
         return olddowntime

-    LOG.info(_LI("Increasing downtime to %(downtime)d ms "
-                 "after %(waittime)d sec elapsed time"),
+    LOG.info("Increasing downtime to %(downtime)d ms "
+             "after %(waittime)d sec elapsed time",
             {"downtime": thisstep[1],
              "waittime": thisstep[0]},
             instance=instance)
@ -368,8 +365,7 @@
     try:
         guest.migrate_configure_max_downtime(thisstep[1])
     except libvirt.libvirtError as e:
-        LOG.warning(_LW("Unable to increase max downtime to %(time)d"
-                        "ms: %(e)s"),
+        LOG.warning("Unable to increase max downtime to %(time)d ms: %(e)s",
                     {"time": thisstep[1], "e": e}, instance=instance)
     return thisstep[1]
@ -404,14 +400,13 @@ def trigger_postcopy_switch(guest, instance, migration):
     try:
         guest.migrate_start_postcopy()
     except libvirt.libvirtError as e:
-        LOG.warning(_LW("Failed to switch to post-copy live "
-                        "migration: %s"),
+        LOG.warning("Failed to switch to post-copy live migration: %s",
                     e, instance=instance)
     else:
         # NOTE(ltomas): Change the migration status to indicate that
         # it is in post-copy active mode, i.e., the VM at
         # destination is the active one
-        LOG.info(_LI("Switching to post-copy migration mode"),
+        LOG.info("Switching to post-copy migration mode",
                  instance=instance)
         migration.status = 'running (post-copy)'
         migration.save()
@ -443,8 +438,8 @@ def run_tasks(guest, instance, active_migrations, on_migration_failure,
         task = tasks.popleft()
         if task == 'force-complete':
             if migration.status == 'running (post-copy)':
-                LOG.warning(_LW("Live-migration %s already switched "
-                                "to post-copy mode."),
+                LOG.warning("Live-migration %s already switched "
+                            "to post-copy mode.",
                             instance=instance)
             elif is_post_copy_enabled:
                 trigger_postcopy_switch(guest, instance, migration)
@ -453,11 +448,11 @@ def run_tasks(guest, instance, active_migrations, on_migration_failure,
                 guest.pause()
                 on_migration_failure.append("unpause")
             except Exception as e:
-                LOG.warning(_LW("Failed to pause instance during "
-                                "live-migration %s"),
+                LOG.warning("Failed to pause instance during "
+                            "live-migration %s",
                             e, instance=instance)
         else:
-            LOG.warning(_LW("Unknown migration task '%(task)s'"),
+            LOG.warning("Unknown migration task '%(task)s'",
                         {"task": task}, instance=instance)
@ -488,11 +483,11 @@ def run_recover_tasks(host, guest, instance, on_migration_failure):
             if state == power_state.PAUSED:
                 guest.resume()
         except Exception as e:
-            LOG.warning(_LW("Failed to resume paused instance "
-                            "before live-migration rollback %s"),
+            LOG.warning("Failed to resume paused instance "
+                        "before live-migration rollback %s",
                         e, instance=instance)
         else:
-            LOG.warning(_LW("Unknown migration task '%(task)s'"),
+            LOG.warning("Unknown migration task '%(task)s'",
                         {"task": task}, instance=instance)

View File

@ -20,7 +20,6 @@ from oslo_concurrency import processutils
 from oslo_log import log as logging
 from oslo_utils import excutils

-from nova.i18n import _LE
 from nova.virt.libvirt import utils
@ -67,8 +66,8 @@ def create_volume(target, device, cipher, key_size, key):
         utils.execute(*cmd, process_input=key, run_as_root=True)
     except processutils.ProcessExecutionError as e:
         with excutils.save_and_reraise_exception():
-            LOG.error(_LE("Could not start encryption for disk %(device)s: "
-                          "%(exception)s"), {'device': device, 'exception': e})
+            LOG.error("Could not start encryption for disk %(device)s: "
+                      "%(exception)s", {'device': device, 'exception': e})

 def delete_volume(target):
@ -87,10 +86,10 @@ def delete_volume(target):
             LOG.debug("Ignoring exit code 4, volume already destroyed")
         else:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Could not disconnect encrypted volume "
-                              "%(volume)s. If dm-crypt device is still active "
-                              "it will have to be destroyed manually for "
-                              "cleanup to succeed."), {'volume': target})
+                LOG.error("Could not disconnect encrypted volume "
+                          "%(volume)s. If dm-crypt device is still active "
+                          "it will have to be destroyed manually for "
+                          "cleanup to succeed.", {'volume': target})

 def list_volumes():

View File

@ -27,7 +27,6 @@ import six
 import nova.conf
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LW
 from nova.virt.libvirt import utils

 CONF = nova.conf.CONF
@ -62,11 +61,11 @@ def create_volume(vg, lv, size, sparse=False):
             preallocated_space = 64 * units.Mi
             check_size(vg, lv, preallocated_space)
             if free_space < size:
-                LOG.warning(_LW('Volume group %(vg)s will not be able'
-                                ' to hold sparse volume %(lv)s.'
-                                ' Virtual volume size is %(size)d bytes,'
-                                ' but free space on volume group is'
-                                ' only %(free_space)db.'),
+                LOG.warning('Volume group %(vg)s will not be able'
+                            ' to hold sparse volume %(lv)s.'
+                            ' Virtual volume size is %(size)d bytes,'
+                            ' but free space on volume group is'
+                            ' only %(free_space)db.',
                             {'vg': vg,
                              'free_space': free_space,
                              'size': size,
@ -210,8 +209,7 @@ def clear_volume(path):
     try:
         volume_size = get_volume_size(path)
     except exception.VolumeBDMPathNotFound:
-        LOG.warning(_LW('ignoring missing logical volume %(path)s'),
-                    {'path': path})
+        LOG.warning('ignoring missing logical volume %(path)s', {'path': path})
         return

     if volume_clear_size != 0 and volume_clear_size < volume_size:

View File

@ -32,8 +32,6 @@ from oslo_utils import units

 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LW
 from nova import utils
 from nova.virt.libvirt import utils as libvirt_utils
@ -78,7 +76,7 @@ class RBDVolumeProxy(object):
                 driver._disconnect_from_rados(client, ioctx)
             except rbd.Error:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("error opening rbd image %s"), name)
+                    LOG.exception(_("error opening rbd image %s"), name)
                     driver._disconnect_from_rados(client, ioctx)

         self.driver = driver
@ -306,13 +304,13 @@ class RBDDriver(object):
             try:
                 RbdProxy().remove(client.ioctx, name)
             except rbd.ImageNotFound:
-                LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '
-                                'found, failed to remove'),
+                LOG.warning('image %(volume)s in pool %(pool)s can not be '
+                            'found, failed to remove',
                             {'volume': name, 'pool': self.pool})
             except rbd.ImageHasSnapshots:
-                LOG.error(_LE('image %(volume)s in pool %(pool)s has '
-                              'snapshots, failed to remove'),
+                LOG.error('image %(volume)s in pool %(pool)s has '
+                          'snapshots, failed to remove',
                           {'volume': name, 'pool': self.pool})

     def import_image(self, base, name):
         """Import RBD volume from image file.
@ -342,9 +340,8 @@ class RBDDriver(object):
                 self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
                                  ignore_errors=True)
             except (rbd.ImageBusy, rbd.ImageHasSnapshots):
-                LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '
-                                'failed'),
-                            {'volume': volume, 'pool': self.pool})
+                LOG.warning('rbd remove %(volume)s in pool %(pool)s failed',
+                            {'volume': volume, 'pool': self.pool})
                 retryctx['retries'] -= 1
                 if retryctx['retries'] <= 0:
                     raise loopingcall.LoopingCallDone()
@ -406,17 +403,16 @@ class RBDDriver(object):
                     if force:
                         vol.unprotect_snap(name)
                     elif not ignore_errors:
-                        LOG.warning(_LW('snapshot(%(name)s) on rbd '
-                                        'image(%(img)s) is protected, '
-                                        'skipping'),
+                        LOG.warning('snapshot(%(name)s) on rbd '
+                                    'image(%(img)s) is protected, skipping',
                                     {'name': name, 'img': volume})
                         return
                 LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
                           {'name': name, 'img': volume})
                 vol.remove_snap(name)
             elif not ignore_errors:
-                LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
-                                'image(%(img)s)'),
+                LOG.warning('no snapshot(%(name)s) found on rbd '
+                            'image(%(img)s)',
                             {'name': name, 'img': volume})

     def rollback_to_snap(self, volume, name):

View File

@ -27,8 +27,6 @@ from oslo_log import log as logging

 import nova.conf
 from nova.i18n import _
-from nova.i18n import _LI
-from nova.i18n import _LW
 from nova.objects import fields as obj_fields
 from nova import utils
 from nova.virt.disk import api as disk
@ -167,7 +165,7 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
             else:
                 return "tap"
         else:
-            LOG.info(_LI("tap-ctl check: %s"), out)
+            LOG.info("tap-ctl check: %s", out)
     except OSError as exc:
         if exc.errno == errno.ENOENT:
             LOG.debug("tap-ctl tool is not installed")
@ -279,8 +277,8 @@ def update_mtime(path):
         # the same base image and using shared storage, so log the exception
         # but don't fail. Ideally we'd know if we were on shared storage and
         # would re-raise the error if we are not on shared storage.
-        LOG.warning(_LW("Failed to update mtime on path %(path)s. "
-                        "Error: %(error)s"),
+        LOG.warning("Failed to update mtime on path %(path)s. "
+                    "Error: %(error)s",
                     {'path': path, "error": exc})

View File

@ -29,7 +29,6 @@ from oslo_log import log as logging
 import nova.conf
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LE
 from nova.network import linux_net
 from nova.network import model as network_model
 from nova.network import os_vif_util
@ -634,10 +633,8 @@ class LibvirtGenericVIFDriver(object):
                           fabric, network_model.VIF_TYPE_IB_HOSTDEV,
                           pci_slot, run_as_root=True)
         except processutils.ProcessExecutionError:
-            LOG.exception(
-                _LE("Failed while plugging ib hostdev vif"),
-                instance=instance
-            )
+            LOG.exception(_("Failed while plugging ib hostdev vif"),
+                          instance=instance)

     def plug_802qbg(self, instance, vif):
         pass
@ -679,7 +676,7 @@ class LibvirtGenericVIFDriver(object):
             utils.execute('mm-ctl', '--bind-port', port_id, dev,
                           run_as_root=True)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+            LOG.exception(_("Failed while plugging vif"), instance=instance)

     def plug_iovisor(self, instance, vif):
         """Plug using PLUMgrid IO Visor Driver
@ -700,7 +697,7 @@ class LibvirtGenericVIFDriver(object):
                           'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id,
                           run_as_root=True)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+            LOG.exception(_("Failed while plugging vif"), instance=instance)

     def plug_tap(self, instance, vif):
         """Plug a VIF_TYPE_TAP virtual interface."""
@ -754,7 +751,7 @@ class LibvirtGenericVIFDriver(object):
             linux_net.create_tap_dev(dev, multiqueue=multiqueue)
             utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+            LOG.exception(_("Failed while plugging vif"), instance=instance)

     def _plug_os_vif(self, instance, vif):
         instance_info = os_vif_util.nova_to_osvif_instance(instance)
@ -817,16 +814,14 @@ class LibvirtGenericVIFDriver(object):
             linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                           v2_name)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while unplugging vif"),
-                          instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def unplug_ivs_ethernet(self, instance, vif):
         """Unplug the VIF by deleting the port from the bridge."""
         try:
             linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while unplugging vif"),
-                          instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def unplug_ivs_hybrid(self, instance, vif):
         """UnPlug using hybrid strategy (same as OVS)
@ -844,8 +839,7 @@ class LibvirtGenericVIFDriver(object):
             utils.execute('brctl', 'delbr', br_name, run_as_root=True)
             linux_net.delete_ivs_vif_port(v2_name)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while unplugging vif"),
-                          instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def unplug_ivs(self, instance, vif):
         if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
@ -864,7 +858,7 @@ class LibvirtGenericVIFDriver(object):
             utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
                           run_as_root=True)
         except Exception:
-            LOG.exception(_LE("Failed while unplugging ib hostdev vif"))
+            LOG.exception(_("Failed while unplugging ib hostdev vif"))

     def unplug_802qbg(self, instance, vif):
         pass
@ -900,8 +894,7 @@ class LibvirtGenericVIFDriver(object):
                           run_as_root=True)
             linux_net.delete_net_dev(dev)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while unplugging vif"),
-                          instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def unplug_tap(self, instance, vif):
         """Unplug a VIF_TYPE_TAP virtual interface."""
@ -909,8 +902,7 @@ class LibvirtGenericVIFDriver(object):
         try:
             linux_net.delete_net_dev(dev)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while unplugging vif"),
-                          instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def unplug_iovisor(self, instance, vif):
         """Unplug using PLUMgrid IO Visor Driver
@ -926,8 +918,7 @@ class LibvirtGenericVIFDriver(object):
                           run_as_root=True)
             linux_net.delete_net_dev(dev)
         except processutils.ProcessExecutionError:
-            LOG.exception(_LE("Failed while unplugging vif"),
-                          instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def unplug_vhostuser(self, instance, vif):
         pass
@ -943,8 +934,7 @@ class LibvirtGenericVIFDriver(object):
             utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
             linux_net.delete_net_dev(dev)
         except processutils.ProcessExecutionError:
-            LOG.exception(
-                _LE("Failed while unplugging vif"), instance=instance)
+            LOG.exception(_("Failed while unplugging vif"), instance=instance)

     def _unplug_os_vif(self, instance, vif):
         instance_info = os_vif_util.nova_to_osvif_instance(instance)

View File

@ -16,7 +16,6 @@ from os_brick.initiator import connector
 from oslo_log import log as logging

 import nova.conf
-from nova.i18n import _LW
 from nova import utils
 from nova.virt.libvirt.volume import volume as libvirt_volume
@ -73,7 +72,7 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
         try:
             self.connector.disconnect_volume(connection_info['data'], None)
         except os_brick_exception.VolumeDeviceNotFound as exc:
-            LOG.warning(_LW('Ignoring VolumeDeviceNotFound: %s'), exc)
+            LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
             return
         LOG.debug("Disconnected iSCSI Volume %s", disk_dev)

View File

@ -23,7 +23,7 @@ import six
 import nova.conf
 from nova import exception
-from nova.i18n import _LE, _LW
+from nova.i18n import _
 from nova import utils

 CONF = nova.conf.CONF
@ -111,8 +111,7 @@ class _HostMountStateManager(object):
         """
         with self.cond:
             if self.state is not None:
-                LOG.warning(_LW("host_up called, but we think host is "
-                                "already up"))
+                LOG.warning("host_up called, but we think host is already up")
                 self._host_down()

             # Wait until all operations using a previous state generation are
@ -139,8 +138,7 @@
         """
         with self.cond:
             if self.state is None:
-                LOG.warning(_LW("host_down called, but we don't think host "
-                                "is up"))
+                LOG.warning("host_down called, but we don't think host is up")
                 return

             self._host_down()
@ -313,10 +311,10 @@ class _HostMountState(object):
                     # We're not going to raise the exception because we're
                     # in the desired state anyway. However, this is still
                     # unusual so we'll log it.
-                    LOG.exception(_LE('Error mounting %(fstype)s export '
-                                      '%(export)s on %(mountpoint)s. '
-                                      'Continuing because mountpount is '
-                                      'mounted despite this.'),
+                    LOG.exception(_('Error mounting %(fstype)s export '
+                                    '%(export)s on %(mountpoint)s. '
+                                    'Continuing because mountpount is '
+                                    'mounted despite this.'),
                                   {'fstype': fstype, 'export': export,
                                    'mountpoint': mountpoint})
@ -353,10 +351,9 @@ class _HostMountState(object):
             try:
                 mount.remove_attachment(vol_name, instance.uuid)
             except KeyError:
-                LOG.warning(_LW("Request to remove attachment "
-                                "(%(vol_name)s, %(instance)s) from "
-                                "%(mountpoint)s, but we don't think it's in "
-                                "use."),
+                LOG.warning("Request to remove attachment "
+                            "(%(vol_name)s, %(instance)s) from "
+                            "%(mountpoint)s, but we don't think it's in use.",
                             {'vol_name': vol_name, 'instance': instance.uuid,
                              'mountpoint': mountpoint})
@ -384,15 +381,15 @@ class _HostMountState(object):
             utils.execute('umount', mountpoint, run_as_root=True,
                           attempts=3, delay_on_retry=True)
         except processutils.ProcessExecutionError as ex:
-            LOG.error(_LE("Couldn't unmount %(mountpoint)s: %(reason)s"),
+            LOG.error("Couldn't unmount %(mountpoint)s: %(reason)s",
                       {'mountpoint': mountpoint, 'reason': six.text_type(ex)})

         if not os.path.ismount(mountpoint):
             try:
                 utils.execute('rmdir', mountpoint)
             except processutils.ProcessExecutionError as ex:
-                LOG.error(_LE("Couldn't remove directory %(mountpoint)s: "
-                              "%(reason)s"),
+                LOG.error("Couldn't remove directory %(mountpoint)s: "
+                          "%(reason)s",
                           {'mountpoint': mountpoint,
                            'reason': six.text_type(ex)})
             return False
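
Note: _HostMountState unmounts with retries and then removes the empty
mountpoint. A standalone sketch using oslo.concurrency directly (unlike the
real code, no rootwrap here, so this only works on paths the caller may
unmount; the helper name is ours):

    import os

    from oslo_concurrency import processutils

    def umount_and_cleanup(mountpoint):
        try:
            processutils.execute('umount', mountpoint,
                                 attempts=3, delay_on_retry=True)
        except processutils.ProcessExecutionError:
            pass  # the real code logs the failure and carries on
        if not os.path.ismount(mountpoint):
            try:
                processutils.execute('rmdir', mountpoint)
            except processutils.ProcessExecutionError:
                return False
        return True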

View File

@ -14,7 +14,7 @@ from oslo_log import log as logging

 import nova.conf
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova import utils
 from nova.virt.libvirt.volume import volume as libvirt_volume
@ -81,10 +81,10 @@ class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
             # NOTE(mriedem): We'll have to be extra careful about this in case
             # the reason we got here is due to an old volume connection created
             # before we started preferring the Cinder settings in Ocata.
-            LOG.warning(_LW('Falling back to Nova configuration values for '
-                            'RBD authentication. Cinder should be configured '
-                            'for auth with Ceph volumes. This fallback will '
-                            'be dropped in the Nova 16.0.0 Pike release.'))
+            LOG.warning('Falling back to Nova configuration values for '
+                        'RBD authentication. Cinder should be configured '
+                        'for auth with Ceph volumes. This fallback will '
+                        'be dropped in the Nova 16.0.0 Pike release.')
             # use the nova config values
             conf.auth_username = CONF.libvirt.rbd_user
             conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid

View File

@@ -24,8 +24,6 @@ import six
 import nova.conf
 from nova import exception as nova_exception
 from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LI
 from nova import utils
 from nova.virt.libvirt import utils as libvirt_utils
 from nova.virt.libvirt.volume import fs
@@ -53,7 +51,7 @@ def mount_volume(volume, mnt_base, configfile=None):
                   mnt_base)
     # Run mount command but do not fail on already mounted exit code
     utils.execute(*command, check_exit_code=[0, 4])
-    LOG.info(_LI('Mounted volume: %s'), volume)
+    LOG.info('Mounted volume: %s', volume)

 def umount_volume(mnt_base):
@@ -62,10 +60,9 @@ def umount_volume(mnt_base):
         utils.execute('umount.quobyte', mnt_base)
     except processutils.ProcessExecutionError as exc:
         if 'Device or resource busy' in six.text_type(exc):
-            LOG.error(_LE("The Quobyte volume at %s is still in use."),
-                      mnt_base)
+            LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
         else:
-            LOG.exception(_LE("Couldn't unmount the Quobyte Volume at %s"),
+            LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
                           mnt_base)
@@ -81,8 +78,8 @@ def validate_volume(mnt_base):
         raise nova_exception.InternalError(msg)
     if not os.access(mnt_base, os.W_OK | os.X_OK):
-        msg = (_LE("Volume is not writable. Please broaden the file"
-                   " permissions. Mount: %s") % mnt_base)
+        msg = _("Volume is not writable. Please broaden the file"
+                " permissions. Mount: %s") % mnt_base
         raise nova_exception.InternalError(msg)
@@ -121,8 +118,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
             except OSError as exc:
                 if exc.errno == errno.ENOTCONN:
                     mounted = False
-                    LOG.info(_LI('Fixing previous mount %s which was not'
-                                 ' unmounted correctly.'), mount_path)
+                    LOG.info('Fixing previous mount %s which was not'
+                             ' unmounted correctly.', mount_path)
                     umount_volume(mount_path)

         if not mounted:
@@ -143,7 +140,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
         if libvirt_utils.is_mounted(mount_path, 'quobyte@' + quobyte_volume):
             umount_volume(mount_path)
         else:
-            LOG.info(_LI("Trying to disconnected unmounted volume at %s"),
+            LOG.info("Trying to disconnected unmounted volume at %s",
                      mount_path)

     def _normalize_export(self, export):
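
Note: the hunks above show the two distinct treatments this change applies. Strings that only reach the logs lose their _LE/_LI/_LW markers entirely, while strings that are raised to the user (here the InternalError message) keep the _() translation marker. A minimal, self-contained sketch of the resulting convention (names and the stand-in _() are illustrative, not from the patch):

    import logging

    log = logging.getLogger(__name__)

    def _(s):
        # stand-in for nova.i18n._, which marks user-facing text
        return s

    def validate_writable(path, is_writable):
        if not is_writable:
            # operator-facing log line: plain English, greppable
            log.error("Volume is not writable: %s", path)
            # user-facing error message: still marked for translation
            raise RuntimeError(_("Volume is not writable. Mount: %s") % path)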

View File

@@ -24,7 +24,7 @@ from oslo_utils import importutils
 import six

 import nova.conf
-from nova.i18n import _LE, _LW
+from nova.i18n import _
 from nova import utils

 LOG = logging.getLogger(__name__)
@@ -52,7 +52,7 @@ def mount_share(mount_path, export_path,
         utils.execute(*mount_cmd, run_as_root=True)
     except processutils.ProcessExecutionError as exc:
         if 'Device or resource busy' in six.text_type(exc):
-            LOG.warning(_LW("%s is already mounted"), export_path)
+            LOG.warning("%s is already mounted", export_path)
         else:
             raise
@@ -70,8 +70,7 @@ def unmount_share(mount_path, export_path):
         if 'target is busy' in six.text_type(exc):
             LOG.debug("The share %s is still in use.", export_path)
         else:
-            LOG.exception(_LE("Couldn't unmount the share %s"),
-                          export_path)
+            LOG.exception(_("Couldn't unmount the share %s"), export_path)

 class RemoteFilesystem(object):

View File

@@ -21,8 +21,6 @@ from oslo_log import log as logging
 import nova.conf
 from nova import exception
-from nova.i18n import _LE
-from nova.i18n import _LW
 from nova import profiler
 from nova.virt.libvirt import config as vconfig
 import nova.virt.libvirt.driver
@@ -76,8 +74,8 @@ class LibvirtBaseVolumeDriver(object):
                     new_key = 'disk_' + k
                     setattr(conf, new_key, v)
                 else:
-                    LOG.warning(_LW('Unknown content in connection_info/'
-                                    'qos_specs: %s'), specs)
+                    LOG.warning('Unknown content in connection_info/'
+                                'qos_specs: %s', specs)

         # Extract access_mode control parameters
         if 'access_mode' in data and data['access_mode']:
@@ -85,8 +83,8 @@ class LibvirtBaseVolumeDriver(object):
             if access_mode in ('ro', 'rw'):
                 conf.readonly = access_mode == 'ro'
             else:
-                LOG.error(_LE('Unknown content in '
-                              'connection_info/access_mode: %s'),
+                LOG.error('Unknown content in '
+                          'connection_info/access_mode: %s',
                           access_mode)
                 raise exception.InvalidVolumeAccessMode(
                     access_mode=access_mode)

View File

@@ -16,7 +16,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils

 from nova import exception
-from nova.i18n import _LW, _LI

 libosinfo = None
 LOG = logging.getLogger(__name__)
@@ -40,7 +39,7 @@ class _OsInfoDatabase(object):
             libosinfo = importutils.import_module(
                 'gi.repository.Libosinfo')
         except ImportError as exp:
-            LOG.info(_LI("Cannot load Libosinfo: (%s)"), exp)
+            LOG.info("Cannot load Libosinfo: (%s)", exp)
         else:
             self.loader = libosinfo.Loader()
             self.loader.process_default_path()
@@ -94,8 +93,7 @@ class OsInfo(object):
         try:
             return _OsInfoDatabase.get_instance().get_os(os_name)
         except exception.NovaException as e:
-            LOG.warning(_LW("Cannot find OS information "
-                            "- Reason: (%s)"), e)
+            LOG.warning("Cannot find OS information - Reason: (%s)", e)

     @property
     def network_model(self):

View File

@@ -20,7 +20,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_serialization import jsonutils

-from nova.i18n import _LW
 from nova import utils

 LOG = logging.getLogger(__name__)
@@ -62,7 +61,7 @@ def register_storage_use(storage_path, hostname):
             try:
                 d = jsonutils.loads(f.read())
             except ValueError:
-                LOG.warning(_LW("Cannot decode JSON from %(id_path)s"),
+                LOG.warning("Cannot decode JSON from %(id_path)s",
                             {"id_path": id_path})

     d[hostname] = time.time()
@@ -90,7 +89,7 @@ def get_storage_users(storage_path):
             try:
                 d = jsonutils.loads(f.read())
             except ValueError:
-                LOG.warning(_LW("Cannot decode JSON from %(id_path)s"),
+                LOG.warning("Cannot decode JSON from %(id_path)s",
                             {"id_path": id_path})

     recent_users = []

View File

@@ -34,7 +34,7 @@ from nova.compute import power_state
 from nova.compute import task_states
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LI, _LE, _LW
+from nova.i18n import _
 from nova.virt import driver
 from nova.virt.vmwareapi import constants
 from nova.virt.vmwareapi import error_util
@@ -131,7 +131,7 @@ class VMwareVCDriver(driver.ComputeDriver):
         next_min_ver = v_utils.convert_version_to_int(
             constants.NEXT_MIN_VC_VERSION)
         vc_version = vim_util.get_vc_version(self._session)
-        LOG.info(_LI("VMware vCenter version: %s"), vc_version)
+        LOG.info("VMware vCenter version: %s", vc_version)
         if v_utils.convert_version_to_int(vc_version) < min_version:
             raise exception.NovaException(
                 _('Detected vCenter version %(version)s. Nova requires VMware '
@@ -139,10 +139,10 @@ class VMwareVCDriver(driver.ComputeDriver):
                   'version': vc_version,
                   'min_version': constants.MIN_VC_VERSION})
         elif v_utils.convert_version_to_int(vc_version) < next_min_ver:
-            LOG.warning(_LW('Running Nova with a VMware vCenter version less '
-                            'than %(version)s is deprecated. The required '
-                            'minimum version of vCenter will be raised to '
-                            '%(version)s in the 16.0.0 release.'),
+            LOG.warning('Running Nova with a VMware vCenter version less '
+                        'than %(version)s is deprecated. The required '
+                        'minimum version of vCenter will be raised to '
+                        '%(version)s in the 16.0.0 release.',
                         {'version': constants.NEXT_MIN_VC_VERSION})

     @property
@@ -166,8 +166,7 @@ class VMwareVCDriver(driver.ComputeDriver):
                 CONF.vmware.pbm_default_policy):
             raise error_util.PbmDefaultPolicyDoesNotExist()
         if CONF.vmware.datastore_regex:
-            LOG.warning(_LW(
-                "datastore_regex is ignored when PBM is enabled"))
+            LOG.warning("datastore_regex is ignored when PBM is enabled")
             self._datastore_regex = None

     def init_host(self, host):
@@ -365,13 +364,13 @@ class VMwareVCDriver(driver.ComputeDriver):
                     self.detach_volume(connection_info, instance,
                                        disk.get('device_name'))
                 except exception.DiskNotFound:
-                    LOG.warning(_LW('The volume %s does not exist!'),
+                    LOG.warning('The volume %s does not exist!',
                                 disk.get('device_name'),
                                 instance=instance)
                 except Exception as e:
                     with excutils.save_and_reraise_exception():
-                        LOG.error(_LE("Failed to detach %(device_name)s. "
-                                      "Exception: %(exc)s"),
+                        LOG.error("Failed to detach %(device_name)s. "
+                                  "Exception: %(exc)s",
                                   {'device_name': disk.get('device_name'),
                                    'exc': e},
                                   instance=instance)
@@ -396,8 +395,8 @@ class VMwareVCDriver(driver.ComputeDriver):
             try:
                 self._detach_instance_volumes(instance, block_device_info)
             except vexc.ManagedObjectNotFoundException:
-                LOG.warning(_LW('Instance does not exists. Proceeding to '
-                                'delete instance properties on datastore'),
+                LOG.warning('Instance does not exists. Proceeding to '
+                            'delete instance properties on datastore',
                             instance=instance)
         self._vmops.destroy(instance, destroy_disks)
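
Note: dropping the marker functions does not change how the message arguments are handled. oslo.log follows stdlib logging, which interpolates arguments lazily, only when a record is actually emitted, so the %(...)s placeholder style in the calls above survives the removal of _LW() unchanged. A small stand-alone sketch (the values are made up):

    import logging

    logging.basicConfig()
    log = logging.getLogger(__name__)

    # The dict is interpolated into the %(...)s placeholders only if the
    # warning is actually emitted, exactly as in the diff above.
    log.warning("Failed to detach %(device_name)s. Exception: %(exc)s",
                {'device_name': '/dev/sdb', 'exc': 'device busy'})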

View File

@@ -24,7 +24,7 @@ from oslo_vmware import pbm
 from oslo_vmware import vim_util as vutil

 from nova import exception
-from nova.i18n import _, _LE, _LI
+from nova.i18n import _
 from nova.virt.vmwareapi import constants
 from nova.virt.vmwareapi import vim_util
 from nova.virt.vmwareapi import vm_util
@@ -277,7 +277,7 @@ def disk_move(session, dc_ref, src_file, dst_file):
                                destDatacenter=dc_ref,
                                force=False)
     session._wait_for_task(move_task)
-    LOG.info(_LI("Moved virtual disk from %(src)s to %(dst)s."),
+    LOG.info("Moved virtual disk from %(src)s to %(dst)s.",
              {'src': src_file, 'dst': dst_file})
@@ -295,7 +295,7 @@ def disk_copy(session, dc_ref, src_file, dst_file):
                                destDatacenter=dc_ref,
                                force=False)
     session._wait_for_task(copy_disk_task)
-    LOG.info(_LI("Copied virtual disk from %(src)s to %(dst)s."),
+    LOG.info("Copied virtual disk from %(src)s to %(dst)s.",
              {'src': src_file, 'dst': dst_file})
@@ -309,7 +309,7 @@ def disk_delete(session, dc_ref, file_path):
                                name=str(file_path),
                                datacenter=dc_ref)
     session._wait_for_task(delete_disk_task)
-    LOG.info(_LI("Deleted virtual disk %s."), file_path)
+    LOG.info("Deleted virtual disk %s.", file_path)

 def file_move(session, dc_ref, src_file, dst_file):
@@ -451,8 +451,7 @@ def _filter_datastores_matching_storage_policy(session, data_stores,
                            if oc.obj in matching_ds]
         data_stores.objects = object_contents
         return data_stores
-    LOG.error(_LE("Unable to retrieve storage policy with name %s"),
-              storage_policy)
+    LOG.error("Unable to retrieve storage policy with name %s", storage_policy)

 def _update_datacenter_cache_from_objects(session, dcs):

View File

@@ -25,7 +25,6 @@ from oslo_vmware import exceptions as vexc
 import nova.conf
 from nova import context
 from nova import exception
-from nova.i18n import _LW
 from nova import objects
 from nova.objects import fields as obj_fields
 from nova.virt.vmwareapi import ds_util
@@ -79,8 +78,8 @@ class VCState(object):
             about_info = self._session._call_method(vim_util, "get_about_info")
         except (vexc.VimConnectionException, vexc.VimAttributeException) as ex:
             # VimAttributeException is thrown when vpxd service is down
-            LOG.warning(_LW("Failed to connect with %(node)s. "
-                            "Error: %(error)s"),
+            LOG.warning("Failed to connect with %(node)s. "
+                        "Error: %(error)s",
                         {'node': self._host_name, 'error': ex})
             self._set_host_enabled(False)
             return data

View File

@@ -42,7 +42,6 @@ from oslo_utils import timeutils
 from oslo_vmware import exceptions as vexc
 from oslo_vmware import vim_util as vutil

-from nova.i18n import _LI, _LW
 from nova.virt import imagecache
 from nova.virt.vmwareapi import ds_util
@@ -69,7 +68,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
                 vexc.FileLockedException) as e:
             # There may be more than one process or thread that tries
             # to delete the file.
-            LOG.warning(_LW("Unable to delete %(file)s. Exception: %(ex)s"),
+            LOG.warning("Unable to delete %(file)s. Exception: %(ex)s",
                         {'file': ds_path, 'ex': e})
         except vexc.FileNotFoundException:
             LOG.debug("File not found: %s", ds_path)
@@ -157,13 +156,12 @@ class ImageCacheManager(imagecache.ImageCacheManager):
                     ds_util.mkdir(self._session, ts_path, dc_info.ref)
                 except vexc.FileAlreadyExistsException:
                     LOG.debug("Timestamp already exists.")
-                LOG.info(_LI("Image %s is no longer used by this node. "
-                             "Pending deletion!"), image)
+                LOG.info("Image %s is no longer used by this node. "
+                         "Pending deletion!", image)
             else:
                 dt = self._get_datetime_from_filename(str(ts))
                 if timeutils.is_older_than(dt, age_seconds):
-                    LOG.info(_LI("Image %s is no longer used. "
-                                 "Deleting!"), path)
+                    LOG.info("Image %s is no longer used. Deleting!", path)
                     # Image has aged - delete the image ID folder
                     self._folder_delete(path, dc_info.ref)

View File

@@ -31,7 +31,7 @@ from oslo_vmware import rw_handles
 from nova import exception
-from nova.i18n import _, _LI
+from nova.i18n import _
 from nova import image
 from nova.objects import fields
 from nova.virt.vmwareapi import constants
@@ -358,11 +358,11 @@ def fetch_image_stream_optimized(context, instance, session, vm_name,
     imported_vm_ref = write_handle.get_imported_vm()

-    LOG.info(_LI("Downloaded image file data %(image_ref)s"),
+    LOG.info("Downloaded image file data %(image_ref)s",
              {'image_ref': instance.image_ref}, instance=instance)
     vmdk = vm_util.get_vmdk_info(session, imported_vm_ref, vm_name)
     session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
-    LOG.info(_LI("The imported VM was unregistered"), instance=instance)
+    LOG.info("The imported VM was unregistered", instance=instance)
     return vmdk.capacity_in_bytes
@@ -420,15 +420,15 @@ def fetch_image_ova(context, instance, session, vm_name, ds_name,
                     vm_import_spec,
                     file_size)
                 image_transfer(extracted, write_handle)
-                LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
+                LOG.info("Downloaded OVA image file %(image_ref)s",
                          {'image_ref': instance.image_ref}, instance=instance)
                 imported_vm_ref = write_handle.get_imported_vm()
                 vmdk = vm_util.get_vmdk_info(session,
                                              imported_vm_ref,
                                              vm_name)
                 session._call_method(session.vim, "UnregisterVM",
                                      imported_vm_ref)
-                LOG.info(_LI("The imported VM was unregistered"),
+                LOG.info("The imported VM was unregistered",
                          instance=instance)
                 return vmdk.capacity_in_bytes
     raise exception.ImageUnacceptable(

View File

@@ -21,7 +21,7 @@ from oslo_vmware import vim_util
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LI, _LW
+from nova.i18n import _
 from nova.network import model
 from nova.virt.vmwareapi import constants
 from nova.virt.vmwareapi import network_util
@@ -96,8 +96,8 @@ def _check_ovs_supported_version(session):
     vc_version = versionutils.convert_version_to_int(
         vim_util.get_vc_version(session))
     if vc_version < min_version:
-        LOG.warning(_LW('VMware vCenter version less than %(version)s '
-                        'does not support the \'ovs\' port type.'),
+        LOG.warning('VMware vCenter version less than %(version)s '
+                    'does not support the \'ovs\' port type.',
                     {'version': constants.MIN_VC_OVS_VERSION})
@@ -118,9 +118,9 @@ def _get_neutron_network(session, cluster, vif):
         if not net_id:
             # Make use of the original one, in the event that the
             # plugin does not pass the aforementioned id
-            LOG.info(_LI('NSX Logical switch ID is not present. '
-                         'Using network ID to attach to the '
-                         'opaque network.'))
+            LOG.info('NSX Logical switch ID is not present. '
+                     'Using network ID to attach to the '
+                     'opaque network.')
             net_id = vif['network']['id']
             use_external_id = True
             network_type = 'nsx.LogicalSwitch'

View File

@@ -33,7 +33,7 @@ from oslo_vmware import vim_util as vutil
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova.network import model as network_model
 from nova.virt.vmwareapi import constants
 from nova.virt.vmwareapi import vim_util
@@ -1280,7 +1280,7 @@ def get_all_cluster_mors(session):
         return results.objects
     except Exception as excep:
-        LOG.warning(_LW("Failed to get cluster references %s"), excep)
+        LOG.warning("Failed to get cluster references %s", excep)

 def get_cluster_ref_by_name(session, cluster_name):
@@ -1327,10 +1327,10 @@ def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
         # Consequently, a value which we don't recognise may in fact be valid.
         with excutils.save_and_reraise_exception():
             if config_spec.guestId not in constants.VALID_OS_TYPES:
-                LOG.warning(_LW('vmware_ostype from image is not recognised: '
-                                '\'%(ostype)s\'. An invalid os type may be '
-                                'one cause of this instance creation failure'),
+                LOG.warning('vmware_ostype from image is not recognised: '
+                            '\'%(ostype)s\'. An invalid os type may be '
+                            'one cause of this instance creation failure',
                             {'ostype': config_spec.guestId})
     LOG.debug("Created VM on the ESX host", instance=instance)
     return task_info.result
@@ -1344,9 +1344,9 @@ def destroy_vm(session, instance, vm_ref=None):
         destroy_task = session._call_method(session.vim, "Destroy_Task",
                                             vm_ref)
         session._wait_for_task(destroy_task)
-        LOG.info(_LI("Destroyed the VM"), instance=instance)
+        LOG.info("Destroyed the VM", instance=instance)
     except Exception:
-        LOG.exception(_LE('Destroy VM failed'), instance=instance)
+        LOG.exception(_('Destroy VM failed'), instance=instance)

 def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
@@ -1606,7 +1606,7 @@ def create_folder(session, parent_folder_ref, name):
     try:
         folder = session._call_method(session.vim, "CreateFolder",
                                       parent_folder_ref, name=name)
-        LOG.info(_LI("Created folder: %(name)s in parent %(parent)s."),
+        LOG.info("Created folder: %(name)s in parent %(parent)s.",
                  {'name': name, 'parent': parent_folder_ref.value})
     except vexc.DuplicateName as e:
         LOG.debug("Folder already exists: %(name)s. Parent ref: %(parent)s.",

View File

@@ -43,7 +43,7 @@ import nova.conf
 from nova.console import type as ctype
 from nova import context as nova_context
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova import network
 from nova import objects
 from nova import utils
@@ -176,7 +176,7 @@ class VMwareVMOps(object):
             self._session._wait_for_task(vmdk_extend_task)
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Extending virtual disk failed with error: %s'),
+                LOG.error('Extending virtual disk failed with error: %s',
                           e, instance=instance)
                 # Clean up files created during the extend operation
                 files = [name.replace(".vmdk", "-flat.vmdk"), name]
@@ -392,7 +392,7 @@ class VMwareVMOps(object):
            host, cookies = self._get_esx_host_and_cookies(vi.datastore,
                dc_path, image_ds_loc.rel_path)
        except Exception as e:
-            LOG.warning(_LW("Get esx cookies failed: %s"), e,
+            LOG.warning("Get esx cookies failed: %s", e,
                         instance=vi.instance)
            dc_path = vutil.get_inventory_path(session.vim, vi.dc_info.ref)
@@ -507,8 +507,8 @@ class VMwareVMOps(object):
             # due to action external to the process.
             # In the event of a FileAlreadyExists we continue,
             # all other exceptions will be raised.
-            LOG.warning(_LW("Destination %s already exists! Concurrent moves "
-                            "can lead to unexpected results."),
+            LOG.warning("Destination %s already exists! Concurrent moves "
+                        "can lead to unexpected results.",
                         dst_folder_ds_path)

     def _cache_sparse_image(self, vi, tmp_image_ds_loc):
@@ -833,7 +833,7 @@ class VMwareVMOps(object):
                           CONF.config_drive_format)
                 raise exception.InstancePowerOnFailure(reason=reason)

-        LOG.info(_LI('Using config drive for instance'), instance=instance)
+        LOG.info('Using config drive for instance', instance=instance)
         extra_md = {}
         if admin_password:
             extra_md['admin_pass'] = admin_password
@@ -861,7 +861,7 @@ class VMwareVMOps(object):
             return upload_iso_path
         except Exception as e:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Creating config drive failed with error: %s'),
+                LOG.error('Creating config drive failed with error: %s',
                           e, instance=instance)

     def _attach_cdrom_to_vm(self, vm_ref, instance,
@@ -941,8 +941,7 @@ class VMwareVMOps(object):
                                    name=vm_name,
                                    spec=clone_spec)
         self._session._wait_for_task(vm_clone_task)
-        LOG.info(_LI("Created linked-clone VM from snapshot"),
-                 instance=instance)
+        LOG.info("Created linked-clone VM from snapshot", instance=instance)
         task_info = self._session._call_method(vutil,
                                                "get_object_property",
                                                vm_clone_task,
@@ -1077,9 +1076,9 @@ class VMwareVMOps(object):
                                               "UnregisterVM", vm_ref)
                 LOG.debug("Unregistered the VM", instance=instance)
             except Exception as excep:
-                LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, got "
-                                "this exception while un-registering the VM: "
-                                "%s"), excep, instance=instance)
+                LOG.warning("In vmwareapi:vmops:_destroy_instance, got "
+                            "this exception while un-registering the VM: %s",
+                            excep, instance=instance)
             # Delete the folder holding the VM related content on
             # the datastore.
             if destroy_disks and vm_ds_path:
@@ -1100,16 +1099,15 @@ class VMwareVMOps(object):
                                 {'datastore_name': vm_ds_path.datastore},
                                 instance=instance)
                 except Exception:
-                    LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, "
-                                    "exception while deleting the VM contents "
-                                    "from the disk"),
+                    LOG.warning("In vmwareapi:vmops:_destroy_instance, "
+                                "exception while deleting the VM contents "
+                                "from the disk",
                                 exc_info=True, instance=instance)
         except exception.InstanceNotFound:
-            LOG.warning(_LW('Instance does not exist on backend'),
+            LOG.warning('Instance does not exist on backend',
                         instance=instance)
         except Exception:
-            LOG.exception(_LE('Destroy instance failed'),
-                          instance=instance)
+            LOG.exception(_('Destroy instance failed'), instance=instance)
         finally:
             vm_util.vm_ref_cache_delete(instance.uuid)
@@ -1238,7 +1236,7 @@ class VMwareVMOps(object):
             rescue_device = self._get_rescue_device(instance, vm_ref)
         except exception.NotFound:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Unable to access the rescue disk'),
+                LOG.error('Unable to access the rescue disk',
                           instance=instance)
         vm_util.power_off_instance(self._session, instance, vm_ref)
         self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device,
@@ -1488,11 +1486,11 @@ class VMwareVMOps(object):
                                                       timeout=timeout)
         if instances_info["instance_count"] > 0:
-            LOG.info(_LI("Found %(instance_count)d hung reboots "
-                         "older than %(timeout)d seconds"), instances_info)
+            LOG.info("Found %(instance_count)d hung reboots "
+                     "older than %(timeout)d seconds", instances_info)

         for instance in instances:
-            LOG.info(_LI("Automatically hard rebooting"), instance=instance)
+            LOG.info("Automatically hard rebooting", instance=instance)
             self.compute_api.reboot(ctxt, instance, "HARD")

     def get_info(self, instance):
@@ -1763,8 +1761,7 @@ class VMwareVMOps(object):
             vm_util.reconfigure_vm(self._session, vm_ref,
                                    attach_config_spec)
         except Exception as e:
-            LOG.error(_LE('Attaching network adapter failed. Exception: '
-                          '%s'),
+            LOG.error('Attaching network adapter failed. Exception: %s',
                       e, instance=instance)
             raise exception.InterfaceAttachFailed(
                     instance_uuid=instance.uuid)
@@ -1812,8 +1809,7 @@ class VMwareVMOps(object):
             vm_util.reconfigure_vm(self._session, vm_ref,
                                    detach_config_spec)
         except Exception as e:
-            LOG.error(_LE('Detaching network adapter failed. Exception: '
-                          '%s'),
+            LOG.error('Detaching network adapter failed. Exception: %s',
                       e, instance=instance)
             raise exception.InterfaceDetachFailed(
                     instance_uuid=instance.uuid)
@@ -1883,14 +1879,11 @@ class VMwareVMOps(object):
                         str(vi.cache_image_path),
                         str(sized_disk_ds_loc))
                 except Exception as e:
-                    LOG.warning(_LW("Root disk file creation "
-                                    "failed - %s"),
-                                e, instance=vi.instance)
+                    LOG.warning("Root disk file creation failed - %s",
+                                e, instance=vi.instance)
                     with excutils.save_and_reraise_exception():
-                        LOG.error(_LE('Failed to copy cached '
-                                      'image %(source)s to '
-                                      '%(dest)s for resize: '
-                                      '%(error)s'),
+                        LOG.error('Failed to copy cached image %(source)s to '
+                                  '%(dest)s for resize: %(error)s',
                                   {'source': vi.cache_image_path,
                                    'dest': sized_disk_ds_loc,
                                    'error': e},

View File

@@ -23,7 +23,7 @@ from oslo_vmware import vim_util as vutil
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LI, _LW
+from nova.i18n import _
 from nova.virt.vmwareapi import constants
 from nova.virt.vmwareapi import vm_util
@@ -461,8 +461,8 @@ class VMwareVolumeOps(object):
         # The volume has been moved from its original location.
         # Need to consolidate the VMDK files.
-        LOG.info(_LI("The volume's backing has been relocated to %s. Need to "
-                     "consolidate backing disk file."), current_device_path)
+        LOG.info("The volume's backing has been relocated to %s. Need to "
+                 "consolidate backing disk file.", current_device_path)

         # Pick the host and resource pool on which the instance resides.
         # Move the volume to the datastore where the new VMDK file is present.
@@ -479,8 +479,8 @@ class VMwareVolumeOps(object):
         except oslo_vmw_exceptions.FileNotFoundException:
             # Volume's vmdk was moved; remove the device so that we can
             # relocate the volume.
-            LOG.warning(_LW("Virtual disk: %s of volume's backing not found."),
-                        original_device_path, exc_info=True)
+            LOG.warning("Virtual disk: %s of volume's backing not found.",
+                        original_device_path, exc_info=True)
             LOG.debug("Removing disk device of volume's backing and "
                       "reattempting relocate.")
             self.detach_disk_from_vm(volume_ref, instance, original_device)

View File

@@ -34,7 +34,7 @@ import nova.conf
 from nova import context
 from nova import crypto
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova import objects
 from nova import utils
@@ -75,8 +75,8 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
     except XenAPI.Failure as e:
         err_msg = e.details[-1].splitlines()[-1]
         if 'TIMEOUT:' in err_msg:
-            LOG.error(_LE('TIMEOUT: The call to %(method)s timed out. '
-                          'args=%(args)r'),
+            LOG.error('TIMEOUT: The call to %(method)s timed out. '
+                      'args=%(args)r',
                       {'method': method, 'args': args}, instance=instance)
             raise exception.AgentTimeout(method=method.__name__)
         elif 'REBOOT:' in err_msg:
@@ -87,13 +87,13 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
             return _call_agent(session, instance, vm_ref, method,
                                addl_args, timeout, success_codes)
         elif 'NOT IMPLEMENTED:' in err_msg:
-            LOG.error(_LE('NOT IMPLEMENTED: The call to %(method)s is not '
-                          'supported by the agent. args=%(args)r'),
+            LOG.error('NOT IMPLEMENTED: The call to %(method)s is not '
+                      'supported by the agent. args=%(args)r',
                       {'method': method, 'args': args}, instance=instance)
             raise exception.AgentNotImplemented(method=method.__name__)
         else:
-            LOG.error(_LE('The call to %(method)s returned an error: %(e)s. '
-                          'args=%(args)r'),
+            LOG.error('The call to %(method)s returned an error: %(e)s. '
+                      'args=%(args)r',
                       {'method': method, 'args': args, 'e': e},
                       instance=instance)
             raise exception.AgentError(method=method.__name__)
@@ -102,15 +102,15 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
     try:
         ret = jsonutils.loads(ret)
     except TypeError:
-        LOG.error(_LE('The agent call to %(method)s returned an invalid '
-                      'response: %(ret)r. args=%(args)r'),
+        LOG.error('The agent call to %(method)s returned an invalid '
+                  'response: %(ret)r. args=%(args)r',
                   {'method': method, 'ret': ret, 'args': args},
                   instance=instance)
         raise exception.AgentError(method=method.__name__)

     if ret['returncode'] not in success_codes:
-        LOG.error(_LE('The agent call to %(method)s returned '
-                      'an error: %(ret)r. args=%(args)r'),
+        LOG.error('The agent call to %(method)s returned '
+                  'an error: %(ret)r. args=%(args)r',
                   {'method': method, 'ret': ret, 'args': args},
                   instance=instance)
         raise exception.AgentError(method=method.__name__)
@@ -157,9 +157,8 @@ class XenAPIBasedAgent(object):
         self.vm_ref = vm_ref

     def _add_instance_fault(self, error, exc_info):
-        LOG.warning(_LW("Ignoring error while configuring instance with "
-                        "agent: %s"), error,
-                    instance=self.instance, exc_info=True)
+        LOG.warning("Ignoring error while configuring instance with agent: %s",
+                    error, instance=self.instance, exc_info=True)
         try:
             ctxt = context.get_admin_context()
             compute_utils.add_instance_fault_from_exc(
@@ -234,9 +233,8 @@ class XenAPIBasedAgent(object):
             self._call_agent(host_agent.agent_update, args)
         except exception.AgentError as exc:
             # Silently fail for agent upgrades
-            LOG.warning(_LW("Unable to update the agent due "
-                            "to: %(exc)s"), dict(exc=exc),
-                        instance=self.instance)
+            LOG.warning("Unable to update the agent due to: %(exc)s",
+                        dict(exc=exc), instance=self.instance)

     def _exchange_key_with_agent(self):
         dh = SimpleDH()
@@ -360,20 +358,19 @@ def find_guest_agent(base_dir):
                 # reconfigure the network from xenstore data,
                 # so manipulation of files in /etc is not
                 # required
-                LOG.info(_LI('XenServer tools installed in this '
-                             'image are capable of network injection. '
-                             'Networking files will not be'
-                             'manipulated'))
+                LOG.info('XenServer tools installed in this '
+                         'image are capable of network injection. '
+                         'Networking files will not be'
+                         'manipulated')
                 return True
             xe_daemon_filename = os.path.join(base_dir,
                 'usr', 'sbin', 'xe-daemon')
             if os.path.isfile(xe_daemon_filename):
-                LOG.info(_LI('XenServer tools are present '
-                             'in this image but are not capable '
-                             'of network injection'))
+                LOG.info('XenServer tools are present '
+                         'in this image but are not capable '
+                         'of network injection')
             else:
-                LOG.info(_LI('XenServer tools are not '
-                             'installed in this image'))
+                LOG.info('XenServer tools are not installed in this image')
     return False
@@ -386,8 +383,8 @@ def should_use_agent(instance):
        try:
            return strutils.bool_from_string(use_agent_raw, strict=True)
        except ValueError:
-            LOG.warning(_LW("Invalid 'agent_present' value. "
-                            "Falling back to the default."),
+            LOG.warning("Invalid 'agent_present' value. "
+                        "Falling back to the default.",
                         instance=instance)
            return CONF.xenserver.use_agent_default

View File

@@ -34,8 +34,8 @@ from oslo_utils import versionutils
 import six.moves.urllib.parse as urlparse

 import nova.conf
-from nova.i18n import _, _LE, _LW
 from nova import exception
+from nova.i18n import _
 from nova.virt import driver
 from nova.virt.xenapi import host
 from nova.virt.xenapi import pool
@@ -53,10 +53,10 @@ OVERHEAD_PER_VCPU = 1.5
 def invalid_option(option_name, recommended_value):
-    LOG.exception(_LE('Current value of '
-                      'CONF.xenserver.%(option)s option incompatible with '
-                      'CONF.xenserver.independent_compute=True. '
-                      'Consider using "%(recommended)s"'),
+    LOG.exception(_('Current value of '
+                    'CONF.xenserver.%(option)s option incompatible with '
+                    'CONF.xenserver.independent_compute=True. '
+                    'Consider using "%(recommended)s"'),
                   {'option': option_name,
                    'recommended': recommended_value})
     raise exception.NotSupportedWithOption(
@@ -120,7 +120,7 @@ class XenAPIDriver(driver.ComputeDriver):
         try:
             vm_utils.cleanup_attached_vdis(self._session)
         except Exception:
-            LOG.exception(_LE('Failure while cleaning up attached VDIs'))
+            LOG.exception(_('Failure while cleaning up attached VDIs'))

     def instance_exists(self, instance):
         """Checks existence of an instance on the host.
@@ -363,7 +363,7 @@ class XenAPIDriver(driver.ComputeDriver):
             self._initiator = stats['host_other-config']['iscsi_iqn']
             self._hypervisor_hostname = stats['host_hostname']
         except (TypeError, KeyError) as err:
-            LOG.warning(_LW('Could not determine key: %s'), err,
+            LOG.warning('Could not determine key: %s', err,
                         instance=instance)
             self._initiator = None
         return {

View File

@@ -30,7 +30,7 @@ from nova.compute import task_states
 from nova.compute import vm_states
 from nova import context
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import fields as obj_fields
 from nova.virt.xenapi import pool_states
@@ -73,11 +73,11 @@ class Host(object):
                     name = vm_rec['name_label']
                     uuid = _uuid_find(ctxt, host, name)
                     if not uuid:
-                        LOG.info(_LI('Instance %(name)s running on '
-                                     '%(host)s could not be found in '
-                                     'the database: assuming it is a '
-                                     'worker VM and skip ping migration '
-                                     'to a new host'),
+                        LOG.info('Instance %(name)s running on '
+                                 '%(host)s could not be found in '
+                                 'the database: assuming it is a '
+                                 'worker VM and skip ping migration '
+                                 'to a new host',
                                  {'name': name, 'host': host})
                         continue
                     instance = objects.Instance.get_by_uuid(ctxt, uuid)
@@ -105,8 +105,8 @@ class Host(object):
                         break
                     except XenAPI.Failure:
-                        LOG.exception(_LE('Unable to migrate VM %(vm_ref)s '
-                                          'from %(host)s'),
+                        LOG.exception(_('Unable to migrate VM %(vm_ref)s '
+                                        'from %(host)s'),
                                       {'vm_ref': vm_ref, 'host': host})
                         instance.host = host
                         instance.vm_state = vm_states.ACTIVE
@@ -262,7 +262,7 @@ class HostState(object):
                     allocated += vdi_physical
                     physical_used += vdi_physical
             except (ValueError, self._session.XenAPI.Failure):
-                LOG.exception(_LE('Unable to get size for vdi %s'), vdi_ref)
+                LOG.exception(_('Unable to get size for vdi %s'), vdi_ref)

         return (allocated, physical_used)
@@ -298,8 +298,8 @@ class HostState(object):
             del data['host_memory']
         if (data['host_hostname'] !=
                 self._stats.get('host_hostname', data['host_hostname'])):
-            LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. '
-                          'A restart is required to take effect.'),
+            LOG.error('Hostname has changed from %(old)s to %(new)s. '
+                      'A restart is required to take effect.',
                       {'old': self._stats['host_hostname'],
                        'new': data['host_hostname']})
             data['host_hostname'] = self._stats['host_hostname']
@@ -330,7 +330,7 @@ def to_supported_instances(host_capabilities):
             result.append((guestarch, obj_fields.HVType.XEN, ostype))
         except ValueError:
-            LOG.warning(_LW("Failed to extract instance support from %s"),
+            LOG.warning("Failed to extract instance support from %s",
                         capability)

     return result
@@ -401,11 +401,11 @@ def call_xenhost(session, method, arg_dict):
             return ''
         return jsonutils.loads(result)
     except ValueError:
-        LOG.exception(_LE("Unable to get updated status"))
+        LOG.exception(_("Unable to get updated status"))
         return None
     except session.XenAPI.Failure as e:
-        LOG.error(_LE("The call to %(method)s returned "
-                      "an error: %(e)s."), {'method': method, 'e': e})
+        LOG.error("The call to %(method)s returned "
+                  "an error: %(e)s.", {'method': method, 'e': e})
         return e.details[1]
@@ -421,11 +421,11 @@ def _call_host_management(session, method, *args):
             return ''
         return jsonutils.loads(result)
     except ValueError:
-        LOG.exception(_LE("Unable to get updated status"))
+        LOG.exception(_("Unable to get updated status"))
         return None
     except session.XenAPI.Failure as e:
-        LOG.error(_LE("The call to %(method)s returned "
-                      "an error: %(e)s."), {'method': method.__name__, 'e': e})
+        LOG.error("The call to %(method)s returned an error: %(e)s.",
                  {'method': method.__name__, 'e': e})
         return e.details[1]

View File

@@ -25,7 +25,7 @@ import six.moves.urllib.parse as urlparse
 from nova.compute import rpcapi as compute_rpcapi
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _
 from nova.virt.xenapi import pool_states
 from nova.virt.xenapi import vm_utils
@@ -54,8 +54,8 @@ class ResourcePool(object):
                 aggregate.update_metadata(metadata)
                 op(host)
         except Exception:
-            LOG.exception(_LE('Aggregate %(aggregate_id)s: unrecoverable '
-                              'state during operation on %(host)s'),
+            LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable '
+                            'state during operation on %(host)s'),
                           {'aggregate_id': aggregate.id, 'host': host})

     def add_to_aggregate(self, context, aggregate, host, slave_info=None):
@@ -171,7 +171,7 @@ class ResourcePool(object):
                     'master_pass': CONF.xenserver.connection_password, }
             self._session.call_plugin('xenhost.py', 'host_join', args)
         except self._session.XenAPI.Failure as e:
-            LOG.error(_LE("Pool-Join failed: %s"), e)
+            LOG.error("Pool-Join failed: %s", e)
             raise exception.AggregateError(aggregate_id=aggregate_id,
                                            action='add_to_aggregate',
                                            reason=_('Unable to join %s '
@@ -190,7 +190,7 @@ class ResourcePool(object):
             host_ref = self._session.host.get_by_uuid(host_uuid)
             self._session.pool.eject(host_ref)
         except self._session.XenAPI.Failure as e:
-            LOG.error(_LE("Pool-eject failed: %s"), e)
+            LOG.error("Pool-eject failed: %s", e)
             raise exception.AggregateError(aggregate_id=aggregate_id,
                                            action='remove_from_aggregate',
                                            reason=six.text_type(e.details))
@@ -201,7 +201,7 @@ class ResourcePool(object):
             pool_ref = self._session.pool.get_all()[0]
             self._session.pool.set_name_label(pool_ref, aggregate_name)
         except self._session.XenAPI.Failure as e:
-            LOG.error(_LE("Unable to set up pool: %s."), e)
+            LOG.error("Unable to set up pool: %s.", e)
             raise exception.AggregateError(aggregate_id=aggregate_id,
                                            action='add_to_aggregate',
                                            reason=six.text_type(e.details))
@@ -212,7 +212,7 @@ class ResourcePool(object):
             pool_ref = self._session.pool.get_all()[0]
             self._session.pool.set_name_label(pool_ref, '')
         except self._session.XenAPI.Failure as e:
-            LOG.error(_LE("Pool-set_name_label failed: %s"), e)
+            LOG.error("Pool-set_name_label failed: %s", e)
             raise exception.AggregateError(aggregate_id=aggregate_id,
                                            action='remove_from_aggregate',
                                            reason=six.text_type(e.details))

View File

@@ -24,7 +24,6 @@ from nova.compute import power_state
 import nova.conf
 from nova import exception
 from nova.i18n import _
-from nova.i18n import _LW
 from nova.network import model as network_model
 from nova.virt.xenapi import network_utils
 from nova.virt.xenapi import vm_utils
@@ -56,8 +55,8 @@ class XenVIFDriver(object):
         try:
             vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
         except Exception as e:
-            LOG.warning(_LW("Failed to create vif, exception:%(exception)s, "
-                            "vif:%(vif)s"), {'exception': e, 'vif': vif})
+            LOG.warning("Failed to create vif, exception:%(exception)s, "
+                        "vif:%(vif)s", {'exception': e, 'vif': vif})
             raise exception.NovaException(
                 reason=_("Failed to create vif %s") % vif)
@@ -79,7 +78,7 @@ class XenVIFDriver(object):
             self._session.call_xenapi('VIF.destroy', vif_ref)
         except Exception as e:
             LOG.warning(
-                _LW("Fail to unplug vif:%(vif)s, exception:%(exception)s"),
+                "Fail to unplug vif:%(vif)s, exception:%(exception)s",
                 {'vif': vif, 'exception': e}, instance=instance)
             raise exception.NovaException(
                 reason=_("Failed to unplug vif %s") % vif)
@@ -324,8 +323,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
             # delete the patch port pair
             host_network.ovs_del_port(self._session, bridge_name, patch_port1)
         except Exception as e:
-            LOG.warning(_LW("Failed to delete patch port pair for vif %(if)s,"
-                            " exception:%(exception)s"),
+            LOG.warning("Failed to delete patch port pair for vif %(if)s,"
+                        " exception:%(exception)s",
                         {'if': vif, 'exception': e}, instance=instance)
             raise exception.VirtualInterfaceUnplugException(
                 reason=_("Failed to delete patch port pair"))
@@ -356,8 +355,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
                 CONF.xenserver.ovs_integration_bridge,
                 qvo_name)
         except Exception as e:
-            LOG.warning(_LW("Failed to delete bridge for vif %(if)s, "
-                            "exception:%(exception)s"),
+            LOG.warning("Failed to delete bridge for vif %(if)s, "
+                        "exception:%(exception)s",
                         {'if': vif, 'exception': e}, instance=instance)
             raise exception.VirtualInterfaceUnplugException(
                 reason=_("Failed to delete bridge"))
@@ -507,8 +506,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
         try:
             network_ref = self._session.network.create(network_rec)
         except Exception as e:
-            LOG.warning(_LW("Failed to create interim network for vif %(if)s, "
-                            "exception:%(exception)s"),
+            LOG.warning("Failed to create interim network for vif %(if)s, "
+                        "exception:%(exception)s",
                         {'if': vif, 'exception': e})
             raise exception.VirtualInterfacePlugException(
                 _("Failed to create the interim network for vif"))

View File

@@ -49,7 +49,7 @@ from nova.compute import power_state
 from nova.compute import task_states
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova.network import model as network_model
 from nova.objects import diagnostics
 from nova.objects import fields as obj_fields
@@ -263,7 +263,7 @@ def destroy_vm(session, instance, vm_ref):
     try:
         session.VM.destroy(vm_ref)
     except session.XenAPI.Failure:
-        LOG.exception(_LE('Destroy VM failed'))
+        LOG.exception(_('Destroy VM failed'))
         return

     LOG.debug("VM destroyed", instance=instance)
@@ -271,7 +271,7 @@ def destroy_vm(session, instance, vm_ref):
 def clean_shutdown_vm(session, instance, vm_ref):
     if is_vm_shutdown(session, vm_ref):
-        LOG.warning(_LW("VM already halted, skipping shutdown..."),
+        LOG.warning("VM already halted, skipping shutdown...",
                     instance=instance)
         return True
@@ -279,14 +279,14 @@ def clean_shutdown_vm(session, instance, vm_ref):
     try:
         session.call_xenapi('VM.clean_shutdown', vm_ref)
     except session.XenAPI.Failure:
-        LOG.exception(_LE('Shutting down VM (cleanly) failed.'))
+        LOG.exception(_('Shutting down VM (cleanly) failed.'))
         return False
     return True

 def hard_shutdown_vm(session, instance, vm_ref):
     if is_vm_shutdown(session, vm_ref):
-        LOG.warning(_LW("VM already halted, skipping shutdown..."),
+        LOG.warning("VM already halted, skipping shutdown...",
                     instance=instance)
         return True
@@ -294,7 +294,7 @@ def hard_shutdown_vm(session, instance, vm_ref):
     try:
         session.call_xenapi('VM.hard_shutdown', vm_ref)
     except session.XenAPI.Failure:
-        LOG.exception(_LE('Shutting down VM (hard) failed'))
+        LOG.exception(_('Shutting down VM (hard) failed'))
         return False
     return True
@@ -339,15 +339,15 @@ def unplug_vbd(session, vbd_ref, this_vm_ref):
        except session.XenAPI.Failure as exc:
            err = len(exc.details) > 0 and exc.details[0]
            if err == 'DEVICE_ALREADY_DETACHED':
-                LOG.info(_LI('VBD %s already detached'), vbd_ref)
+                LOG.info('VBD %s already detached', vbd_ref)
                return
            elif _should_retry_unplug_vbd(err):
-                LOG.info(_LI('VBD %(vbd_ref)s unplug failed with "%(err)s", '
-                             'attempt %(num_attempt)d/%(max_attempts)d'),
+                LOG.info('VBD %(vbd_ref)s unplug failed with "%(err)s", '
+                         'attempt %(num_attempt)d/%(max_attempts)d',
                         {'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
                          'max_attempts': max_attempts, 'err': err})
            else:
-                LOG.exception(_LE('Unable to unplug VBD'))
+                LOG.exception(_('Unable to unplug VBD'))
                raise exception.StorageError(
                    reason=_('Unable to unplug VBD %s') % vbd_ref)
@@ -362,7 +362,7 @@ def destroy_vbd(session, vbd_ref):
     try:
         session.call_xenapi('VBD.destroy', vbd_ref)
     except session.XenAPI.Failure:
-        LOG.exception(_LE('Unable to destroy VBD'))
+        LOG.exception(_('Unable to destroy VBD'))
         raise exception.StorageError(
             reason=_('Unable to destroy VBD %s') % vbd_ref)
@@ -626,8 +626,7 @@ def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
     # ensure garbage collector has been run
     _scan_sr(session, sr_ref)

-    LOG.info(_LI("Deleted %s snapshots."), number_of_snapshots,
-             instance=instance)
+    LOG.info("Deleted %s snapshots.", number_of_snapshots, instance=instance)

 def remove_old_snapshots(session, instance, vm_ref):
@@ -788,7 +787,7 @@ def _find_cached_image(session, image_id, sr_ref):
     number_found = len(recs)
     if number_found > 0:
         if number_found > 1:
-            LOG.warning(_LW("Multiple base images for image: %s"), image_id)
+            LOG.warning("Multiple base images for image: %s", image_id)
         return list(recs.keys())[0]
@@ -934,8 +933,7 @@ def try_auto_configure_disk(session, vdi_ref, new_gb):
     try:
         _auto_configure_disk(session, vdi_ref, new_gb)
     except exception.CannotResizeDisk as e:
-        msg = _LW('Attempted auto_configure_disk failed because: %s')
-        LOG.warning(msg, e)
+        LOG.warning('Attempted auto_configure_disk failed because: %s', e)

 def _make_partition(session, dev, partition_start, partition_end):
@@ -1204,9 +1202,9 @@ def _create_cached_image(context, session, instance, name_label,
     sr_type = session.call_xenapi('SR.get_type', sr_ref)
     if CONF.use_cow_images and sr_type != "ext":
-        LOG.warning(_LW("Fast cloning is only supported on default local SR "
-                        "of type ext. SR on this system was found to be of "
-                        "type %s. Ignoring the cow flag."), sr_type)
+        LOG.warning("Fast cloning is only supported on default local SR "
+                    "of type ext. SR on this system was found to be of "
+                    "type %s. Ignoring the cow flag.", sr_type)

     @utils.synchronized('xenapi-image-cache' + image_id)
     def _create_cached_image_impl(context, session, instance, name_label,
@@ -1279,8 +1277,8 @@ def create_image(context, session, instance, name_label, image_id,
elif cache_images == 'none': elif cache_images == 'none':
cache = False cache = False
else: else:
LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to" LOG.warning("Unrecognized cache_images value '%s', defaulting to True",
" True"), CONF.xenserver.cache_images) CONF.xenserver.cache_images)
cache = True cache = True
# Fetch (and cache) the image # Fetch (and cache) the image
@ -1295,9 +1293,9 @@ def create_image(context, session, instance, name_label, image_id,
downloaded = True downloaded = True
duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
LOG.info(_LI("Image creation data, cacheable: %(cache)s, " LOG.info("Image creation data, cacheable: %(cache)s, "
"downloaded: %(downloaded)s duration: %(duration).2f secs " "downloaded: %(downloaded)s duration: %(duration).2f secs "
"for image %(image_id)s"), "for image %(image_id)s",
{'image_id': image_id, 'cache': cache, 'downloaded': downloaded, {'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
'duration': duration}) 'duration': duration})
@ -1352,8 +1350,7 @@ def _default_download_handler():
def get_compression_level(): def get_compression_level():
level = CONF.xenserver.image_compression_level level = CONF.xenserver.image_compression_level
if level is not None and (level < 1 or level > 9): if level is not None and (level < 1 or level > 9):
LOG.warning(_LW("Invalid value '%d' for image_compression_level"), LOG.warning("Invalid value '%d' for image_compression_level", level)
level)
return None return None
return level return level
@ -1420,8 +1417,8 @@ def _check_vdi_size(context, session, instance, vdi_uuid):
size = _get_vdi_chain_size(session, vdi_uuid) size = _get_vdi_chain_size(session, vdi_uuid)
if size > allowed_size: if size > allowed_size:
LOG.error(_LE("Image size %(size)d exceeded flavor " LOG.error("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d"), "allowed size %(allowed_size)d",
{'size': size, 'allowed_size': allowed_size}, {'size': size, 'allowed_size': allowed_size},
instance=instance) instance=instance)
@ -1512,8 +1509,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
return {vdi_role: dict(uuid=vdi_uuid, file=None)} return {vdi_role: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e: except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures. # We look for XenAPI and OS failures.
LOG.exception(_LE("Failed to fetch glance image"), LOG.exception(_("Failed to fetch glance image"), instance=instance)
instance=instance)
e.args = e.args + ([dict(type=ImageType.to_string(image_type), e.args = e.args + ([dict(type=ImageType.to_string(image_type),
uuid=vdi_uuid, uuid=vdi_uuid,
file=filename)],) file=filename)],)
@ -1608,7 +1604,7 @@ def lookup_vm_vdis(session, vm_ref):
# This is not an attached volume # This is not an attached volume
vdi_refs.append(vdi_ref) vdi_refs.append(vdi_ref)
except session.XenAPI.Failure: except session.XenAPI.Failure:
LOG.exception(_LE('"Look for the VDIs failed')) LOG.exception(_('"Look for the VDIs failed'))
return vdi_refs return vdi_refs
@ -1796,7 +1792,7 @@ def compile_diagnostics(vm_rec):
return diags return diags
except expat.ExpatError as e: except expat.ExpatError as e:
LOG.exception(_LE('Unable to parse rrd of %s'), e) LOG.exception(_('Unable to parse rrd of %s'), e)
return {"Unable to retrieve diagnostics": e} return {"Unable to retrieve diagnostics": e}
@ -1826,8 +1822,8 @@ def _scan_sr(session, sr_ref=None, max_attempts=4):
if exc.details[0] == 'SR_BACKEND_FAILURE_40': if exc.details[0] == 'SR_BACKEND_FAILURE_40':
if attempt < max_attempts: if attempt < max_attempts:
ctxt.reraise = False ctxt.reraise = False
LOG.warning(_LW("Retry SR scan due to error: " LOG.warning("Retry SR scan due to error: %s",
"%s"), exc) exc)
greenthread.sleep(2 ** attempt) greenthread.sleep(2 ** attempt)
attempt += 1 attempt += 1
do_scan(sr_ref) do_scan(sr_ref)
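The _scan_sr hunk above keeps its retry loop, sleeping 2 ** attempt seconds between attempts. A generic sketch of the same exponential-backoff shape, assuming stdlib time.sleep in place of the eventlet greenthread.sleep used here and an illustrative retriable exception:

    import logging
    import time

    LOG = logging.getLogger(__name__)


    def retry_with_backoff(fn, max_attempts=4, retriable=(IOError,)):
        # Hypothetical generic form of the retry loop in _scan_sr.
        attempt = 1
        while True:
            try:
                return fn()
            except retriable as exc:
                if attempt >= max_attempts:
                    raise
                LOG.warning("Retry due to error: %s (attempt %d/%d)",
                            exc, attempt, max_attempts)
                time.sleep(2 ** attempt)
                attempt += 1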
@ -1859,8 +1855,8 @@ def _find_sr(session):
filter_pattern = tokens[1] filter_pattern = tokens[1]
except IndexError: except IndexError:
# oops, flag is invalid # oops, flag is invalid
LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect " LOG.warning("Flag sr_matching_filter '%s' does not respect "
"formatting convention"), "formatting convention",
CONF.xenserver.sr_matching_filter) CONF.xenserver.sr_matching_filter)
return None return None
@ -1880,10 +1876,10 @@ def _find_sr(session):
if sr_ref: if sr_ref:
return sr_ref return sr_ref
# No SR found! # No SR found!
LOG.error(_LE("XenAPI is unable to find a Storage Repository to " LOG.error("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your " "install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) " "configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'.")) "and/or configure the flag 'sr_matching_filter'.")
return None return None
@ -1946,8 +1942,8 @@ def _get_rrd(server, vm_uuid):
vm_uuid)) vm_uuid))
return xml.read() return xml.read()
except IOError: except IOError:
LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with ' LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'), 'server details: %(server)s.'),
{'vm_uuid': vm_uuid, 'server': server}) {'vm_uuid': vm_uuid, 'server': server})
return None return None
@ -2161,7 +2157,7 @@ def cleanup_attached_vdis(session):
if 'nova_instance_uuid' in vdi_rec['other_config']: if 'nova_instance_uuid' in vdi_rec['other_config']:
# Belongs to an instance and probably left over after an # Belongs to an instance and probably left over after an
# unclean restart # unclean restart
LOG.info(_LI('Disconnecting stale VDI %s from compute domU'), LOG.info('Disconnecting stale VDI %s from compute domU',
vdi_rec['uuid']) vdi_rec['uuid'])
unplug_vbd(session, vbd_ref, this_vm_ref) unplug_vbd(session, vbd_ref, this_vm_ref)
destroy_vbd(session, vbd_ref) destroy_vbd(session, vbd_ref)
@ -2224,12 +2220,11 @@ def _get_dom0_ref(session):
def get_this_vm_uuid(session): def get_this_vm_uuid(session):
if CONF.xenserver.independent_compute: if CONF.xenserver.independent_compute:
msg = _LE("This host has been configured with the independent " LOG.error("This host has been configured with the independent "
"compute flag. An operation has been attempted which is " "compute flag. An operation has been attempted which is "
"incompatible with this flag, but should have been " "incompatible with this flag, but should have been "
"caught earlier. Please raise a bug against the " "caught earlier. Please raise a bug against the "
"OpenStack Nova project") "OpenStack Nova project")
LOG.error(msg)
raise exception.NotSupportedWithOption( raise exception.NotSupportedWithOption(
operation='uncaught operation', operation='uncaught operation',
option='CONF.xenserver.independent_compute') option='CONF.xenserver.independent_compute')
@ -2484,7 +2479,7 @@ def _mounted_processing(device, key, net, metadata):
vfs = vfsimpl.VFSLocalFS( vfs = vfsimpl.VFSLocalFS(
imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW), imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW),
imgdir=tmpdir) imgdir=tmpdir)
LOG.info(_LI('Manipulating interface files directly')) LOG.info('Manipulating interface files directly')
# for xenapi, we don't 'inject' admin_password here, # for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we # it's handled at instance startup time, nor do we
# support injecting arbitrary files here. # support injecting arbitrary files here.
@ -2493,8 +2488,8 @@ def _mounted_processing(device, key, net, metadata):
finally: finally:
utils.execute('umount', dev_path, run_as_root=True) utils.execute('umount', dev_path, run_as_root=True)
else: else:
LOG.info(_LI('Failed to mount filesystem (expected for ' LOG.info('Failed to mount filesystem (expected for '
'non-linux instances): %s'), err) 'non-linux instances): %s', err)
def ensure_correct_host(session): def ensure_correct_host(session):
@ -2607,14 +2602,14 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
""" """
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
if not boot_menu_url: if not boot_menu_url:
LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to' LOG.warning('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...'), instance=instance) ' enter URL manually...', instance=instance)
return return
network_name = CONF.xenserver.ipxe_network_name network_name = CONF.xenserver.ipxe_network_name
if not network_name: if not network_name:
LOG.warning(_LW('ipxe_network_name not set, user will have to' LOG.warning('ipxe_network_name not set, user will have to'
' enter IP manually...'), instance=instance) ' enter IP manually...', instance=instance)
return return
network = None network = None
@ -2624,8 +2619,8 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
break break
if not network: if not network:
LOG.warning(_LW("Unable to find network matching '%(network_name)s', " LOG.warning("Unable to find network matching '%(network_name)s', "
"user will have to enter IP manually..."), "user will have to enter IP manually...",
{'network_name': network_name}, instance=instance) {'network_name': network_name}, instance=instance)
return return
@ -2649,7 +2644,7 @@ def handle_ipxe_iso(session, instance, cd_vdi, network_info):
except session.XenAPI.Failure as exc: except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3] _type, _method, error = exc.details[:3]
if error == 'CommandNotFound': if error == 'CommandNotFound':
LOG.warning(_LW("ISO creation tool '%s' does not exist."), LOG.warning("ISO creation tool '%s' does not exist.",
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance) CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
else: else:
raise raise
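Note that every rewritten call keeps its substitution arguments outside the format string, e.g. LOG.info('... %s', value) rather than LOG.info('... %s' % value). The logger interpolates only when the record is actually emitted, so disabled levels cost nothing and a formatting error cannot crash the caller. A stdlib illustration (the UUID is made up):

    import logging

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)

    vdi_uuid = 'c0ffee00-0000-4000-8000-000000000000'  # illustrative value

    # Lazy: the %s is never evaluated because INFO is below the threshold.
    LOG.info('Disconnecting stale VDI %s from compute domU', vdi_uuid)

    # Eager: the string is built before the level check; avoid this form.
    LOG.info('Disconnecting stale VDI %s from compute domU' % vdi_uuid)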
@ -47,7 +47,7 @@ import nova.conf
from nova.console import type as ctype from nova.console import type as ctype
from nova import context as nova_context from nova import context as nova_context
from nova import exception from nova import exception
from nova.i18n import _, _LE, _LI, _LW from nova.i18n import _
from nova import objects from nova import objects
from nova.objects import fields as obj_fields from nova.objects import fields as obj_fields
from nova.pci import manager as pci_manager from nova.pci import manager as pci_manager
@ -452,8 +452,8 @@ class VMOps(object):
vm_utils.handle_ipxe_iso( vm_utils.handle_ipxe_iso(
self._session, instance, vdis['iso'], network_info) self._session, instance, vdis['iso'], network_info)
else: else:
LOG.warning(_LW('ipxe_boot is True but no ISO image ' LOG.warning('ipxe_boot is True but no ISO image found',
'found'), instance=instance) instance=instance)
if resize: if resize:
self._resize_up_vdis(instance, vdis) self._resize_up_vdis(instance, vdis)
@ -620,7 +620,7 @@ class VMOps(object):
def _handle_neutron_event_timeout(self, instance, undo_mgr): def _handle_neutron_event_timeout(self, instance, undo_mgr):
# We didn't get callback from Neutron within given time # We didn't get callback from Neutron within given time
LOG.warning(_LW('Timeout waiting for vif plugging callback'), LOG.warning('Timeout waiting for vif plugging callback',
instance=instance) instance=instance)
if CONF.vif_plugging_is_fatal: if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException() raise exception.VirtualInterfaceCreateException()
@ -633,8 +633,8 @@ class VMOps(object):
self._update_last_dom_id(vm_ref) self._update_last_dom_id(vm_ref)
def _neutron_failed_callback(self, event_name, instance): def _neutron_failed_callback(self, event_name, instance):
LOG.warning(_LW('Neutron Reported failure on event %(event)s'), LOG.warning('Neutron Reported failure on event %(event)s',
{'event': event_name}, instance=instance) {'event': event_name}, instance=instance)
if CONF.vif_plugging_is_fatal: if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException() raise exception.VirtualInterfaceCreateException()
@ -1025,9 +1025,8 @@ class VMOps(object):
undo_mgr, old_vdi_ref) undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid) transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception as error: except Exception as error:
LOG.exception(_LE("_migrate_disk_resizing_down failed. " LOG.exception(_("_migrate_disk_resizing_down failed. Restoring"
"Restoring orig vm"), "orig vm"), instance=instance)
instance=instance)
undo_mgr._rollback() undo_mgr._rollback()
raise exception.InstanceFaultRollback(error) raise exception.InstanceFaultRollback(error)
@ -1201,15 +1200,15 @@ class VMOps(object):
transfer_ephemeral_disks_then_all_leaf_vdis() transfer_ephemeral_disks_then_all_leaf_vdis()
except Exception as error: except Exception as error:
LOG.exception(_LE("_migrate_disk_resizing_up failed. " LOG.exception(_("_migrate_disk_resizing_up failed. "
"Restoring orig vm due_to: %s."), error, "Restoring orig vm due_to: %s."),
instance=instance) error, instance=instance)
try: try:
self._restore_orig_vm_and_cleanup_orphan(instance) self._restore_orig_vm_and_cleanup_orphan(instance)
# TODO(johngarbutt) should also cleanup VHDs at destination # TODO(johngarbutt) should also cleanup VHDs at destination
except Exception as rollback_error: except Exception as rollback_error:
LOG.warning(_LW("_migrate_disk_resizing_up failed to " LOG.warning("_migrate_disk_resizing_up failed to "
"rollback: %s"), rollback_error, "rollback: %s", rollback_error,
instance=instance) instance=instance)
raise exception.InstanceFaultRollback(error) raise exception.InstanceFaultRollback(error)
@ -1336,14 +1335,14 @@ class VMOps(object):
details = exc.details details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'): details[-1] == 'halted'):
LOG.info(_LI("Starting halted instance found during reboot"), LOG.info("Starting halted instance found during reboot",
instance=instance) instance=instance)
self._start(instance, vm_ref=vm_ref, self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback) bad_volumes_callback=bad_volumes_callback)
return return
elif details[0] == 'SR_BACKEND_FAILURE_46': elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warning(_LW("Reboot failed due to bad volumes, detaching " LOG.warning("Reboot failed due to bad volumes, detaching "
"bad volumes and starting halted instance"), "bad volumes and starting halted instance",
instance=instance) instance=instance)
self._start(instance, vm_ref=vm_ref, self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback) bad_volumes_callback=bad_volumes_callback)
@ -1420,7 +1419,7 @@ class VMOps(object):
# Skip the update when not possible, as the updated metadata will # Skip the update when not possible, as the updated metadata will
# get added when the VM is being booted up at the end of the # get added when the VM is being booted up at the end of the
# resize or rebuild. # resize or rebuild.
LOG.warning(_LW("Unable to update metadata, VM not found."), LOG.warning("Unable to update metadata, VM not found.",
instance=instance, exc_info=True) instance=instance, exc_info=True)
return return
@ -1540,7 +1539,7 @@ class VMOps(object):
destroy_* methods are internal. destroy_* methods are internal.
""" """
LOG.info(_LI("Destroying VM"), instance=instance) LOG.info("Destroying VM", instance=instance)
# We don't use _get_vm_opaque_ref because the instance may # We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid # truly not exist because of a failure during build. A valid
@ -1572,7 +1571,7 @@ class VMOps(object):
""" """
if vm_ref is None: if vm_ref is None:
LOG.warning(_LW("VM is not present, skipping destroy..."), LOG.warning("VM is not present, skipping destroy...",
instance=instance) instance=instance)
# NOTE(alaski): There should not be a block device mapping here, # NOTE(alaski): There should not be a block device mapping here,
# but if there is it very likely means there was an error cleaning # but if there is it very likely means there was an error cleaning
@ -1593,24 +1592,24 @@ class VMOps(object):
sr_uuid) sr_uuid)
if not sr_ref: if not sr_ref:
connection_data = bdm['connection_info']['data'] connection_data = bdm['connection_info']['data']
(sr_uuid, _, _) = volume_utils.parse_sr_info( (sr_uuid, unused, unused) = volume_utils.parse_sr_info(
connection_data) connection_data)
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_ref = volume_utils.find_sr_by_uuid(self._session,
sr_uuid) sr_uuid)
except Exception: except Exception:
LOG.exception(_LE('Failed to find an SR for volume %s'), LOG.exception(_('Failed to find an SR for volume %s'),
volume_id, instance=instance) volume_id, instance=instance)
try: try:
if sr_ref: if sr_ref:
volume_utils.forget_sr(self._session, sr_ref) volume_utils.forget_sr(self._session, sr_ref)
else: else:
LOG.error(_LE('Volume %s is associated with the ' LOG.error('Volume %s is associated with the '
'instance but no SR was found for it'), volume_id, 'instance but no SR was found for it',
instance=instance) volume_id, instance=instance)
except Exception: except Exception:
LOG.exception(_LE('Failed to forget the SR for volume %s'), LOG.exception(_('Failed to forget the SR for volume %s'),
volume_id, instance=instance) volume_id, instance=instance)
return return
# NOTE(alaski): Attempt clean shutdown first if there's an attached # NOTE(alaski): Attempt clean shutdown first if there's an attached
@ -1709,7 +1708,7 @@ class VMOps(object):
try: try:
vm_ref = self._get_vm_opaque_ref(instance) vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound: except exception.NotFound:
LOG.warning(_LW("VM is not present, skipping soft delete..."), LOG.warning("VM is not present, skipping soft delete...",
instance=instance) instance=instance)
else: else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
@ -1758,11 +1757,11 @@ class VMOps(object):
timeout=timeout) timeout=timeout)
if instances_info["instance_count"] > 0: if instances_info["instance_count"] > 0:
LOG.info(_LI("Found %(instance_count)d hung reboots " LOG.info("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds"), instances_info) "older than %(timeout)d seconds", instances_info)
for instance in instances: for instance in instances:
LOG.info(_LI("Automatically hard rebooting"), instance=instance) LOG.info("Automatically hard rebooting", instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD") self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None): def get_info(self, instance, vm_ref=None):
@ -1818,7 +1817,7 @@ class VMOps(object):
raw_console_data = vm_management.get_console_log( raw_console_data = vm_management.get_console_log(
self._session, dom_id) self._session, dom_id)
except self._session.XenAPI.Failure: except self._session.XenAPI.Failure:
LOG.exception(_LE("Guest does not have a console available")) LOG.exception(_("Guest does not have a console available"))
raise exception.ConsoleNotAvailable() raise exception.ConsoleNotAvailable()
return zlib.decompress(base64.b64decode(raw_console_data)) return zlib.decompress(base64.b64decode(raw_console_data))
@ -2048,15 +2047,15 @@ class VMOps(object):
def _process_plugin_exception(self, plugin_exception, method, instance): def _process_plugin_exception(self, plugin_exception, method, instance):
err_msg = plugin_exception.details[-1].splitlines()[-1] err_msg = plugin_exception.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg: if 'TIMEOUT:' in err_msg:
LOG.error(_LE('TIMEOUT: The call to %s timed out'), LOG.error('TIMEOUT: The call to %s timed out',
method, instance=instance) method, instance=instance)
return {'returncode': 'timeout', 'message': err_msg} return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg: elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_LE('NOT IMPLEMENTED: The call to %s is not supported' LOG.error('NOT IMPLEMENTED: The call to %s is not supported'
' by the agent.'), method, instance=instance) ' by the agent.', method, instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg} return {'returncode': 'notimplemented', 'message': err_msg}
else: else:
LOG.error(_LE('The call to %(method)s returned an error: %(e)s.'), LOG.error('The call to %(method)s returned an error: %(e)s.',
{'method': method, 'e': plugin_exception}, {'method': method, 'e': plugin_exception},
instance=instance) instance=instance)
return {'returncode': 'error', 'message': err_msg} return {'returncode': 'error', 'message': err_msg}
@ -2156,7 +2155,7 @@ class VMOps(object):
nwref, nwref,
options) options)
except self._session.XenAPI.Failure: except self._session.XenAPI.Failure:
LOG.exception(_LE('Migrate Receive failed')) LOG.exception(_('Migrate Receive failed'))
msg = _('Migrate Receive failed') msg = _('Migrate Receive failed')
raise exception.MigrationPreCheckError(reason=msg) raise exception.MigrationPreCheckError(reason=msg)
return migrate_data return migrate_data
@ -2434,7 +2433,7 @@ class VMOps(object):
self._call_live_migrate_command( self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data) "VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure: except self._session.XenAPI.Failure:
LOG.exception(_LE('Migrate Send failed')) LOG.exception(_('Migrate Send failed'))
raise exception.MigrationError( raise exception.MigrationError(
reason=_('Migrate Send failed')) reason=_('Migrate Send failed'))
@ -2491,7 +2490,7 @@ class VMOps(object):
if sr_ref: if sr_ref:
volume_utils.forget_sr(self._session, sr_ref) volume_utils.forget_sr(self._session, sr_ref)
except Exception: except Exception:
LOG.exception(_LE('Failed to forget the SR for volume %s'), LOG.exception(_('Failed to forget the SR for volume %s'),
params['id'], instance=instance) params['id'], instance=instance)
# delete VIF and network in destination host # delete VIF and network in destination host
@ -2505,8 +2504,8 @@ class VMOps(object):
try: try:
self.vif_driver.delete_network_and_bridge(instance, vif) self.vif_driver.delete_network_and_bridge(instance, vif)
except Exception: except Exception:
LOG.exception(_LE('Failed to delete networks and bridges with ' LOG.exception(_('Failed to delete networks and bridges with '
'VIF %s'), vif['id'], instance=instance) 'VIF %s'), vif['id'], instance=instance)
def get_per_instance_usage(self): def get_per_instance_usage(self):
"""Get usage info about each active instance.""" """Get usage info about each active instance."""
@ -2570,13 +2569,13 @@ class VMOps(object):
self.firewall_driver.setup_basic_filtering(instance, [vif]) self.firewall_driver.setup_basic_filtering(instance, [vif])
except exception.NovaException: except exception.NovaException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE('attach network interface %s failed.'), LOG.exception(_('attach network interface %s failed.'),
vif['id'], instance=instance) vif['id'], instance=instance)
try: try:
self.vif_driver.unplug(instance, vif, vm_ref) self.vif_driver.unplug(instance, vif, vm_ref)
except exception.NovaException: except exception.NovaException:
# if unplug failed, no need to raise exception # if unplug failed, no need to raise exception
LOG.warning(_LW('Unplug VIF %s failed.'), LOG.warning('Unplug VIF %s failed.',
vif['id'], instance=instance) vif['id'], instance=instance)
_attach_interface(instance, vm_ref, vif) _attach_interface(instance, vm_ref, vif)
@ -2589,5 +2588,5 @@ class VMOps(object):
self.vif_driver.unplug(instance, vif, vm_ref) self.vif_driver.unplug(instance, vif, vm_ref)
except exception.NovaException: except exception.NovaException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE('detach network interface %s failed.'), LOG.exception(_('detach network interface %s failed.'),
vif['id'], instance=instance) vif['id'], instance=instance)
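Several handlers above log from inside excutils.save_and_reraise_exception(), which re-raises the original exception, traceback intact, when the with block exits normally. A stripped-down sketch of the attach path, assuming an illustrative vif_driver object:

    import logging

    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)


    def attach_interface(vif_driver, instance, vif, vm_ref):
        # Hypothetical condensation of VMOps._attach_interface above.
        try:
            vif_driver.plug(instance, vif, vm_ref)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Runs before the original exception is re-raised.
                LOG.exception('attach network interface %s failed.',
                              vif['id'])
                try:
                    vif_driver.unplug(instance, vif, vm_ref)
                except Exception:
                    # Best-effort rollback; never mask the original error.
                    LOG.warning('Unplug VIF %s failed.', vif['id'])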
@ -30,8 +30,7 @@ import six
import nova.conf import nova.conf
from nova import exception from nova import exception
from nova.i18n import _, _LE, _LW from nova.i18n import _
CONF = nova.conf.CONF CONF = nova.conf.CONF
@ -176,7 +175,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
session.call_xenapi("SR.scan", sr_ref) session.call_xenapi("SR.scan", sr_ref)
vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun) vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
except session.XenAPI.Failure: except session.XenAPI.Failure:
LOG.exception(_LE('Unable to introduce VDI on SR')) LOG.exception(_('Unable to introduce VDI on SR'))
raise exception.StorageError( raise exception.StorageError(
reason=_('Unable to introduce VDI on SR %s') % sr_ref) reason=_('Unable to introduce VDI on SR %s') % sr_ref)
@ -191,7 +190,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
LOG.debug(vdi_rec) LOG.debug(vdi_rec)
except session.XenAPI.Failure: except session.XenAPI.Failure:
LOG.exception(_LE('Unable to get record of VDI')) LOG.exception(_('Unable to get record of VDI'))
raise exception.StorageError( raise exception.StorageError(
reason=_('Unable to get record of VDI %s on') % vdi_ref) reason=_('Unable to get record of VDI %s on') % vdi_ref)
@ -213,7 +212,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
vdi_rec['xenstore_data'], vdi_rec['xenstore_data'],
vdi_rec['sm_config']) vdi_rec['sm_config'])
except session.XenAPI.Failure: except session.XenAPI.Failure:
LOG.exception(_LE('Unable to introduce VDI for SR')) LOG.exception(_('Unable to introduce VDI for SR'))
raise exception.StorageError( raise exception.StorageError(
reason=_('Unable to introduce VDI for SR %s') % sr_ref) reason=_('Unable to introduce VDI for SR %s') % sr_ref)
@ -242,7 +241,7 @@ def purge_sr(session, sr_ref):
for vdi_ref in vdi_refs: for vdi_ref in vdi_refs:
vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref) vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
if vbd_refs: if vbd_refs:
LOG.warning(_LW('Cannot purge SR with referenced VDIs')) LOG.warning('Cannot purge SR with referenced VDIs')
return return
forget_sr(session, sr_ref) forget_sr(session, sr_ref)
@ -259,16 +258,16 @@ def _unplug_pbds(session, sr_ref):
try: try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref) pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
except session.XenAPI.Failure as exc: except session.XenAPI.Failure as exc:
LOG.warning(_LW('Ignoring exception %(exc)s when getting PBDs' LOG.warning('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref}) ' for %(sr_ref)s', {'exc': exc, 'sr_ref': sr_ref})
return return
for pbd in pbds: for pbd in pbds:
try: try:
session.call_xenapi("PBD.unplug", pbd) session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure as exc: except session.XenAPI.Failure as exc:
LOG.warning(_LW('Ignoring exception %(exc)s when unplugging' LOG.warning('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd}) ' PBD %(pbd)s', {'exc': exc, 'pbd': pbd})
def get_device_number(mountpoint): def get_device_number(mountpoint):
@ -291,7 +290,7 @@ def _mountpoint_to_number(mountpoint):
elif re.match('^[0-9]+$', mountpoint): elif re.match('^[0-9]+$', mountpoint):
return int(mountpoint, 10) return int(mountpoint, 10)
else: else:
LOG.warning(_LW('Mountpoint cannot be translated: %s'), mountpoint) LOG.warning('Mountpoint cannot be translated: %s', mountpoint)
return -1 return -1
@ -311,7 +310,7 @@ def find_sr_from_vbd(session, vbd_ref):
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref) sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
except session.XenAPI.Failure: except session.XenAPI.Failure:
LOG.exception(_LE('Unable to find SR from VBD')) LOG.exception(_('Unable to find SR from VBD'))
raise exception.StorageError( raise exception.StorageError(
reason=_('Unable to find SR from VBD %s') % vbd_ref) reason=_('Unable to find SR from VBD %s') % vbd_ref)
return sr_ref return sr_ref
@ -322,7 +321,7 @@ def find_sr_from_vdi(session, vdi_ref):
try: try:
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref) sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
except session.XenAPI.Failure: except session.XenAPI.Failure:
LOG.exception(_LE('Unable to find SR from VDI')) LOG.exception(_('Unable to find SR from VDI'))
raise exception.StorageError( raise exception.StorageError(
reason=_('Unable to find SR from VDI %s') % vdi_ref) reason=_('Unable to find SR from VDI %s') % vdi_ref)
return sr_ref return sr_ref
@ -393,6 +392,5 @@ def stream_to_vdi(session, instance, disk_format,
_stream_to_vdi(conn, vdi_import_path, file_size, file_obj) _stream_to_vdi(conn, vdi_import_path, file_size, file_obj)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE('Streaming disk to VDI failed ' LOG.error('Streaming disk to VDI failed with error: %s',
'with error: %s'),
e, instance=instance) e, instance=instance)
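The error paths in this file consistently use LOG.exception rather than LOG.error: it emits an ERROR-level record and appends the active traceback, which is why it appears only inside except blocks. A minimal sketch, assuming a stand-in session object:

    import logging

    LOG = logging.getLogger(__name__)


    def find_sr_from_vdi(session, vdi_ref):
        # Hypothetical reduction of the helper above.
        try:
            return session.call_xenapi("VDI.get_SR", vdi_ref)
        except Exception:
            # ERROR-level record plus the current traceback; only
            # meaningful while an exception is being handled.
            LOG.exception('Unable to find SR from VDI %s', vdi_ref)
            raise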
@ -22,7 +22,6 @@ from oslo_utils import excutils
from oslo_utils import strutils from oslo_utils import strutils
from nova import exception from nova import exception
from nova.i18n import _LI, _LW
from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils from nova.virt.xenapi import volume_utils
@ -59,7 +58,7 @@ class VolumeOps(object):
vdi_ref = self._connect_hypervisor_to_volume(sr_ref, vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
connection_data) connection_data)
vdi_uuid = self._session.VDI.get_uuid(vdi_ref) vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid) LOG.info('Connected volume (vdi_uuid): %s', vdi_uuid)
if vm_ref: if vm_ref:
self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name, self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
@ -127,8 +126,8 @@ class VolumeOps(object):
LOG.debug("Plugging VBD: %s", vbd_ref) LOG.debug("Plugging VBD: %s", vbd_ref)
self._session.VBD.plug(vbd_ref, vm_ref) self._session.VBD.plug(vbd_ref, vm_ref)
LOG.info(_LI('Dev %(dev_number)s attached to' LOG.info('Dev %(dev_number)s attached to'
' instance %(instance_name)s'), ' instance %(instance_name)s',
{'instance_name': instance_name, 'dev_number': dev_number}) {'instance_name': instance_name, 'dev_number': dev_number})
def detach_volume(self, connection_info, instance_name, mountpoint): def detach_volume(self, connection_info, instance_name, mountpoint):
@ -145,12 +144,12 @@ class VolumeOps(object):
if vbd_ref is None: if vbd_ref is None:
# NOTE(sirp): If we don't find the VBD then it must have been # NOTE(sirp): If we don't find the VBD then it must have been
# detached previously. # detached previously.
LOG.warning(_LW('Skipping detach because VBD for %s was ' LOG.warning('Skipping detach because VBD for %s was not found',
'not found'), instance_name) instance_name)
else: else:
self._detach_vbds_and_srs(vm_ref, [vbd_ref]) self._detach_vbds_and_srs(vm_ref, [vbd_ref])
LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance' LOG.info('Mountpoint %(mountpoint)s detached from instance'
' %(instance_name)s'), ' %(instance_name)s',
{'instance_name': instance_name, {'instance_name': instance_name,
'mountpoint': mountpoint}) 'mountpoint': mountpoint})
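When two or more values are interpolated, these calls use named placeholders with a dict rather than positional %s, so each field is self-describing and the argument order cannot silently drift. The detach message above, reduced to a runnable line with made-up values:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    instance_name = 'instance-00000001'  # illustrative values
    mountpoint = '/dev/xvdb'

    LOG.info('Mountpoint %(mountpoint)s detached from instance'
             ' %(instance_name)s',
             {'instance_name': instance_name, 'mountpoint': mountpoint})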