Replace `_` with `_LI` in all LOG.info calls - part 1

oslo.i18n uses different marker functions to separate translatable
messages into different catalogs, so that translation teams can
prioritize which catalogs to translate. For details, please refer to:
http://docs.openstack.org/developer/oslo.i18n/guidelines.html#guidelines-for-use-in-openstack
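
As a quick reference (a summary, not part of this commit), these are the
marker functions exposed by nova.i18n and the log level each one is
meant for:

    from nova.i18n import _, _LC, _LE, _LI, _LW

    # _()   - user-facing strings such as exception messages
    # _LI() - LOG.info
    # _LW() - LOG.warn / LOG.warning
    # _LE() - LOG.error and LOG.exception
    # _LC() - LOG.critical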

Some places in the network directory were missing these marker
functions. This commit makes the following changes:
* Add the missing marker functions
* Pass variables to log messages as separate arguments (using ',')
  instead of interpolating them with '%'

A hacking rule (N328) is also added to check that LOG.info messages
are translated with _LI().
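
For illustration, this is the pattern applied throughout the files
below; the "Loading network driver" message is taken from this commit,
while the surrounding scaffolding is only a sketch:

    import logging

    from nova.i18n import _, _LI

    LOG = logging.getLogger(__name__)
    network_driver = 'nova.network.linux_net'  # example value only

    # Before: generic _() marker and eager %-interpolation.
    LOG.info(_("Loading network driver '%s'") % network_driver)

    # After: _LI() puts the message in the info catalog, and passing the
    # variable as a logging argument defers interpolation to the logger.
    LOG.info(_LI("Loading network driver '%s'"), network_driver)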

Change-Id: I96766d723b01082339876ed94bbaa77783322b8c
Mike Durnosvistov 2014-09-18 13:08:37 +03:00
parent b32ccb7b41
commit 8431670ef8
27 changed files with 124 additions and 111 deletions

View File

@@ -40,6 +40,7 @@ Nova Specific Commandments
 - [N325] str() and unicode() cannot be used on an exception. Remove use or use six.text_type()
 - [N326] Translated messages cannot be concatenated. String should be included in translated message.
 - [N327] assert_called_once() is not a valid method
+- [N328] Validate that LOG.info messages use _LI.
 Creating Unit Tests
 -------------------

View File

@@ -56,7 +56,9 @@ asse_equal_start_with_none_re = re.compile(
     r"assertEqual\(None,")
 conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
 log_translation = re.compile(
-    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
+    r"(.)*LOG\.(audit|error|warn|warning|critical|exception)\(\s*('|\")")
+log_translation_info = re.compile(
+    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
 translated_log = re.compile(
     r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
     "\(\s*_\(\s*('|\")")
@@ -294,10 +296,22 @@ def validate_log_translations(logical_line, physical_line, filename):
     # Translations are not required in the test directory
     # and the Xen utilities
     if ("nova/tests" in filename or
-        "plugins/xenserver/xenapi/etc/xapi.d" in filename):
+        "plugins/xenserver/xenapi/etc/xapi.d" in filename or
+        # TODO(Mike_D):Needs to be remove with:
+        # Iaebb239ef20a0da3df1e3552baf26f412d0fcdc0
+        "nova/compute" in filename or
+        "nova/cells" in filename or
+        "nova/image" in filename or
+        "nova/conductor" in filename or
+        "nova/wsgi.py" in filename or
+        "nova/filters.py" in filename or
+        "nova/db" in filename):
         return
     if pep8.noqa(physical_line):
         return
+    msg = "N328: LOG.info messages require translations `_LI()`!"
+    if log_translation_info.match(logical_line):
+        yield (0, msg)
     msg = "N321: Log messages require translations!"
     if log_translation.match(logical_line):
         yield (0, msg)
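
As a standalone sketch (not part of the diff above), this is how the new
N328 regex behaves; the sample lines are hypothetical:

    import re

    # The regex added above for the N328 check.
    log_translation_info = re.compile(r"(.)*LOG\.(info)\(\s*(_\(|'|\")")

    # Flagged: a bare string or the generic _() marker right after LOG.info(.
    assert log_translation_info.match("LOG.info('no marker')")
    assert log_translation_info.match("LOG.info(_('generic marker'))")

    # Not flagged: _LI() does not match the pattern, so the line passes N328.
    assert log_translation_info.match("LOG.info(_LI('translated'))") is None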

View File

@@ -22,7 +22,7 @@ from oslo.config import cfg
 from nova.compute import flavors
 from nova import exception
-from nova.i18n import _
+from nova.i18n import _LI
 from nova.network import base_api
 from nova.network import floating_ips
 from nova.network import model as network_model
@@ -224,8 +224,8 @@ class API(base_api.NetworkAPI):
         if orig_instance_uuid:
             msg_dict = dict(address=floating_address,
                             instance_id=orig_instance_uuid)
-            LOG.info(_('re-assign floating IP %(address)s from '
-                       'instance %(instance_id)s') % msg_dict)
+            LOG.info(_LI('re-assign floating IP %(address)s from '
+                         'instance %(instance_id)s'), msg_dict)
             orig_instance = objects.Instance.get_by_uuid(context,
                                                          orig_instance_uuid)

View File

@@ -17,7 +17,7 @@ import sys
 from oslo.config import cfg
 from oslo.utils import importutils
-from nova.i18n import _, _LE
+from nova.i18n import _LE, _LI
 from nova.openstack.common import log as logging
 driver_opts = [
@@ -39,6 +39,6 @@ def load_network_driver(network_driver=None):
         LOG.error(_LE("Network driver option required, but not specified"))
         sys.exit(1)
-    LOG.info(_("Loading network driver '%s'") % network_driver)
+    LOG.info(_LI("Loading network driver '%s'"), network_driver)
     return importutils.import_module(network_driver)

View File

@@ -25,7 +25,7 @@ import six
 from nova import context
 from nova.db import base
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.network import rpcapi as network_rpcapi
 from nova import objects
 from nova.openstack.common import log as logging
@@ -173,7 +173,7 @@ class FloatingIP(object):
                                                   address,
                                                   affect_auto_assigned=True)
             except exception.FloatingIpNotAssociated:
-                LOG.info(_("Floating IP %s is not associated. Ignore."),
+                LOG.info(_LI("Floating IP %s is not associated. Ignore."),
                          address)
             # deallocate if auto_assigned
             if floating_ip.auto_assigned:
@@ -532,7 +532,7 @@ class FloatingIP(object):
         if not floating_addresses or (source and source == dest):
             return
-        LOG.info(_("Starting migration network for instance %s"),
+        LOG.info(_LI("Starting migration network for instance %s"),
                  instance_uuid)
         for address in floating_addresses:
             floating_ip = objects.FloatingIP.get_by_address(context, address)
@@ -567,7 +567,7 @@ class FloatingIP(object):
         if not floating_addresses or (source and source == dest):
             return
-        LOG.info(_("Finishing migration network for instance %s"),
+        LOG.info(_LI("Finishing migration network for instance %s"),
                  instance_uuid)
         for address in floating_addresses:

View File

@@ -19,7 +19,7 @@ import tempfile
 from oslo.config import cfg
 from nova import exception
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.network import dns_driver
 from nova.openstack.common import log as logging
@@ -198,7 +198,7 @@ class MiniDNS(dns_driver.DNSDriver):
                     entry['domain'] != fqdomain.lower()):
                 outfile.write(line)
             else:
-                LOG.info(_("deleted %s"), entry)
+                LOG.info(_LI("deleted %s"), entry)
                 deleted = True
         infile.close()
         outfile.close()

View File

@@ -28,7 +28,7 @@ from nova.api.openstack import extensions
 from nova.compute import flavors
 from nova.compute import utils as compute_utils
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _, _LE, _LI, _LW
 from nova.network import base_api
 from nova.network import model as network_model
 from nova.network import neutronv2
@@ -530,7 +530,7 @@ class API(base_api.NetworkAPI):
                 neutronv2.get_client(context).update_port(port,
                                                           port_req_body)
             except Exception:
-                LOG.info(_('Unable to reset device ID for port %s'), port,
+                LOG.info(_LI('Unable to reset device ID for port %s'), port,
                          instance=instance)
         self._delete_ports(neutron, instance, ports, raise_if_fail=True)
@@ -946,8 +946,8 @@ class API(base_api.NetworkAPI):
             msg_dict = dict(address=floating_address,
                             instance_id=orig_instance_uuid)
-            LOG.info(_('re-assign floating IP %(address)s from '
-                       'instance %(instance_id)s') % msg_dict)
+            LOG.info(_LI('re-assign floating IP %(address)s from '
+                         'instance %(instance_id)s'), msg_dict)
             orig_instance = objects.Instance.get_by_uuid(context,
                                                          orig_instance_uuid)

View File

@@ -24,7 +24,7 @@ from webob import exc
 from nova.compute import api as compute_api
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.network import neutronv2
 from nova.network.security_group import security_group_base
 from nova import objects
@@ -437,8 +437,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
             port['security_groups'].append(security_group_id)
             updated_port = {'security_groups': port['security_groups']}
             try:
-                LOG.info(_("Adding security group %(security_group_id)s to "
+                LOG.info(_LI("Adding security group %(security_group_id)s to "
                            "port %(port_id)s"),
                          {'security_group_id': security_group_id,
                           'port_id': port['id']})
                 neutron.update_port(port['id'], {'port': updated_port})
@@ -492,8 +492,8 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
             updated_port = {'security_groups': port['security_groups']}
             try:
-                LOG.info(_("Adding security group %(security_group_id)s to "
+                LOG.info(_LI("Adding security group %(security_group_id)s to "
                            "port %(port_id)s"),
                          {'security_group_id': security_group_id,
                           'port_id': port['id']})
                 neutron.update_port(port['id'], {'port': updated_port})

View File

@@ -25,7 +25,7 @@ from oslo.config import cfg
 from nova.compute import rpcapi as compute_rpcapi
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _, _LI, _LW
 from nova.openstack.common import log as logging
 from nova import rpc
 from nova.scheduler import driver
@@ -75,10 +75,10 @@ class FilterScheduler(driver.Scheduler):
         self.notifier.info(context, 'scheduler.run_instance.start', payload)
         instance_uuids = request_spec.get('instance_uuids')
-        LOG.info(_("Attempting to build %(num_instances)d instance(s) "
+        LOG.info(_LI("Attempting to build %(num_instances)d instance(s) "
                    "uuids: %(instance_uuids)s"),
                  {'num_instances': len(instance_uuids),
                   'instance_uuids': instance_uuids})
         LOG.debug("Request Spec: %s" % request_spec)
         # check retry policy. Rather ugly use of instance_uuids[0]...
@@ -104,10 +104,10 @@ class FilterScheduler(driver.Scheduler):
         try:
             try:
                 weighed_host = weighed_hosts.pop(0)
-                LOG.info(_("Choosing host %(weighed_host)s "
+                LOG.info(_LI("Choosing host %(weighed_host)s "
                            "for instance %(instance_uuid)s"),
                          {'weighed_host': weighed_host,
                           'instance_uuid': instance_uuid})
             except IndexError:
                 raise exception.NoValidHost(reason="")

View File

@@ -28,7 +28,7 @@ from nova.compute import task_states
 from nova.compute import vm_states
 from nova import db
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _, _LI, _LW
 from nova.openstack.common import log as logging
 from nova.pci import stats as pci_stats
 from nova.scheduler import filters
@@ -419,8 +419,8 @@ class HostManager(object):
         dead_nodes = set(self.host_state_map.keys()) - seen_nodes
         for state_key in dead_nodes:
             host, node = state_key
-            LOG.info(_("Removing dead compute node %(host)s:%(node)s "
-                       "from scheduler") % {'host': host, 'node': node})
+            LOG.info(_LI("Removing dead compute node %(host)s:%(node)s "
+                         "from scheduler"), {'host': host, 'node': node})
             del self.host_state_map[state_key]
         return self.host_state_map.itervalues()

View File

@@ -179,18 +179,16 @@ class HackingTestCase(test.NoDBTestCase):
                 'exception']
         levels = ['_LI', '_LW', '_LE', '_LC']
         debug = "LOG.debug('OK')"
-        self.assertEqual(0,
-            len(list(
-                checks.validate_log_translations(debug, debug, 'f'))))
+        audit = "LOG.audit(_('OK'))"
+        self.assertEqual(
+            0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
+        self.assertEqual(
+            0, len(list(checks.validate_log_translations(audit, audit, 'f'))))
         for log in logs:
             bad = 'LOG.%s("Bad")' % log
             self.assertEqual(1,
                 len(list(
                     checks.validate_log_translations(bad, bad, 'f'))))
-            ok = "LOG.%s(_('OK'))" % log
-            self.assertEqual(0,
-                len(list(
-                    checks.validate_log_translations(ok, ok, 'f'))))
             ok = "LOG.%s('OK') # noqa" % log
             self.assertEqual(0,
                 len(list(
View File

@@ -18,7 +18,7 @@ import time
 from oslo.utils import importutils
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.openstack.common import log as logging
 from nova import utils
@@ -118,7 +118,7 @@ class Mount(object):
         start_time = time.time()
         device = self._inner_get_dev()
         while not device:
-            LOG.info(_('Device allocation failed. Will retry in 2 seconds.'))
+            LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
             time.sleep(2)
             if time.time() - start_time > MAX_DEVICE_WAIT:
                 LOG.warn(_('Device allocation failed after repeated retries.'))

View File

@@ -13,7 +13,7 @@
 # under the License.
 """Support for mounting images with the loop device."""
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.openstack.common import log as logging
 from nova import utils
 from nova.virt.disk.mount import api
@@ -30,7 +30,7 @@ class LoopMount(api.Mount):
                                   run_as_root=True)
         if err:
             self.error = _('Could not attach image to loopback: %s') % err
-            LOG.info(_('Loop mount error: %s'), self.error)
+            LOG.info(_LI('Loop mount error: %s'), self.error)
             self.linked = False
             self.device = None
             return False

View File

@@ -20,7 +20,7 @@ import time
 from oslo.config import cfg
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.openstack.common import log as logging
 from nova import utils
 from nova.virt.disk.mount import api
@@ -88,7 +88,7 @@ class NbdMount(api.Mount):
                                   run_as_root=True)
         if err:
             self.error = _('qemu-nbd error: %s') % err
-            LOG.info(_('NBD mount error: %s'), self.error)
+            LOG.info(_LI('NBD mount error: %s'), self.error)
             return False
         # NOTE(vish): this forks into another process, so give it a chance
@@ -101,7 +101,7 @@ class NbdMount(api.Mount):
             time.sleep(1)
         else:
             self.error = _('nbd device %s did not show up') % device
-            LOG.info(_('NBD mount error: %s'), self.error)
+            LOG.info(_LI('NBD mount error: %s'), self.error)
             # Cleanup
             _out, err = utils.trycmd('qemu-nbd', '-d', device,

View File

@@ -25,7 +25,7 @@ import sys
 from oslo.config import cfg
 from oslo.utils import importutils
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova.openstack.common import log as logging
 from nova import utils
 from nova.virt import event as virtevent
@@ -1374,7 +1374,7 @@ def load_compute_driver(virtapi, compute_driver=None):
         LOG.error(_LE("Compute driver option required, but not specified"))
         sys.exit(1)
-    LOG.info(_("Loading compute driver '%s'") % compute_driver)
+    LOG.info(_LI("Loading compute driver '%s'"), compute_driver)
     try:
         driver = importutils.import_object_ns('nova.virt',
                                               compute_driver,

View File

@@ -20,7 +20,6 @@ from oslo.utils import importutils
 from nova.compute import utils as compute_utils
 from nova import context
-from nova.i18n import _
 from nova.i18n import _LI
 from nova.network import linux_net
 from nova import objects
@@ -171,8 +170,8 @@ class IptablesFirewallDriver(FirewallDriver):
             self.remove_filters_for_instance(instance)
             self.iptables.apply()
         else:
-            LOG.info(_('Attempted to unfilter instance which is not '
+            LOG.info(_LI('Attempted to unfilter instance which is not '
                        'filtered'), instance=instance)
     def prepare_instance_filter(self, instance, network_info):
         self.instance_info[instance['id']] = (instance, network_info)

View File

@@ -28,7 +28,7 @@ if sys.platform == 'win32':
     import wmi
 from nova import block_device
-from nova.i18n import _
+from nova.i18n import _LI
 from nova.openstack.common import log as logging
 from nova.virt import driver
@@ -68,8 +68,8 @@ class BaseVolumeUtils(object):
                 initiator_name = str(temp[0])
                 _winreg.CloseKey(key)
         except Exception:
-            LOG.info(_("The ISCSI initiator name can't be found. "
+            LOG.info(_LI("The ISCSI initiator name can't be found. "
                        "Choosing the default one"))
             initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
         if computer_system.PartofDomain:
             initiator_name += '.' + computer_system.Domain.lower()

View File

@@ -246,7 +246,7 @@ class VMOps(object):
     def spawn(self, context, instance, image_meta, injected_files,
               admin_password, network_info, block_device_info=None):
         """Create a new VM and start it."""
-        LOG.info(_("Spawning new instance"), instance=instance)
+        LOG.info(_LI("Spawning new instance"), instance=instance)
         instance_name = instance['name']
         if self._vmutils.vm_exists(instance_name):
@@ -328,7 +328,7 @@ class VMOps(object):
                 _('Invalid config_drive_format "%s"') %
                 CONF.config_drive_format)
-        LOG.info(_('Using config drive for instance'), instance=instance)
+        LOG.info(_LI('Using config drive for instance'), instance=instance)
         extra_md = {}
         if admin_password and CONF.hyperv.config_drive_inject_password:
@@ -341,7 +341,7 @@ class VMOps(object):
         instance_path = self._pathutils.get_instance_dir(
             instance['name'])
         configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
-        LOG.info(_('Creating config drive at %(path)s'),
+        LOG.info(_LI('Creating config drive at %(path)s'),
                  {'path': configdrive_path_iso}, instance=instance)
         with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
@@ -389,7 +389,7 @@ class VMOps(object):
     def destroy(self, instance, network_info=None, block_device_info=None,
                 destroy_disks=True):
         instance_name = instance['name']
-        LOG.info(_("Got request to destroy instance"), instance=instance)
+        LOG.info(_LI("Got request to destroy instance"), instance=instance)
         try:
             if self._vmutils.vm_exists(instance_name):

View File

@@ -31,7 +31,7 @@ from oslo.vmware import vim_util
 import suds
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _, _LI, _LW
 from nova.openstack.common import log as logging
 from nova.openstack.common import uuidutils
 from nova.virt import driver
@@ -455,8 +455,8 @@ class VMwareVCDriver(driver.ComputeDriver):
             stats_dict = self._get_available_resources(host_stats)
         else:
-            LOG.info(_("Invalid cluster or resource pool"
-                       " name : %s") % nodename)
+            LOG.info(_LI("Invalid cluster or resource pool"
+                         " name : %s"), nodename)
         return stats_dict

View File

@@ -40,7 +40,7 @@ from oslo.config import cfg
 from oslo.utils import timeutils
 from oslo.vmware import exceptions as vexc
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.openstack.common import log as logging
 from nova.virt import imagecache
 from nova.virt.vmwareapi import ds_util
@@ -159,13 +159,13 @@ class ImageCacheManager(imagecache.ImageCacheManager):
                     ds_util.mkdir(self._session, ts_path, dc_info.ref)
                 except vexc.FileAlreadyExistsException:
                     LOG.debug("Timestamp already exists.")
-                LOG.info(_("Image %s is no longer used by this node. "
+                LOG.info(_LI("Image %s is no longer used by this node. "
                            "Pending deletion!"), image)
             else:
                 dt = self._get_datetime_from_filename(str(ts))
                 if timeutils.is_older_than(dt, age_seconds):
-                    LOG.info(_("Image %s is no longer used. "
+                    LOG.info(_LI("Image %s is no longer used. "
                                "Deleting!"), path)
                     # Image has aged - delete the image ID folder
                     self._folder_delete(path, dc_info.ref)

View File

@@ -39,7 +39,7 @@ from nova.compute import vm_states
 from nova.console import type as ctype
 from nova import context as nova_context
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _, _LE, _LI, _LW
 from nova import objects
 from nova.openstack.common import log as logging
 from nova.openstack.common import uuidutils
@@ -506,7 +506,7 @@ class VMwareVMOps(object):
                           CONF.config_drive_format)
                 raise exception.InstancePowerOnFailure(reason=reason)
-        LOG.info(_('Using config drive for instance'), instance=instance)
+        LOG.info(_LI('Using config drive for instance'), instance=instance)
         extra_md = {}
         if admin_password:
             extra_md['admin_pass'] = admin_password
@@ -1133,11 +1133,11 @@ class VMwareVMOps(object):
                                                           timeout=timeout)
         if instances_info["instance_count"] > 0:
-            LOG.info(_("Found %(instance_count)d hung reboots "
-                       "older than %(timeout)d seconds") % instances_info)
+            LOG.info(_LI("Found %(instance_count)d hung reboots "
+                         "older than %(timeout)d seconds"), instances_info)
             for instance in instances:
-                LOG.info(_("Automatically hard rebooting"), instance=instance)
+                LOG.info(_LI("Automatically hard rebooting"), instance=instance)
                 self.compute_api.reboot(ctxt, instance, "HARD")
     def get_info(self, instance):

View File

@@ -21,7 +21,7 @@ from oslo.config import cfg
 from oslo.vmware import vim_util as vutil
 from nova import exception
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.openstack.common import log as logging
 from nova.virt.vmwareapi import vim_util
 from nova.virt.vmwareapi import vm_util
@@ -443,8 +443,8 @@ class VMwareVolumeOps(object):
         # The volume has been moved from its original location.
         # Need to consolidate the VMDK files.
-        LOG.info(_("The volume's backing has been relocated to %s. Need to "
+        LOG.info(_LI("The volume's backing has been relocated to %s. Need to "
                    "consolidate backing disk file."), current_device_path)
         # Pick the resource pool on which the instance resides.
         # Move the volume to the datastore where the new VMDK file is present.

View File

@@ -30,7 +30,7 @@ from nova.compute import utils as compute_utils
 from nova import context
 from nova import crypto
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova import objects
 from nova.openstack.common import log as logging
 from nova import utils
@@ -393,20 +393,20 @@ def find_guest_agent(base_dir):
                 # reconfigure the network from xenstore data,
                 # so manipulation of files in /etc is not
                 # required
-                LOG.info(_('XenServer tools installed in this '
+                LOG.info(_LI('XenServer tools installed in this '
                            'image are capable of network injection. '
                            'Networking files will not be'
                            'manipulated'))
                 return True
             xe_daemon_filename = os.path.join(base_dir,
                                               'usr', 'sbin', 'xe-daemon')
             if os.path.isfile(xe_daemon_filename):
-                LOG.info(_('XenServer tools are present '
+                LOG.info(_LI('XenServer tools are present '
                            'in this image but are not capable '
                            'of network injection'))
             else:
-                LOG.info(_('XenServer tools are not '
+                LOG.info(_LI('XenServer tools are not '
                            'installed in this image'))
             return False

View File

@@ -29,7 +29,7 @@ from nova.compute import vm_mode
 from nova.compute import vm_states
 from nova import context
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova import objects
 from nova.openstack.common import log as logging
 from nova.pci import whitelist as pci_whitelist
@@ -73,10 +73,11 @@ class Host(object):
                 name = vm_rec['name_label']
                 uuid = _uuid_find(ctxt, host, name)
                 if not uuid:
-                    LOG.info(_('Instance %(name)s running on %(host)s'
-                               ' could not be found in the database:'
-                               ' assuming it is a worker VM and skip'
-                               ' ping migration to a new host'),
+                    LOG.info(_LI('Instance %(name)s running on '
+                                 '%(host)s could not be found in '
+                                 'the database: assuming it is a '
+                                 'worker VM and skip ping migration '
+                                 'to a new host'),
                              {'name': name, 'host': host})
                     continue
                 instance = objects.Instance.get_by_uuid(ctxt, uuid)

View File

@@ -390,11 +390,11 @@ def unplug_vbd(session, vbd_ref, this_vm_ref):
         except session.XenAPI.Failure as exc:
             err = len(exc.details) > 0 and exc.details[0]
             if err == 'DEVICE_ALREADY_DETACHED':
-                LOG.info(_('VBD %s already detached'), vbd_ref)
+                LOG.info(_LI('VBD %s already detached'), vbd_ref)
                 return
             elif _should_retry_unplug_vbd(err):
-                LOG.info(_('VBD %(vbd_ref)s uplug failed with "%(err)s", '
+                LOG.info(_LI('VBD %(vbd_ref)s uplug failed with "%(err)s", '
                            'attempt %(num_attempt)d/%(max_attempts)d'),
                          {'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
                           'max_attempts': max_attempts, 'err': err})
             else:
@@ -2171,7 +2171,7 @@ def cleanup_attached_vdis(session):
         if 'nova_instance_uuid' in vdi_rec['other_config']:
             # Belongs to an instance and probably left over after an
             # unclean restart
-            LOG.info(_('Disconnecting stale VDI %s from compute domU'),
+            LOG.info(_LI('Disconnecting stale VDI %s from compute domU'),
                      vdi_rec['uuid'])
             unplug_vbd(session, vbd_ref, this_vm_ref)
             destroy_vbd(session, vbd_ref)
@@ -2464,7 +2464,7 @@ def _mounted_processing(device, key, net, metadata):
                 vfs = vfsimpl.VFSLocalFS(imgfile=None,
                                          imgfmt=None,
                                          imgdir=tmpdir)
-                LOG.info(_('Manipulating interface files directly'))
+                LOG.info(_LI('Manipulating interface files directly'))
                 # for xenapi, we don't 'inject' admin_password here,
                 # it's handled at instance startup time, nor do we
                 # support injecting arbitrary files here.
@@ -2473,8 +2473,8 @@ def _mounted_processing(device, key, net, metadata):
             finally:
                 utils.execute('umount', dev_path, run_as_root=True)
         else:
-            LOG.info(_('Failed to mount filesystem (expected for '
-                       'non-linux instances): %s') % err)
+            LOG.info(_LI('Failed to mount filesystem (expected for '
+                         'non-linux instances): %s'), err)
 def ensure_correct_host(session):
def ensure_correct_host(session): def ensure_correct_host(session):

View File

@@ -41,7 +41,7 @@ from nova.compute import vm_states
 from nova.console import type as ctype
 from nova import context as nova_context
 from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _, _LE, _LI
 from nova import objects
 from nova.openstack.common import log as logging
 from nova.pci import manager as pci_manager
@@ -1210,8 +1210,8 @@ class VMOps(object):
             details = exc.details
             if (details[0] == 'VM_BAD_POWER_STATE' and
                     details[-1] == 'halted'):
-                LOG.info(_("Starting halted instance found during reboot"),
+                LOG.info(_LI("Starting halted instance found during reboot"),
                          instance=instance)
                 self._start(instance, vm_ref=vm_ref,
                             bad_volumes_callback=bad_volumes_callback)
                 return
@@ -1414,7 +1414,7 @@ class VMOps(object):
        destroy_* methods are internal.
        """
-        LOG.info(_("Destroying VM"), instance=instance)
+        LOG.info(_LI("Destroying VM"), instance=instance)
        # We don't use _get_vm_opaque_ref because the instance may
        # truly not exist because of a failure during build. A valid
@@ -1616,11 +1616,11 @@ class VMOps(object):
                                                           timeout=timeout)
         if instances_info["instance_count"] > 0:
-            LOG.info(_("Found %(instance_count)d hung reboots "
+            LOG.info(_LI("Found %(instance_count)d hung reboots "
                        "older than %(timeout)d seconds") % instances_info)
             for instance in instances:
-                LOG.info(_("Automatically hard rebooting"), instance=instance)
+                LOG.info(_LI("Automatically hard rebooting"), instance=instance)
                 self.compute_api.reboot(ctxt, instance, "HARD")
     def get_info(self, instance, vm_ref=None):

View File

@@ -20,7 +20,7 @@ Management class for Storage-related functions (attach, detach, etc).
 from oslo.utils import excutils
 from nova import exception
-from nova.i18n import _
+from nova.i18n import _, _LI
 from nova.openstack.common import log as logging
 from nova.virt.xenapi import vm_utils
 from nova.virt.xenapi import volume_utils
@@ -61,7 +61,7 @@ class VolumeOps(object):
         vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
                                                      connection_data)
         vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
-        LOG.info(_('Connected volume (vdi_uuid): %s'), vdi_uuid)
+        LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)
         if vm_ref:
             self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
@@ -124,8 +124,8 @@ class VolumeOps(object):
         LOG.debug("Plugging VBD: %s", vbd_ref)
         self._session.VBD.plug(vbd_ref, vm_ref)
-        LOG.info(_('Dev %(dev_number)s attached to'
+        LOG.info(_LI('Dev %(dev_number)s attached to'
                    ' instance %(instance_name)s'),
                  {'instance_name': instance_name, 'dev_number': dev_number})
     def detach_volume(self, connection_info, instance_name, mountpoint):
@@ -146,8 +146,8 @@ class VolumeOps(object):
                                               instance_name)
         else:
             self._detach_vbds_and_srs(vm_ref, [vbd_ref])
-        LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
+        LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
                    ' %(instance_name)s'),
                  {'instance_name': instance_name,
                   'mountpoint': mountpoint})