Implementing the use of _Lx/i18n markers

Placing the _Lx markers back into the code. No other cleaner solution
has been implemented. Patches will be submitted in a series of
subdirectories and in a fashion that is manageable.
eighth commit of this kind
This is the last run through to pick up the ones that were missed

Change-Id: Ifd9d647175a840939bf01fa3bcecfa6384965e3b
Closes-Bug: #1384312
This commit is contained in:
Mike Mason 2014-12-04 09:17:57 +00:00
parent 4f27af39d3
commit 9ad858c9c9
63 changed files with 459 additions and 437 deletions

View File

@ -18,7 +18,7 @@
from oslo.config import cfg from oslo.config import cfg
import paste.urlmap import paste.urlmap
from cinder.i18n import _ from cinder.i18n import _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -28,9 +28,9 @@ LOG = logging.getLogger(__name__)
def root_app_factory(loader, global_conf, **local_conf): def root_app_factory(loader, global_conf, **local_conf):
if CONF.enable_v1_api: if CONF.enable_v1_api:
LOG.warn(_('The v1 api is deprecated and will be removed after the ' LOG.warn(_LW('The v1 api is deprecated and will be removed after the '
'Juno release. You should set enable_v1_api=false and ' 'Juno release. You should set enable_v1_api=false and '
'enable_v2_api=true in your cinder.conf file.')) 'enable_v2_api=true in your cinder.conf file.'))
else: else:
del local_conf['/v1'] del local_conf['/v1']
if not CONF.enable_v2_api: if not CONF.enable_v2_api:

View File

@ -25,7 +25,7 @@ import cinder.api.openstack
from cinder.api.openstack import wsgi from cinder.api.openstack import wsgi
from cinder.api import xmlutil from cinder.api import xmlutil
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE, _LI from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
import cinder.policy import cinder.policy
@ -273,8 +273,8 @@ class ExtensionManager(object):
try: try:
self.load_extension(ext_factory) self.load_extension(ext_factory)
except Exception as exc: except Exception as exc:
LOG.warn(_('Failed to load extension %(ext_factory)s: ' LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
'%(exc)s'), '%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc}) {'ext_factory': ext_factory, 'exc': exc})

View File

@ -27,7 +27,7 @@ import webob
from cinder import exception from cinder import exception
from cinder import i18n from cinder import i18n
from cinder.i18n import _, _LI from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
from cinder import wsgi from cinder import wsgi
@ -750,7 +750,7 @@ class ResourceExceptionHandler(object):
code=ex_value.code, explanation=ex_value.msg)) code=ex_value.code, explanation=ex_value.msg))
elif isinstance(ex_value, TypeError): elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback) exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_( LOG.error(_LE(
'Exception handling resource: %s') % 'Exception handling resource: %s') %
ex_value, exc_info=exc_info) ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest()) raise Fault(webob.exc.HTTPBadRequest())

View File

@ -173,8 +173,8 @@ class BackupManager(manager.SchedulerDependentManager):
driver.do_setup(ctxt) driver.do_setup(ctxt)
driver.check_for_setup_error() driver.check_for_setup_error()
except Exception as ex: except Exception as ex:
LOG.error(_("Error encountered during initialization of driver: " LOG.error(_LE("Error encountered during initialization of driver: "
"%(name)s.") % "%(name)s.") %
{'name': driver.__class__.__name__}) {'name': driver.__class__.__name__})
LOG.exception(ex) LOG.exception(ex)
# we don't want to continue since we failed # we don't want to continue since we failed

View File

@ -26,7 +26,7 @@ from cinder.brick.initiator import host_driver
from cinder.brick.initiator import linuxfc from cinder.brick.initiator import linuxfc
from cinder.brick.initiator import linuxscsi from cinder.brick.initiator import linuxscsi
from cinder.brick.remotefs import remotefs from cinder.brick.remotefs import remotefs
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
@ -227,8 +227,8 @@ class ISCSIConnector(InitiatorConnector):
if tries >= self.device_scan_attempts: if tries >= self.device_scan_attempts:
raise exception.VolumeDeviceNotFound(device=host_device) raise exception.VolumeDeviceNotFound(device=host_device)
LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. " LOG.warn(_LW("ISCSI volume not yet found at: %(host_device)s. "
"Will rescan & retry. Try number: %(tries)s"), "Will rescan & retry. Try number: %(tries)s"),
{'host_device': host_device, {'host_device': host_device,
'tries': tries}) 'tries': tries})
@ -634,8 +634,8 @@ class FibreChannelConnector(InitiatorConnector):
LOG.error(msg) LOG.error(msg)
raise exception.NoFibreChannelVolumeDeviceFound() raise exception.NoFibreChannelVolumeDeviceFound()
LOG.warn(_("Fibre volume not yet found. " LOG.warn(_LW("Fibre volume not yet found. "
"Will rescan & retry. Try number: %(tries)s"), "Will rescan & retry. Try number: %(tries)s"),
{'tries': tries}) {'tries': tries})
self._linuxfc.rescan_hosts(hbas) self._linuxfc.rescan_hosts(hbas)
@ -778,8 +778,8 @@ class AoEConnector(InitiatorConnector):
if waiting_status['tries'] >= self.device_scan_attempts: if waiting_status['tries'] >= self.device_scan_attempts:
raise exception.VolumeDeviceNotFound(device=aoe_path) raise exception.VolumeDeviceNotFound(device=aoe_path)
LOG.warn(_("AoE volume not yet found at: %(path)s. " LOG.warn(_LW("AoE volume not yet found at: %(path)s. "
"Try number: %(tries)s"), "Try number: %(tries)s"),
{'path': aoe_device, {'path': aoe_device,
'tries': waiting_status['tries']}) 'tries': waiting_status['tries']})
@ -860,8 +860,8 @@ class RemoteFsConnector(InitiatorConnector):
kwargs.get('glusterfs_mount_point_base') or\ kwargs.get('glusterfs_mount_point_base') or\
mount_point_base mount_point_base
else: else:
LOG.warn(_("Connection details not present." LOG.warn(_LW("Connection details not present."
" RemoteFsClient may not initialize properly.")) " RemoteFsClient may not initialize properly."))
self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper, self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
execute=execute, execute=execute,
*args, **kwargs) *args, **kwargs)

View File

@ -19,7 +19,7 @@ import errno
from oslo.concurrency import processutils as putils from oslo.concurrency import processutils as putils
from cinder.brick.initiator import linuxscsi from cinder.brick.initiator import linuxscsi
from cinder.i18n import _ from cinder.i18n import _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -48,13 +48,13 @@ class LinuxFibreChannel(linuxscsi.LinuxSCSI):
# and systool is not installed # and systool is not installed
# 96 = nova.cmd.rootwrap.RC_NOEXECFOUND: # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
if exc.exit_code == 96: if exc.exit_code == 96:
LOG.warn(_("systool is not installed")) LOG.warn(_LW("systool is not installed"))
return [] return []
except OSError as exc: except OSError as exc:
# This handles the case where rootwrap is NOT used # This handles the case where rootwrap is NOT used
# and systool is not installed # and systool is not installed
if exc.errno == errno.ENOENT: if exc.errno == errno.ENOENT:
LOG.warn(_("systool is not installed")) LOG.warn(_LW("systool is not installed"))
return [] return []
# No FC HBAs were found # No FC HBAs were found

View File

@ -22,7 +22,7 @@ import re
from oslo.concurrency import processutils as putils from oslo.concurrency import processutils as putils
from cinder.brick import executor from cinder.brick import executor
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -115,7 +115,7 @@ class LinuxSCSI(executor.Executor):
self._execute('multipath', '-f', device, run_as_root=True, self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper) root_helper=self._root_helper)
except putils.ProcessExecutionError as exc: except putils.ProcessExecutionError as exc:
LOG.warn(_("multipath call failed exit (%(code)s)") LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code}) % {'code': exc.exit_code})
def flush_multipath_devices(self): def flush_multipath_devices(self):
@ -123,7 +123,7 @@ class LinuxSCSI(executor.Executor):
self._execute('multipath', '-F', run_as_root=True, self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper) root_helper=self._root_helper)
except putils.ProcessExecutionError as exc: except putils.ProcessExecutionError as exc:
LOG.warn(_("multipath call failed exit (%(code)s)") LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code}) % {'code': exc.exit_code})
def find_multipath_device(self, device): def find_multipath_device(self, device):
@ -140,7 +140,7 @@ class LinuxSCSI(executor.Executor):
run_as_root=True, run_as_root=True,
root_helper=self._root_helper) root_helper=self._root_helper)
except putils.ProcessExecutionError as exc: except putils.ProcessExecutionError as exc:
LOG.warn(_("multipath call failed exit (%(code)s)") LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code}) % {'code': exc.exit_code})
return None return None
@ -163,7 +163,7 @@ class LinuxSCSI(executor.Executor):
mdev_id = mdev_id.replace(')', '') mdev_id = mdev_id.replace(')', '')
if mdev is None: if mdev is None:
LOG.warn(_("Couldn't find multipath device %(line)s") LOG.warn(_LW("Couldn't find multipath device %(line)s")
% {'line': line}) % {'line': line})
return None return None

View File

@ -82,7 +82,7 @@ class LVM(executor.Executor):
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False: if self._vg_exists() is False:
LOG.error(_('Unable to locate Volume Group %s') % vg_name) LOG.error(_LE('Unable to locate Volume Group %s') % vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name) raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder # NOTE: we assume that the VG has been activated outside of Cinder
@ -396,7 +396,7 @@ class LVM(executor.Executor):
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1: if len(vg_list) != 1:
LOG.error(_('Unable to find VG: %s') % self.vg_name) LOG.error(_LE('Unable to find VG: %s') % self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name) raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size']) self.vg_size = float(vg_list[0]['size'])
@ -448,9 +448,9 @@ class LVM(executor.Executor):
""" """
if not self.supports_thin_provisioning(self._root_helper): if not self.supports_thin_provisioning(self._root_helper):
LOG.error(_('Requested to setup thin provisioning, ' LOG.error(_LE('Requested to setup thin provisioning, '
'however current LVM version does not ' 'however current LVM version does not '
'support it.')) 'support it.'))
return None return None
if name is None: if name is None:
@ -521,7 +521,7 @@ class LVM(executor.Executor):
""" """
source_lvref = self.get_volume(source_lv_name) source_lvref = self.get_volume(source_lv_name)
if source_lvref is None: if source_lvref is None:
LOG.error(_("Trying to create snapshot by non-existent LV: %s") LOG.error(_LE("Trying to create snapshot by non-existent LV: %s")
% source_lv_name) % source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name) raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = ['lvcreate', '--name', name, cmd = ['lvcreate', '--name', name,

View File

@ -21,7 +21,7 @@
import sqlalchemy import sqlalchemy
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -64,7 +64,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys: if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check # TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id # the actual primary key, rather than assuming its id
LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs)) assert(not (sort_dir and sort_dirs))

View File

@ -26,7 +26,7 @@ from oslo.utils import timeutils
from cinder.db import base from cinder.db import base
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
import cinder.policy import cinder.policy
from cinder import quota from cinder import quota
@ -136,8 +136,8 @@ class API(base.Base):
group = self.db.consistencygroup_create(context, options) group = self.db.consistencygroup_create(context, options)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error occurred when creating consistency group" LOG.error(_LE("Error occurred when creating consistency group"
" %s."), name) " %s."), name)
request_spec_list = [] request_spec_list = []
filter_properties_list = [] filter_properties_list = []
@ -199,9 +199,9 @@ class API(base.Base):
try: try:
self.db.consistencygroup_destroy(context, group_id) self.db.consistencygroup_destroy(context, group_id)
finally: finally:
LOG.error(_("Error occurred when building " LOG.error(_LE("Error occurred when building "
"request spec list for consistency group " "request spec list for consistency group "
"%s."), group_id) "%s."), group_id)
# Cast to the scheduler and let it handle whatever is needed # Cast to the scheduler and let it handle whatever is needed
# to select the target host for this group. # to select the target host for this group.
@ -226,8 +226,8 @@ class API(base.Base):
self.db.consistencygroup_destroy(context.elevated(), self.db.consistencygroup_destroy(context.elevated(),
group_id) group_id)
finally: finally:
LOG.error(_("Failed to update quota for " LOG.error(_LE("Failed to update quota for "
"consistency group %s."), group_id) "consistency group %s."), group_id)
@wrap_check_policy @wrap_check_policy
def delete(self, context, group, force=False): def delete(self, context, group, force=False):
@ -368,8 +368,8 @@ class API(base.Base):
try: try:
self.db.cgsnapshot_destroy(context, cgsnapshot_id) self.db.cgsnapshot_destroy(context, cgsnapshot_id)
finally: finally:
LOG.error(_("Error occurred when creating cgsnapshot" LOG.error(_LE("Error occurred when creating cgsnapshot"
" %s."), cgsnapshot_id) " %s."), cgsnapshot_id)
self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot) self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)

View File

@ -210,8 +210,8 @@ def _retry_on_deadlock(f):
try: try:
return f(*args, **kwargs) return f(*args, **kwargs)
except db_exc.DBDeadlock: except db_exc.DBDeadlock:
LOG.warn(_("Deadlock detected when running " LOG.warn(_LW("Deadlock detected when running "
"'%(func_name)s': Retrying..."), "'%(func_name)s': Retrying..."),
dict(func_name=f.__name__)) dict(func_name=f.__name__))
# Retry! # Retry!
time.sleep(0.5) time.sleep(0.5)

View File

@ -28,7 +28,7 @@ from oslo.config import cfg
from oslo.utils import excutils from oslo.utils import excutils
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LE
from cinder.keymgr import key as keymgr_key from cinder.keymgr import key as keymgr_key
from cinder.keymgr import key_mgr from cinder.keymgr import key_mgr
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -73,7 +73,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
endpoint=self._barbican_endpoint) endpoint=self._barbican_endpoint)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error creating Barbican client: %s"), (e)) LOG.error(_LE("Error creating Barbican client: %s"), (e))
return self._barbican_client return self._barbican_client
@ -110,7 +110,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return secret_uuid return secret_uuid
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error creating key: %s"), (e)) LOG.error(_LE("Error creating key: %s"), (e))
def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key', def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
payload_content_type='application/octet-stream', payload_content_type='application/octet-stream',
@ -165,7 +165,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return secret_uuid return secret_uuid
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error storing key: %s"), (e)) LOG.error(_LE("Error storing key: %s"), (e))
def copy_key(self, ctxt, key_id): def copy_key(self, ctxt, key_id):
"""Copies (i.e., clones) a key stored by barbican. """Copies (i.e., clones) a key stored by barbican.
@ -193,7 +193,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return copy_uuid return copy_uuid
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error copying key: %s"), (e)) LOG.error(_LE("Error copying key: %s"), (e))
def _create_secret_ref(self, key_id, barbican_client): def _create_secret_ref(self, key_id, barbican_client):
"""Creates the URL required for accessing a secret. """Creates the URL required for accessing a secret.
@ -230,7 +230,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return secret_data return secret_data
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error getting secret data: %s"), (e)) LOG.error(_LE("Error getting secret data: %s"), (e))
def _get_secret(self, ctxt, secret_ref): def _get_secret(self, ctxt, secret_ref):
"""Creates the URL required for accessing a secret's metadata. """Creates the URL required for accessing a secret's metadata.
@ -249,7 +249,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return barbican_client.secrets.get(secret_ref) return barbican_client.secrets.get(secret_ref)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error getting secret metadata: %s"), (e)) LOG.error(_LE("Error getting secret metadata: %s"), (e))
def get_key(self, ctxt, key_id, def get_key(self, ctxt, key_id,
payload_content_type='application/octet-stream'): payload_content_type='application/octet-stream'):
@ -278,7 +278,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return key return key
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error getting key: %s"), (e)) LOG.error(_LE("Error getting key: %s"), (e))
def delete_key(self, ctxt, key_id): def delete_key(self, ctxt, key_id):
"""Deletes the specified key. """Deletes the specified key.
@ -295,4 +295,4 @@ class BarbicanKeyManager(key_mgr.KeyManager):
barbican_client.secrets.delete(secret_ref) barbican_client.secrets.delete(secret_ref)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error deleting key: %s"), (e)) LOG.error(_LE("Error deleting key: %s"), (e))

View File

@ -36,7 +36,7 @@ import array
from oslo.config import cfg from oslo.config import cfg
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.keymgr import key from cinder.keymgr import key
from cinder.keymgr import key_mgr from cinder.keymgr import key_mgr
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -75,8 +75,8 @@ class ConfKeyManager(key_mgr.KeyManager):
def _generate_hex_key(self, **kwargs): def _generate_hex_key(self, **kwargs):
if CONF.keymgr.fixed_key is None: if CONF.keymgr.fixed_key is None:
LOG.warn(_('config option keymgr.fixed_key has not been defined: ' LOG.warn(_LW('config option keymgr.fixed_key has not been defined:'
'some operations may fail unexpectedly')) ' some operations may fail unexpectedly'))
raise ValueError(_('keymgr.fixed_key not defined')) raise ValueError(_('keymgr.fixed_key not defined'))
return CONF.keymgr.fixed_key return CONF.keymgr.fixed_key
@ -131,4 +131,4 @@ class ConfKeyManager(key_mgr.KeyManager):
raise exception.KeyManagerError( raise exception.KeyManagerError(
reason="cannot delete non-existent key") reason="cannot delete non-existent key")
LOG.warn(_("Not deleting key %s"), key_id) LOG.warn(_LW("Not deleting key %s"), key_id)

View File

@ -20,7 +20,7 @@ Utilities for linking request ID's across service calls.
import logging import logging
from openstack.common.gettextutils import _ # noqa from openstack.common.gettextutils import _ # noqa
from cinder.i18n import _LI
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -73,12 +73,12 @@ def link_request_ids(context, source_id, target_id=None, stage=None,
if target_name or target_id: if target_name or target_id:
arrow = " -> " arrow = " -> "
LOG.info(_("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s" LOG.info(_LI("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s"
"%(target_name)s%(target_id)s") % {"event_name": event_name, "%(target_name)s%(target_id)s") % {"event_name": event_name,
"source_id": source_id, "source_id": source_id,
"target_name": rtarget_name, "target_name": rtarget_name,
"arrow": arrow, "arrow": arrow,
"target_id": rtarget_id}) "target_id": rtarget_id})
if notifier: if notifier:
payload = {"source_request_id": source_id, payload = {"source_request_id": source_id,

View File

@ -24,7 +24,7 @@ from oslo.utils import timeutils
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder.i18n import _, _LI from cinder.i18n import _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters from cinder.openstack.common.scheduler import filters
from cinder.openstack.common.scheduler import weights from cinder.openstack.common.scheduler import weights
@ -452,7 +452,7 @@ class HostManager(object):
for service in volume_services: for service in volume_services:
host = service['host'] host = service['host']
if not utils.service_is_up(service): if not utils.service_is_up(service):
LOG.warn(_("volume service is down. (host: %s)") % host) LOG.warn(_LW("volume service is down. (host: %s)") % host)
continue continue
capabilities = self.service_states.get(host, None) capabilities = self.service_states.get(host, None)
host_state = self.host_state_map.get(host) host_state = self.host_state_map.get(host)

View File

@ -21,7 +21,7 @@ from oslo.concurrency import processutils as putils
from cinder.brick import exception from cinder.brick import exception
from cinder.brick.initiator import connector from cinder.brick.initiator import connector
from cinder.brick.initiator import host_driver from cinder.brick.initiator import host_driver
from cinder.i18n import _ from cinder.i18n import _LE
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder import test from cinder import test
@ -490,7 +490,7 @@ class FakeFixedIntervalLoopingCall(object):
except loopingcall.LoopingCallDone: except loopingcall.LoopingCallDone:
return self return self
except Exception: except Exception:
LOG.exception(_('in fixed duration looping call')) LOG.exception(_LE('in fixed duration looping call'))
raise raise

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from cinder.i18n import _ from cinder.i18n import _LE
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.tests.brick.fake_lvm import FakeBrickLVM from cinder.tests.brick.fake_lvm import FakeBrickLVM
from cinder.volume import driver from cinder.volume import driver
@ -132,7 +132,7 @@ class LoggingVolumeDriver(driver.VolumeDriver):
self.log_action('clear_volume', volume) self.log_action('clear_volume', volume)
def local_path(self, volume): def local_path(self, volume):
LOG.error(_("local_path not implemented")) LOG.error(_LE("local_path not implemented"))
raise NotImplementedError() raise NotImplementedError()
def ensure_export(self, context, volume): def ensure_export(self, context, volume):

View File

@ -17,7 +17,7 @@ from oslo.serialization import jsonutils
import requests import requests
import six.moves.urllib.parse as urlparse import six.moves.urllib.parse as urlparse
from cinder.i18n import _ from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -94,10 +94,10 @@ class TestOpenStackClient(object):
relative_url = parsed_url.path relative_url = parsed_url.path
if parsed_url.query: if parsed_url.query:
relative_url = relative_url + "?" + parsed_url.query relative_url = relative_url + "?" + parsed_url.query
LOG.info(_("Doing %(method)s on %(relative_url)s"), LOG.info(_LI("Doing %(method)s on %(relative_url)s"),
{'method': method, 'relative_url': relative_url}) {'method': method, 'relative_url': relative_url})
if body: if body:
LOG.info(_("Body: %s") % body) LOG.info(_LI("Body: %s") % body)
if port: if port:
_url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url) _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)

View File

@ -20,7 +20,7 @@ import tempfile
import mock import mock
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import test from cinder import test
from cinder.volume.drivers.fujitsu_eternus_dx_common import FJDXCommon from cinder.volume.drivers.fujitsu_eternus_dx_common import FJDXCommon
@ -173,7 +173,7 @@ class FakeEcomConnection():
rc = 0L rc = 0L
job = {} job = {}
else: else:
LOG.warn(_('method is not exist ')) LOG.warn(_LW('method is not exist '))
raise exception.VolumeBackendAPIException(data="invoke method") raise exception.VolumeBackendAPIException(data="invoke method")
LOG.debug('exit InvokeMethod:MAP_STAT: %s VOL_STAT: %s' LOG.debug('exit InvokeMethod:MAP_STAT: %s VOL_STAT: %s'
' Method: %s rc: %d job: %s' % ' Method: %s rc: %d job: %s' %

View File

@ -25,7 +25,7 @@ from oslo.utils import importutils
import paramiko import paramiko
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import test from cinder import test
from cinder.volume import configuration as conf from cinder.volume import configuration as conf
@ -124,10 +124,10 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
"""Normal flow for i-t mode.""" """Normal flow for i-t mode."""
GlobalVars._is_normal_test = True GlobalVars._is_normal_test = True
GlobalVars._zone_state = [] GlobalVars._zone_state = []
LOG.info(_("In Add GlobalVars._is_normal_test: " LOG.info(_LI("In Add GlobalVars._is_normal_test: "
"%s"), GlobalVars._is_normal_test) "%s"), GlobalVars._is_normal_test)
LOG.info(_("In Add GlobalVars._zone_state:" LOG.info(_LI("In Add GlobalVars._zone_state:"
" %s"), GlobalVars._zone_state) " %s"), GlobalVars._zone_state)
get_active_zs_mock.return_value = _active_cfg_before_add get_active_zs_mock.return_value = _active_cfg_before_add
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertTrue(_zone_name in GlobalVars._zone_state) self.assertTrue(_zone_name in GlobalVars._zone_state)
@ -181,8 +181,8 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
class FakeBrcdFCZoneClientCLI(object): class FakeBrcdFCZoneClientCLI(object):
def __init__(self, ipaddress, username, password, port): def __init__(self, ipaddress, username, password, port):
LOG.info(_("User: %s"), username) LOG.info(_LI("User: %s"), username)
LOG.info(_("_zone_state: %s"), GlobalVars._zone_state) LOG.info(_LI("_zone_state: %s"), GlobalVars._zone_state)
self.firmware_supported = True self.firmware_supported = True
if not GlobalVars._is_normal_test: if not GlobalVars._is_normal_test:
raise paramiko.SSHException("Unable to connect to fabric") raise paramiko.SSHException("Unable to connect to fabric")

View File

@ -121,7 +121,8 @@ class API(base.Base):
try: try:
transfer = self.db.transfer_create(context, transfer_rec) transfer = self.db.transfer_create(context, transfer_rec)
except Exception: except Exception:
LOG.error(_("Failed to create transfer record for %s") % volume_id) LOG.error(_LE("Failed to create transfer record "
"for %s") % volume_id)
raise raise
return {'id': transfer['id'], return {'id': transfer['id'],
'volume_id': transfer['volume_id'], 'volume_id': transfer['volume_id'],

View File

@ -44,7 +44,7 @@ import six
from cinder.brick.initiator import connector from cinder.brick.initiator import connector
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -614,7 +614,7 @@ def require_driver_initialized(driver):
# we can't do anything if the driver didn't init # we can't do anything if the driver didn't init
if not driver.initialized: if not driver.initialized:
driver_name = driver.__class__.__name__ driver_name = driver.__class__.__name__
LOG.error(_("Volume driver %s not initialized") % driver_name) LOG.error(_LE("Volume driver %s not initialized") % driver_name)
raise exception.DriverNotInitialized() raise exception.DriverNotInitialized()

View File

@ -1236,7 +1236,7 @@ class API(base.Base):
elevated, svc_host, CONF.volume_topic) elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound: except exception.ServiceNotFound:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_('Unable to find service for given host.')) LOG.error(_LE('Unable to find service for given host.'))
availability_zone = service.get('availability_zone') availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None volume_type_id = volume_type['id'] if volume_type else None

View File

@ -24,7 +24,7 @@ from oslo.config import cfg
from oslo.utils import excutils from oslo.utils import excutils
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils from cinder.image import image_utils
from cinder.openstack.common import fileutils from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
@ -889,7 +889,8 @@ class ISCSIDriver(VolumeDriver):
def _do_iscsi_discovery(self, volume): def _do_iscsi_discovery(self, volume):
# TODO(justinsb): Deprecate discovery and use stored info # TODO(justinsb): Deprecate discovery and use stored info
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
LOG.warn(_("ISCSI provider_location not stored, using discovery")) LOG.warn(_LW("ISCSI provider_location not "
"stored, using discovery"))
volume_name = volume['name'] volume_name = volume['name']
@ -902,7 +903,7 @@ class ISCSIDriver(VolumeDriver):
volume['host'].split('@')[0], volume['host'].split('@')[0],
run_as_root=True) run_as_root=True)
except processutils.ProcessExecutionError as ex: except processutils.ProcessExecutionError as ex:
LOG.error(_("ISCSI discovery attempt failed for:%s") % LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
volume['host'].split('@')[0]) volume['host'].split('@')[0])
LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr) LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
return None return None

View File

@ -20,7 +20,7 @@ from oslo.config import cfg
from cinder import context from cinder import context
from cinder.db.sqlalchemy import api from cinder.db.sqlalchemy import api
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LI
from cinder.image import image_utils from cinder.image import image_utils
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume import driver from cinder.volume import driver
@ -139,7 +139,7 @@ class BlockDeviceDriver(driver.ISCSIDriver):
self.local_path(volume)) self.local_path(volume))
def create_cloned_volume(self, volume, src_vref): def create_cloned_volume(self, volume, src_vref):
LOG.info(_('Creating clone of volume: %s') % src_vref['id']) LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size']) device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume( volutils.copy_volume(
self.local_path(src_vref), device, self.local_path(src_vref), device,

View File

@ -20,7 +20,7 @@ import six
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume import driver from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common from cinder.volume.drivers.emc import emc_vmax_common
@ -145,7 +145,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
iscsi_properties = self.smis_get_iscsi_properties( iscsi_properties = self.smis_get_iscsi_properties(
volume, connector) volume, connector)
LOG.info(_("Leaving initialize_connection: %s") % (iscsi_properties)) LOG.info(_LI("Leaving initialize_connection: %s") % (iscsi_properties))
return { return {
'driver_volume_type': 'iscsi', 'driver_volume_type': 'iscsi',
'data': iscsi_properties 'data': iscsi_properties
@ -153,14 +153,14 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
def smis_do_iscsi_discovery(self, volume): def smis_do_iscsi_discovery(self, volume):
LOG.info(_("ISCSI provider_location not stored, using discovery.")) LOG.info(_LI("ISCSI provider_location not stored, using discovery."))
(out, _err) = self._execute('iscsiadm', '-m', 'discovery', (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p', '-t', 'sendtargets', '-p',
self.configuration.iscsi_ip_address, self.configuration.iscsi_ip_address,
run_as_root=True) run_as_root=True)
LOG.info(_( LOG.info(_LI(
"smis_do_iscsi_discovery is: %(out)s") "smis_do_iscsi_discovery is: %(out)s")
% {'out': out}) % {'out': out})
targets = [] targets = []
@ -206,7 +206,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
device_number = device_info['hostlunid'] device_number = device_info['hostlunid']
LOG.info(_( LOG.info(_LI(
"location is: %(location)s") % {'location': location}) "location is: %(location)s") % {'location': location})
for loc in location: for loc in location:
@ -218,14 +218,14 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
properties['volume_id'] = volume['id'] properties['volume_id'] = volume['id']
LOG.info(_("ISCSI properties: %(properties)s") LOG.info(_LI("ISCSI properties: %(properties)s")
% {'properties': properties}) % {'properties': properties})
LOG.info(_("ISCSI volume is: %(volume)s") LOG.info(_LI("ISCSI volume is: %(volume)s")
% {'volume': volume}) % {'volume': volume})
if 'provider_auth' in volume: if 'provider_auth' in volume:
auth = volume['provider_auth'] auth = volume['provider_auth']
LOG.info(_("AUTH properties: %(authProps)s") LOG.info(_LI("AUTH properties: %(authProps)s")
% {'authProps': auth}) % {'authProps': auth})
if auth is not None: if auth is not None:
@ -235,7 +235,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
properties['auth_username'] = auth_username properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret properties['auth_password'] = auth_secret
LOG.info(_("AUTH properties: %s") % (properties)) LOG.info(_LI("AUTH properties: %s") % (properties))
return properties return properties

View File

@ -2368,7 +2368,8 @@ class EMCVnxCliBase(object):
properties['auth_username'] = auth_username properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret properties['auth_password'] = auth_secret
else: else:
LOG.error(_('Failed to find an available iSCSI targets for %s.'), LOG.error(_LE('Failed to find an available '
'iSCSI targets for %s.'),
storage_group) storage_group)
return properties return properties

View File

@ -125,8 +125,8 @@ class XtremIOVolumeDriver(san.SanDriver):
try: try:
return json.loads(str_result) return json.loads(str_result)
except Exception: except Exception:
LOG.exception(_('querying %(typ)s, %(req)s failed to ' LOG.exception(_LE('querying %(typ)s, %(req)s failed to '
'parse result, return value = %(res)s'), 'parse result, return value = %(res)s'),
{'typ': object_type, {'typ': object_type,
'req': request_typ, 'req': request_typ,
'res': str_result}) 'res': str_result})

View File

@ -30,7 +30,7 @@ from oslo.utils import units
import six import six
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE, _LW from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder.volume import volume_types from cinder.volume import volume_types
@ -157,7 +157,7 @@ class FJDXCommon(object):
volumesize = int(volume['size']) * units.Gi volumesize = int(volume['size']) * units.Gi
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Create Volume: %(volume)s Size: %(size)lu') LOG.info(_LI('Create Volume: %(volume)s Size: %(size)lu')
% {'volume': volumename, % {'volume': volumename,
'size': volumesize}) 'size': volumesize})
@ -287,8 +287,8 @@ class FJDXCommon(object):
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
vol_instance = None vol_instance = None
LOG.info(_('Create Volume from Snapshot: Volume: %(volumename)s ' LOG.info(_LI('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s') 'Snapshot: %(snapshotname)s')
% {'volumename': volumename, % {'volumename': volumename,
'snapshotname': snapshotname}) 'snapshotname': snapshotname})
@ -396,8 +396,8 @@ class FJDXCommon(object):
srcname = self._create_volume_name(src_vref['id']) srcname = self._create_volume_name(src_vref['id'])
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Create a Clone from Volume: Volume: %(volumename)s ' LOG.info(_LI('Create a Clone from Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s') 'Source Volume: %(srcname)s')
% {'volumename': volumename, % {'volumename': volumename,
'srcname': srcname}) 'srcname': srcname})
@ -500,7 +500,7 @@ class FJDXCommon(object):
"""Deletes an volume.""" """Deletes an volume."""
LOG.debug('Entering delete_volume.') LOG.debug('Entering delete_volume.')
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Delete Volume: %(volume)s') LOG.info(_LI('Delete Volume: %(volume)s')
% {'volume': volumename}) % {'volume': volumename})
self.conn = self._get_ecom_connection() self.conn = self._get_ecom_connection()
@ -574,7 +574,7 @@ class FJDXCommon(object):
snapshotname = self._create_volume_name(snapshot['id']) snapshotname = self._create_volume_name(snapshot['id'])
volumename = snapshot['volume_name'] volumename = snapshot['volume_name']
LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s') LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
% {'snapshot': snapshotname, % {'snapshot': snapshotname,
'volume': volumename}) 'volume': volumename})
@ -702,7 +702,7 @@ class FJDXCommon(object):
snapshotname = snapshot['name'] snapshotname = snapshot['name']
volumename = snapshot['volume_name'] volumename = snapshot['volume_name']
LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s') LOG.info(_LI('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
% {'snapshot': snapshotname, % {'snapshot': snapshotname,
'volume': volumename}) 'volume': volumename})
@ -783,8 +783,8 @@ class FJDXCommon(object):
sync_name, storage_system =\ sync_name, storage_system =\
self._find_storage_sync_sv_sv(snapshot, volume, False) self._find_storage_sync_sv_sv(snapshot, volume, False)
if sync_name is None: if sync_name is None:
LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. ' LOG.info(_LI('Snapshot: %(snapshot)s: volume: %(volume)s. '
'Snapshot is deleted.') 'Snapshot is deleted.')
% {'snapshot': snapshotname, % {'snapshot': snapshotname,
'volume': volumename}) 'volume': volumename})
raise loopingcall.LoopingCallDone() raise loopingcall.LoopingCallDone()
@ -797,8 +797,8 @@ class FJDXCommon(object):
except Exception as ex: except Exception as ex:
if ex.args[0] == 6: if ex.args[0] == 6:
# 6 means object not found, so snapshot is deleted cleanly # 6 means object not found, so snapshot is deleted cleanly
LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. ' LOG.info(_LI('Snapshot: %(snapshot)s: volume: %(volume)s. '
'Snapshot is deleted.') 'Snapshot is deleted.')
% {'snapshot': snapshotname, % {'snapshot': snapshotname,
'volume': volumename}) 'volume': volumename})
else: else:
@ -931,7 +931,7 @@ class FJDXCommon(object):
def _map_lun(self, volume, connector): def _map_lun(self, volume, connector):
"""Maps a volume to the host.""" """Maps a volume to the host."""
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Map volume: %(volume)s') LOG.info(_LI('Map volume: %(volume)s')
% {'volume': volumename}) % {'volume': volumename})
vol_instance = self._find_lun(volume) vol_instance = self._find_lun(volume)
@ -950,13 +950,13 @@ class FJDXCommon(object):
def _unmap_lun(self, volume, connector): def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host.""" """Unmaps a volume from the host."""
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Unmap volume: %(volume)s') LOG.info(_LI('Unmap volume: %(volume)s')
% {'volume': volumename}) % {'volume': volumename})
device_info = self.find_device_number(volume, connector) device_info = self.find_device_number(volume, connector)
device_number = device_info['hostlunid'] device_number = device_info['hostlunid']
if device_number is None: if device_number is None:
LOG.info(_("Volume %s is not mapped. No volume to unmap.") LOG.info(_LI("Volume %s is not mapped. No volume to unmap.")
% (volumename)) % (volumename))
return return
@ -975,13 +975,13 @@ class FJDXCommon(object):
def initialize_connection(self, volume, connector): def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.""" """Initializes the connection and returns connection info."""
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Initialize connection: %(volume)s') LOG.info(_LI('Initialize connection: %(volume)s')
% {'volume': volumename}) % {'volume': volumename})
self.conn = self._get_ecom_connection() self.conn = self._get_ecom_connection()
device_info = self.find_device_number(volume, connector) device_info = self.find_device_number(volume, connector)
device_number = device_info['hostlunid'] device_number = device_info['hostlunid']
if device_number is not None: if device_number is not None:
LOG.info(_("Volume %s is already mapped.") LOG.info(_LI("Volume %s is already mapped.")
% (volumename)) % (volumename))
else: else:
self._map_lun(volume, connector) self._map_lun(volume, connector)
@ -993,7 +993,7 @@ class FJDXCommon(object):
def terminate_connection(self, volume, connector): def terminate_connection(self, volume, connector):
"""Disallow connection from connector.""" """Disallow connection from connector."""
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Terminate connection: %(volume)s') LOG.info(_LI('Terminate connection: %(volume)s')
% {'volume': volumename}) % {'volume': volumename})
self.conn = self._get_ecom_connection() self.conn = self._get_ecom_connection()
self._unmap_lun(volume, connector) self._unmap_lun(volume, connector)
@ -1010,7 +1010,7 @@ class FJDXCommon(object):
volumesize = int(new_size) * units.Gi volumesize = int(new_size) * units.Gi
volumename = self._create_volume_name(volume['id']) volumename = self._create_volume_name(volume['id'])
LOG.info(_('Extend Volume: %(volume)s New size: %(size)lu') LOG.info(_LI('Extend Volume: %(volume)s New size: %(size)lu')
% {'volume': volumename, % {'volume': volumename,
'size': volumesize}) 'size': volumesize})
@ -1353,8 +1353,9 @@ class FJDXCommon(object):
snapshot_instance = self._find_lun(snapshot) snapshot_instance = self._find_lun(snapshot)
volume_instance = self._find_lun(volume) volume_instance = self._find_lun(volume)
if snapshot_instance is None or volume_instance is None: if snapshot_instance is None or volume_instance is None:
LOG.info(_('Snapshot Volume %(snapshotname)s, ' LOG.info(_LI('Snapshot Volume %(snapshotname)s, '
'Source Volume %(volumename)s not found on the array.') 'Source Volume %(volumename)s not '
'found on the array.')
% {'snapshotname': snapshotname, % {'snapshotname': snapshotname,
'volumename': volumename}) 'volumename': volumename})
return None, None return None, None
@ -1415,8 +1416,8 @@ class FJDXCommon(object):
if self._is_job_finished(conn, job): if self._is_job_finished(conn, job):
raise loopingcall.LoopingCallDone() raise loopingcall.LoopingCallDone()
if self.retries > JOB_RETRIES: if self.retries > JOB_RETRIES:
LOG.error(_("_wait_for_job_complete failed after %(retries)d " LOG.error(_LE("_wait_for_job_complete failed after %(retries)d"
"tries") % {'retries': self.retries}) " tries") % {'retries': self.retries})
raise loopingcall.LoopingCallDone() raise loopingcall.LoopingCallDone()
try: try:
self.retries += 1 self.retries += 1
@ -1424,7 +1425,7 @@ class FJDXCommon(object):
if self._is_job_finished(conn, job): if self._is_job_finished(conn, job):
self.wait_for_job_called = True self.wait_for_job_called = True
except Exception as e: except Exception as e:
LOG.error(_("Exception: %s") % six.text_type(e)) LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for job.")) exceptionMessage = (_("Issue encountered waiting for job."))
LOG.error(exceptionMessage) LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage) raise exception.VolumeBackendAPIException(exceptionMessage)
@ -1479,7 +1480,7 @@ class FJDXCommon(object):
if self._is_sync_complete(conn, syncName): if self._is_sync_complete(conn, syncName):
raise loopingcall.LoopingCallDone() raise loopingcall.LoopingCallDone()
if self.retries > JOB_RETRIES: if self.retries > JOB_RETRIES:
LOG.error(_("_wait_for_sync failed after %(retries)d tries") LOG.error(_LE("_wait_for_sync failed after %(retries)d tries")
% {'retries': self.retries}) % {'retries': self.retries})
raise loopingcall.LoopingCallDone() raise loopingcall.LoopingCallDone()
try: try:
@ -1488,7 +1489,7 @@ class FJDXCommon(object):
if self._is_sync_complete(conn, syncName): if self._is_sync_complete(conn, syncName):
self.wait_for_sync_called = True self.wait_for_sync_called = True
except Exception as e: except Exception as e:
LOG.error(_("Exception: %s") % six.text_type(e)) LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for " exceptionMessage = (_("Issue encountered waiting for "
"synchronization.")) "synchronization."))
LOG.error(exceptionMessage) LOG.error(exceptionMessage)
@ -1668,8 +1669,8 @@ class FJDXCommon(object):
break break
if out_num_device_number is None: if out_num_device_number is None:
LOG.info(_("Device number not found for volume " LOG.info(_LI("Device number not found for volume "
"%(volumename)s %(vol_instance)s.") "%(volumename)s %(vol_instance)s.")
% {'volumename': volumename, % {'volumename': volumename,
'vol_instance': vol_instance.path}) 'vol_instance': vol_instance.path})
else: else:

View File

@ -22,7 +22,7 @@ import six
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume import driver from cinder.volume import driver
from cinder.volume.drivers import fujitsu_eternus_dx_common from cinder.volume.drivers import fujitsu_eternus_dx_common
@ -147,7 +147,7 @@ class FJDXISCSIDriver(driver.ISCSIDriver):
def _do_iscsi_discovery(self, volume): def _do_iscsi_discovery(self, volume):
LOG.warn(_("ISCSI provider_location not stored, using discovery")) LOG.warn(_LW("ISCSI provider_location not stored, using discovery"))
(out, _err) = self._execute('iscsiadm', '-m', 'discovery', (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p', '-t', 'sendtargets', '-p',

View File

@ -28,7 +28,7 @@ from oslo.utils import units
import requests import requests
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder.volume.drivers.san.san import SanISCSIDriver from cinder.volume.drivers.san.san import SanISCSIDriver
@ -251,8 +251,8 @@ class FIOioControlDriver(SanISCSIDriver):
if i.key == 'fio-qos' and i.value in valid_presets] if i.key == 'fio-qos' and i.value in valid_presets]
if len(presets) > 0: if len(presets) > 0:
if len(presets) > 1: if len(presets) > 1:
LOG.warning(_('More than one valid preset was ' LOG.warning(_LW('More than one valid preset was '
'detected, using %s') % presets[0]) 'detected, using %s') % presets[0])
return self.fio_qos_dict[presets[0]] return self.fio_qos_dict[presets[0]]
def _set_qos_by_volume_type(self, type_id): def _set_qos_by_volume_type(self, type_id):

View File

@ -27,7 +27,7 @@ import six
from cinder.db.sqlalchemy import api from cinder.db.sqlalchemy import api
from cinder.db.sqlalchemy import models from cinder.db.sqlalchemy import models
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
@ -389,14 +389,14 @@ class HBSDCommon(object):
try: try:
self.command.restart_pair_horcm() self.command.restart_pair_horcm()
except Exception as e: except Exception as e:
LOG.warning(_('Failed to restart horcm: %s') % LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(e)) six.text_type(e))
else: else:
if (all_split or is_vvol) and restart: if (all_split or is_vvol) and restart:
try: try:
self.command.restart_pair_horcm() self.command.restart_pair_horcm()
except Exception as e: except Exception as e:
LOG.warning(_('Failed to restart horcm: %s') % LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(e)) six.text_type(e))
def copy_async_data(self, pvol, svol, is_vvol): def copy_async_data(self, pvol, svol, is_vvol):

View File

@ -25,7 +25,7 @@ from oslo.utils import excutils
import six import six
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
import cinder.volume.driver import cinder.volume.driver
@ -181,7 +181,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
try: try:
self._fill_group(hgs, port, host_grp_name, wwns_copy) self._fill_group(hgs, port, host_grp_name, wwns_copy)
except Exception as ex: except Exception as ex:
LOG.warning(_('Failed to add host group: %s') % LOG.warning(_LW('Failed to add host group: %s') %
six.text_type(ex)) six.text_type(ex))
msg = basic_lib.set_msg( msg = basic_lib.set_msg(
308, port=port, name=host_grp_name) 308, port=port, name=host_grp_name)

View File

@ -26,7 +26,7 @@ from oslo.utils import excutils
import six import six
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder import utils from cinder import utils
@ -894,7 +894,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
try: try:
self.comm_modify_ldev(ldev) self.comm_modify_ldev(ldev)
except Exception as e: except Exception as e:
LOG.warning(_('Failed to discard zero page: %s') % LOG.warning(_LW('Failed to discard zero page: %s') %
six.text_type(e)) six.text_type(e))
@storage_synchronized @storage_synchronized
@ -1393,7 +1393,7 @@ HORCM_CMD
[basic_lib.PSUS], timeout, [basic_lib.PSUS], timeout,
interval, check_svol=True) interval, check_svol=True)
except Exception as ex: except Exception as ex:
LOG.warning(_('Failed to create pair: %s') % LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex)) six.text_type(ex))
try: try:
@ -1403,7 +1403,7 @@ HORCM_CMD
[basic_lib.SMPL], timeout, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval) self.conf.hitachi_async_copy_check_interval)
except Exception as ex: except Exception as ex:
LOG.warning(_('Failed to create pair: %s') % LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex)) six.text_type(ex))
if self.is_smpl(copy_group, ldev_name): if self.is_smpl(copy_group, ldev_name):
@ -1411,14 +1411,14 @@ HORCM_CMD
self.delete_pair_config(pvol, svol, copy_group, self.delete_pair_config(pvol, svol, copy_group,
ldev_name) ldev_name)
except Exception as ex: except Exception as ex:
LOG.warning(_('Failed to create pair: %s') % LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex)) six.text_type(ex))
if restart: if restart:
try: try:
self.restart_pair_horcm() self.restart_pair_horcm()
except Exception as ex: except Exception as ex:
LOG.warning(_('Failed to restart horcm: %s') % LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(ex)) six.text_type(ex))
else: else:
@ -1437,7 +1437,7 @@ HORCM_CMD
pvol, svol, [basic_lib.SMPL], timeout, pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval) self.conf.hitachi_async_copy_check_interval)
except Exception as ex: except Exception as ex:
LOG.warning(_('Failed to create pair: %s') % LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex)) six.text_type(ex))
def delete_pair(self, pvol, svol, is_vvol): def delete_pair(self, pvol, svol, is_vvol):

View File

@ -21,7 +21,7 @@ import time
import six import six
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder import utils from cinder import utils
@ -126,8 +126,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
used_list.append(int(line[2])) used_list.append(int(line[2]))
if int(line[3]) == ldev: if int(line[3]) == ldev:
hlu = int(line[2]) hlu = int(line[2])
LOG.warning(_('ldev(%(ldev)d) is already mapped ' LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
'(hlun: %(hlu)d)') '(hlun: %(hlu)d)')
% {'ldev': ldev, 'hlu': hlu}) % {'ldev': ldev, 'hlu': hlu})
return hlu return hlu
return None return None

View File

@ -21,7 +21,7 @@ import re
import time import time
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume import driver from cinder.volume import driver
from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import huawei_utils
@ -350,8 +350,8 @@ class HuaweiTISCSIDriver(driver.ISCSIDriver):
port_num -= 1 port_num -= 1
break break
else: else:
LOG.warn(_('_remove_iscsi_port: iSCSI port was not found ' LOG.warn(_LW('_remove_iscsi_port: iSCSI port was not found '
'on host %(hostid)s.') % {'hostid': hostid}) 'on host %(hostid)s.') % {'hostid': hostid})
# Delete host if no initiator added to it. # Delete host if no initiator added to it.
if port_num == 0: if port_num == 0:
@ -579,8 +579,8 @@ class HuaweiTFCDriver(driver.FibreChannelDriver):
self.common._delete_hostport(port[0]) self.common._delete_hostport(port[0])
port_num -= 1 port_num -= 1
else: else:
LOG.warn(_('_remove_fc_ports: FC port was not found ' LOG.warn(_LW('_remove_fc_ports: FC port was not found '
'on host %(hostid)s.') % {'hostid': hostid}) 'on host %(hostid)s.') % {'hostid': hostid})
if port_num == 0: if port_num == 0:
self.common._delete_host(hostid) self.common._delete_host(hostid)

View File

@ -28,7 +28,7 @@ from oslo.utils import units
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import huawei_utils
@ -206,7 +206,7 @@ class HVSCommon():
if policy_id: if policy_id:
self._update_qos_policy_lunlist(lun_list, policy_id) self._update_qos_policy_lunlist(lun_list, policy_id)
else: else:
LOG.warn(_("Can't find the Qos policy in array")) LOG.warn(_LW("Can't find the Qos policy in array"))
# Create lun group and add LUN into to lun group # Create lun group and add LUN into to lun group
lungroup_id = self._create_lungroup(volume_name) lungroup_id = self._create_lungroup(volume_name)
@ -244,7 +244,7 @@ class HVSCommon():
self._delete_lungroup(lungroup_id) self._delete_lungroup(lungroup_id)
self._delete_lun(lun_id) self._delete_lun(lun_id)
else: else:
LOG.warn(_("Can't find lun or lun group in array")) LOG.warn(_LW("Can't find lun or lun group in array"))
def _delete_lun_from_qos_policy(self, volume, lun_id): def _delete_lun_from_qos_policy(self, volume, lun_id):
"""Remove lun from qos policy.""" """Remove lun from qos policy."""
@ -1155,10 +1155,11 @@ class HVSCommon():
params[key] = value.strip() params[key] = value.strip()
else: else:
conf = self.configuration.cinder_huawei_conf_file conf = self.configuration.cinder_huawei_conf_file
LOG.warn(_('_parse_volume_type: Unacceptable parameter ' LOG.warn(_LW('_parse_volume_type: Unacceptable parameter '
'%(key)s. Please check this key in extra_specs ' '%(key)s. Please check this key in '
'and make it consistent with the configuration ' 'extra_specs and make it consistent with the '
'file %(conf)s.') % {'key': key, 'conf': conf}) 'configuration file '
'%(conf)s.') % {'key': key, 'conf': conf})
LOG.debug("The config parameters are: %s" % params) LOG.debug("The config parameters are: %s" % params)
return params return params
@ -1223,7 +1224,7 @@ class HVSCommon():
try: try:
tree.write(filename, 'UTF-8') tree.write(filename, 'UTF-8')
except Exception as err: except Exception as err:
LOG.warn(_('%s') % err) LOG.warn(_LW('%s') % err)
return logininfo return logininfo
@ -1298,4 +1299,4 @@ class HVSCommon():
result = self.call(url, data, "PUT") result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Extend lun error.') self._assert_rest_result(result, 'Extend lun error.')
else: else:
LOG.warn(_('Can not find lun in array')) LOG.warn(_LW('Can not find lun in array'))

View File

@ -30,7 +30,7 @@ from oslo.utils import excutils
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE, _LI from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import ssh_utils from cinder import ssh_utils
from cinder import utils from cinder import utils
@ -278,10 +278,11 @@ class TseriesCommon():
params[key] = value.strip() params[key] = value.strip()
else: else:
conf = self.configuration.cinder_huawei_conf_file conf = self.configuration.cinder_huawei_conf_file
LOG.warn(_('_parse_volume_type: Unacceptable parameter ' LOG.warn(_LW('_parse_volume_type: Unacceptable parameter '
'%(key)s. Please check this key in extra_specs ' '%(key)s. Please check this key in '
'and make it consistent with the element in ' 'extra_specs '
'configuration file %(conf)s.') 'and make it consistent with the element in '
'configuration file %(conf)s.')
% {'key': key, % {'key': key,
'conf': conf}) 'conf': conf})
@ -1118,9 +1119,9 @@ class TseriesCommon():
if map_id is not None: if map_id is not None:
self._delete_map(map_id) self._delete_map(map_id)
else: else:
LOG.warn(_('remove_map: No map between host %(host)s and ' LOG.warn(_LW('remove_map: No map between host %(host)s and '
'volume %(volume)s.') % {'host': host_name, 'volume %(volume)s.') % {'host': host_name,
'volume': volume_id}) 'volume': volume_id})
return host_id return host_id
def _delete_map(self, mapid, attempts=2): def _delete_map(self, mapid, attempts=2):

View File

@ -801,8 +801,8 @@ class GPFSDriver(driver.VolumeDriver):
try: try:
image_utils.resize_image(vol_path, new_size, run_as_root=True) image_utils.resize_image(vol_path, new_size, run_as_root=True)
except processutils.ProcessExecutionError as exc: except processutils.ProcessExecutionError as exc:
LOG.error(_("Failed to resize volume " LOG.error(_LE("Failed to resize volume "
"%(volume_id)s, error: %(error)s.") % "%(volume_id)s, error: %(error)s.") %
{'volume_id': volume['id'], {'volume_id': volume['id'],
'error': exc.stderr}) 'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr)
@ -875,9 +875,9 @@ class GPFSDriver(driver.VolumeDriver):
self._execute('mv', local_path, new_path, run_as_root=True) self._execute('mv', local_path, new_path, run_as_root=True)
return (True, None) return (True, None)
except processutils.ProcessExecutionError as exc: except processutils.ProcessExecutionError as exc:
LOG.error(_('Driver-based migration of volume %(vol)s failed. ' LOG.error(_LE('Driver-based migration of volume %(vol)s failed. '
'Move from %(src)s to %(dst)s failed with error: ' 'Move from %(src)s to %(dst)s failed with error: '
'%(error)s.') % '%(error)s.') %
{'vol': volume['name'], {'vol': volume['name'],
'src': local_path, 'src': local_path,
'dst': new_path, 'dst': new_path,

View File

@ -43,7 +43,7 @@ from oslo.utils import units
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder import utils from cinder import utils
@ -366,8 +366,8 @@ class StorwizeSVCDriver(san.SanDriver):
if chap_enabled and chap_secret is None: if chap_enabled and chap_secret is None:
chap_secret = self._helpers.add_chap_secret_to_host(host_name) chap_secret = self._helpers.add_chap_secret_to_host(host_name)
elif not chap_enabled and chap_secret: elif not chap_enabled and chap_secret:
LOG.warning(_('CHAP secret exists for host but CHAP is ' LOG.warning(_LW('CHAP secret exists for host but CHAP is '
'disabled')) 'disabled'))
volume_attributes = self._helpers.get_vdisk_attributes(volume_name) volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
if volume_attributes is None: if volume_attributes is None:
@ -411,8 +411,8 @@ class StorwizeSVCDriver(san.SanDriver):
if not preferred_node_entry and not vol_opts['multipath']: if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group # Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0] preferred_node_entry = io_group_nodes[0]
LOG.warn(_('initialize_connection: Did not find a preferred ' LOG.warn(_LW('initialize_connection: Did not find a preferred '
'node for volume %s') % volume_name) 'node for volume %s') % volume_name)
properties = {} properties = {}
properties['target_discovered'] = False properties['target_discovered'] = False
@ -462,10 +462,10 @@ class StorwizeSVCDriver(san.SanDriver):
properties['target_wwn'] = WWPN properties['target_wwn'] = WWPN
break break
else: else:
LOG.warning(_('Unable to find a preferred node match ' LOG.warning(_LW('Unable to find a preferred node match'
'for node %(node)s in the list of ' ' for node %(node)s in the list of '
'available WWPNs on %(host)s. ' 'available WWPNs on %(host)s. '
'Using first available.') % 'Using first available.') %
{'node': preferred_node, {'node': preferred_node,
'host': host_name}) 'host': host_name})
properties['target_wwn'] = conn_wwpns[0] properties['target_wwn'] = conn_wwpns[0]
@ -767,7 +767,7 @@ class StorwizeSVCDriver(san.SanDriver):
try: try:
volume = self.db.volume_get(ctxt, vol_id) volume = self.db.volume_get(ctxt, vol_id)
except Exception: except Exception:
LOG.warn(_('Volume %s does not exist.'), vol_id) LOG.warn(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id] del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops): if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop.stop()
@ -1028,7 +1028,7 @@ class StorwizeSVCDriver(san.SanDriver):
attributes = self._helpers.get_pool_attrs(pool) attributes = self._helpers.get_pool_attrs(pool)
if not attributes: if not attributes:
LOG.error(_('Could not get pool data from the storage')) LOG.error(_LE('Could not get pool data from the storage'))
exception_message = (_('_update_volume_stats: ' exception_message = (_('_update_volume_stats: '
'Could not get storage pool data')) 'Could not get storage pool data'))
raise exception.VolumeBackendAPIException(data=exception_message) raise exception.VolumeBackendAPIException(data=exception_message)

View File

@ -25,7 +25,7 @@ import six
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh
@ -152,7 +152,7 @@ class StorwizeHelpers(object):
if 'unconfigured' != s: if 'unconfigured' != s:
wwpns.add(i) wwpns.add(i)
node['WWPN'] = list(wwpns) node['WWPN'] = list(wwpns)
LOG.info(_('WWPN on node %(node)s: %(wwpn)s') LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
% {'node': node['id'], 'wwpn': node['WWPN']}) % {'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name): def add_chap_secret_to_host(self, host_name):
@ -341,15 +341,15 @@ class StorwizeHelpers(object):
# Check if the mapping exists # Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name) resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp): if not len(resp):
LOG.warning(_('unmap_vol_from_host: No mapping of volume ' LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to any host found.') % '%(vol_name)s to any host found.') %
{'vol_name': volume_name}) {'vol_name': volume_name})
return return
if host_name is None: if host_name is None:
if len(resp) > 1: if len(resp) > 1:
LOG.warning(_('unmap_vol_from_host: Multiple mappings of ' LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host ' 'volume %(vol_name)s found, no host '
'specified.') % {'vol_name': volume_name}) 'specified.') % {'vol_name': volume_name})
return return
else: else:
host_name = resp[0]['host_name'] host_name = resp[0]['host_name']
@ -359,8 +359,8 @@ class StorwizeHelpers(object):
if h == host_name: if h == host_name:
found = True found = True
if not found: if not found:
LOG.warning(_('unmap_vol_from_host: No mapping of volume ' LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to host %(host)s found.') % '%(vol_name)s to host %(host)s found.') %
{'vol_name': volume_name, 'host': host_name}) {'vol_name': volume_name, 'host': host_name})
# We now know that the mapping exists # We now know that the mapping exists
@ -797,7 +797,7 @@ class StorwizeHelpers(object):
"""Ensures that vdisk is not part of FC mapping and deletes it.""" """Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk) LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
if not self.is_vdisk_defined(vdisk): if not self.is_vdisk_defined(vdisk):
LOG.info(_('Tried to delete non-existant vdisk %s.') % vdisk) LOG.info(_LI('Tried to delete non-existant vdisk %s.') % vdisk)
return return
self.ensure_vdisk_no_fc_mappings(vdisk) self.ensure_vdisk_no_fc_mappings(vdisk)
self.ssh.rmvdisk(vdisk, force=force) self.ssh.rmvdisk(vdisk, force=force)

View File

@ -24,7 +24,7 @@ import six
from cinder.brick.remotefs import remotefs as remotefs_brick from cinder.brick.remotefs import remotefs as remotefs_brick
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils from cinder.image import image_utils
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
@ -165,8 +165,8 @@ class NfsDriver(remotefs.RemoteFSDriver):
if attempt == (num_attempts - 1): if attempt == (num_attempts - 1):
LOG.error(_LE('Mount failure for %(share)s after ' LOG.error(_LE('Mount failure for %(share)s after '
'%(count)d attempts.') % { '%(count)d attempts.') % {
'share': nfs_share, 'share': nfs_share,
'count': num_attempts}) 'count': num_attempts})
raise exception.NfsException(e) raise exception.NfsException(e)
LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' % LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' %
(attempt, six.text_type(e))) (attempt, six.text_type(e)))
@ -278,7 +278,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
def extend_volume(self, volume, new_size): def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size.""" """Extend an existing volume to the new size."""
LOG.info(_('Extending volume %s.'), volume['id']) LOG.info(_LI('Extending volume %s.'), volume['id'])
extend_by = int(new_size) - volume['size'] extend_by = int(new_size) - volume['size']
if not self._is_share_eligible(volume['provider_location'], if not self._is_share_eligible(volume['provider_location'],
extend_by): extend_by):
@ -286,7 +286,7 @@ class NfsDriver(remotefs.RemoteFSDriver):
' extend volume %s to %sG' ' extend volume %s to %sG'
% (volume['id'], new_size)) % (volume['id'], new_size))
path = self.local_path(volume) path = self.local_path(volume)
LOG.info(_('Resizing file to %sG...'), new_size) LOG.info(_LI('Resizing file to %sG...'), new_size)
image_utils.resize_image(path, new_size, image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root) run_as_root=self._execute_as_root)
if not self._is_file_size_equal(path, new_size): if not self._is_file_size_equal(path, new_size):
@ -328,10 +328,11 @@ class NfsDriver(remotefs.RemoteFSDriver):
self.configuration.nas_secure_file_permissions) self.configuration.nas_secure_file_permissions)
if self.configuration.nas_secure_file_permissions == 'false': if self.configuration.nas_secure_file_permissions == 'false':
LOG.warn(_("The NAS file permissions mode will be 666 (allowing " LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered " "other/world read & write access). "
"an insecure NAS environment. Please see %s for " "This is considered an insecure NAS environment. "
"information on a secure NFS configuration.") % "Please see %s for information on a secure "
"NFS configuration.") %
doc_html) doc_html)
self.configuration.nas_secure_file_operations = \ self.configuration.nas_secure_file_operations = \
@ -348,8 +349,9 @@ class NfsDriver(remotefs.RemoteFSDriver):
self.configuration.nas_secure_file_operations) self.configuration.nas_secure_file_operations)
if self.configuration.nas_secure_file_operations == 'false': if self.configuration.nas_secure_file_operations == 'false':
LOG.warn(_("The NAS file operations will be run as root: allowing " LOG.warn(_LW("The NAS file operations will be run as "
"root level access at the storage backend. This is " "root: allowing root level access at the storage "
"considered an insecure NAS environment. Please see %s " "backend. This is considered an insecure NAS "
"for information on a secure NAS configuration.") % "environment. Please see %s "
"for information on a secure NAS configuration.") %
doc_html) doc_html)

View File

@ -449,7 +449,7 @@ class NimbleAPIExecutor:
def login(self): def login(self):
"""Execute Https Login API.""" """Execute Https Login API."""
response = self._execute_login() response = self._execute_login()
LOG.info(_('Successful login by user %s') % self.username) LOG.info(_LI('Successful login by user %s') % self.username)
self.sid = response['authInfo']['sid'] self.sid = response['authInfo']['sid']
@_connection_checker @_connection_checker
@ -573,7 +573,7 @@ class NimbleAPIExecutor:
@_response_checker @_response_checker
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs): def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
"""Execute onlineSnap API.""" """Execute onlineSnap API."""
LOG.info(_('Setting snapshot %(snap)s to online_flag %(flag)s') LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s')
% {'snap': snap_name, 'flag': online_flag}) % {'snap': snap_name, 'flag': online_flag})
return self.client.service.onlineSnap(request={'sid': self.sid, return self.client.service.onlineSnap(request={'sid': self.sid,
'vol': vol_name, 'vol': vol_name,

View File

@ -1422,8 +1422,8 @@ class DPLCOMMONDriver(driver.VolumeDriver):
ret = 0 ret = 0
output = status.get('output', {}) output = status.get('output', {})
else: else:
LOG.error(_('Flexvisor failed to get pool info ' LOG.error(_LE('Flexvisor failed to get pool info '
'(failed to get event)%s.') % (poolid)) '(failed to get event)%s.') % (poolid))
raise exception.VolumeBackendAPIException( raise exception.VolumeBackendAPIException(
data="failed to get event") data="failed to get event")
elif ret != 0: elif ret != 0:

View File

@ -57,7 +57,7 @@ from oslo.utils import units
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder import flow_utils from cinder import flow_utils
from cinder.i18n import _, _LE, _LI from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder.volume import qos_specs from cinder.volume import qos_specs
@ -399,8 +399,8 @@ class HP3PARCommon(object):
'new_type': volume_type.get('name')}) 'new_type': volume_type.get('name')})
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.warning(_("Failed to manage virtual volume %(disp)s " LOG.warning(_LW("Failed to manage virtual volume %(disp)s "
"due to error during retype.") % "due to error during retype.") %
{'disp': display_name}) {'disp': display_name})
# Try to undo the rename and clear the new comment. # Try to undo the rename and clear the new comment.
self.client.modifyVolume( self.client.modifyVolume(

View File

@ -36,7 +36,7 @@ except ImportError:
hpexceptions = None hpexceptions = None
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
import cinder.volume.driver import cinder.volume.driver
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
@ -436,9 +436,10 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
host = common._get_3par_host(hostname) host = common._get_3par_host(hostname)
elif (not host['initiatorChapEnabled'] and elif (not host['initiatorChapEnabled'] and
self.configuration.hp3par_iscsi_chap_enabled): self.configuration.hp3par_iscsi_chap_enabled):
LOG.warn(_("Host exists without CHAP credentials set and has " LOG.warn(_LW("Host exists without CHAP credentials set "
"iSCSI attachments but CHAP is enabled. Updating " "and has iSCSI attachments but CHAP is "
"host with new CHAP credentials.")) "enabled. Updating host with new CHAP "
"credentials."))
self._set_3par_chaps( self._set_3par_chaps(
common, common,
hostname, hostname,
@ -468,11 +469,11 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
host_info = common.client.getHost(chap_username) host_info = common.client.getHost(chap_username)
if not host_info['initiatorChapEnabled']: if not host_info['initiatorChapEnabled']:
LOG.warn(_("Host has no CHAP key, but CHAP is enabled.")) LOG.warn(_LW("Host has no CHAP key, but CHAP is enabled."))
except hpexceptions.HTTPNotFound: except hpexceptions.HTTPNotFound:
chap_password = volume_utils.generate_password(16) chap_password = volume_utils.generate_password(16)
LOG.warn(_("No host or VLUNs exist. Generating new CHAP key.")) LOG.warn(_LW("No host or VLUNs exist. Generating new CHAP key."))
else: else:
# Get a list of all iSCSI VLUNs and see if there is already a CHAP # Get a list of all iSCSI VLUNs and see if there is already a CHAP
# key assigned to one of them. Use that CHAP key if present, # key assigned to one of them. Use that CHAP key if present,
@ -500,12 +501,12 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
"but CHAP is enabled. Skipping." % "but CHAP is enabled. Skipping." %
vlun['remoteName']) vlun['remoteName'])
else: else:
LOG.warn(_("Non-iSCSI VLUN detected.")) LOG.warn(_LW("Non-iSCSI VLUN detected."))
if not chap_exists: if not chap_exists:
chap_password = volume_utils.generate_password(16) chap_password = volume_utils.generate_password(16)
LOG.warn(_("No VLUN contained CHAP credentials. " LOG.warn(_LW("No VLUN contained CHAP credentials. "
"Generating new CHAP key.")) "Generating new CHAP key."))
# Add CHAP credentials to the volume metadata # Add CHAP credentials to the volume metadata
vol_name = common._get_3par_vol_name(volume['id']) vol_name = common._get_3par_vol_name(volume['id'])

View File

@ -32,7 +32,7 @@ hplefthand_password for credentials to talk to the REST service on the
LeftHand array. LeftHand array.
""" """
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
from cinder.volume.driver import VolumeDriver from cinder.volume.driver import VolumeDriver
@ -77,7 +77,8 @@ class HPLeftHandISCSIDriver(VolumeDriver):
self.proxy = self._create_proxy(*self.args, **self.kwargs) self.proxy = self._create_proxy(*self.args, **self.kwargs)
self.proxy.do_setup(context) self.proxy.do_setup(context)
LOG.info(_("HPLeftHand driver %(driver_ver)s, proxy %(proxy_ver)s") % { LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
"proxy %(proxy_ver)s") % {
"driver_ver": self.VERSION, "driver_ver": self.VERSION,
"proxy_ver": self.proxy.get_version_string()}) "proxy_ver": self.proxy.get_version_string()})

View File

@ -20,7 +20,7 @@ from oslo.utils import units
from cinder import context from cinder import context
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume.driver import ISCSIDriver from cinder.volume.driver import ISCSIDriver
from cinder.volume import utils from cinder.volume import utils
@ -377,11 +377,11 @@ class HPLeftHandRESTProxy(ISCSIDriver):
server_info = self.client.getServerByName(connector['host']) server_info = self.client.getServerByName(connector['host'])
chap_secret = server_info['chapTargetSecret'] chap_secret = server_info['chapTargetSecret']
if not chap_enabled and chap_secret: if not chap_enabled and chap_secret:
LOG.warning(_('CHAP secret exists for host %s but CHAP is ' LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
'disabled') % connector['host']) 'disabled') % connector['host'])
if chap_enabled and chap_secret is None: if chap_enabled and chap_secret is None:
LOG.warning(_('CHAP is enabled, but server secret not ' LOG.warning(_LW('CHAP is enabled, but server secret not '
'configured on server %s') % connector['host']) 'configured on server %s') % connector['host'])
return server_info return server_info
except hpexceptions.HTTPNotFound: except hpexceptions.HTTPNotFound:
# server does not exist, so create one # server does not exist, so create one
@ -498,20 +498,20 @@ class HPLeftHandRESTProxy(ISCSIDriver):
virtual_ips = cluster_info['virtualIPAddresses'] virtual_ips = cluster_info['virtualIPAddresses']
if driver != self.__class__.__name__: if driver != self.__class__.__name__:
LOG.info(_("Cannot provide backend assisted migration for " LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because volume is from a different " "volume: %s because volume is from a different "
"backend.") % volume['name']) "backend.") % volume['name'])
return false_ret return false_ret
if vip != virtual_ips[0]['ipV4Address']: if vip != virtual_ips[0]['ipV4Address']:
LOG.info(_("Cannot provide backend assisted migration for " LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different " "volume: %s because cluster exists in different "
"management group.") % volume['name']) "management group.") % volume['name'])
return false_ret return false_ret
except hpexceptions.HTTPNotFound: except hpexceptions.HTTPNotFound:
LOG.info(_("Cannot provide backend assisted migration for " LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different " "volume: %s because cluster exists in different "
"management group.") % volume['name']) "management group.") % volume['name'])
return false_ret return false_ret
try: try:
@ -520,9 +520,9 @@ class HPLeftHandRESTProxy(ISCSIDriver):
# can't migrate if server is attached # can't migrate if server is attached
if volume_info['iscsiSessions'] is not None: if volume_info['iscsiSessions'] is not None:
LOG.info(_("Cannot provide backend assisted migration " LOG.info(_LI("Cannot provide backend assisted migration "
"for volume: %s because the volume has been " "for volume: %s because the volume has been "
"exported.") % volume['name']) "exported.") % volume['name'])
return false_ret return false_ret
# can't migrate if volume has snapshots # can't migrate if volume has snapshots
@ -531,17 +531,17 @@ class HPLeftHandRESTProxy(ISCSIDriver):
'fields=snapshots,snapshots[resource[members[name]]]') 'fields=snapshots,snapshots[resource[members[name]]]')
LOG.debug('Snapshot info: %s' % snap_info) LOG.debug('Snapshot info: %s' % snap_info)
if snap_info['snapshots']['resource'] is not None: if snap_info['snapshots']['resource'] is not None:
LOG.info(_("Cannot provide backend assisted migration " LOG.info(_LI("Cannot provide backend assisted migration "
"for volume: %s because the volume has " "for volume: %s because the volume has "
"snapshots.") % volume['name']) "snapshots.") % volume['name'])
return false_ret return false_ret
options = {'clusterName': cluster} options = {'clusterName': cluster}
self.client.modifyVolume(volume_info['id'], options) self.client.modifyVolume(volume_info['id'], options)
except hpexceptions.HTTPNotFound: except hpexceptions.HTTPNotFound:
LOG.info(_("Cannot provide backend assisted migration for " LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because volume does not exist in this " "volume: %s because volume does not exist in this "
"management group.") % volume['name']) "management group.") % volume['name'])
return false_ret return false_ret
except hpexceptions.HTTPServerError as ex: except hpexceptions.HTTPServerError as ex:
LOG.error(ex) LOG.error(ex)

View File

@ -27,7 +27,7 @@ from oslo.config import cfg
from oslo.utils import excutils from oslo.utils import excutils
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import ssh_utils from cinder import ssh_utils
from cinder import utils from cinder import utils
@ -148,7 +148,7 @@ class SanDriver(driver.VolumeDriver):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Error running SSH command: %s") % command) LOG.error(_LE("Error running SSH command: %s") % command)
def ensure_export(self, context, volume): def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume.""" """Synchronously recreates an export for a logical volume."""

View File

@ -22,7 +22,7 @@ from oslo.utils import units
from cinder.brick.remotefs import remotefs from cinder.brick.remotefs import remotefs
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LI, _LW
from cinder.image import image_utils from cinder.image import image_utils
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import utils from cinder import utils
@ -205,8 +205,8 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
def delete_volume(self, volume): def delete_volume(self, volume):
"""Deletes a logical volume.""" """Deletes a logical volume."""
if not volume['provider_location']: if not volume['provider_location']:
LOG.warn(_('Volume %s does not have provider_location specified, ' LOG.warn(_LW('Volume %s does not have provider_location '
'skipping.'), volume['name']) 'specified, skipping.'), volume['name'])
return return
self._ensure_share_mounted(volume['provider_location']) self._ensure_share_mounted(volume['provider_location'])
@ -227,7 +227,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
pattern = r"qemu-img version ([0-9\.]*)" pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info) version = re.match(pattern, info)
if not version: if not version:
LOG.warn(_("qemu-img is not installed.")) LOG.warn(_LW("qemu-img is not installed."))
return None return None
return [int(x) for x in version.groups()[0].split('.')] return [int(x) for x in version.groups()[0].split('.')]
@ -404,14 +404,14 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
@utils.synchronized('smbfs', external=False) @utils.synchronized('smbfs', external=False)
def extend_volume(self, volume, size_gb): def extend_volume(self, volume, size_gb):
LOG.info(_('Extending volume %s.'), volume['id']) LOG.info(_LI('Extending volume %s.'), volume['id'])
self._extend_volume(volume, size_gb) self._extend_volume(volume, size_gb)
def _extend_volume(self, volume, size_gb): def _extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume) volume_path = self.local_path(volume)
self._check_extend_volume_support(volume, size_gb) self._check_extend_volume_support(volume, size_gb)
LOG.info(_('Resizing file to %sG...') % size_gb) LOG.info(_LI('Resizing file to %sG...') % size_gb)
self._do_extend_volume(volume_path, size_gb, volume['name']) self._do_extend_volume(volume_path, size_gb, volume['name'])

View File

@ -18,7 +18,7 @@ Session and API call management for VMware ESX/VC server.
Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls. Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls.
""" """
from cinder.i18n import _ from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall from cinder.openstack.common import loopingcall
from cinder.volume.drivers.vmware import error_util from cinder.volume.drivers.vmware import error_util
@ -69,8 +69,8 @@ class Retry(object):
try: try:
result = f(*args, **kwargs) result = f(*args, **kwargs)
except self._exceptions as excep: except self._exceptions as excep:
LOG.exception(_("Failure while invoking function: " LOG.exception(_LE("Failure while invoking function: "
"%(func)s. Error: %(excep)s.") % "%(func)s. Error: %(excep)s.") %
{'func': f.__name__, 'excep': excep}) {'func': f.__name__, 'excep': excep})
if (self._max_retry_count != -1 and if (self._max_retry_count != -1 and
self._retry_count >= self._max_retry_count): self._retry_count >= self._max_retry_count):
@ -167,7 +167,7 @@ class VMwareAPISession(object):
# have been cleared. We could have made a call to # have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we # SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession. # anyway would have to call TerminateSession.
LOG.exception(_("Error while terminating session: %s.") % LOG.exception(_LE("Error while terminating session: %s.") %
excep) excep)
self._session_id = session.key self._session_id = session.key
@ -180,21 +180,21 @@ class VMwareAPISession(object):
if self.pbm: if self.pbm:
self.pbm.set_cookie() self.pbm.set_cookie()
LOG.info(_("Successfully established connection to the server.")) LOG.info(_LI("Successfully established connection to the server."))
def __del__(self): def __del__(self):
"""Logs-out the sessions.""" """Logs-out the sessions."""
try: try:
self.vim.Logout(self.vim.service_content.sessionManager) self.vim.Logout(self.vim.service_content.sessionManager)
except Exception as excep: except Exception as excep:
LOG.exception(_("Error while logging out from vim session: %s."), LOG.exception(_LE("Error while logging out from vim session: %s."),
excep) excep)
if self._pbm: if self._pbm:
try: try:
self.pbm.Logout(self.pbm.service_content.sessionManager) self.pbm.Logout(self.pbm.service_content.sessionManager)
except Exception as excep: except Exception as excep:
LOG.exception(_("Error while logging out from pbm session: " LOG.exception(_LE("Error while logging out from pbm session: "
"%s."), excep) "%s."), excep)
def invoke_api(self, module, method, *args, **kwargs): def invoke_api(self, module, method, *args, **kwargs):
"""Wrapper method for invoking APIs. """Wrapper method for invoking APIs.
@ -242,9 +242,9 @@ class VMwareAPISession(object):
return [] return []
# empty response is due to an inactive session # empty response is due to an inactive session
LOG.warn(_("Current session: %(session)s is inactive; " LOG.warn(_LW("Current session: %(session)s is inactive; "
"re-creating the session while invoking " "re-creating the session while invoking "
"method %(module)s.%(method)s."), "method %(module)s.%(method)s."),
{'session': self._session_id, {'session': self._session_id,
'module': module, 'module': module,
'method': method}, 'method': method},
@ -268,8 +268,8 @@ class VMwareAPISession(object):
sessionID=self._session_id, sessionID=self._session_id,
userName=self._session_username) userName=self._session_username)
except error_util.VimException: except error_util.VimException:
LOG.warn(_("Error occurred while checking whether the " LOG.warn(_LW("Error occurred while checking whether the "
"current session: %s is active."), "current session: %s is active."),
self._session_id, self._session_id,
exc_info=True) exc_info=True)
@ -310,11 +310,13 @@ class VMwareAPISession(object):
LOG.debug("Task %s status: success." % task) LOG.debug("Task %s status: success." % task)
else: else:
error_msg = str(task_info.error.localizedMessage) error_msg = str(task_info.error.localizedMessage)
LOG.exception(_("Task: %(task)s failed with error: %(err)s.") % LOG.exception(_LE("Task: %(task)s failed with "
"error: %(err)s.") %
{'task': task, 'err': error_msg}) {'task': task, 'err': error_msg})
raise error_util.VimFaultException([], error_msg) raise error_util.VimFaultException([], error_msg)
except Exception as excep: except Exception as excep:
LOG.exception(_("Task: %(task)s failed with error: %(err)s.") % LOG.exception(_LE("Task: %(task)s failed with "
"error: %(err)s.") %
{'task': task, 'err': excep}) {'task': task, 'err': excep})
raise excep raise excep
# got the result. So stop the loop. # got the result. So stop the loop.

View File

@ -32,7 +32,7 @@ from oslo.utils import excutils
from oslo.utils import units from oslo.utils import units
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import fileutils from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils from cinder.openstack.common import uuidutils
@ -194,9 +194,10 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
VERSION = '1.4.0' VERSION = '1.4.0'
def _do_deprecation_warning(self): def _do_deprecation_warning(self):
LOG.warn(_('The VMware ESX VMDK driver is now deprecated and will be ' LOG.warn(_LW('The VMware ESX VMDK driver is now deprecated '
'removed in the Juno release. The VMware vCenter VMDK ' 'and will be removed in the Juno release. The VMware '
'driver will remain and continue to be supported.')) 'vCenter VMDK driver will remain and continue to be '
'supported.'))
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs) super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
@ -262,8 +263,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
max_objects = self.configuration.vmware_max_objects_retrieval max_objects = self.configuration.vmware_max_objects_retrieval
self._volumeops = volumeops.VMwareVolumeOps(self.session, self._volumeops = volumeops.VMwareVolumeOps(self.session,
max_objects) max_objects)
LOG.info(_("Successfully setup driver: %(driver)s for " LOG.info(_LI("Successfully setup driver: %(driver)s for "
"server: %(ip)s.") % "server: %(ip)s.") %
{'driver': driver, {'driver': driver,
'ip': self.configuration.vmware_host_ip}) 'ip': self.configuration.vmware_host_ip})
@ -327,7 +328,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
""" """
backing = self.volumeops.get_backing(volume['name']) backing = self.volumeops.get_backing(volume['name'])
if not backing: if not backing:
LOG.info(_("Backing not available, no operation to be performed.")) LOG.info(_LI("Backing not available, no operation "
"to be performed."))
return return
self.volumeops.delete_backing(backing) self.volumeops.delete_backing(backing)
@ -467,9 +469,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
LOG.error(msg, storage_profile) LOG.error(msg, storage_profile)
raise error_util.VimException(msg % storage_profile) raise error_util.VimException(msg % storage_profile)
elif storage_profile: elif storage_profile:
LOG.warn(_("Ignoring storage profile %s requirement for this " LOG.warn(_LW("Ignoring storage profile %s requirement for this "
"volume since policy based placement is " "volume since policy based placement is "
"disabled."), storage_profile) "disabled."), storage_profile)
size_bytes = volume['size'] * units.Gi size_bytes = volume['size'] * units.Gi
datastore_summary = self._select_datastore_summary(size_bytes, datastore_summary = self._select_datastore_summary(size_bytes,
@ -583,9 +585,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
selected_host = host selected_host = host
break break
except error_util.VimException as excep: except error_util.VimException as excep:
LOG.warn(_("Unable to find suitable datastore for volume " LOG.warn(_LW("Unable to find suitable datastore for volume"
"of size: %(vol)s GB under host: %(host)s. " " of size: %(vol)s GB under host: %(host)s. "
"More details: %(excep)s") % "More details: %(excep)s") %
{'vol': volume['size'], {'vol': volume['size'],
'host': host, 'excep': excep}) 'host': host, 'excep': excep})
if selected_host: if selected_host:
@ -624,9 +626,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
if backing: if backing:
break break
except error_util.VimException as excep: except error_util.VimException as excep:
LOG.warn(_("Unable to find suitable datastore for " LOG.warn(_LW("Unable to find suitable datastore for "
"volume: %(vol)s under host: %(host)s. " "volume: %(vol)s under host: %(host)s. "
"More details: %(excep)s") % "More details: %(excep)s") %
{'vol': volume['name'], {'vol': volume['name'],
'host': host.obj, 'excep': excep}) 'host': host.obj, 'excep': excep})
if backing: if backing:
@ -660,8 +662,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
if not backing: if not backing:
# Create a backing in case it does not exist under the # Create a backing in case it does not exist under the
# host managing the instance. # host managing the instance.
LOG.info(_("There is no backing for the volume: %s. " LOG.info(_LI("There is no backing for the volume: %s. "
"Need to create one.") % volume['name']) "Need to create one.") % volume['name'])
backing = self._create_backing(volume, host) backing = self._create_backing(volume, host)
else: else:
# Relocate volume is necessary # Relocate volume is necessary
@ -673,7 +675,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
if not backing: if not backing:
# Create a backing in case it does not exist. It is a bad use # Create a backing in case it does not exist. It is a bad use
# case to boot from an empty volume. # case to boot from an empty volume.
LOG.warn(_("Trying to boot from an empty volume: %s.") % LOG.warn(_LW("Trying to boot from an empty volume: %s.") %
volume['name']) volume['name'])
# Create backing # Create backing
backing = self._create_backing_in_inventory(volume) backing = self._create_backing_in_inventory(volume)
@ -682,8 +684,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
connection_info['data'] = {'volume': backing.value, connection_info['data'] = {'volume': backing.value,
'volume_id': volume['id']} 'volume_id': volume['id']}
LOG.info(_("Returning connection_info: %(info)s for volume: " LOG.info(_LI("Returning connection_info: %(info)s for volume: "
"%(volume)s with connector: %(connector)s.") % "%(volume)s with connector: %(connector)s.") %
{'info': connection_info, {'info': connection_info,
'volume': volume['name'], 'volume': volume['name'],
'connector': connector}) 'connector': connector})
@ -735,12 +737,12 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
raise exception.InvalidVolume(msg % volume['status']) raise exception.InvalidVolume(msg % volume['status'])
backing = self.volumeops.get_backing(snapshot['volume_name']) backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing: if not backing:
LOG.info(_("There is no backing, so will not create " LOG.info(_LI("There is no backing, so will not create "
"snapshot: %s.") % snapshot['name']) "snapshot: %s.") % snapshot['name'])
return return
self.volumeops.create_snapshot(backing, snapshot['name'], self.volumeops.create_snapshot(backing, snapshot['name'],
snapshot['display_description']) snapshot['display_description'])
LOG.info(_("Successfully created snapshot: %s.") % snapshot['name']) LOG.info(_LI("Successfully created snapshot: %s.") % snapshot['name'])
def create_snapshot(self, snapshot): def create_snapshot(self, snapshot):
"""Creates a snapshot. """Creates a snapshot.
@ -766,11 +768,11 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
raise exception.InvalidVolume(msg % volume['status']) raise exception.InvalidVolume(msg % volume['status'])
backing = self.volumeops.get_backing(snapshot['volume_name']) backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing: if not backing:
LOG.info(_("There is no backing, and so there is no " LOG.info(_LI("There is no backing, and so there is no "
"snapshot: %s.") % snapshot['name']) "snapshot: %s.") % snapshot['name'])
else: else:
self.volumeops.delete_snapshot(backing, snapshot['name']) self.volumeops.delete_snapshot(backing, snapshot['name'])
LOG.info(_("Successfully deleted snapshot: %s.") % LOG.info(_LI("Successfully deleted snapshot: %s.") %
snapshot['name']) snapshot['name'])
def delete_snapshot(self, snapshot): def delete_snapshot(self, snapshot):
@ -811,8 +813,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
if volume['size'] > src_size_in_gb: if volume['size'] > src_size_in_gb:
self._extend_volumeops_virtual_disk(volume['size'], dest_vmdk_path, self._extend_volumeops_virtual_disk(volume['size'], dest_vmdk_path,
datacenter) datacenter)
LOG.info(_("Successfully cloned new backing: %(back)s from " LOG.info(_LI("Successfully cloned new backing: %(back)s from "
"source VMDK file: %(vmdk)s.") % "source VMDK file: %(vmdk)s.") %
{'back': backing, 'vmdk': src_vmdk_path}) {'back': backing, 'vmdk': src_vmdk_path})
def _create_cloned_volume(self, volume, src_vref): def _create_cloned_volume(self, volume, src_vref):
@ -828,9 +830,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self._verify_volume_creation(volume) self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(src_vref['name']) backing = self.volumeops.get_backing(src_vref['name'])
if not backing: if not backing:
LOG.info(_("There is no backing for the source volume: " LOG.info(_LI("There is no backing for the source volume: "
"%(svol)s. Not creating any backing for the " "%(svol)s. Not creating any backing for the "
"volume: %(vol)s.") % "volume: %(vol)s.") %
{'svol': src_vref['name'], {'svol': src_vref['name'],
'vol': volume['name']}) 'vol': volume['name']})
return return
@ -859,18 +861,18 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self._verify_volume_creation(volume) self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(snapshot['volume_name']) backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing: if not backing:
LOG.info(_("There is no backing for the source snapshot: " LOG.info(_LI("There is no backing for the source snapshot: "
"%(snap)s. Not creating any backing for the " "%(snap)s. Not creating any backing for the "
"volume: %(vol)s.") % "volume: %(vol)s.") %
{'snap': snapshot['name'], {'snap': snapshot['name'],
'vol': volume['name']}) 'vol': volume['name']})
return return
snapshot_moref = self.volumeops.get_snapshot(backing, snapshot_moref = self.volumeops.get_snapshot(backing,
snapshot['name']) snapshot['name'])
if not snapshot_moref: if not snapshot_moref:
LOG.info(_("There is no snapshot point for the snapshotted " LOG.info(_LI("There is no snapshot point for the snapshotted "
"volume: %(snap)s. Not creating any backing for " "volume: %(snap)s. Not creating any backing for "
"the volume: %(vol)s.") % "the volume: %(vol)s.") %
{'snap': snapshot['name'], 'vol': volume['name']}) {'snap': snapshot['name'], 'vol': volume['name']})
return return
src_vmdk_path = self.volumeops.get_vmdk_path(snapshot_moref) src_vmdk_path = self.volumeops.get_vmdk_path(snapshot_moref)
@ -942,8 +944,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.delete_vmdk_file( self.volumeops.delete_vmdk_file(
descriptor_ds_file_path, dc_ref) descriptor_ds_file_path, dc_ref)
except error_util.VimException: except error_util.VimException:
LOG.warn(_("Error occurred while deleting temporary " LOG.warn(_LW("Error occurred while deleting temporary "
"disk: %s."), "disk: %s."),
descriptor_ds_file_path, descriptor_ds_file_path,
exc_info=True) exc_info=True)
@ -956,8 +958,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
dest_path.get_descriptor_ds_file_path()) dest_path.get_descriptor_ds_file_path())
except error_util.VimException: except error_util.VimException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Error occurred while copying %(src)s to " LOG.exception(_LE("Error occurred while copying %(src)s to "
"%(dst)s."), "%(dst)s."),
{'src': src_path.get_descriptor_ds_file_path(), {'src': src_path.get_descriptor_ds_file_path(),
'dst': dest_path.get_descriptor_ds_file_path()}) 'dst': dest_path.get_descriptor_ds_file_path()})
finally: finally:
@ -1018,8 +1020,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
except Exception: except Exception:
# Delete the descriptor. # Delete the descriptor.
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Error occurred while copying image: " LOG.exception(_LE("Error occurred while copying image: "
"%(image_id)s to %(path)s."), "%(image_id)s to %(path)s."),
{'path': path.get_descriptor_ds_file_path(), {'path': path.get_descriptor_ds_file_path(),
'image_id': image_id}) 'image_id': image_id})
LOG.debug("Deleting descriptor: %s.", LOG.debug("Deleting descriptor: %s.",
@ -1028,8 +1030,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.delete_file( self.volumeops.delete_file(
path.get_descriptor_ds_file_path(), dc_ref) path.get_descriptor_ds_file_path(), dc_ref)
except error_util.VimException: except error_util.VimException:
LOG.warn(_("Error occurred while deleting " LOG.warn(_LW("Error occurred while deleting "
"descriptor: %s."), "descriptor: %s."),
path.get_descriptor_ds_file_path(), path.get_descriptor_ds_file_path(),
exc_info=True) exc_info=True)
@ -1057,7 +1059,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
try: try:
self.volumeops.delete_backing(backing) self.volumeops.delete_backing(backing)
except error_util.VimException: except error_util.VimException:
LOG.warn(_("Error occurred while deleting backing: %s."), LOG.warn(_LW("Error occurred while deleting backing: %s."),
backing, backing,
exc_info=True) exc_info=True)
@ -1143,8 +1145,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
except Exception: except Exception:
# Delete backing and virtual disk created from image. # Delete backing and virtual disk created from image.
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Error occurred while creating volume: %(id)s" LOG.exception(_LE("Error occurred while creating "
" from image: %(image_id)s."), "volume: %(id)s"
" from image: %(image_id)s."),
{'id': volume['id'], {'id': volume['id'],
'image_id': image_id}) 'image_id': image_id})
self._delete_temp_backing(backing) self._delete_temp_backing(backing)
@ -1210,15 +1213,15 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
image_size=image_size) image_size=image_size)
except exception.CinderException as excep: except exception.CinderException as excep:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Exception in copy_image_to_volume: %s."), LOG.exception(_LE("Exception in copy_image_to_volume: %s."),
excep) excep)
backing = self.volumeops.get_backing(volume['name']) backing = self.volumeops.get_backing(volume['name'])
if backing: if backing:
LOG.exception(_("Deleting the backing: %s") % backing) LOG.exception(_LE("Deleting the backing: %s") % backing)
# delete the backing # delete the backing
self.volumeops.delete_backing(backing) self.volumeops.delete_backing(backing)
LOG.info(_("Done copying image: %(id)s to volume: %(vol)s.") % LOG.info(_LI("Done copying image: %(id)s to volume: %(vol)s.") %
{'id': image_id, 'vol': volume['name']}) {'id': image_id, 'vol': volume['name']})
def _extend_vmdk_virtual_disk(self, name, new_size_in_gb): def _extend_vmdk_virtual_disk(self, name, new_size_in_gb):
@ -1229,9 +1232,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
""" """
backing = self.volumeops.get_backing(name) backing = self.volumeops.get_backing(name)
if not backing: if not backing:
LOG.info(_("The backing is not found, so there is no need " LOG.info(_LI("The backing is not found, so there is no need "
"to extend the vmdk virtual disk for the volume " "to extend the vmdk virtual disk for the volume "
"%s."), name) "%s."), name)
else: else:
root_vmdk_path = self.volumeops.get_vmdk_path(backing) root_vmdk_path = self.volumeops.get_vmdk_path(backing)
datacenter = self.volumeops.get_dc(backing) datacenter = self.volumeops.get_dc(backing)
@ -1251,8 +1254,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
root_vmdk_path, datacenter) root_vmdk_path, datacenter)
except error_util.VimException: except error_util.VimException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to extend the size of the " LOG.exception(_LE("Unable to extend the size of the "
"vmdk virtual disk at the path %s."), "vmdk virtual disk at the path %s."),
root_vmdk_path) root_vmdk_path)
def copy_image_to_volume(self, context, volume, image_service, image_id): def copy_image_to_volume(self, context, volume, image_service, image_id):
@ -1301,8 +1304,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
image_size_in_bytes, image_adapter_type, image_disk_type) image_size_in_bytes, image_adapter_type, image_disk_type)
except exception.CinderException as excep: except exception.CinderException as excep:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Exception in copying the image to the " LOG.exception(_LE("Exception in copying the image to the "
"volume: %s."), excep) "volume: %s."), excep)
LOG.debug("Volume: %(id)s created from image: %(image_id)s.", LOG.debug("Volume: %(id)s created from image: %(image_id)s.",
{'id': volume['id'], {'id': volume['id'],
@ -1349,7 +1352,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
# get backing vm of volume and its vmdk path # get backing vm of volume and its vmdk path
backing = self.volumeops.get_backing(volume['name']) backing = self.volumeops.get_backing(volume['name'])
if not backing: if not backing:
LOG.info(_("Backing not found, creating for volume: %s") % LOG.info(_LI("Backing not found, creating for volume: %s") %
volume['name']) volume['name'])
backing = self._create_backing_in_inventory(volume) backing = self._create_backing_in_inventory(volume)
vmdk_file_path = self.volumeops.get_vmdk_path(backing) vmdk_file_path = self.volumeops.get_vmdk_path(backing)
@ -1368,7 +1371,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
vmdk_size=volume['size'] * units.Gi, vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'], image_name=image_meta['name'],
image_version=1) image_version=1)
LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") % LOG.info(_LI("Done copying volume %(vol)s to a new image %(img)s") %
{'vol': volume['name'], 'img': image_meta['name']}) {'vol': volume['name'], 'img': image_meta['name']})
def _in_use(self, volume): def _in_use(self, volume):
@ -1397,7 +1400,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
""" """
# Can't attempt retype if the volume is in use. # Can't attempt retype if the volume is in use.
if self._in_use(volume): if self._in_use(volume):
LOG.warn(_("Volume: %s is in use, can't retype."), LOG.warn(_LW("Volume: %s is in use, can't retype."),
volume['name']) volume['name'])
return False return False
@ -1466,8 +1469,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
best_candidate = self.ds_sel.select_datastore(req) best_candidate = self.ds_sel.select_datastore(req)
if not best_candidate: if not best_candidate:
# No candidate datastores; can't retype. # No candidate datastores; can't retype.
LOG.warn(_("There are no datastores matching new requirements;" LOG.warn(_LW("There are no datastores matching new "
" can't retype volume: %s."), "requirements; can't retype volume: %s."),
volume['name']) volume['name'])
return False return False
@ -1503,8 +1506,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
backing = new_backing backing = new_backing
except error_util.VimException: except error_util.VimException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Error occurred while cloning backing:" LOG.exception(_LE("Error occurred while cloning "
" %s during retype."), "backing:"
" %s during retype."),
backing) backing)
if renamed: if renamed:
LOG.debug("Undo rename of backing: %(backing)s; " LOG.debug("Undo rename of backing: %(backing)s; "
@ -1517,9 +1521,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.rename_backing(backing, self.volumeops.rename_backing(backing,
volume['name']) volume['name'])
except error_util.VimException: except error_util.VimException:
LOG.warn(_("Changing backing: %(backing)s name" LOG.warn(_LW("Changing backing: %(backing)s "
" from %(new_name)s to %(old_name)s" "name from %(new_name)s to "
" failed."), "%(old_name)s failed."),
{'backing': backing, {'backing': backing,
'new_name': tmp_name, 'new_name': tmp_name,
'old_name': volume['name']}) 'old_name': volume['name']})
@ -1553,24 +1557,25 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
# try extending vmdk in place # try extending vmdk in place
try: try:
self._extend_vmdk_virtual_disk(vol_name, new_size) self._extend_vmdk_virtual_disk(vol_name, new_size)
LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") % LOG.info(_LI("Done extending volume %(vol)s "
"to size %(size)s GB.") %
{'vol': vol_name, 'size': new_size}) {'vol': vol_name, 'size': new_size})
return return
except error_util.VimFaultException: except error_util.VimFaultException:
LOG.info(_("Relocating volume %s vmdk to a different " LOG.info(_LI("Relocating volume %s vmdk to a different "
"datastore since trying to extend vmdk file " "datastore since trying to extend vmdk file "
"in place failed."), vol_name) "in place failed."), vol_name)
# If in place extend fails, then try to relocate the volume # If in place extend fails, then try to relocate the volume
try: try:
(host, rp, folder, summary) = self._select_ds_for_volume(new_size) (host, rp, folder, summary) = self._select_ds_for_volume(new_size)
except error_util.VimException: except error_util.VimException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Not able to find a different datastore to " LOG.exception(_LE("Not able to find a different datastore to "
"place the extended volume %s."), vol_name) "place the extended volume %s."), vol_name)
LOG.info(_("Selected datastore %(ds)s to place extended volume of " LOG.info(_LI("Selected datastore %(ds)s to place extended volume of "
"size %(size)s GB.") % {'ds': summary.name, "size %(size)s GB.") % {'ds': summary.name,
'size': new_size}) 'size': new_size})
try: try:
backing = self.volumeops.get_backing(vol_name) backing = self.volumeops.get_backing(vol_name)
@ -1580,9 +1585,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.move_backing_to_folder(backing, folder) self.volumeops.move_backing_to_folder(backing, folder)
except error_util.VimException: except error_util.VimException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Not able to relocate volume %s for " LOG.exception(_LE("Not able to relocate volume %s for "
"extending."), vol_name) "extending."), vol_name)
LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") % LOG.info(_LI("Done extending volume %(vol)s to size %(size)s GB.") %
{'vol': vol_name, 'size': new_size}) {'vol': vol_name, 'size': new_size})
@contextlib.contextmanager @contextlib.contextmanager
@ -1681,8 +1686,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
return vm_ref return vm_ref
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Error occurred while creating temporary " LOG.exception(_LE("Error occurred while creating temporary "
"backing.")) "backing."))
backing = self.volumeops.get_backing(name) backing = self.volumeops.get_backing(name)
if backing is not None: if backing is not None:
self._delete_temp_backing(backing) self._delete_temp_backing(backing)
@ -1746,9 +1751,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.rename_backing(backing, self.volumeops.rename_backing(backing,
volume['name']) volume['name'])
except error_util.VimException: except error_util.VimException:
LOG.warn(_("Cannot undo volume rename; old name " LOG.warn(_LW("Cannot undo volume rename; old name "
"was %(old_name)s and new name is " "was %(old_name)s and new name is "
"%(new_name)s."), "%(new_name)s."),
{'old_name': volume['name'], {'old_name': volume['name'],
'new_name': tmp_backing_name}, 'new_name': tmp_backing_name},
exc_info=True) exc_info=True)
@ -1850,10 +1855,10 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
pbm_service_wsdl = os.path.join(curr_dir, 'wsdl', major_minor, pbm_service_wsdl = os.path.join(curr_dir, 'wsdl', major_minor,
'pbmService.wsdl') 'pbmService.wsdl')
if not os.path.exists(pbm_service_wsdl): if not os.path.exists(pbm_service_wsdl):
LOG.warn(_("PBM WSDL file %s is missing!"), pbm_service_wsdl) LOG.warn(_LW("PBM WSDL file %s is missing!"), pbm_service_wsdl)
return return
pbm_wsdl = 'file://' + pbm_service_wsdl pbm_wsdl = 'file://' + pbm_service_wsdl
LOG.info(_("Using PBM WSDL location: %s"), pbm_wsdl) LOG.info(_LI("Using PBM WSDL location: %s"), pbm_wsdl)
return pbm_wsdl return pbm_wsdl
def _get_vc_version(self): def _get_vc_version(self):
@ -1864,18 +1869,18 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
""" """
version_str = self.configuration.vmware_host_version version_str = self.configuration.vmware_host_version
if version_str: if version_str:
LOG.info(_("Using overridden vmware_host_version from config: " LOG.info(_LI("Using overridden vmware_host_version from config: "
"%s"), version_str) "%s"), version_str)
else: else:
version_str = self.session.vim.service_content.about.version version_str = self.session.vim.service_content.about.version
LOG.info(_("Fetched VC server version: %s"), version_str) LOG.info(_LI("Fetched VC server version: %s"), version_str)
# convert version_str to LooseVersion and return # convert version_str to LooseVersion and return
version = None version = None
try: try:
version = dist_version.LooseVersion(version_str) version = dist_version.LooseVersion(version_str)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Version string '%s' is not parseable"), LOG.exception(_LE("Version string '%s' is not parseable"),
version_str) version_str)
return version return version
@ -1902,9 +1907,9 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects) self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects)
self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session) self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session)
LOG.info(_("Successfully setup driver: %(driver)s for server: " LOG.info(_LI("Successfully setup driver: %(driver)s for server: "
"%(ip)s.") % {'driver': self.__class__.__name__, "%(ip)s.") % {'driver': self.__class__.__name__,
'ip': self.configuration.vmware_host_ip}) 'ip': self.configuration.vmware_host_ip})
def _get_volume_group_folder(self, datacenter): def _get_volume_group_folder(self, datacenter):
"""Get volume group folder. """Get volume group folder.
@ -1950,7 +1955,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
(folder, summary) = self._get_folder_ds_summary(volume, (folder, summary) = self._get_folder_ds_summary(volume,
resource_pool, resource_pool,
datastores) datastores)
LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") % LOG.info(_LI("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
{'backing': backing, 'ds': summary, 'rp': resource_pool}) {'backing': backing, 'ds': summary, 'rp': resource_pool})
# Relocate the backing to the datastore and folder # Relocate the backing to the datastore and folder
self.volumeops.relocate_backing(backing, summary.datastore, self.volumeops.relocate_backing(backing, summary.datastore,
@ -1998,7 +2003,7 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
# the size of the source volume to the volume size. # the size of the source volume to the volume size.
if volume['size'] > src_vsize: if volume['size'] > src_vsize:
self._extend_vmdk_virtual_disk(volume['name'], volume['size']) self._extend_vmdk_virtual_disk(volume['name'], volume['size'])
LOG.info(_("Successfully created clone: %s.") % clone) LOG.info(_LI("Successfully created clone: %s.") % clone)
def _create_volume_from_snapshot(self, volume, snapshot): def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot. """Creates a volume from a snapshot.
@ -2012,17 +2017,17 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
self._verify_volume_creation(volume) self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(snapshot['volume_name']) backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing: if not backing:
LOG.info(_("There is no backing for the snapshotted volume: " LOG.info(_LI("There is no backing for the snapshotted volume: "
"%(snap)s. Not creating any backing for the " "%(snap)s. Not creating any backing for the "
"volume: %(vol)s.") % "volume: %(vol)s.") %
{'snap': snapshot['name'], 'vol': volume['name']}) {'snap': snapshot['name'], 'vol': volume['name']})
return return
snapshot_moref = self.volumeops.get_snapshot(backing, snapshot_moref = self.volumeops.get_snapshot(backing,
snapshot['name']) snapshot['name'])
if not snapshot_moref: if not snapshot_moref:
LOG.info(_("There is no snapshot point for the snapshotted " LOG.info(_LI("There is no snapshot point for the snapshotted "
"volume: %(snap)s. Not creating any backing for " "volume: %(snap)s. Not creating any backing for "
"the volume: %(vol)s.") % "the volume: %(vol)s.") %
{'snap': snapshot['name'], 'vol': volume['name']}) {'snap': snapshot['name'], 'vol': volume['name']})
return return
clone_type = VMwareVcVmdkDriver._get_clone_type(volume) clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
@ -2049,8 +2054,8 @@ class VMwareVcVmdkDriver(VMwareEsxVmdkDriver):
self._verify_volume_creation(volume) self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(src_vref['name']) backing = self.volumeops.get_backing(src_vref['name'])
if not backing: if not backing:
LOG.info(_("There is no backing for the source volume: %(src)s. " LOG.info(_LI("There is no backing for the source volume: %(src)s. "
"Not creating any backing for volume: %(vol)s.") % "Not creating any backing for volume: %(vol)s.") %
{'src': src_vref['name'], 'vol': volume['name']}) {'src': src_vref['name'], 'vol': volume['name']})
return return
clone_type = VMwareVcVmdkDriver._get_clone_type(volume) clone_type = VMwareVcVmdkDriver._get_clone_type(volume)

View File

@ -18,7 +18,7 @@ Utility functions for Image transfer.
from eventlet import timeout from eventlet import timeout
from cinder.i18n import _ from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import io_util from cinder.volume.drivers.vmware import io_util
@ -79,7 +79,7 @@ def start_transfer(context, timeout_secs, read_file_handle, max_data_size,
write_thread.stop() write_thread.stop()
# Log and raise the exception. # Log and raise the exception.
LOG.exception(_("Error occurred during image transfer.")) LOG.exception(_LE("Error occurred during image transfer."))
if isinstance(exc, error_util.ImageTransferException): if isinstance(exc, error_util.ImageTransferException):
raise raise
raise error_util.ImageTransferException(exc) raise error_util.ImageTransferException(exc)
@ -107,7 +107,8 @@ def fetch_flat_image(context, timeout_secs, image_service, image_id, **kwargs):
file_size) file_size)
start_transfer(context, timeout_secs, read_handle, file_size, start_transfer(context, timeout_secs, read_handle, file_size,
write_file_handle=write_handle) write_file_handle=write_handle)
LOG.info(_("Downloaded image: %s from glance image server.") % image_id) LOG.info(_LI("Downloaded image: %s from glance "
"image server.") % image_id)
def fetch_stream_optimized_image(context, timeout_secs, image_service, def fetch_stream_optimized_image(context, timeout_secs, image_service,
@ -126,7 +127,8 @@ def fetch_stream_optimized_image(context, timeout_secs, image_service,
file_size) file_size)
start_transfer(context, timeout_secs, read_handle, file_size, start_transfer(context, timeout_secs, read_handle, file_size,
write_file_handle=write_handle) write_file_handle=write_handle)
LOG.info(_("Downloaded image: %s from glance image server.") % image_id) LOG.info(_LI("Downloaded image: %s from glance image "
"server.") % image_id)
def upload_image(context, timeout_secs, image_service, image_id, owner_id, def upload_image(context, timeout_secs, image_service, image_id, owner_id,
@ -158,7 +160,7 @@ def upload_image(context, timeout_secs, image_service, image_id, owner_id,
start_transfer(context, timeout_secs, read_handle, file_size, start_transfer(context, timeout_secs, read_handle, file_size,
image_service=image_service, image_id=image_id, image_service=image_service, image_id=image_id,
image_meta=image_metadata) image_meta=image_metadata)
LOG.info(_("Uploaded image: %s to the Glance image server.") % image_id) LOG.info(_LI("Uploaded image: %s to the Glance image server.") % image_id)
def download_stream_optimized_disk( def download_stream_optimized_disk(

View File

@ -22,7 +22,7 @@ import os
from oslo.config import cfg from oslo.config import cfg
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume.drivers.windows import constants from cinder.volume.drivers.windows import constants
@ -270,8 +270,8 @@ class WindowsUtils(object):
LOG.error(err_msg) LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg) raise exception.VolumeBackendAPIException(data=err_msg)
else: else:
LOG.info(_('Ignored target creation error "%s"' LOG.info(_LI('Ignored target creation error "%s"'
' while ensuring export'), exc) ' while ensuring export'), exc)
def remove_iscsi_target(self, target_name): def remove_iscsi_target(self, target_name):
"""Removes ISCSI target.""" """Removes ISCSI target."""

View File

@ -26,7 +26,7 @@ from lxml import etree
from oslo.config import cfg from oslo.config import cfg
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume import driver from cinder.volume import driver
@ -462,16 +462,16 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
cg_name = self._get_volume_cg_name(volume_name) cg_name = self._get_volume_cg_name(volume_name)
if not cg_name: if not cg_name:
# If the volume isn't present, then don't attempt to delete # If the volume isn't present, then don't attempt to delete
LOG.warning(_("snapshot: original volume %s not found, " LOG.warning(_LW("snapshot: original volume %s not found, "
"skipping delete operation") "skipping delete operation")
% snapshot['volume_name']) % snapshot['volume_name'])
return True return True
snap_id = self._get_snap_id(cg_name, snapshot['name']) snap_id = self._get_snap_id(cg_name, snapshot['name'])
if not snap_id: if not snap_id:
# If the snapshot isn't present, then don't attempt to delete # If the snapshot isn't present, then don't attempt to delete
LOG.warning(_("snapshot: snapshot %s not found, " LOG.warning(_LW("snapshot: snapshot %s not found, "
"skipping delete operation") "skipping delete operation")
% snapshot['name']) % snapshot['name'])
return True return True

View File

@ -21,7 +21,7 @@ import StringIO
import time import time
import urllib2 import urllib2
from cinder.i18n import _, _LE from cinder.i18n import _LE, _LI
from cinder.openstack.common import log from cinder.openstack.common import log
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -176,7 +176,7 @@ class RestClientURL(object):
self.headers['x-auth-session'] = \ self.headers['x-auth-session'] = \
result.get_header('x-auth-session') result.get_header('x-auth-session')
self.do_logout = True self.do_logout = True
LOG.info(_('ZFSSA version: %s') % LOG.info(_LI('ZFSSA version: %s') %
result.get_header('x-zfssa-version')) result.get_header('x-zfssa-version'))
elif result.status == httplib.NOT_FOUND: elif result.status == httplib.NOT_FOUND:

View File

@ -140,7 +140,8 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
self.db.volume_update(context, volume_id, update) self.db.volume_update(context, volume_id, update)
except exception.CinderException: except exception.CinderException:
# Don't let resetting the status cause the rescheduling to fail. # Don't let resetting the status cause the rescheduling to fail.
LOG.exception(_("Volume %s: resetting 'creating' status failed."), LOG.exception(_LE("Volume %s: resetting 'creating' "
"status failed."),
volume_id) volume_id)
def revert(self, context, result, flow_failures, **kwargs): def revert(self, context, result, flow_failures, **kwargs):
@ -159,7 +160,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
self._reschedule(context, cause, **kwargs) self._reschedule(context, cause, **kwargs)
self._post_reschedule(context, volume_id) self._post_reschedule(context, volume_id)
except exception.CinderException: except exception.CinderException:
LOG.exception(_("Volume %s: rescheduling failed"), volume_id) LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
class ExtractVolumeRefTask(flow_utils.CinderTask): class ExtractVolumeRefTask(flow_utils.CinderTask):
@ -315,8 +316,8 @@ class NotifyVolumeActionTask(flow_utils.CinderTask):
# If notification sending of volume database entry reading fails # If notification sending of volume database entry reading fails
# then we shouldn't error out the whole workflow since this is # then we shouldn't error out the whole workflow since this is
# not always information that must be sent for volumes to operate # not always information that must be sent for volumes to operate
LOG.exception(_("Failed notifying about the volume" LOG.exception(_LE("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s") % " action %(event)s for volume %(volume_id)s") %
{'event': self.event_suffix, {'event': self.event_suffix,
'volume_id': volume_id}) 'volume_id': volume_id})
@ -414,9 +415,10 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
snapshot_ref['volume_id']) snapshot_ref['volume_id'])
make_bootable = originating_vref.bootable make_bootable = originating_vref.bootable
except exception.CinderException as ex: except exception.CinderException as ex:
LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable" LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
" flag using the provided glance snapshot " "bootable"
"%(snapshot_ref_id)s volume reference") % " flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference") %
{'snapshot_id': snapshot_id, {'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot_ref['volume_id']}) 'snapshot_ref_id': snapshot_ref['volume_id']})
raise exception.MetadataUpdateFailure(reason=ex) raise exception.MetadataUpdateFailure(reason=ex)
@ -430,8 +432,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
LOG.debug('Marking volume %s as bootable.', volume_id) LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True}) self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex: except exception.CinderException as ex:
LOG.exception(_("Failed updating volume %(volume_id)s bootable" LOG.exception(_LE("Failed updating volume %(volume_id)s bootable "
" flag to true") % {'volume_id': volume_id}) "flag to true") % {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex) raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref, def _create_from_source_volume(self, context, volume_ref,
@ -582,8 +584,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
volume_ref = self.db.volume_update(context, volume_ref = self.db.volume_update(context,
volume_ref['id'], updates) volume_ref['id'], updates)
except exception.CinderException: except exception.CinderException:
LOG.exception(_("Failed updating volume %(volume_id)s with " LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(updates)s") % "%(updates)s") %
{'volume_id': volume_ref['id'], {'volume_id': volume_ref['id'],
'updates': updates}) 'updates': updates})
self._copy_image_to_volume(context, volume_ref, self._copy_image_to_volume(context, volume_ref,
@ -648,8 +650,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
# If somehow the update failed we want to ensure that the # If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at # failure is logged (but not try rescheduling since the volume at
# this point has been created). # this point has been created).
LOG.exception(_("Failed updating model of volume %(volume_id)s" LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
" with creation provided model %(model)s") % "with creation provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update}) {'volume_id': volume_id, 'model': model_update})
raise raise
@ -691,9 +693,9 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
# Now use the parent to notify. # Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref) super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException: except exception.CinderException:
LOG.exception(_("Failed updating volume %(volume_id)s with " LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(update)s") % {'volume_id': volume_id, "%(update)s") % {'volume_id': volume_id,
'update': update}) 'update': update})
# Even if the update fails, the volume is ready. # Even if the update fails, the volume is ready.
msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully") msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
LOG.info(msg % { LOG.info(msg % {

View File

@ -82,8 +82,8 @@ class ManageExistingTask(flow_utils.CinderTask):
volume_ref = self.db.volume_update(context, volume_ref['id'], volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update) model_update)
except exception.CinderException: except exception.CinderException:
LOG.exception(_("Failed updating model of volume %(volume_id)s" LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
" with creation provided model %(model)s") % " with creation provided model %(model)s") %
{'volume_id': volume_ref['id'], {'volume_id': volume_ref['id'],
'model': model_update}) 'model': model_update})
raise raise

View File

@ -22,7 +22,7 @@ from oslo.db import exception as db_exc
from cinder import context from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder.i18n import _, _LE from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder.volume import volume_types from cinder.volume import volume_types
@ -196,8 +196,8 @@ def associate_qos_with_type(context, specs_id, type_id):
db.qos_specs_associate(context, specs_id, type_id) db.qos_specs_associate(context, specs_id, type_id)
except db_exc.DBError as e: except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % e) LOG.exception(_LE('DB error: %s') % e)
LOG.warn(_('Failed to associate qos specs ' LOG.warn(_LW('Failed to associate qos specs '
'%(id)s with type: %(vol_type_id)s') % '%(id)s with type: %(vol_type_id)s') %
dict(id=specs_id, vol_type_id=type_id)) dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
type_id=type_id) type_id=type_id)
@ -210,8 +210,8 @@ def disassociate_qos_specs(context, specs_id, type_id):
db.qos_specs_disassociate(context, specs_id, type_id) db.qos_specs_disassociate(context, specs_id, type_id)
except db_exc.DBError as e: except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % e) LOG.exception(_LE('DB error: %s') % e)
LOG.warn(_('Failed to disassociate qos specs ' LOG.warn(_LW('Failed to disassociate qos specs '
'%(id)s with type: %(vol_type_id)s') % '%(id)s with type: %(vol_type_id)s') %
dict(id=specs_id, vol_type_id=type_id)) dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=type_id) type_id=type_id)
@ -224,7 +224,7 @@ def disassociate_all(context, specs_id):
db.qos_specs_disassociate_all(context, specs_id) db.qos_specs_disassociate_all(context, specs_id)
except db_exc.DBError as e: except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % e) LOG.exception(_LE('DB error: %s') % e)
LOG.warn(_('Failed to disassociate qos specs %s.') % specs_id) LOG.warn(_LW('Failed to disassociate qos specs %s.') % specs_id)
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=None) type_id=None)

View File

@ -26,7 +26,7 @@ from oslo.utils import units
from cinder.brick.local_dev import lvm as brick_lvm from cinder.brick.local_dev import lvm as brick_lvm
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import log as logging from cinder.openstack.common import log as logging
from cinder import rpc from cinder import rpc
from cinder import utils from cinder import utils
@ -242,7 +242,7 @@ def setup_blkio_cgroup(srcpath, dstpath, bps_limit, execute=utils.execute):
try: try:
execute('cgcreate', '-g', 'blkio:%s' % group_name, run_as_root=True) execute('cgcreate', '-g', 'blkio:%s' % group_name, run_as_root=True)
except processutils.ProcessExecutionError: except processutils.ProcessExecutionError:
LOG.warn(_('Failed to create blkio cgroup')) LOG.warn(_LW('Failed to create blkio cgroup'))
return None return None
try: try:
@ -362,7 +362,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None,
if volume_clear_ionice is None: if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_("Performing secure delete on volume: %s") % volume_path) LOG.info(_LI("Performing secure delete on volume: %s") % volume_path)
if volume_clear == 'zero': if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size, return copy_volume('/dev/zero', volume_path, volume_clear_size,
@ -387,7 +387,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None,
# some incredible event this is 0 (cirros image?) don't barf # some incredible event this is 0 (cirros image?) don't barf
if duration < 1: if duration < 1:
duration = 1 duration = 1
LOG.info(_('Elapsed time for clear volume: %.2f sec') % duration) LOG.info(_LI('Elapsed time for clear volume: %.2f sec') % duration)
def supports_thin_provisioning(): def supports_thin_provisioning():

View File

@ -139,8 +139,9 @@ def get_default_volume_type():
# Couldn't find volume type with the name in default_volume_type # Couldn't find volume type with the name in default_volume_type
# flag, record this issue and move on # flag, record this issue and move on
#TODO(zhiteng) consider add notification to warn admin #TODO(zhiteng) consider add notification to warn admin
LOG.exception(_('Default volume type is not found, ' LOG.exception(_LE('Default volume type is not found, '
'please check default_volume_type config: %s'), e) 'please check default_volume_type '
'config: %s'), e)
return vol_type return vol_type

View File

@ -217,7 +217,7 @@ class CiscoFCZoneDriver(FCZoneDriver):
msg = _("Exception: %s") % six.text_type(cisco_ex) msg = _("Exception: %s") % six.text_type(cisco_ex)
raise exception.FCZoneDriverException(msg) raise exception.FCZoneDriverException(msg)
except Exception as e: except Exception as e:
LOG.error(_("Exception: %s") % six.text_type(e)) LOG.error(_LE("Exception: %s") % six.text_type(e))
msg = (_("Failed to add zoning configuration %s") % msg = (_("Failed to add zoning configuration %s") %
six.text_type(e)) six.text_type(e))
raise exception.FCZoneDriverException(msg) raise exception.FCZoneDriverException(msg)

View File

@ -19,7 +19,7 @@ Utility functions related to the Zone Manager.
""" """
import logging import logging
from cinder.i18n import _, _LI from cinder.i18n import _LI, _LW
from cinder.openstack.common import log from cinder.openstack.common import log
from cinder.volume.configuration import Configuration from cinder.volume.configuration import Configuration
from cinder.volume import manager from cinder.volume import manager
@ -75,8 +75,8 @@ def AddFCZone(initialize_connection):
def decorator(self, *args, **kwargs): def decorator(self, *args, **kwargs):
conn_info = initialize_connection(self, *args, **kwargs) conn_info = initialize_connection(self, *args, **kwargs)
if not conn_info: if not conn_info:
LOG.warn(_("Driver didn't return connection info, " LOG.warn(_LW("Driver didn't return connection info, "
"can't add zone.")) "can't add zone."))
return None return None
vol_type = conn_info.get('driver_volume_type', None) vol_type = conn_info.get('driver_volume_type', None)
@ -100,8 +100,8 @@ def RemoveFCZone(terminate_connection):
def decorator(self, *args, **kwargs): def decorator(self, *args, **kwargs):
conn_info = terminate_connection(self, *args, **kwargs) conn_info = terminate_connection(self, *args, **kwargs)
if not conn_info: if not conn_info:
LOG.warn(_("Driver didn't return connection info from " LOG.warn(_LW("Driver didn't return connection info from "
"terminate_connection call.")) "terminate_connection call."))
return None return None
vol_type = conn_info.get('driver_volume_type', None) vol_type = conn_info.get('driver_volume_type', None)