Remove log translations in share and share_group 4/5

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.
This is the 4th of 5 commits in this series.
Old commit will be abandoned: https://review.openstack.org/#/c/447822/

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: Ia46e9dc4953c788274f5c9b763b2fed96c28d60e
Depends-On: I9fd264a443c634465b8548067f86ac14c1a51faa
Partial-Bug: #1674542
This commit is contained in:
yfzhao 2017-03-24 16:38:27 +08:00
parent ffe135a5b3
commit 059fae0ed5
69 changed files with 926 additions and 943 deletions

View File

@ -18,7 +18,7 @@ import copy
from oslo_log import log from oslo_log import log
from manila.common import constants from manila.common import constants
from manila.i18n import _, _LI from manila.i18n import _
from manila import utils from manila import utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -493,8 +493,8 @@ class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):
remove_rules, share_instance, share_server): remove_rules, share_instance, share_server):
for rule in add_rules: for rule in add_rules:
LOG.info( LOG.info(
_LI("Applying access rule '%(rule)s' for share " "Applying access rule '%(rule)s' for share "
"instance '%(instance)s'"), "instance '%(instance)s'",
{'rule': rule['id'], 'instance': share_instance['id']} {'rule': rule['id'], 'instance': share_instance['id']}
) )
@ -511,8 +511,8 @@ class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin):
for rule in delete_rules: for rule in delete_rules:
LOG.info( LOG.info(
_LI("Denying access rule '%(rule)s' from share " "Denying access rule '%(rule)s' from share "
"instance '%(instance)s'"), "instance '%(instance)s'",
{'rule': rule['id'], 'instance': share_instance['id']} {'rule': rule['id'], 'instance': share_instance['id']}
) )

View File

@ -32,7 +32,7 @@ from manila.common import constants
from manila.data import rpcapi as data_rpcapi from manila.data import rpcapi as data_rpcapi
from manila.db import base from manila.db import base
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila import policy from manila import policy
from manila import quota from manila import quota
from manila.scheduler import rpcapi as scheduler_rpcapi from manila.scheduler import rpcapi as scheduler_rpcapi
@ -148,22 +148,22 @@ class API(base.Base):
return (usages[name]['reserved'] + usages[name]['in_use']) return (usages[name]['reserved'] + usages[name]['in_use'])
if 'gigabytes' in overs: if 'gigabytes' in overs:
LOG.warning(_LW("Quota exceeded for %(s_pid)s, " LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create " "tried to create "
"%(s_size)sG share (%(d_consumed)dG of " "%(s_size)sG share (%(d_consumed)dG of "
"%(d_quota)dG already consumed)."), { "%(d_quota)dG already consumed).", {
's_pid': context.project_id, 's_pid': context.project_id,
's_size': size, 's_size': size,
'd_consumed': _consumed('gigabytes'), 'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']}) 'd_quota': quotas['gigabytes']})
raise exception.ShareSizeExceedsAvailableQuota() raise exception.ShareSizeExceedsAvailableQuota()
elif 'shares' in overs: elif 'shares' in overs:
LOG.warning(_LW("Quota exceeded for %(s_pid)s, " LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create " "tried to create "
"share (%(d_consumed)d shares " "share (%(d_consumed)d shares "
"already consumed)."), { "already consumed).", {
's_pid': context.project_id, 's_pid': context.project_id,
'd_consumed': _consumed('shares')}) 'd_consumed': _consumed('shares')})
raise exception.ShareLimitExceeded(allowed=quotas['shares']) raise exception.ShareLimitExceeded(allowed=quotas['shares'])
try: try:
@ -524,7 +524,7 @@ class API(base.Base):
msg = _("Cannot delete last active replica.") msg = _("Cannot delete last active replica.")
raise exception.ReplicationException(reason=msg) raise exception.ReplicationException(reason=msg)
LOG.info(_LI("Deleting replica %s."), id) LOG.info("Deleting replica %s.", id)
self.db.share_replica_update( self.db.share_replica_update(
context, share_replica['id'], context, share_replica['id'],
@ -927,7 +927,7 @@ class API(base.Base):
except Exception as e: except Exception as e:
reservations = None reservations = None
LOG.exception( LOG.exception(
_LE("Failed to update quota for deleting share: %s"), e) ("Failed to update quota for deleting share: %s"), e)
for share_instance in share.instances: for share_instance in share.instances:
if share_instance['host']: if share_instance['host']:
@ -984,7 +984,7 @@ class API(base.Base):
share_groups = self.db.share_group_get_all_by_share_server( share_groups = self.db.share_group_get_all_by_share_server(
context, server['id']) context, server['id'])
if share_groups: if share_groups:
LOG.error(_LE("share server '%(ssid)s' in use by share groups."), LOG.error("share server '%(ssid)s' in use by share groups.",
{'ssid': server['id']}) {'ssid': server['id']})
raise exception.ShareServerInUse(share_server_id=server['id']) raise exception.ShareServerInUse(share_server_id=server['id'])
@ -1020,18 +1020,18 @@ class API(base.Base):
return (usages[name]['reserved'] + usages[name]['in_use']) return (usages[name]['reserved'] + usages[name]['in_use'])
if 'snapshot_gigabytes' in overs: if 'snapshot_gigabytes' in overs:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create " msg = ("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of " "%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).") "%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id, LOG.warning(msg, {'s_pid': context.project_id,
's_size': size, 's_size': size,
'd_consumed': _consumed('gigabytes'), 'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['snapshot_gigabytes']}) 'd_quota': quotas['snapshot_gigabytes']})
raise exception.SnapshotSizeExceedsAvailableQuota() raise exception.SnapshotSizeExceedsAvailableQuota()
elif 'snapshots' in overs: elif 'snapshots' in overs:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create " msg = ("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots " "snapshot (%(d_consumed)d snapshots "
"already consumed).") "already consumed).")
LOG.warning(msg, {'s_pid': context.project_id, LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed('snapshots')}) 'd_consumed': _consumed('snapshots')})
raise exception.SnapshotLimitExceeded( raise exception.SnapshotLimitExceeded(
@ -1190,17 +1190,17 @@ class API(base.Base):
if (new_share_network_id == share_instance['share_network_id'] and if (new_share_network_id == share_instance['share_network_id'] and
new_share_type_id == share_instance['share_type_id'] and new_share_type_id == share_instance['share_type_id'] and
dest_host == share_instance['host']): dest_host == share_instance['host']):
msg = _LI("Destination host (%(dest_host)s), share network " msg = ("Destination host (%(dest_host)s), share network "
"(%(dest_sn)s) or share type (%(dest_st)s) are the same " "(%(dest_sn)s) or share type (%(dest_st)s) are the same "
"as the current host's '%(src_host)s', '%(src_sn)s' and " "as the current host's '%(src_host)s', '%(src_sn)s' and "
"'%(src_st)s' respectively. Nothing to be done.") % { "'%(src_st)s' respectively. Nothing to be done.") % {
'dest_host': dest_host, 'dest_host': dest_host,
'dest_sn': new_share_network_id, 'dest_sn': new_share_network_id,
'dest_st': new_share_type_id, 'dest_st': new_share_type_id,
'src_host': share_instance['host'], 'src_host': share_instance['host'],
'src_sn': share_instance['share_network_id'], 'src_sn': share_instance['share_network_id'],
'src_st': share_instance['share_type_id'], 'src_st': share_instance['share_type_id'],
} }
LOG.info(msg) LOG.info(msg)
self.db.share_update( self.db.share_update(
context, share['id'], context, share['id'],
@ -1304,8 +1304,8 @@ class API(base.Base):
elif share['task_state'] == ( elif share['task_state'] == (
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
data_rpc = data_rpcapi.DataAPI() data_rpc = data_rpcapi.DataAPI()
LOG.info(_LI("Sending request to get share migration information" LOG.info("Sending request to get share migration information"
" of share %s.") % share['id']) " of share %s." % share['id'])
services = self.db.service_get_all_by_topic(context, 'manila-data') services = self.db.service_get_all_by_topic(context, 'manila-data')
@ -1404,8 +1404,8 @@ class API(base.Base):
constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): constants.TASK_STATE_DATA_COPYING_IN_PROGRESS):
data_rpc = data_rpcapi.DataAPI() data_rpc = data_rpcapi.DataAPI()
LOG.info(_LI("Sending request to cancel migration of " LOG.info("Sending request to cancel migration of "
"share %s.") % share['id']) "share %s." % share['id'])
services = self.db.service_get_all_by_topic(context, 'manila-data') services = self.db.service_get_all_by_topic(context, 'manila-data')
@ -1796,9 +1796,9 @@ class API(base.Base):
def _consumed(name): def _consumed(name):
return usages[name]['reserved'] + usages[name]['in_use'] return usages[name]['reserved'] + usages[name]['in_use']
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend share " msg = ("Quota exceeded for %(s_pid)s, tried to extend share "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).") "already consumed).")
LOG.error(msg, {'s_pid': context.project_id, LOG.error(msg, {'s_pid': context.project_id,
's_size': size_increase, 's_size': size_increase,
'd_consumed': _consumed('gigabytes'), 'd_consumed': _consumed('gigabytes'),
@ -1810,7 +1810,7 @@ class API(base.Base):
self.update(context, share, {'status': constants.STATUS_EXTENDING}) self.update(context, share, {'status': constants.STATUS_EXTENDING})
self.share_rpcapi.extend_share(context, share, new_size, reservations) self.share_rpcapi.extend_share(context, share, new_size, reservations)
LOG.info(_LI("Extend share request issued successfully."), LOG.info("Extend share request issued successfully.",
resource=share) resource=share)
def shrink(self, context, share, new_size): def shrink(self, context, share, new_size):
@ -1843,9 +1843,9 @@ class API(base.Base):
self.update(context, share, {'status': constants.STATUS_SHRINKING}) self.update(context, share, {'status': constants.STATUS_SHRINKING})
self.share_rpcapi.shrink_share(context, share, new_size) self.share_rpcapi.shrink_share(context, share, new_size)
LOG.info(_LI("Shrink share (id=%(id)s) request issued successfully." LOG.info("Shrink share (id=%(id)s) request issued successfully."
" New size: %(size)s") % {'id': share['id'], " New size: %(size)s" % {'id': share['id'],
'size': new_size}) 'size': new_size})
def snapshot_allow_access(self, context, snapshot, access_type, access_to): def snapshot_allow_access(self, context, snapshot, access_type, access_to):
"""Allow access to a share snapshot.""" """Allow access to a share snapshot."""

View File

@ -25,7 +25,7 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila import network from manila import network
from manila import utils from manila import utils
@ -213,8 +213,8 @@ class ExecuteMixin(object):
tries += 1 tries += 1
if tries >= self.configuration.num_shell_tries: if tries >= self.configuration.num_shell_tries:
raise raise
LOG.exception(_LE("Recovering from a failed execute. " LOG.exception("Recovering from a failed execute. "
"Try number %s"), tries) "Try number %s", tries)
time.sleep(tries ** 2) time.sleep(tries ** 2)
@ -1295,8 +1295,8 @@ class ShareDriver(object):
self.delete_snapshot( self.delete_snapshot(
context, share_snapshot, share_server=share_server) context, share_snapshot, share_server=share_server)
except exception.ManilaException: except exception.ManilaException:
msg = _LE('Could not delete share group snapshot member %(snap)s ' msg = ('Could not delete share group snapshot member %(snap)s '
'for share %(share)s.') 'for share %(share)s.')
LOG.error(msg % { LOG.error(msg % {
'snap': share_snapshot['id'], 'snap': share_snapshot['id'],
'share': share_snapshot['share_id'], 'share': share_snapshot['share_id'],
@ -1361,7 +1361,7 @@ class ShareDriver(object):
raise exception.ShareGroupSnapshotNotSupported( raise exception.ShareGroupSnapshotNotSupported(
share_group=snap_dict['share_group_id']) share_group=snap_dict['share_group_id'])
elif not snapshot_members: elif not snapshot_members:
LOG.warning(_LW('No shares in share group to create snapshot.')) LOG.warning('No shares in share group to create snapshot.')
return None, None return None, None
else: else:
share_snapshots = [] share_snapshots = []
@ -1386,9 +1386,9 @@ class ShareDriver(object):
snapshot_members_updates.append(member_update) snapshot_members_updates.append(member_update)
share_snapshots.append(share_snapshot) share_snapshots.append(share_snapshot)
except exception.ManilaException as e: except exception.ManilaException as e:
msg = _LE('Could not create share group snapshot. Failed ' msg = ('Could not create share group snapshot. Failed '
'to create share snapshot %(snap)s for ' 'to create share snapshot %(snap)s for '
'share %(share)s.') 'share %(share)s.')
LOG.exception(msg % { LOG.exception(msg % {
'snap': share_snapshot['id'], 'snap': share_snapshot['id'],
'share': share_snapshot['share_id'] 'share': share_snapshot['share_id']

View File

@ -20,7 +20,7 @@ from oslo_utils import units
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share import share_types from manila.share import share_types
@ -142,7 +142,7 @@ class CephFSNativeDriver(driver.ShareDriver,):
auth_id = self.configuration.safe_get('cephfs_auth_id') auth_id = self.configuration.safe_get('cephfs_auth_id')
self._volume_client = ceph_volume_client.CephFSVolumeClient( self._volume_client = ceph_volume_client.CephFSVolumeClient(
auth_id, conf_path, cluster_name) auth_id, conf_path, cluster_name)
LOG.info(_LI("[%(be)s}] Ceph client found, connecting..."), LOG.info("[%(be)s}] Ceph client found, connecting...",
{"be": self.backend_name}) {"be": self.backend_name})
if auth_id != CEPH_DEFAULT_AUTH_ID: if auth_id != CEPH_DEFAULT_AUTH_ID:
# Evict any other manila sessions. Only do this if we're # Evict any other manila sessions. Only do this if we're
@ -157,7 +157,7 @@ class CephFSNativeDriver(driver.ShareDriver,):
self._volume_client = None self._volume_client = None
raise raise
else: else:
LOG.info(_LI("[%(be)s] Ceph client connection complete."), LOG.info("[%(be)s] Ceph client connection complete.",
{"be": self.backend_name}) {"be": self.backend_name})
return self._volume_client return self._volume_client
@ -199,7 +199,7 @@ class CephFSNativeDriver(driver.ShareDriver,):
addrs=",".join(mon_addrs), addrs=",".join(mon_addrs),
path=volume['mount_path']) path=volume['mount_path'])
LOG.info(_LI("Calculated export location for share %(id)s: %(loc)s"), LOG.info("Calculated export location for share %(id)s: %(loc)s",
{"id": share['id'], "loc": export_location}) {"id": share['id'], "loc": export_location})
return { return {
@ -244,8 +244,8 @@ class CephFSNativeDriver(driver.ShareDriver,):
def _deny_access(self, context, share, access, share_server=None): def _deny_access(self, context, share, access, share_server=None):
if access['access_type'] != CEPHX_ACCESS_TYPE: if access['access_type'] != CEPHX_ACCESS_TYPE:
LOG.warning(_LW("Invalid access type '%(type)s', " LOG.warning("Invalid access type '%(type)s', "
"ignoring in deny."), "ignoring in deny.",
{"type": access['access_type']}) {"type": access['access_type']})
return return

View File

@ -18,7 +18,7 @@ import uuid
from oslo_log import log from oslo_log import log
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI from manila.i18n import _
from manila.share import driver from manila.share import driver
@ -66,8 +66,8 @@ class DockerExecHelper(driver.ExecuteMixin):
if result[1] != "": if result[1] != "":
raise exception.ManilaException( raise exception.ManilaException(
_("Container %s has failed to start.") % name) _("Container %s has failed to start.") % name)
LOG.info(_LI("A container has been successfully started! Its id is " LOG.info("A container has been successfully started! Its id is "
"%s."), result[0].rstrip('\n')) "%s.", result[0].rstrip('\n'))
def stop_container(self, name): def stop_container(self, name):
LOG.debug("Stopping container %s.", name) LOG.debug("Stopping container %s.", name)
@ -76,7 +76,7 @@ class DockerExecHelper(driver.ExecuteMixin):
if result[1] != '': if result[1] != '':
raise exception.ManilaException( raise exception.ManilaException(
_("Container %s has failed to stop properly.") % name) _("Container %s has failed to stop properly.") % name)
LOG.info(_LI("Container %s is successfully stopped."), name) LOG.info("Container %s is successfully stopped.", name)
def execute(self, name=None, cmd=None): def execute(self, name=None, cmd=None):
if name is None: if name is None:
@ -94,7 +94,7 @@ class DockerExecHelper(driver.ExecuteMixin):
try: try:
result = self._execute(*cmd, run_as_root=True) result = self._execute(*cmd, run_as_root=True)
except Exception: except Exception:
LOG.exception(_LE("Executing command failed.")) LOG.exception("Executing command failed.")
return None return None
LOG.debug("Execution result: %s.", result) LOG.debug("Execution result: %s.", result)
return result return result

View File

@ -27,7 +27,7 @@ from oslo_log import log
from oslo_utils import importutils from oslo_utils import importutils
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila import utils from manila import utils
@ -156,9 +156,9 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
["rm", "-fR", "/shares/%s" % share.share_id] ["rm", "-fR", "/shares/%s" % share.share_id]
) )
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to remove /shares/%(share)s directory in " LOG.warning("Failed to remove /shares/%(share)s directory in "
"container %(cont)s."), {"share": share.share_id, "container %(cont)s.", {"share": share.share_id,
"cont": server_id}) "cont": server_id})
LOG.error(e) LOG.error(e)
self.storage.remove_storage(share) self.storage.remove_storage(share)
@ -213,8 +213,8 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
"specified.") % neutron_class "specified.") % neutron_class
raise exception.ManilaException(msg) raise exception.ManilaException(msg)
elif host_id is None: elif host_id is None:
LOG.warning(_LW("neutron_host_id is not specified. This driver " LOG.warning("neutron_host_id is not specified. This driver "
"might not work as expected without it.")) "might not work as expected without it.")
def _connect_to_network(self, server_id, network_info, host_veth): def _connect_to_network(self, server_id, network_info, host_veth):
LOG.debug("Attempting to connect container to neutron network.") LOG.debug("Attempting to connect container to neutron network.")
@ -280,8 +280,8 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
self.configuration.container_ovs_bridge_name, self.configuration.container_ovs_bridge_name,
veth, run_as_root=True) veth, run_as_root=True)
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to delete port %s: port " LOG.warning("Failed to delete port %s: port "
"vanished."), veth) "vanished.", veth)
LOG.error(e) LOG.error(e)
def _get_veth_state(self): def _get_veth_state(self):
@ -317,5 +317,5 @@ class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin):
veth = self._get_corresponding_veth(veths_before, veths_after) veth = self._get_corresponding_veth(veths_before, veths_after)
self._connect_to_network(server_id, network_info, veth) self._connect_to_network(server_id, network_info, veth)
LOG.info(_LI("Container %s was created."), server_id) LOG.info("Container %s was created.", server_id)
return {"id": network_info["server_id"]} return {"id": network_info["server_id"]}

View File

@ -17,7 +17,7 @@ from oslo_log import log
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LW from manila.i18n import _
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -110,7 +110,7 @@ class DockerCIFSHelper(object):
existing_users = self._get_existing_users(server_id, share_name, existing_users = self._get_existing_users(server_id, share_name,
access) access)
except TypeError: except TypeError:
LOG.warning(_LW("Can't access smbd at share %s.") % share_name) LOG.warning("Can't access smbd at share %s." % share_name)
return return
else: else:
allowed_users = " ".join(sorted(set(existing_users.split()) - allowed_users = " ".join(sorted(set(existing_users.split()) -

View File

@ -20,7 +20,7 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
from manila import exception from manila import exception
from manila.i18n import _, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
@ -85,7 +85,7 @@ class LVMHelper(driver.ExecuteMixin):
try: try:
self._execute("umount", to_remove, run_as_root=True) self._execute("umount", to_remove, run_as_root=True)
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to umount helper directory %s."), LOG.warning("Failed to umount helper directory %s.",
to_remove) to_remove)
LOG.error(e) LOG.error(e)
# (aovchinnikov): bug 1621784 manifests itself in jamming logical # (aovchinnikov): bug 1621784 manifests itself in jamming logical
@ -94,7 +94,7 @@ class LVMHelper(driver.ExecuteMixin):
self._execute("lvremove", "-f", "--autobackup", "n", self._execute("lvremove", "-f", "--autobackup", "n",
to_remove, run_as_root=True) to_remove, run_as_root=True)
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to remove logical volume %s.") % to_remove) LOG.warning("Failed to remove logical volume %s." % to_remove)
LOG.error(e) LOG.error(e)
def extend_share(self, share, new_size, share_server=None): def extend_share(self, share, new_size, share_server=None):

View File

@ -24,7 +24,7 @@ import six
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LW from manila.i18n import _
from manila.share.drivers.dell_emc.plugins import base from manila.share.drivers.dell_emc.plugins import base
from manila.share.drivers.dell_emc.plugins.isilon import isilon_api from manila.share.drivers.dell_emc.plugins.isilon import isilon_api
@ -139,8 +139,8 @@ class IsilonStorageConnection(base.StorageConnection):
self._root_dir + '/' + share['name']) self._root_dir + '/' + share['name'])
if share_id is None: if share_id is None:
lw = _LW('Attempted to delete NFS Share "%s", but the share does ' lw = ('Attempted to delete NFS Share "%s", but the share does '
'not appear to exist.') 'not appear to exist.')
LOG.warning(lw, share['name']) LOG.warning(lw, share['name'])
else: else:
# attempt to delete the share # attempt to delete the share
@ -154,8 +154,8 @@ class IsilonStorageConnection(base.StorageConnection):
"""Is called to remove CIFS share.""" """Is called to remove CIFS share."""
smb_share = self._isilon_api.lookup_smb_share(share['name']) smb_share = self._isilon_api.lookup_smb_share(share['name'])
if smb_share is None: if smb_share is None:
lw = _LW('Attempted to delete CIFS Share "%s", but the share does ' lw = ('Attempted to delete CIFS Share "%s", but the share does '
'not appear to exist.') 'not appear to exist.')
LOG.warning(lw, share['name']) LOG.warning(lw, share['name'])
else: else:
share_deleted = self._isilon_api.delete_smb_share(share['name']) share_deleted = self._isilon_api.delete_smb_share(share['name'])

View File

@ -25,7 +25,7 @@ if storops:
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LE, _LW from manila.i18n import _
from manila.share.drivers.dell_emc.plugins.unity import utils from manila.share.drivers.dell_emc.plugins.unity import utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -34,7 +34,7 @@ LOG = log.getLogger(__name__)
class UnityClient(object): class UnityClient(object):
def __init__(self, host, username, password): def __init__(self, host, username, password):
if storops is None: if storops is None:
LOG.error(_LE('StorOps is required to run EMC Unity driver.')) LOG.error('StorOps is required to run EMC Unity driver.')
self.system = storops.UnitySystem(host, username, password) self.system = storops.UnitySystem(host, username, password)
def create_cifs_share(self, resource, share_name): def create_cifs_share(self, resource, share_name):
@ -51,7 +51,7 @@ class UnityClient(object):
# based share. Log the internal error if it happens. # based share. Log the internal error if it happens.
share.enable_ace() share.enable_ace()
except storops_ex.UnityException: except storops_ex.UnityException:
msg = _LE('Failed to enabled ACE for share: {}.') msg = ('Failed to enabled ACE for share: {}.')
LOG.exception(msg.format(share_name)) LOG.exception(msg.format(share_name))
return share return share
except storops_ex.UnitySmbShareNameExistedError: except storops_ex.UnitySmbShareNameExistedError:
@ -116,22 +116,22 @@ class UnityClient(object):
try: try:
filesystem.delete() filesystem.delete()
except storops_ex.UnityResourceNotFoundError: except storops_ex.UnityResourceNotFoundError:
LOG.info(_LI('Filesystem %s is already removed.'), filesystem.name) LOG.info('Filesystem %s is already removed.', filesystem.name)
def create_nas_server(self, name, sp, pool, tenant=None): def create_nas_server(self, name, sp, pool, tenant=None):
try: try:
return self.system.create_nas_server(name, sp, pool, return self.system.create_nas_server(name, sp, pool,
tenant=tenant) tenant=tenant)
except storops_ex.UnityNasServerNameUsedError: except storops_ex.UnityNasServerNameUsedError:
LOG.info(_LI('Share server %s already exists, ignoring share ' LOG.info('Share server %s already exists, ignoring share '
'server creation.'), name) 'server creation.', name)
return self.get_nas_server(name) return self.get_nas_server(name)
def get_nas_server(self, name): def get_nas_server(self, name):
try: try:
return self.system.get_nas_server(name=name) return self.system.get_nas_server(name=name)
except storops_ex.UnityResourceNotFoundError: except storops_ex.UnityResourceNotFoundError:
LOG.info(_LI('NAS server %s not found.'), name) LOG.info('NAS server %s not found.', name)
raise raise
def delete_nas_server(self, name, username=None, password=None): def delete_nas_server(self, name, username=None, password=None):
@ -141,7 +141,7 @@ class UnityClient(object):
tenant = nas_server.tenant tenant = nas_server.tenant
nas_server.delete(username=username, password=password) nas_server.delete(username=username, password=password)
except storops_ex.UnityResourceNotFoundError: except storops_ex.UnityResourceNotFoundError:
LOG.info(_LI('NAS server %s not found.'), name) LOG.info('NAS server %s not found.', name)
if tenant is not None: if tenant is not None:
self._delete_tenant(tenant) self._delete_tenant(tenant)
@ -156,8 +156,8 @@ class UnityClient(object):
try: try:
tenant.delete(delete_hosts=True) tenant.delete(delete_hosts=True)
except storops_ex.UnityException as ex: except storops_ex.UnityException as ex:
LOG.warning(_LW('Delete tenant %(tenant)s failed with error: ' LOG.warning('Delete tenant %(tenant)s failed with error: '
'%(ex)s. Leave the tenant on the system.'), '%(ex)s. Leave the tenant on the system.',
{'tenant': tenant.get_id(), {'tenant': tenant.get_id(),
'ex': ex}) 'ex': ex})
@ -166,8 +166,8 @@ class UnityClient(object):
try: try:
nas_server.create_dns_server(domain, dns_ip) nas_server.create_dns_server(domain, dns_ip)
except storops_ex.UnityOneDnsPerNasServerError: except storops_ex.UnityOneDnsPerNasServerError:
LOG.info(_LI('DNS server %s already exists, ' LOG.info('DNS server %s already exists, '
'ignoring DNS server creation.'), domain) 'ignoring DNS server creation.', domain)
@staticmethod @staticmethod
def create_interface(nas_server, ip_addr, netmask, gateway, port_id, def create_interface(nas_server, ip_addr, netmask, gateway, port_id,
@ -190,16 +190,16 @@ class UnityClient(object):
domain_username=username, domain_username=username,
domain_password=password) domain_password=password)
except storops_ex.UnitySmbNameInUseError: except storops_ex.UnitySmbNameInUseError:
LOG.info(_LI('CIFS service on NAS server %s is ' LOG.info('CIFS service on NAS server %s is '
'already enabled.'), nas_server.name) 'already enabled.', nas_server.name)
@staticmethod @staticmethod
def enable_nfs_service(nas_server): def enable_nfs_service(nas_server):
try: try:
nas_server.enable_nfs_service() nas_server.enable_nfs_service()
except storops_ex.UnityNfsAlreadyEnabledError: except storops_ex.UnityNfsAlreadyEnabledError:
LOG.info(_LI('NFS service on NAS server %s is ' LOG.info('NFS service on NAS server %s is '
'already enabled.'), nas_server.name) 'already enabled.', nas_server.name)
@staticmethod @staticmethod
def create_snapshot(filesystem, name): def create_snapshot(filesystem, name):
@ -207,8 +207,8 @@ class UnityClient(object):
try: try:
return filesystem.create_snap(name, fs_access_type=access_type) return filesystem.create_snap(name, fs_access_type=access_type)
except storops_ex.UnitySnapNameInUseError: except storops_ex.UnitySnapNameInUseError:
LOG.info(_LI('Snapshot %(snap)s on Filesystem %(fs)s already ' LOG.info('Snapshot %(snap)s on Filesystem %(fs)s already '
'exists.'), {'snap': name, 'fs': filesystem.name}) 'exists.', {'snap': name, 'fs': filesystem.name})
def create_snap_of_snap(self, src_snap, dst_snap_name, snap_type): def create_snap_of_snap(self, src_snap, dst_snap_name, snap_type):
access_type = enums.FilesystemSnapAccessTypeEnum.PROTOCOL access_type = enums.FilesystemSnapAccessTypeEnum.PROTOCOL
@ -233,7 +233,7 @@ class UnityClient(object):
try: try:
snap.delete() snap.delete()
except storops_ex.UnityResourceNotFoundError: except storops_ex.UnityResourceNotFoundError:
LOG.info(_LI('Snapshot %s is already removed.'), snap.name) LOG.info('Snapshot %s is already removed.', snap.name)
def get_pool(self, name=None): def get_pool(self, name=None):
return self.system.get_pool(name=name) return self.system.get_pool(name=name)
@ -283,7 +283,7 @@ class UnityClient(object):
try: try:
share.delete_access(host_ip) share.delete_access(host_ip)
except storops_ex.UnityHostNotFoundException: except storops_ex.UnityHostNotFoundException:
LOG.info(_LI('%(host)s access to %(share)s is already removed.'), LOG.info('%(host)s access to %(share)s is already removed.',
{'host': host_ip, 'share': share_name}) {'host': host_ip, 'share': share_name})
def get_file_ports(self): def get_file_ports(self):
@ -328,6 +328,6 @@ class UnityClient(object):
"Use the existing VLAN tenant.", vlan_id) "Use the existing VLAN tenant.", vlan_id)
exc.reraise = False exc.reraise = False
except storops_ex.SystemAPINotSupported: except storops_ex.SystemAPINotSupported:
LOG.info(_LI("This system doesn't support tenant.")) LOG.info("This system doesn't support tenant.")
return tenant return tenant

View File

@ -27,7 +27,7 @@ if storops:
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW, _LI from manila.i18n import _
from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins import base as driver
from manila.share.drivers.dell_emc.plugins.unity import client from manila.share.drivers.dell_emc.plugins.unity import client
from manila.share.drivers.dell_emc.plugins.unity import utils as unity_utils from manila.share.drivers.dell_emc.plugins.unity import utils as unity_utils
@ -127,9 +127,9 @@ class UnityStorageConnection(driver.StorageConnection):
raise exception.BadConfigurationException(reason=msg) raise exception.BadConfigurationException(reason=msg)
if unmanaged_port_ids: if unmanaged_port_ids:
LOG.info(_LI("The following specified ports are not managed by " LOG.info("The following specified ports are not managed by "
"the backend: %(unmanaged)s. This host will only " "the backend: %(unmanaged)s. This host will only "
"manage the storage ports: %(exist)s"), "manage the storage ports: %(exist)s",
{'unmanaged': ",".join(unmanaged_port_ids), {'unmanaged': ",".join(unmanaged_port_ids),
'exist': ",".join(map(",".join, 'exist': ",".join(map(",".join,
sp_ports_map.values()))}) sp_ports_map.values()))})
@ -138,8 +138,8 @@ class UnityStorageConnection(driver.StorageConnection):
",".join(map(",".join, sp_ports_map.values()))) ",".join(map(",".join, sp_ports_map.values())))
if len(sp_ports_map) == 1: if len(sp_ports_map) == 1:
LOG.info(_LI("Only ports of %s are configured. Configure ports " LOG.info("Only ports of %s are configured. Configure ports "
"of both SPA and SPB to use both of the SPs."), "of both SPA and SPB to use both of the SPs.",
list(sp_ports_map)[0]) list(sp_ports_map)[0])
return sp_ports_map return sp_ports_map
@ -237,7 +237,7 @@ class UnityStorageConnection(driver.StorageConnection):
backend_share = self.client.get_share(share_name, backend_share = self.client.get_share(share_name,
share['share_proto']) share['share_proto'])
except storops_ex.UnityResourceNotFoundError: except storops_ex.UnityResourceNotFoundError:
LOG.warning(_LW("Share %s is not found when deleting the share"), LOG.warning("Share %s is not found when deleting the share",
share_name) share_name)
return return
@ -261,8 +261,8 @@ class UnityStorageConnection(driver.StorageConnection):
new_size) new_size)
else: else:
share_id = share['id'] share_id = share['id']
reason = _LE("Driver does not support extending a " reason = ("Driver does not support extending a "
"snapshot based share.") "snapshot based share.")
raise exception.ShareExtendingError(share_id=share_id, raise exception.ShareExtendingError(share_id=share_id,
reason=reason) reason=reason)
@ -422,7 +422,7 @@ class UnityStorageConnection(driver.StorageConnection):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE('Could not setup server.')) LOG.exception('Could not setup server.')
server_details = {'share_server_name': server_name} server_details = {'share_server_name': server_name}
self.teardown_server( self.teardown_server(
server_details, network_info['security_services']) server_details, network_info['security_services'])
@ -538,10 +538,10 @@ class UnityStorageConnection(driver.StorageConnection):
raise exception.BadConfigurationException(reason=msg) raise exception.BadConfigurationException(reason=msg)
if unmanaged_pools: if unmanaged_pools:
LOG.info(_LI("The following specified storage pools " LOG.info("The following specified storage pools "
"are not managed by the backend: " "are not managed by the backend: "
"%(un_managed)s. This host will only manage " "%(un_managed)s. This host will only manage "
"the storage pools: %(exist)s"), "the storage pools: %(exist)s",
{'un_managed': ",".join(unmanaged_pools), {'un_managed': ",".join(unmanaged_pools),
'exist': ",".join(matched_pools)}) 'exist': ",".join(matched_pools)})
else: else:
@ -621,13 +621,13 @@ class UnityStorageConnection(driver.StorageConnection):
# Enable NFS service with kerberos # Enable NFS service with kerberos
kerberos_enabled = True kerberos_enabled = True
# TODO(jay.xu): enable nfs service with kerberos # TODO(jay.xu): enable nfs service with kerberos
LOG.warning(_LW('Kerberos is not supported by ' LOG.warning('Kerberos is not supported by '
'EMC Unity manila driver plugin.')) 'EMC Unity manila driver plugin.')
elif service_type == 'ldap': elif service_type == 'ldap':
LOG.warning(_LW('LDAP is not supported by ' LOG.warning('LDAP is not supported by '
'EMC Unity manila driver plugin.')) 'EMC Unity manila driver plugin.')
else: else:
LOG.warning(_LW('Unknown security service type: %s.'), LOG.warning('Unknown security service type: %s.',
service_type) service_type)
if not kerberos_enabled: if not kerberos_enabled:

View File

@ -24,7 +24,7 @@ from oslo_utils import units
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins import base as driver
from manila.share.drivers.dell_emc.plugins.vmax import ( from manila.share.drivers.dell_emc.plugins.vmax import (
object_manager as manager) object_manager as manager)
@ -250,8 +250,8 @@ class VMAXStorageConnection(driver.StorageConnection):
def delete_share(self, context, share, share_server=None): def delete_share(self, context, share, share_server=None):
"""Delete a share.""" """Delete a share."""
if share_server is None: if share_server is None:
LOG.warning(_LW("Share network should be specified for " LOG.warning("Share network should be specified for "
"share deletion.")) "share deletion.")
return return
share_proto = share['share_proto'].upper() share_proto = share['share_proto'].upper()
@ -295,20 +295,20 @@ class VMAXStorageConnection(driver.StorageConnection):
# Delete mount point # Delete mount point
self._get_context('MountPoint').delete(path, vdm_name) self._get_context('MountPoint').delete(path, vdm_name)
except exception.EMCVmaxXMLAPIError as e: except exception.EMCVmaxXMLAPIError as e:
LOG.exception(_LE("CIFS server %(name)s on mover %(mover_name)s " LOG.exception("CIFS server %(name)s on mover %(mover_name)s "
"not found due to error %(err)s. Skip the " "not found due to error %(err)s. Skip the "
"deletion."), "deletion.",
{'name': path, 'mover_name': vdm_name, {'name': path, 'mover_name': vdm_name,
'err': e.message}) 'err': e.message})
try: try:
# Delete file system # Delete file system
self._get_context('FileSystem').delete(share_name) self._get_context('FileSystem').delete(share_name)
except exception.EMCVmaxXMLAPIError as e: except exception.EMCVmaxXMLAPIError as e:
LOG.exception(_LE("File system %(share_name)s not found due to" LOG.exception("File system %(share_name)s not found due to"
"error %(err)s. Skip the deletion."), "error %(err)s. Skip the deletion.",
{'share_name': share_name, {'share_name': share_name,
'err': e.message}) 'err': e.message})
def delete_snapshot(self, context, snapshot, share_server=None): def delete_snapshot(self, context, snapshot, share_server=None):
"""Delete a snapshot.""" """Delete a snapshot."""
@ -471,7 +471,7 @@ class VMAXStorageConnection(driver.StorageConnection):
share_name = share['id'] share_name = share['id']
if access['access_type'] != 'user': if access['access_type'] != 'user':
LOG.warning(_LW("Only user access type allowed for CIFS share.")) LOG.warning("Only user access type allowed for CIFS share.")
return return
user_name = access['access_to'] user_name = access['access_to']
@ -505,7 +505,7 @@ class VMAXStorageConnection(driver.StorageConnection):
access_type = access['access_type'] access_type = access['access_type']
if access_type != 'ip': if access_type != 'ip':
LOG.warning(_LW("Only ip access type allowed.")) LOG.warning("Only ip access type allowed.")
return return
host_ip = access['access_to'] host_ip = access['access_to']
@ -550,7 +550,7 @@ class VMAXStorageConnection(driver.StorageConnection):
",".join(real_pools)) ",".join(real_pools))
raise exception.InvalidParameterValue(err=msg) raise exception.InvalidParameterValue(err=msg)
LOG.info(_LI("Storage pools: %s will be managed."), LOG.info("Storage pools: %s will be managed.",
",".join(matched_pools)) ",".join(matched_pools))
else: else:
LOG.debug("No storage pool is specified, so all pools " LOG.debug("No storage pool is specified, so all pools "
@ -722,7 +722,7 @@ class VMAXStorageConnection(driver.StorageConnection):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE('Could not setup server')) LOG.exception('Could not setup server')
server_details = self._construct_backend_details( server_details = self._construct_backend_details(
vdm_name, allocated_interfaces) vdm_name, allocated_interfaces)
self.teardown_server( self.teardown_server(
@ -810,7 +810,7 @@ class VMAXStorageConnection(driver.StorageConnection):
status, servers = self._get_context('CIFSServer').get_all( status, servers = self._get_context('CIFSServer').get_all(
vdm_name) vdm_name)
if constants.STATUS_OK != status: if constants.STATUS_OK != status:
LOG.error(_LE('Could not find CIFS server by name: %s.'), LOG.error('Could not find CIFS server by name: %s.',
vdm_name) vdm_name)
else: else:
cifs_servers = copy.deepcopy(servers) cifs_servers = copy.deepcopy(servers)

View File

@ -25,7 +25,6 @@ from six.moves.urllib import request as url_request # pylint: disable=E0611
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
from manila.share.drivers.dell_emc.plugins.vmax import constants from manila.share.drivers.dell_emc.plugins.vmax import constants
from manila import utils from manila import utils
@ -154,8 +153,8 @@ class SSHConnector(object):
return out, err return out, err
except processutils.ProcessExecutionError as e: except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE('Error running SSH command: %(cmd)s. ' LOG.error('Error running SSH command: %(cmd)s. '
'Error: %(excmsg)s.'), 'Error: %(excmsg)s.',
{'cmd': command, 'excmsg': e}) {'cmd': command, 'excmsg': e})
def log_request(self, cmd, out, err): def log_request(self, cmd, out, err):

View File

@ -24,7 +24,7 @@ import six
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW, _LE from manila.i18n import _
from manila.share.drivers.dell_emc.plugins.vmax import connector from manila.share.drivers.dell_emc.plugins.vmax import connector
from manila.share.drivers.dell_emc.plugins.vmax import constants from manila.share.drivers.dell_emc.plugins.vmax import constants
from manila.share.drivers.dell_emc.plugins.vmax import utils as vmax_utils from manila.share.drivers.dell_emc.plugins.vmax import utils as vmax_utils
@ -89,8 +89,8 @@ class StorageObject(object):
constants.STATUS_INFO): constants.STATUS_INFO):
response['maxSeverity'] = constants.STATUS_OK response['maxSeverity'] = constants.STATUS_OK
LOG.warning(_LW("Translated status from %(old)s to %(new)s. " LOG.warning("Translated status from %(old)s to %(new)s. "
"Message: %(info)s."), "Message: %(info)s.",
{'old': old_Severity, {'old': old_Severity,
'new': response['maxSeverity'], 'new': response['maxSeverity'],
'info': response}) 'info': response})
@ -252,8 +252,8 @@ class FileSystem(StorageObject):
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_FILESYSTEM_EXIST): response, constants.MSG_FILESYSTEM_EXIST):
LOG.warning(_LW("File system %s already exists. " LOG.warning("File system %s already exists. "
"Skip the creation."), name) "Skip the creation.", name)
return return
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create file system %(name)s. " message = (_("Failed to create file system %(name)s. "
@ -306,7 +306,7 @@ class FileSystem(StorageObject):
def delete(self, name): def delete(self, name):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("File system %s not found. Skip the deletion."), LOG.warning("File system %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -440,8 +440,8 @@ class FileSystem(StorageObject):
try: try:
self._execute_cmd(copy_ckpt_cmd, check_exit_code=True) self._execute_cmd(copy_ckpt_cmd, check_exit_code=True)
except processutils.ProcessExecutionError as expt: except processutils.ProcessExecutionError as expt:
LOG.error(_LE("Failed to copy content from snapshot %(snap)s to " LOG.error("Failed to copy content from snapshot %(snap)s to "
"file system %(filesystem)s. Reason: %(err)s."), "file system %(filesystem)s. Reason: %(err)s.",
{'snap': snap_name, {'snap': snap_name,
'filesystem': name, 'filesystem': name,
'err': expt}) 'err': expt})
@ -576,8 +576,8 @@ class MountPoint(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif self._is_mount_point_already_existent(response): elif self._is_mount_point_already_existent(response):
LOG.warning(_LW("Mount Point %(mount)s already exists. " LOG.warning("Mount Point %(mount)s already exists. "
"Skip the creation."), {'mount': mount_path}) "Skip the creation.", {'mount': mount_path})
return return
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_('Failed to create Mount Point %(mount)s for ' message = (_('Failed to create Mount Point %(mount)s for '
@ -642,8 +642,8 @@ class MountPoint(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif self._is_mount_point_nonexistent(response): elif self._is_mount_point_nonexistent(response):
LOG.warning(_LW('Mount point %(mount)s on mover %(mover_name)s ' LOG.warning('Mount point %(mount)s on mover %(mover_name)s '
'not found.'), 'not found.',
{'mount': mount_path, 'mover_name': mover_name}) {'mount': mount_path, 'mover_name': mover_name})
return return
@ -817,8 +817,8 @@ class Mover(StorageObject):
lines = out.strip().split('\n') lines = out.strip().split('\n')
for line in lines: for line in lines:
if line.strip().split() == header: if line.strip().split() == header:
LOG.info(_LI('Found the header of the command ' LOG.info('Found the header of the command '
'/nas/bin/nas_cel -interconnect -l.')) '/nas/bin/nas_cel -interconnect -l.')
else: else:
interconn = line.strip().split() interconn = line.strip().split()
if interconn[2] == source and interconn[4] == destination: if interconn[2] == source and interconn[4] == destination:
@ -874,7 +874,7 @@ class VDM(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif self._response_validation(response, constants.MSG_VDM_EXIST): elif self._response_validation(response, constants.MSG_VDM_EXIST):
LOG.warning(_LW("VDM %(name)s already exists. Skip the creation."), LOG.warning("VDM %(name)s already exists. Skip the creation.",
{'name': name}) {'name': name})
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create VDM %(name)s on mover " message = (_("Failed to create VDM %(name)s on mover "
@ -918,7 +918,7 @@ class VDM(StorageObject):
def delete(self, name): def delete(self, name):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("VDM %s not found. Skip the deletion."), LOG.warning("VDM %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -1053,8 +1053,8 @@ class Snapshot(StorageObject):
response = self._send_request(request) response = self._send_request(request)
if self._response_validation(response, constants.MSG_SNAP_EXIST): if self._response_validation(response, constants.MSG_SNAP_EXIST):
LOG.warning(_LW("Snapshot %(name)s already exists. " LOG.warning("Snapshot %(name)s already exists. "
"Skip the creation."), "Skip the creation.",
{'name': name}) {'name': name})
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create snapshot %(name)s on " message = (_("Failed to create snapshot %(name)s on "
@ -1098,7 +1098,7 @@ class Snapshot(StorageObject):
def delete(self, name): def delete(self, name):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("Snapshot %s not found. Skip the deletion."), LOG.warning("Snapshot %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -1182,12 +1182,12 @@ class MoverInterface(StorageObject):
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_NAME_EXIST): response, constants.MSG_INTERFACE_NAME_EXIST):
LOG.warning(_LW("Mover interface name %s already exists. " LOG.warning("Mover interface name %s already exists. "
"Skip the creation."), name) "Skip the creation.", name)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_EXIST): response, constants.MSG_INTERFACE_EXIST):
LOG.warning(_LW("Mover interface IP %s already exists. " LOG.warning("Mover interface IP %s already exists. "
"Skip the creation."), ip_addr) "Skip the creation.", ip_addr)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_INVALID_VLAN_ID): response, constants.MSG_INTERFACE_INVALID_VLAN_ID):
# When fail to create a mover interface with the specified # When fail to create a mover interface with the specified
@ -1246,8 +1246,8 @@ class MoverInterface(StorageObject):
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_NON_EXISTENT): response, constants.MSG_INTERFACE_NON_EXISTENT):
LOG.warning(_LW("Mover interface %s not found. " LOG.warning("Mover interface %s not found. "
"Skip the deletion."), ip_addr) "Skip the deletion.", ip_addr)
return return
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete mover interface %(ip)s on mover " message = (_("Failed to delete mover interface %(ip)s on mover "
@ -1316,8 +1316,8 @@ class DNSDomain(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVmaxInvalidMoverID(id=mover_id) raise exception.EMCVmaxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
LOG.warning(_LW("Failed to delete DNS domain %(name)s. " LOG.warning("Failed to delete DNS domain %(name)s. "
"Reason: %(err)s."), "Reason: %(err)s.",
{'name': name, 'err': response['problems']}) {'name': name, 'err': response['problems']})
@ -1508,13 +1508,13 @@ class CIFSServer(StorageObject):
status, out = self.get( status, out = self.get(
computer_name.lower(), mover_name, is_vdm, self.xml_retry) computer_name.lower(), mover_name, is_vdm, self.xml_retry)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s " LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
"not found. Skip the deletion."), "not found. Skip the deletion.",
{'name': computer_name, 'mover_name': mover_name}) {'name': computer_name, 'mover_name': mover_name})
return return
except exception.EMCVmaxXMLAPIError: except exception.EMCVmaxXMLAPIError:
LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s " LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
"not found. Skip the deletion."), "not found. Skip the deletion.",
{'name': computer_name, 'mover_name': mover_name}) {'name': computer_name, 'mover_name': mover_name})
return return
@ -1606,7 +1606,7 @@ class CIFSShare(StorageObject):
def delete(self, name, mover_name, is_vdm=True): def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("CIFS share %s not found. Skip the deletion."), LOG.warning("CIFS share %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -1684,8 +1684,8 @@ class CIFSShare(StorageObject):
dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' % dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' %
{'domain': domain, 'user': user_name}, re.I) {'domain': domain, 'user': user_name}, re.I)
if re.search(dup_msg, expt.stdout): if re.search(dup_msg, expt.stdout):
LOG.warning(_LW("Duplicate access control entry, " LOG.warning("Duplicate access control entry, "
"skipping allow...")) "skipping allow...")
else: else:
message = (_('Failed to allow the access %(access)s to ' message = (_('Failed to allow the access %(access)s to '
'CIFS share %(name)s. Reason: %(err)s.') % 'CIFS share %(name)s. Reason: %(err)s.') %
@ -1716,10 +1716,10 @@ class CIFSShare(StorageObject):
% {'domain': domain, 'user': user_name}, re.I) % {'domain': domain, 'user': user_name}, re.I)
if re.search(not_found_msg, expt.stdout): if re.search(not_found_msg, expt.stdout):
LOG.warning(_LW("No access control entry found, " LOG.warning("No access control entry found, "
"skipping deny...")) "skipping deny...")
elif re.search(user_err_msg, expt.stdout): elif re.search(user_err_msg, expt.stdout):
LOG.warning(_LW("User not found on domain, skipping deny...")) LOG.warning("User not found on domain, skipping deny...")
else: else:
message = (_('Failed to deny the access %(access)s to ' message = (_('Failed to deny the access %(access)s to '
'CIFS share %(name)s. Reason: %(err)s.') % 'CIFS share %(name)s. Reason: %(err)s.') %
@ -1798,7 +1798,7 @@ class NFSShare(StorageObject):
status, out = self.get(name, mover_name) status, out = self.get(name, mover_name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("NFS share %s not found. Skip the deletion."), LOG.warning("NFS share %s not found. Skip the deletion.",
path) path)
return return
@ -1849,7 +1849,7 @@ class NFSShare(StorageObject):
dup_msg = (r'%(mover_name)s : No such file or directory' % dup_msg = (r'%(mover_name)s : No such file or directory' %
{'mover_name': mover_name}) {'mover_name': mover_name})
if re.search(dup_msg, expt.stdout): if re.search(dup_msg, expt.stdout):
LOG.warning(_LW("NFS share %s not found."), name) LOG.warning("NFS share %s not found.", name)
return constants.STATUS_NOT_FOUND, None return constants.STATUS_NOT_FOUND, None
else: else:
message = (_('Failed to list NFS share %(name)s on ' message = (_('Failed to list NFS share %(name)s on '

View File

@ -24,7 +24,7 @@ from oslo_utils import units
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins import base as driver
from manila.share.drivers.dell_emc.plugins.vnx import constants from manila.share.drivers.dell_emc.plugins.vnx import constants
from manila.share.drivers.dell_emc.plugins.vnx import object_manager as manager from manila.share.drivers.dell_emc.plugins.vnx import object_manager as manager
@ -250,9 +250,9 @@ class VNXStorageConnection(driver.StorageConnection):
def delete_share(self, context, share, share_server=None): def delete_share(self, context, share, share_server=None):
"""Delete a share.""" """Delete a share."""
if share_server is None: if share_server is None:
LOG.warning(_LW("Driver does not support share deletion without " LOG.warning("Driver does not support share deletion without "
"share network specified. Return directly because " "share network specified. Return directly because "
"there is nothing to clean.")) "there is nothing to clean.")
return return
share_proto = share['share_proto'] share_proto = share['share_proto']
@ -545,7 +545,7 @@ class VNXStorageConnection(driver.StorageConnection):
",".join(real_pools)) ",".join(real_pools))
raise exception.InvalidParameterValue(err=msg) raise exception.InvalidParameterValue(err=msg)
LOG.info(_LI("Storage pools: %s will be managed."), LOG.info("Storage pools: %s will be managed.",
",".join(matched_pools)) ",".join(matched_pools))
else: else:
LOG.debug("No storage pool is specified, so all pools " LOG.debug("No storage pool is specified, so all pools "
@ -711,7 +711,7 @@ class VNXStorageConnection(driver.StorageConnection):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE('Could not setup server.')) LOG.exception('Could not setup server.')
server_details = self._construct_backend_details( server_details = self._construct_backend_details(
vdm_name, allocated_interfaces) vdm_name, allocated_interfaces)
self.teardown_server( self.teardown_server(
@ -799,7 +799,7 @@ class VNXStorageConnection(driver.StorageConnection):
status, servers = self._get_context('CIFSServer').get_all( status, servers = self._get_context('CIFSServer').get_all(
vdm_name) vdm_name)
if constants.STATUS_OK != status: if constants.STATUS_OK != status:
LOG.error(_LE('Could not find CIFS server by name: %s.'), LOG.error('Could not find CIFS server by name: %s.',
vdm_name) vdm_name)
else: else:
cifs_servers = copy.deepcopy(servers) cifs_servers = copy.deepcopy(servers)

View File

@ -25,7 +25,6 @@ from six.moves.urllib import request as url_request # pylint: disable=E0611
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
from manila.share.drivers.dell_emc.plugins.vnx import constants from manila.share.drivers.dell_emc.plugins.vnx import constants
from manila import utils from manila import utils
@ -154,7 +153,7 @@ class SSHConnector(object):
return out, err return out, err
except processutils.ProcessExecutionError: except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error running SSH command: %(cmd)s.'), LOG.exception('Error running SSH command: %(cmd)s.',
{'cmd': command}) {'cmd': command})
def log_request(self, cmd, out, err): def log_request(self, cmd, out, err):

View File

@ -24,7 +24,7 @@ import six
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share.drivers.dell_emc.plugins.vnx import connector from manila.share.drivers.dell_emc.plugins.vnx import connector
from manila.share.drivers.dell_emc.plugins.vnx import constants from manila.share.drivers.dell_emc.plugins.vnx import constants
from manila.share.drivers.dell_emc.plugins.vnx import utils as vnx_utils from manila.share.drivers.dell_emc.plugins.vnx import utils as vnx_utils
@ -89,8 +89,8 @@ class StorageObject(object):
constants.STATUS_INFO): constants.STATUS_INFO):
response['maxSeverity'] = constants.STATUS_OK response['maxSeverity'] = constants.STATUS_OK
LOG.warning(_LW("Translated status from %(old)s to %(new)s. " LOG.warning("Translated status from %(old)s to %(new)s. "
"Message: %(info)s."), "Message: %(info)s.",
{'old': old_Severity, {'old': old_Severity,
'new': response['maxSeverity'], 'new': response['maxSeverity'],
'info': response}) 'info': response})
@ -252,8 +252,8 @@ class FileSystem(StorageObject):
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_FILESYSTEM_EXIST): response, constants.MSG_FILESYSTEM_EXIST):
LOG.warning(_LW("File system %s already exists. " LOG.warning("File system %s already exists. "
"Skip the creation."), name) "Skip the creation.", name)
return return
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create file system %(name)s. " message = (_("Failed to create file system %(name)s. "
@ -306,7 +306,7 @@ class FileSystem(StorageObject):
def delete(self, name): def delete(self, name):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("File system %s not found. Skip the deletion."), LOG.warning("File system %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -440,8 +440,8 @@ class FileSystem(StorageObject):
try: try:
self._execute_cmd(copy_ckpt_cmd, check_exit_code=True) self._execute_cmd(copy_ckpt_cmd, check_exit_code=True)
except processutils.ProcessExecutionError: except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed to copy content from snapshot %(snap)s " LOG.exception("Failed to copy content from snapshot %(snap)s "
"to file system %(filesystem)s."), "to file system %(filesystem)s.",
{'snap': snap_name, {'snap': snap_name,
'filesystem': name}) 'filesystem': name})
@ -575,8 +575,8 @@ class MountPoint(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._is_mount_point_already_existent(response): elif self._is_mount_point_already_existent(response):
LOG.warning(_LW("Mount Point %(mount)s already exists. " LOG.warning("Mount Point %(mount)s already exists. "
"Skip the creation."), {'mount': mount_path}) "Skip the creation.", {'mount': mount_path})
return return
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_('Failed to create Mount Point %(mount)s for ' message = (_('Failed to create Mount Point %(mount)s for '
@ -641,8 +641,8 @@ class MountPoint(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._is_mount_point_nonexistent(response): elif self._is_mount_point_nonexistent(response):
LOG.warning(_LW('Mount point %(mount)s on mover %(mover_name)s ' LOG.warning('Mount point %(mount)s on mover %(mover_name)s '
'not found.'), 'not found.',
{'mount': mount_path, 'mover_name': mover_name}) {'mount': mount_path, 'mover_name': mover_name})
return return
@ -816,8 +816,8 @@ class Mover(StorageObject):
lines = out.strip().split('\n') lines = out.strip().split('\n')
for line in lines: for line in lines:
if line.strip().split() == header: if line.strip().split() == header:
LOG.info(_LI('Found the header of the command ' LOG.info('Found the header of the command '
'/nas/bin/nas_cel -interconnect -l.')) '/nas/bin/nas_cel -interconnect -l.')
else: else:
interconn = line.strip().split() interconn = line.strip().split()
if interconn[2] == source and interconn[4] == destination: if interconn[2] == source and interconn[4] == destination:
@ -873,7 +873,7 @@ class VDM(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation(response, constants.MSG_VDM_EXIST): elif self._response_validation(response, constants.MSG_VDM_EXIST):
LOG.warning(_LW("VDM %(name)s already exists. Skip the creation."), LOG.warning("VDM %(name)s already exists. Skip the creation.",
{'name': name}) {'name': name})
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create VDM %(name)s on mover " message = (_("Failed to create VDM %(name)s on mover "
@ -917,7 +917,7 @@ class VDM(StorageObject):
def delete(self, name): def delete(self, name):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("VDM %s not found. Skip the deletion."), LOG.warning("VDM %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -1052,8 +1052,8 @@ class Snapshot(StorageObject):
response = self._send_request(request) response = self._send_request(request)
if self._response_validation(response, constants.MSG_SNAP_EXIST): if self._response_validation(response, constants.MSG_SNAP_EXIST):
LOG.warning(_LW("Snapshot %(name)s already exists. " LOG.warning("Snapshot %(name)s already exists. "
"Skip the creation."), "Skip the creation.",
{'name': name}) {'name': name})
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create snapshot %(name)s on " message = (_("Failed to create snapshot %(name)s on "
@ -1097,7 +1097,7 @@ class Snapshot(StorageObject):
def delete(self, name): def delete(self, name):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("Snapshot %s not found. Skip the deletion."), LOG.warning("Snapshot %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -1181,13 +1181,13 @@ class MoverInterface(StorageObject):
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_NAME_EXIST): response, constants.MSG_INTERFACE_NAME_EXIST):
LOG.warning(_LW("Mover interface name %s already exists. " LOG.warning("Mover interface name %s already exists. "
"Skip the creation."), name) "Skip the creation.", name)
return return
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_EXIST): response, constants.MSG_INTERFACE_EXIST):
LOG.warning(_LW("Mover interface IP %s already exists. " LOG.warning("Mover interface IP %s already exists. "
"Skip the creation."), ip_addr) "Skip the creation.", ip_addr)
return return
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_INVALID_VLAN_ID): response, constants.MSG_INTERFACE_INVALID_VLAN_ID):
@ -1247,8 +1247,8 @@ class MoverInterface(StorageObject):
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation( elif self._response_validation(
response, constants.MSG_INTERFACE_NON_EXISTENT): response, constants.MSG_INTERFACE_NON_EXISTENT):
LOG.warning(_LW("Mover interface %s not found. " LOG.warning("Mover interface %s not found. "
"Skip the deletion."), ip_addr) "Skip the deletion.", ip_addr)
return return
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete mover interface %(ip)s on mover " message = (_("Failed to delete mover interface %(ip)s on mover "
@ -1317,8 +1317,8 @@ class DNSDomain(StorageObject):
self.xml_retry = True self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id) raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']: elif constants.STATUS_OK != response['maxSeverity']:
LOG.warning(_LW("Failed to delete DNS domain %(name)s. " LOG.warning("Failed to delete DNS domain %(name)s. "
"Reason: %(err)s."), "Reason: %(err)s.",
{'name': name, 'err': response['problems']}) {'name': name, 'err': response['problems']})
@ -1509,13 +1509,13 @@ class CIFSServer(StorageObject):
status, out = self.get( status, out = self.get(
computer_name.lower(), mover_name, is_vdm, self.xml_retry) computer_name.lower(), mover_name, is_vdm, self.xml_retry)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s " LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
"not found. Skip the deletion."), "not found. Skip the deletion.",
{'name': computer_name, 'mover_name': mover_name}) {'name': computer_name, 'mover_name': mover_name})
return return
except exception.EMCVnxXMLAPIError: except exception.EMCVnxXMLAPIError:
LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s " LOG.warning("CIFS server %(name)s on mover %(mover_name)s "
"not found. Skip the deletion."), "not found. Skip the deletion.",
{'name': computer_name, 'mover_name': mover_name}) {'name': computer_name, 'mover_name': mover_name})
return return
@ -1607,7 +1607,7 @@ class CIFSShare(StorageObject):
def delete(self, name, mover_name, is_vdm=True): def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name) status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("CIFS share %s not found. Skip the deletion."), LOG.warning("CIFS share %s not found. Skip the deletion.",
name) name)
return return
elif constants.STATUS_OK != status: elif constants.STATUS_OK != status:
@ -1685,8 +1685,8 @@ class CIFSShare(StorageObject):
dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' % dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' %
{'domain': domain, 'user': user_name}, re.I) {'domain': domain, 'user': user_name}, re.I)
if re.search(dup_msg, expt.stdout): if re.search(dup_msg, expt.stdout):
LOG.warning(_LW("Duplicate access control entry, " LOG.warning("Duplicate access control entry, "
"skipping allow...")) "skipping allow...")
else: else:
message = (_('Failed to allow the access %(access)s to ' message = (_('Failed to allow the access %(access)s to '
'CIFS share %(name)s. Reason: %(err)s.') % 'CIFS share %(name)s. Reason: %(err)s.') %
@ -1717,10 +1717,10 @@ class CIFSShare(StorageObject):
% {'domain': domain, 'user': user_name}, re.I) % {'domain': domain, 'user': user_name}, re.I)
if re.search(not_found_msg, expt.stdout): if re.search(not_found_msg, expt.stdout):
LOG.warning(_LW("No access control entry found, " LOG.warning("No access control entry found, "
"skipping deny...")) "skipping deny...")
elif re.search(user_err_msg, expt.stdout): elif re.search(user_err_msg, expt.stdout):
LOG.warning(_LW("User not found on domain, skipping deny...")) LOG.warning("User not found on domain, skipping deny...")
else: else:
message = (_('Failed to deny the access %(access)s to ' message = (_('Failed to deny the access %(access)s to '
'CIFS share %(name)s. Reason: %(err)s.') % 'CIFS share %(name)s. Reason: %(err)s.') %
@ -1799,7 +1799,7 @@ class NFSShare(StorageObject):
status, out = self.get(name, mover_name) status, out = self.get(name, mover_name)
if constants.STATUS_NOT_FOUND == status: if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("NFS share %s not found. Skip the deletion."), LOG.warning("NFS share %s not found. Skip the deletion.",
path) path)
return return
@ -1850,7 +1850,7 @@ class NFSShare(StorageObject):
dup_msg = (r'%(mover_name)s : No such file or directory' % dup_msg = (r'%(mover_name)s : No such file or directory' %
{'mover_name': mover_name}) {'mover_name': mover_name})
if re.search(dup_msg, expt.stdout): if re.search(dup_msg, expt.stdout):
LOG.warning(_LW("NFS share %s not found."), name) LOG.warning("NFS share %s not found.", name)
return constants.STATUS_NOT_FOUND, None return constants.STATUS_NOT_FOUND, None
else: else:
message = (_('Failed to list NFS share %(name)s on ' message = (_('Failed to list NFS share %(name)s on '

View File

@ -24,7 +24,6 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _LI
from manila.share.drivers.ganesha import manager as ganesha_manager from manila.share.drivers.ganesha import manager as ganesha_manager
from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.ganesha import utils as ganesha_utils
@ -76,7 +75,7 @@ class GaneshaNASHelper(NASHelperBase):
if e.errno != errno.ENOENT or must_exist: if e.errno != errno.ENOENT or must_exist:
raise raise
dirlist = [] dirlist = []
LOG.info(_LI('Loading Ganesha config from %s.'), dirpath) LOG.info('Loading Ganesha config from %s.', dirpath)
conf_files = list(filter(self._confrx.search, dirlist)) conf_files = list(filter(self._confrx.search, dirlist))
conf_files.sort() conf_files.sort()
export_template = {} export_template = {}

View File

@ -24,7 +24,6 @@ import six
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.ganesha import utils as ganesha_utils
from manila import utils from manila import utils
@ -187,8 +186,8 @@ class GaneshaManager(object):
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
if makelog: if makelog:
LOG.error( LOG.error(
_LE("Error while executing management command on " ("Error while executing management command on "
"Ganesha node %(tag)s: %(msg)s."), "Ganesha node %(tag)s: %(msg)s."),
{'tag': tag, 'msg': msg}) {'tag': tag, 'msg': msg})
raise exception.GaneshaCommandFailure( raise exception.GaneshaCommandFailure(
stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code,
@ -324,8 +323,8 @@ class GaneshaManager(object):
run_as_root=False)[0] run_as_root=False)[0]
match = re.search('\Aexportid\|(\d+)$', out) match = re.search('\Aexportid\|(\d+)$', out)
if not match: if not match:
LOG.error(_LE("Invalid export database on " LOG.error("Invalid export database on "
"Ganesha node %(tag)s: %(db)s."), "Ganesha node %(tag)s: %(db)s.",
{'tag': self.tag, 'db': self.ganesha_db_path}) {'tag': self.tag, 'db': self.ganesha_db_path})
raise exception.InvalidSqliteDB() raise exception.InvalidSqliteDB()
return int(match.groups()[0]) return int(match.groups()[0])

View File

@ -30,7 +30,7 @@ from manila.common import constants as const
from manila import compute from manila import compute
from manila import context from manila import context
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers import service_instance from manila.share.drivers import service_instance
from manila import utils from manila import utils
@ -193,11 +193,11 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
if not common_sv_available: if not common_sv_available:
time.sleep(sv_fetch_retry_interval) time.sleep(sv_fetch_retry_interval)
LOG.warning(_LW("Waiting for the common service VM to become " LOG.warning("Waiting for the common service VM to become "
"available. " "available. "
"Driver is currently uninitialized. " "Driver is currently uninitialized. "
"Share server: %(share_server)s " "Share server: %(share_server)s "
"Retry interval: %(retry_interval)s"), "Retry interval: %(retry_interval)s",
dict(share_server=share_server, dict(share_server=share_server,
retry_interval=sv_fetch_retry_interval)) retry_interval=sv_fetch_retry_interval))
@ -293,14 +293,14 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE], ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
) )
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.error(_LE("Failed to sync mount files on server '%s'."), LOG.error("Failed to sync mount files on server '%s'.",
server_details['instance_id']) server_details['instance_id'])
raise exception.ShareBackendException(msg=six.text_type(e)) raise exception.ShareBackendException(msg=six.text_type(e))
try: try:
# Remount it to avoid postponed point of failure # Remount it to avoid postponed point of failure
self._ssh_exec(server_details, ['sudo', 'mount', '-a']) self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.error(_LE("Failed to mount all shares on server '%s'."), LOG.error("Failed to mount all shares on server '%s'.",
server_details['instance_id']) server_details['instance_id'])
raise exception.ShareBackendException(msg=six.text_type(e)) raise exception.ShareBackendException(msg=six.text_type(e))
@ -346,8 +346,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
# Add mount permanently # Add mount permanently
self._sync_mount_temp_and_perm_files(server_details) self._sync_mount_temp_and_perm_files(server_details)
else: else:
LOG.warning(_LW("Mount point '%(path)s' already exists on " LOG.warning("Mount point '%(path)s' already exists on "
"server '%(server)s'."), log_data) "server '%(server)s'.", log_data)
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
raise exception.ShareBackendException(msg=six.text_type(e)) raise exception.ShareBackendException(msg=six.text_type(e))
return _mount_device_with_lock() return _mount_device_with_lock()
@ -373,8 +373,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
# Remove mount permanently # Remove mount permanently
self._sync_mount_temp_and_perm_files(server_details) self._sync_mount_temp_and_perm_files(server_details)
else: else:
LOG.warning(_LW("Mount point '%(path)s' does not exist on " LOG.warning("Mount point '%(path)s' does not exist on "
"server '%(server)s'."), log_data) "server '%(server)s'.", log_data)
return _unmount_device_with_lock() return _unmount_device_with_lock()
def _get_mount_path(self, share): def _get_mount_path(self, share):
@ -449,10 +449,10 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
return volumes_list[0] return volumes_list[0]
elif len(volumes_list) > 1: elif len(volumes_list) > 1:
LOG.error( LOG.error(
_LE("Expected only one volume in volume list with name " "Expected only one volume in volume list with name "
"'%(name)s', but got more than one in a result - " "'%(name)s', but got more than one in a result - "
"'%(result)s'."), { "'%(result)s'.", {
'name': volume_name, 'result': volumes_list}) 'name': volume_name, 'result': volumes_list})
raise exception.ManilaException( raise exception.ManilaException(
_("Error. Ambiguous volumes for name '%s'") % volume_name) _("Error. Ambiguous volumes for name '%s'") % volume_name)
return None return None
@ -479,11 +479,11 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
volume_snapshot = volume_snapshot_list[0] volume_snapshot = volume_snapshot_list[0]
elif len(volume_snapshot_list) > 1: elif len(volume_snapshot_list) > 1:
LOG.error( LOG.error(
_LE("Expected only one volume snapshot in list with name " "Expected only one volume snapshot in list with name"
"'%(name)s', but got more than one in a result - " "'%(name)s', but got more than one in a result - "
"'%(result)s'."), { "'%(result)s'.", {
'name': volume_snapshot_name, 'name': volume_snapshot_name,
'result': volume_snapshot_list}) 'result': volume_snapshot_list})
raise exception.ManilaException( raise exception.ManilaException(
_('Error. Ambiguous volume snaphots')) _('Error. Ambiguous volume snaphots'))
return volume_snapshot return volume_snapshot
@ -501,8 +501,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
try: try:
volume = self._get_volume(context, share['id']) volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound: except exception.VolumeNotFound:
LOG.warning(_LW("Volume not found for share %s. " LOG.warning("Volume not found for share %s. "
"Possibly already deleted."), share['id']) "Possibly already deleted.", share['id'])
volume = None volume = None
if volume and volume['id'] in attached_volumes: if volume and volume['id'] in attached_volumes:
self.compute_api.instance_volume_detach( self.compute_api.instance_volume_detach(
@ -587,7 +587,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
try: try:
volume = self._get_volume(context, share['id']) volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound: except exception.VolumeNotFound:
LOG.info(_LI("Volume not found. Already deleted?")) LOG.info("Volume not found. Already deleted?")
volume = None volume = None
if volume: if volume:
if volume['status'] == 'in-use': if volume['status'] == 'in-use':

View File

@ -24,7 +24,7 @@ from oslo_log import log
import six import six
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.ganesha import utils as ganesha_utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -205,7 +205,7 @@ class GlusterManager(object):
exc.exit_code in error_policy): exc.exit_code in error_policy):
return return
if logmsg: if logmsg:
LOG.error(_LE("%s: GlusterFS instrumentation failed.") % LOG.error("%s: GlusterFS instrumentation failed." %
logmsg) logmsg)
raise exception.GlusterfsException( raise exception.GlusterfsException(
_("GlusterFS management command '%(cmd)s' failed " _("GlusterFS management command '%(cmd)s' failed "
@ -248,7 +248,7 @@ class GlusterManager(object):
def _get_vol_option_via_info(self, option): def _get_vol_option_via_info(self, option):
"""Get the value of an option set on a GlusterFS volume via volinfo.""" """Get the value of an option set on a GlusterFS volume via volinfo."""
args = ('--xml', 'volume', 'info', self.volume) args = ('--xml', 'volume', 'info', self.volume)
out, err = self.gluster_call(*args, log=_LE("retrieving volume info")) out, err = self.gluster_call(*args, log=("retrieving volume info"))
if not out: if not out:
raise exception.GlusterfsException( raise exception.GlusterfsException(
@ -332,7 +332,7 @@ class GlusterManager(object):
:returns: version (as tuple of strings, example: ('3', '6', '0beta2')) :returns: version (as tuple of strings, example: ('3', '6', '0beta2'))
""" """
out, err = self.gluster_call('--version', out, err = self.gluster_call('--version',
log=_LE("GlusterFS version query")) log=("GlusterFS version query"))
try: try:
owords = out.split() owords = out.split()
if owords[0] != 'glusterfs': if owords[0] != 'glusterfs':
@ -393,7 +393,7 @@ def _mount_gluster_vol(execute, gluster_export, mount_path, ensure=False):
execute(*command, run_as_root=True) execute(*command, run_as_root=True)
except exception.ProcessExecutionError as exc: except exception.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.stderr: if ensure and 'already mounted' in exc.stderr:
LOG.warning(_LW("%s is already mounted."), gluster_export) LOG.warning("%s is already mounted.", gluster_export)
else: else:
raise exception.GlusterfsException( raise exception.GlusterfsException(
'Unable to mount Gluster volume' 'Unable to mount Gluster volume'
@ -431,8 +431,8 @@ def _restart_gluster_vol(gluster_mgr):
# this odd-behaviour of Gluster-CLI. # this odd-behaviour of Gluster-CLI.
gluster_mgr.gluster_call( gluster_mgr.gluster_call(
'volume', 'stop', gluster_mgr.volume, '--mode=script', 'volume', 'stop', gluster_mgr.volume, '--mode=script',
log=_LE("stopping GlusterFS volume %s") % gluster_mgr.volume) log=("stopping GlusterFS volume %s") % gluster_mgr.volume)
gluster_mgr.gluster_call( gluster_mgr.gluster_call(
'volume', 'start', gluster_mgr.volume, 'volume', 'start', gluster_mgr.volume,
log=_LE("starting GlusterFS volume %s") % gluster_mgr.volume) log=("starting GlusterFS volume %s") % gluster_mgr.volume)

View File

@ -22,7 +22,7 @@ from oslo_log import log
import six import six
from manila import exception from manila import exception
from manila.i18n import _, _LE from manila.i18n import _
from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import common
from manila.share.drivers.glusterfs import layout from manila.share.drivers.glusterfs import layout
@ -82,8 +82,8 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
except exception.GlusterfsException: except exception.GlusterfsException:
if (self.gluster_manager. if (self.gluster_manager.
get_vol_option('features.quota')) != 'on': get_vol_option('features.quota')) != 'on':
LOG.exception(_LE("Error in tuning GlusterFS volume to enable " LOG.exception("Error in tuning GlusterFS volume to enable "
"creation of shares of specific size.")) "creation of shares of specific size.")
raise raise
self._ensure_gluster_vol_mounted() self._ensure_gluster_vol_mounted()
@ -106,7 +106,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
self.gluster_manager.export, mount_path, self.gluster_manager.export, mount_path,
ensure=True) ensure=True)
except exception.GlusterfsException: except exception.GlusterfsException:
LOG.exception(_LE('Could not mount the Gluster volume %s'), LOG.exception('Could not mount the Gluster volume %s',
self.gluster_manager.volume) self.gluster_manager.volume)
raise raise
@ -152,7 +152,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
exc = exception.GlusterfsException(exc) exc = exception.GlusterfsException(exc)
if isinstance(exc, exception.GlusterfsException): if isinstance(exc, exception.GlusterfsException):
self._cleanup_create_share(local_share_path, share['name']) self._cleanup_create_share(local_share_path, share['name'])
LOG.error(_LE('Unable to create share %s'), share['name']) LOG.error('Unable to create share %s', share['name'])
raise exc raise exc
comp_share = self.gluster_manager.components.copy() comp_share = self.gluster_manager.components.copy()
@ -170,9 +170,9 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
try: try:
self.driver._execute(*cmd, run_as_root=True) self.driver._execute(*cmd, run_as_root=True)
except exception.ProcessExecutionError as exc: except exception.ProcessExecutionError as exc:
LOG.error(_LE('Cannot cleanup share, %s, that errored out ' LOG.error('Cannot cleanup share, %s, that errored out '
'during its creation, but exists in GlusterFS ' 'during its creation, but exists in GlusterFS '
'volume.'), share_name) 'volume.', share_name)
raise exception.GlusterfsException(exc) raise exception.GlusterfsException(exc)
def delete_share(self, context, share, share_server=None): def delete_share(self, context, share, share_server=None):
@ -182,7 +182,7 @@ class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
try: try:
self.driver._execute(*cmd, run_as_root=True) self.driver._execute(*cmd, run_as_root=True)
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
LOG.exception(_LE('Unable to delete share %s'), share['name']) LOG.exception('Unable to delete share %s', share['name'])
raise raise
def ensure_share(self, context, share, share_server=None): def ensure_share(self, context, share, share_server=None):

View File

@ -28,7 +28,7 @@ from oslo_log import log
import six import six
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import common
from manila.share.drivers.glusterfs import layout from manila.share.drivers.glusterfs import layout
from manila import utils from manila import utils
@ -129,8 +129,8 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
exceptions[srvaddr] = six.text_type(exc) exceptions[srvaddr] = six.text_type(exc)
if exceptions: if exceptions:
for srvaddr, excmsg in exceptions.items(): for srvaddr, excmsg in exceptions.items():
LOG.error(_LE("'gluster version' failed on server " LOG.error("'gluster version' failed on server "
"%(server)s with: %(message)s"), "%(server)s with: %(message)s",
{'server': srvaddr, 'message': excmsg}) {'server': srvaddr, 'message': excmsg})
raise exception.GlusterfsException(_( raise exception.GlusterfsException(_(
"'gluster version' failed on servers %s") % ( "'gluster version' failed on servers %s") % (
@ -143,9 +143,9 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
gluster_version_min_str = '.'.join( gluster_version_min_str = '.'.join(
six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN) six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
for srvaddr in notsupp_servers: for srvaddr in notsupp_servers:
LOG.error(_LE("GlusterFS version %(version)s on server " LOG.error("GlusterFS version %(version)s on server "
"%(server)s is not supported, " "%(server)s is not supported, "
"minimum requirement: %(minvers)s"), "minimum requirement: %(minvers)s",
{'server': srvaddr, {'server': srvaddr,
'version': '.'.join(glusterfs_versions[srvaddr]), 'version': '.'.join(glusterfs_versions[srvaddr]),
'minvers': gluster_version_min_str}) 'minvers': gluster_version_min_str})
@ -167,8 +167,8 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
LOG.error(msg) LOG.error(msg)
raise exception.GlusterfsException(msg) raise exception.GlusterfsException(msg)
LOG.info(_LI("Found %d Gluster volumes allocated for Manila." LOG.info("Found %d Gluster volumes allocated for Manila.",
), len(gluster_volumes_initial)) len(gluster_volumes_initial))
self._check_mount_glusterfs() self._check_mount_glusterfs()
@ -203,10 +203,10 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
for srvaddr in self.configuration.glusterfs_servers: for srvaddr in self.configuration.glusterfs_servers:
gluster_mgr = self._glustermanager(srvaddr, False) gluster_mgr = self._glustermanager(srvaddr, False)
if gluster_mgr.user: if gluster_mgr.user:
logmsg = _LE("Retrieving volume list " logmsg = ("Retrieving volume list "
"on host %s") % gluster_mgr.host "on host %s") % gluster_mgr.host
else: else:
logmsg = _LE("Retrieving volume list") logmsg = ("Retrieving volume list")
out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg) out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg)
for volname in out.split("\n"): for volname in out.split("\n"):
patmatch = self.volume_pattern.match(volname) patmatch = self.volume_pattern.match(volname)
@ -251,17 +251,17 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
if not unused_vols: if not unused_vols:
# No volumes available for use as share. Warn user. # No volumes available for use as share. Warn user.
LOG.warning(_LW("No unused gluster volumes available for use as " LOG.warning("No unused gluster volumes available for use as "
"share! Create share won't be supported unless " "share! Create share won't be supported unless "
"existing shares are deleted or some gluster " "existing shares are deleted or some gluster "
"volumes are created with names matching " "volumes are created with names matching "
"'glusterfs_volume_pattern'.")) "'glusterfs_volume_pattern'.")
else: else:
LOG.info(_LI("Number of gluster volumes in use: " LOG.info("Number of gluster volumes in use: "
"%(inuse-numvols)s. Number of gluster volumes " "%(inuse-numvols)s. Number of gluster volumes "
"available for use as share: %(unused-numvols)s"), "available for use as share: %(unused-numvols)s",
{'inuse-numvols': len(self.gluster_used_vols), {'inuse-numvols': len(self.gluster_used_vols),
'unused-numvols': len(unused_vols)}) 'unused-numvols': len(unused_vols)})
# volmap is the data structure used to categorize and sort # volmap is the data structure used to categorize and sort
# the unused volumes. It's a nested dictionary of structure # the unused volumes. It's a nested dictionary of structure
@ -385,7 +385,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
try: try:
vol = self._pop_gluster_vol(share['size']) vol = self._pop_gluster_vol(share['size'])
except exception.GlusterfsException: except exception.GlusterfsException:
msg = (_LE("Error creating share %(share_id)s"), msg = ("Error creating share %(share_id)s",
{'share_id': share['id']}) {'share_id': share['id']})
LOG.error(msg) LOG.error(msg)
raise raise
@ -401,7 +401,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
# For native protocol, the export_location should be of the form: # For native protocol, the export_location should be of the form:
# server:/volname # server:/volname
LOG.info(_LI("export_location sent back from create_share: %s"), LOG.info("export_location sent back from create_share: %s",
export) export)
return export return export
@ -436,8 +436,8 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
self._push_gluster_vol(gmgr.qualified) self._push_gluster_vol(gmgr.qualified)
except exception.GlusterfsException: except exception.GlusterfsException:
msg = (_LE("Error during delete_share request for " msg = ("Error during delete_share request for "
"share %(share_id)s"), {'share_id': share['id']}) "share %(share_id)s", {'share_id': share['id']})
LOG.error(msg) LOG.error(msg)
raise raise
@ -449,7 +449,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script') args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
out, err = gluster_mgr.gluster_call( out, err = gluster_mgr.gluster_call(
*args, *args,
log=_LE("Retrieving snapshot list")) log=("Retrieving snapshot list"))
snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n"))) snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
if len(snapgrep) != 1: if len(snapgrep) != 1:
msg = (_("Failed to identify backing GlusterFS object " msg = (_("Failed to identify backing GlusterFS object "
@ -493,7 +493,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
for args in args_tuple: for args in args_tuple:
out, err = old_gmgr.gluster_call( out, err = old_gmgr.gluster_call(
*args, *args,
log=_LE("Creating share from snapshot")) log=("Creating share from snapshot"))
# Get a manager for the new volume/share. # Get a manager for the new volume/share.
comp_vol = old_gmgr.components.copy() comp_vol = old_gmgr.components.copy()
@ -509,7 +509,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
('start', [])) ('start', []))
for op, opargs in argseq: for op, opargs in argseq:
args = ['volume', op, gmgr.volume] + opargs args = ['volume', op, gmgr.volume] + opargs
gmgr.gluster_call(*args, log=_LE("Creating share from snapshot")) gmgr.gluster_call(*args, log=("Creating share from snapshot"))
self.gluster_used_vols.add(gmgr.qualified) self.gluster_used_vols.add(gmgr.qualified)
self.private_storage.update(share['id'], {'volume': gmgr.qualified}) self.private_storage.update(share['id'], {'volume': gmgr.qualified})
@ -528,7 +528,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
gluster_mgr.volume) gluster_mgr.volume)
out, err = gluster_mgr.gluster_call( out, err = gluster_mgr.gluster_call(
*args, *args,
log=_LE("Retrieving volume info")) log=("Retrieving volume info"))
if not out: if not out:
raise exception.GlusterfsException( raise exception.GlusterfsException(
@ -570,7 +570,7 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
'--mode=script') '--mode=script')
out, err = gluster_mgr.gluster_call( out, err = gluster_mgr.gluster_call(
*args, *args,
log=_LE("Error deleting snapshot")) log=("Error deleting snapshot"))
if not out: if not out:
raise exception.GlusterfsException( raise exception.GlusterfsException(

View File

@ -20,7 +20,7 @@ from oslo_log import log
from manila.common import constants as const from manila.common import constants as const
from manila import exception from manila import exception
from manila.i18n import _, _LW from manila.i18n import _
from manila import utils from manila import utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -242,12 +242,12 @@ class NFSHelper(NASHelperBase):
(const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW))
except (exception.InvalidShareAccess, except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel): exception.InvalidShareAccessLevel):
LOG.warning(_LW( LOG.warning(
"Unsupported access level %(level)s or access type " "Unsupported access level %(level)s or access type "
"%(type)s, skipping removal of access rule to " "%(type)s, skipping removal of access rule to "
"%(to)s.") % {'level': access['access_level'], "%(to)s." % {'level': access['access_level'],
'type': access['access_type'], 'type': access['access_type'],
'to': access['access_to']}) 'to': access['access_to']})
continue continue
self._ssh_exec(server, ['sudo', 'exportfs', '-u', self._ssh_exec(server, ['sudo', 'exportfs', '-u',
':'.join((access['access_to'], local_path))]) ':'.join((access['access_to'], local_path))])
@ -260,12 +260,12 @@ class NFSHelper(NASHelperBase):
re.escape(local_path) + '[\s\n]*' + re.escape( re.escape(local_path) + '[\s\n]*' + re.escape(
access['access_to']), out) access['access_to']), out)
if found_item is not None: if found_item is not None:
LOG.warning(_LW("Access rule %(type)s:%(to)s already " LOG.warning("Access rule %(type)s:%(to)s already "
"exists for share %(name)s") % { "exists for share %(name)s" % {
'to': access['access_to'], 'to': access['access_to'],
'type': access['access_type'], 'type': access['access_type'],
'name': share_name 'name': share_name
}) })
else: else:
rules_options = '%s,no_subtree_check' rules_options = '%s,no_subtree_check'
if access['access_level'] == const.ACCESS_LEVEL_RW: if access['access_level'] == const.ACCESS_LEVEL_RW:
@ -433,8 +433,8 @@ class CIFSHelperIPAccess(CIFSHelperBase):
self._ssh_exec( self._ssh_exec(
server, ['sudo', 'net', 'conf', 'delshare', share_name]) server, ['sudo', 'net', 'conf', 'delshare', share_name])
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Caught error trying delete share: %(error)s, try" LOG.warning("Caught error trying delete share: %(error)s, try"
"ing delete it forcibly."), {'error': e.stderr}) "ing delete it forcibly.", {'error': e.stderr})
self._ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share', self._ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share',
share_name]) share_name])

View File

@ -23,7 +23,7 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share import utils from manila.share import utils
@ -280,10 +280,10 @@ class HitachiHNASDriver(driver.ShareDriver):
for rule in delete_rules: for rule in delete_rules:
if rule['access_type'].lower() != 'user': if rule['access_type'].lower() != 'user':
LOG.warning(_LW('Only USER access type is allowed for ' LOG.warning('Only USER access type is allowed for '
'CIFS. %(entity_type)s ' 'CIFS. %(entity_type)s '
'provided %(share)s with ' 'provided %(share)s with '
'protocol %(proto)s.'), 'protocol %(proto)s.',
{'entity_type': entity_type.capitalize(), {'entity_type': entity_type.capitalize(),
'share': share_or_snapshot['id'], 'share': share_or_snapshot['id'],
'proto': share_proto}) 'proto': share_proto})
@ -411,7 +411,7 @@ class HitachiHNASDriver(driver.ShareDriver):
'snap_id': snapshot['id']}) 'snap_id': snapshot['id']})
export_locations = self._create_snapshot(hnas_share_id, snapshot) export_locations = self._create_snapshot(hnas_share_id, snapshot)
LOG.info(_LI("Snapshot %(id)s successfully created."), LOG.info("Snapshot %(id)s successfully created.",
{'id': snapshot['id']}) {'id': snapshot['id']})
output = { output = {
@ -443,7 +443,7 @@ class HitachiHNASDriver(driver.ShareDriver):
self._delete_snapshot(snapshot['share'], self._delete_snapshot(snapshot['share'],
hnas_share_id, hnas_snapshot_id) hnas_share_id, hnas_snapshot_id)
LOG.info(_LI("Snapshot %(id)s successfully deleted."), LOG.info("Snapshot %(id)s successfully deleted.",
{'id': snapshot['id']}) {'id': snapshot['id']})
def create_share_from_snapshot(self, context, share, snapshot, def create_share_from_snapshot(self, context, share, snapshot,
@ -598,8 +598,8 @@ class HitachiHNASDriver(driver.ShareDriver):
{'shr_id': share['id']}) {'shr_id': share['id']})
self._extend_share(hnas_share_id, share, new_size) self._extend_share(hnas_share_id, share, new_size)
LOG.info(_LI("Share %(shr_id)s successfully extended to " LOG.info("Share %(shr_id)s successfully extended to "
"%(shr_size)s."), "%(shr_size)s.",
{'shr_id': share['id'], {'shr_id': share['id'],
'shr_size': six.text_type(new_size)}) 'shr_size': six.text_type(new_size)})
@ -639,7 +639,7 @@ class HitachiHNASDriver(driver.ShareDriver):
'mount_snapshot_support': True, 'mount_snapshot_support': True,
} }
LOG.info(_LI("HNAS Capabilities: %(data)s."), LOG.info("HNAS Capabilities: %(data)s.",
{'data': six.text_type(data)}) {'data': six.text_type(data)})
super(HitachiHNASDriver, self)._update_share_stats(data) super(HitachiHNASDriver, self)._update_share_stats(data)
@ -719,8 +719,8 @@ class HitachiHNASDriver(driver.ShareDriver):
if share['share_proto'].lower() == 'nfs': if share['share_proto'].lower() == 'nfs':
# 10.0.0.1:/shares/example # 10.0.0.1:/shares/example
LOG.info(_LI("Share %(shr_path)s will be managed with ID " LOG.info("Share %(shr_path)s will be managed with ID "
"%(shr_id)s."), "%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})
@ -764,8 +764,8 @@ class HitachiHNASDriver(driver.ShareDriver):
"Share ID %(share_id)s", {'hnas_id': hnas_share_id, "Share ID %(share_id)s", {'hnas_id': hnas_share_id,
'share_id': share['id']}) 'share_id': share['id']})
LOG.info(_LI("Share %(shr_path)s was successfully managed with ID " LOG.info("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s."), "%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})
@ -779,13 +779,13 @@ class HitachiHNASDriver(driver.ShareDriver):
self.private_storage.delete(share['id']) self.private_storage.delete(share['id'])
if len(share['export_locations']) == 0: if len(share['export_locations']) == 0:
LOG.info(_LI("The share with ID %(shr_id)s is no longer being " LOG.info("The share with ID %(shr_id)s is no longer being "
"managed."), {'shr_id': share['id']}) "managed.", {'shr_id': share['id']})
else: else:
LOG.info(_LI("The share with current path %(shr_path)s and ID " LOG.info("The share with current path %(shr_path)s and ID "
"%(shr_id)s is no longer being managed."), "%(shr_id)s is no longer being managed.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})
def shrink_share(self, share, new_size, share_server=None): def shrink_share(self, share, new_size, share_server=None):
"""Shrinks a share to new size. """Shrinks a share to new size.
@ -801,8 +801,8 @@ class HitachiHNASDriver(driver.ShareDriver):
{'shr_id': share['id']}) {'shr_id': share['id']})
self._shrink_share(hnas_share_id, share, new_size) self._shrink_share(hnas_share_id, share, new_size)
LOG.info(_LI("Share %(shr_id)s successfully shrunk to " LOG.info("Share %(shr_id)s successfully shrunk to "
"%(shr_size)sG."), "%(shr_size)sG.",
{'shr_id': share['id'], {'shr_id': share['id'],
'shr_size': six.text_type(new_size)}) 'shr_size': six.text_type(new_size)})
@ -836,12 +836,12 @@ class HitachiHNASDriver(driver.ShareDriver):
try: try:
self.hnas.tree_clone(src_path, dest_path) self.hnas.tree_clone(src_path, dest_path)
except exception.HNASNothingToCloneException: except exception.HNASNothingToCloneException:
LOG.warning(_LW("Source directory is empty, creating an empty " LOG.warning("Source directory is empty, creating an empty "
"directory.")) "directory.")
LOG.info(_LI("Share %(share)s successfully reverted to snapshot " LOG.info("Share %(share)s successfully reverted to snapshot "
"%(snapshot)s."), {'share': snapshot['share_id'], "%(snapshot)s.", {'share': snapshot['share_id'],
'snapshot': snapshot['id']}) 'snapshot': snapshot['id']})
def _get_hnas_share_id(self, share_id): def _get_hnas_share_id(self, share_id):
hnas_id = self.private_storage.get(share_id, 'hnas_id') hnas_id = self.private_storage.get(share_id, 'hnas_id')
@ -1056,8 +1056,8 @@ class HitachiHNASDriver(driver.ShareDriver):
try: try:
self.hnas.tree_clone(src_path, dest_path) self.hnas.tree_clone(src_path, dest_path)
except exception.HNASNothingToCloneException: except exception.HNASNothingToCloneException:
LOG.warning(_LW("Source directory is empty, creating an empty " LOG.warning("Source directory is empty, creating an empty "
"directory.")) "directory.")
self.hnas.create_directory(dest_path) self.hnas.create_directory(dest_path)
finally: finally:
if share_proto.lower() == 'nfs': if share_proto.lower() == 'nfs':
@ -1125,8 +1125,8 @@ class HitachiHNASDriver(driver.ShareDriver):
try: try:
self.hnas.tree_clone(src_path, dest_path) self.hnas.tree_clone(src_path, dest_path)
except exception.HNASNothingToCloneException: except exception.HNASNothingToCloneException:
LOG.warning(_LW("Source directory is empty, exporting " LOG.warning("Source directory is empty, exporting "
"directory.")) "directory.")
self._check_protocol(share['id'], share['share_proto']) self._check_protocol(share['id'], share['share_proto'])
@ -1348,8 +1348,8 @@ class HitachiHNASDriver(driver.ShareDriver):
try: try:
self._ensure_snapshot(snapshot, hnas_snapshot_id) self._ensure_snapshot(snapshot, hnas_snapshot_id)
except exception.HNASItemNotFoundException: except exception.HNASItemNotFoundException:
LOG.warning(_LW("Export does not exist for snapshot %s, " LOG.warning("Export does not exist for snapshot %s, "
"creating a new one."), snapshot['id']) "creating a new one.", snapshot['id'])
self._create_export(hnas_share_id, self._create_export(hnas_share_id,
snapshot['share']['share_proto'], snapshot['share']['share_proto'],
snapshot_id=hnas_snapshot_id) snapshot_id=hnas_snapshot_id)
@ -1362,8 +1362,8 @@ class HitachiHNASDriver(driver.ShareDriver):
is_snapshot=True) is_snapshot=True)
output['export_locations'] = export_locations output['export_locations'] = export_locations
LOG.info(_LI("Snapshot %(snap_path)s for share %(shr_id)s was " LOG.info("Snapshot %(snap_path)s for share %(shr_id)s was "
"successfully managed with ID %(snap_id)s."), "successfully managed with ID %(snap_id)s.",
{'snap_path': snapshot['provider_location'], {'snap_path': snapshot['provider_location'],
'shr_id': snapshot['share_id'], 'shr_id': snapshot['share_id'],
'snap_id': snapshot['id']}) 'snap_id': snapshot['id']})
@ -1375,9 +1375,9 @@ class HitachiHNASDriver(driver.ShareDriver):
:param snapshot: Snapshot that will be unmanaged. :param snapshot: Snapshot that will be unmanaged.
""" """
LOG.info(_LI("The snapshot with ID %(snap_id)s from share " LOG.info("The snapshot with ID %(snap_id)s from share "
"%(share_id)s is no longer being managed by Manila. " "%(share_id)s is no longer being managed by Manila. "
"However, it is not deleted and can be found in HNAS."), "However, it is not deleted and can be found in HNAS.",
{'snap_id': snapshot['id'], {'snap_id': snapshot['id'],
'share_id': snapshot['share_id']}) 'share_id': snapshot['share_id']})

View File

@ -24,7 +24,7 @@ import os
import time import time
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila import utils as mutils from manila import utils as mutils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -98,8 +98,8 @@ class HNASSSHBackend(object):
self._execute(command) self._execute(command)
except processutils.ProcessExecutionError as e: except processutils.ProcessExecutionError as e:
if 'does not exist' in e.stderr: if 'does not exist' in e.stderr:
LOG.warning(_LW("Export %s does not exist on " LOG.warning("Export %s does not exist on "
"backend anymore."), name) "backend anymore.", name)
else: else:
msg = _("Could not delete NFS export %s.") % name msg = _("Could not delete NFS export %s.") % name
LOG.exception(msg) LOG.exception(msg)
@ -128,8 +128,8 @@ class HNASSSHBackend(object):
self._execute(command) self._execute(command)
except processutils.ProcessExecutionError as e: except processutils.ProcessExecutionError as e:
if e.exit_code == 1: if e.exit_code == 1:
LOG.warning(_LW("CIFS share %s does not exist on " LOG.warning("CIFS share %s does not exist on "
"backend anymore."), name) "backend anymore.", name)
else: else:
msg = _("Could not delete CIFS share %s.") % name msg = _("Could not delete CIFS share %s.") % name
LOG.exception(msg) LOG.exception(msg)
@ -232,18 +232,18 @@ class HNASSSHBackend(object):
except processutils.ProcessExecutionError as e: except processutils.ProcessExecutionError as e:
if ('not listed as a user' in e.stderr or if ('not listed as a user' in e.stderr or
'Could not delete user/group' in e.stderr): 'Could not delete user/group' in e.stderr):
LOG.warning(_LW('User %(user)s already not allowed to access ' LOG.warning('User %(user)s already not allowed to access '
'%(entity_type)s %(name)s.'), { '%(entity_type)s %(name)s.', {
'entity_type': entity_type, 'entity_type': entity_type,
'user': user, 'user': user,
'name': name 'name': name
}) })
else: else:
msg = _("Could not delete access of user %(user)s to " msg = _("Could not delete access of user %(user)s to "
"%(entity_type)s %(name)s.") % { "%(entity_type)s %(name)s.") % {
'user': user, 'user': user,
'name': name, 'name': name,
'entity_type': entity_type, 'entity_type': entity_type,
} }
LOG.exception(msg) LOG.exception(msg)
raise exception.HNASBackendException(msg=msg) raise exception.HNASBackendException(msg=msg)
@ -312,8 +312,8 @@ class HNASSSHBackend(object):
if now > deadline: if now > deadline:
command = ['tree-clone-job-abort', job_id] command = ['tree-clone-job-abort', job_id]
self._execute(command) self._execute(command)
LOG.error(_LE("Timeout in snapshot creation from " LOG.error("Timeout in snapshot creation from "
"source path %s.") % src_path) "source path %s." % src_path)
msg = _("Share snapshot of source path %s " msg = _("Share snapshot of source path %s "
"was not created.") % src_path "was not created.") % src_path
raise exception.HNASBackendException(msg=msg) raise exception.HNASBackendException(msg=msg)
@ -332,7 +332,7 @@ class HNASSSHBackend(object):
{'src': src_path, {'src': src_path,
'dest': dest_path}) 'dest': dest_path})
else: else:
LOG.error(_LE('Error creating snapshot of source path %s.'), LOG.error('Error creating snapshot of source path %s.',
src_path) src_path)
msg = _('Snapshot of source path %s was not ' msg = _('Snapshot of source path %s was not '
'created.') % src_path 'created.') % src_path
@ -345,8 +345,8 @@ class HNASSSHBackend(object):
self._execute(command) self._execute(command)
except processutils.ProcessExecutionError as e: except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr: if 'Source path: Cannot access' in e.stderr:
LOG.warning(_LW("Attempted to delete path %s " LOG.warning("Attempted to delete path %s "
"but it does not exist."), path) "but it does not exist.", path)
else: else:
msg = _("Could not submit tree delete job to delete path " msg = _("Could not submit tree delete job to delete path "
"%s.") % path "%s.") % path
@ -449,7 +449,7 @@ class HNASSSHBackend(object):
self._execute(command) self._execute(command)
except processutils.ProcessExecutionError as e: except processutils.ProcessExecutionError as e:
if 'Source path: Cannot access' in e.stderr: if 'Source path: Cannot access' in e.stderr:
LOG.warning(_LW("Share %s does not exist."), vvol_name) LOG.warning("Share %s does not exist.", vvol_name)
else: else:
msg = _("Failed to delete vvol %s.") % vvol_name msg = _("Failed to delete vvol %s.") % vvol_name
LOG.exception(msg) LOG.exception(msg)
@ -699,8 +699,8 @@ class HNASSSHBackend(object):
LOG.debug(msg) LOG.debug(msg)
raise exception.HNASDirectoryNotEmpty(msg=msg) raise exception.HNASDirectoryNotEmpty(msg=msg)
elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr: elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr:
LOG.warning(_LW("Attempted to delete path %s but it does " LOG.warning("Attempted to delete path %s but it does "
"not exist."), path) "not exist.", path)
elif 'Current file system invalid: VolumeNotFound' in e.stderr: elif 'Current file system invalid: VolumeNotFound' in e.stderr:
msg = _("Command to delete empty directory %s failed due " msg = _("Command to delete empty directory %s failed due "
"to context change.") % path "to context change.") % path

View File

@ -20,7 +20,7 @@ from oslo_utils import units
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.hitachi.hsp import rest from manila.share.drivers.hitachi.hsp import rest
@ -92,7 +92,7 @@ class HitachiHSPDriver(driver.ShareDriver):
}], }],
} }
LOG.info(_LI("Hitachi HSP Capabilities: %(data)s."), LOG.info("Hitachi HSP Capabilities: %(data)s.",
{'data': data}) {'data': data})
super(HitachiHSPDriver, self)._update_share_stats(data) super(HitachiHSPDriver, self)._update_share_stats(data)
@ -111,7 +111,7 @@ class HitachiHSPDriver(driver.ShareDriver):
except exception.HSPBackendException: except exception.HSPBackendException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
self.hsp.delete_file_system(filesystem_id) self.hsp.delete_file_system(filesystem_id)
msg = _LE("Could not create share %s on HSP.") msg = ("Could not create share %s on HSP.")
LOG.exception(msg, share['id']) LOG.exception(msg, share['id'])
uri = self.hsp_host + ':/' + share['id'] uri = self.hsp_host + ':/' + share['id']
@ -133,7 +133,7 @@ class HitachiHSPDriver(driver.ShareDriver):
filesystem_id = self.hsp.get_file_system(share['id'])['id'] filesystem_id = self.hsp.get_file_system(share['id'])['id']
hsp_share_id = self.hsp.get_share(filesystem_id)['id'] hsp_share_id = self.hsp.get_share(filesystem_id)['id']
except exception.HSPItemNotFoundException: except exception.HSPItemNotFoundException:
LOG.info(_LI("Share %(shr)s already removed from backend."), LOG.info("Share %(shr)s already removed from backend.",
{'shr': share['id']}) {'shr': share['id']})
if hsp_share_id: if hsp_share_id:
@ -278,8 +278,8 @@ class HitachiHSPDriver(driver.ShareDriver):
% share['id']) % share['id'])
raise exception.HSPBackendException(msg=msg) raise exception.HSPBackendException(msg=msg)
LOG.info(_LI("Share %(shr_id)s successfully extended to " LOG.info("Share %(shr_id)s successfully extended to "
"%(shr_size)sG."), "%(shr_size)sG.",
{'shr_id': share['id'], {'shr_id': share['id'],
'shr_size': new_size}) 'shr_size': new_size})
@ -299,8 +299,8 @@ class HitachiHSPDriver(driver.ShareDriver):
raise exception.ShareShrinkingPossibleDataLoss( raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id']) share_id=share['id'])
LOG.info(_LI("Share %(shr_id)s successfully shrunk to " LOG.info("Share %(shr_id)s successfully shrunk to "
"%(shr_size)sG."), "%(shr_size)sG.",
{'shr_id': share['id'], {'shr_id': share['id'],
'shr_size': new_size}) 'shr_size': new_size})
@ -333,8 +333,8 @@ class HitachiHSPDriver(driver.ShareDriver):
file_system = self.hsp.get_file_system(share['id']) file_system = self.hsp.get_file_system(share['id'])
LOG.info(_LI("Share %(shr_path)s was successfully managed with ID " LOG.info("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s."), "%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})
@ -357,8 +357,8 @@ class HitachiHSPDriver(driver.ShareDriver):
self.private_storage.delete(share['id']) self.private_storage.delete(share['id'])
LOG.info(_LI("The share with current path %(shr_path)s and ID " LOG.info("The share with current path %(shr_path)s and ID "
"%(shr_id)s is no longer being managed."), "%(shr_id)s is no longer being managed.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})

View File

@ -27,7 +27,7 @@ import six
from manila.common import config from manila.common import config
from manila import exception from manila import exception
from manila.i18n import _, _LI from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.hpe import hpe_3par_mediator from manila.share.drivers.hpe import hpe_3par_mediator
from manila.share import share_types from manila.share import share_types
@ -237,7 +237,7 @@ class HPE3ParShareDriver(driver.ShareDriver):
def do_setup(self, context): def do_setup(self, context):
"""Any initialization the share driver does while starting.""" """Any initialization the share driver does while starting."""
LOG.info(_LI("Starting share driver %(driver_name)s (%(version)s)"), LOG.info("Starting share driver %(driver_name)s (%(version)s)",
{'driver_name': self.__class__.__name__, {'driver_name': self.__class__.__name__,
'version': self.VERSION}) 'version': self.VERSION})
@ -637,8 +637,8 @@ class HPE3ParShareDriver(driver.ShareDriver):
if not self._hpe3par: if not self._hpe3par:
LOG.info( LOG.info(
_LI("Skipping capacity and capabilities update. Setup has not " "Skipping capacity and capabilities update. Setup has not "
"completed.")) "completed.")
else: else:
for fpg in self.fpgs: for fpg in self.fpgs:
fpg_status = self._hpe3par.get_fpg_status(fpg) fpg_status = self._hpe3par.get_fpg_status(fpg)

View File

@ -25,8 +25,8 @@ import six
from manila.data import utils as data_utils from manila.data import utils as data_utils
from manila import exception from manila import exception
from manila.i18n import _
from manila import utils from manila import utils
from manila.i18n import _, _LE, _LI, _LW
hpe3parclient = importutils.try_import("hpe3parclient") hpe3parclient = importutils.try_import("hpe3parclient")
if hpe3parclient: if hpe3parclient:
@ -167,14 +167,14 @@ class HPE3ParMediator(object):
LOG.exception(msg) LOG.exception(msg)
raise exception.ShareBackendException(message=msg) raise exception.ShareBackendException(message=msg)
LOG.info(_LI("HPE3ParMediator %(version)s, " LOG.info("HPE3ParMediator %(version)s, "
"hpe3parclient %(client_version)s"), "hpe3parclient %(client_version)s",
{"version": self.VERSION, {"version": self.VERSION,
"client_version": hpe3parclient.get_version_string()}) "client_version": hpe3parclient.get_version_string()})
try: try:
wsapi_version = self._client.getWsApiVersion()['build'] wsapi_version = self._client.getWsApiVersion()['build']
LOG.info(_LI("3PAR WSAPI %s"), wsapi_version) LOG.info("3PAR WSAPI %s", wsapi_version)
except Exception as e: except Exception as e:
msg = (_('Failed to get 3PAR WSAPI version: %s') % msg = (_('Failed to get 3PAR WSAPI version: %s') %
six.text_type(e)) six.text_type(e))
@ -200,7 +200,7 @@ class HPE3ParMediator(object):
try: try:
self._client.http.unauthenticate() self._client.http.unauthenticate()
except Exception as e: except Exception as e:
msg = _LW("Failed to Logout from 3PAR (%(url)s) because %(err)s") msg = ("Failed to Logout from 3PAR (%(url)s) because %(err)s")
LOG.warning(msg, {'url': self.hpe3par_api_url, LOG.warning(msg, {'url': self.hpe3par_api_url,
'err': six.text_type(e)}) 'err': six.text_type(e)})
# don't raise exception on logout() # don't raise exception on logout()
@ -346,8 +346,8 @@ class HPE3ParMediator(object):
if nfs_options is None: if nfs_options is None:
nfs_options = extra_specs.get('hp3par:nfs_options') nfs_options = extra_specs.get('hp3par:nfs_options')
if nfs_options: if nfs_options:
msg = _LW("hp3par:nfs_options is deprecated. Use " msg = ("hp3par:nfs_options is deprecated. Use "
"hpe3par:nfs_options instead.") "hpe3par:nfs_options instead.")
LOG.warning(msg) LOG.warning(msg)
if nfs_options: if nfs_options:
@ -391,8 +391,8 @@ class HPE3ParMediator(object):
comment=comment) comment=comment)
if 'hp3par_flash_cache' in extra_specs: if 'hp3par_flash_cache' in extra_specs:
msg = _LW("hp3par_flash_cache is deprecated. Use " msg = ("hp3par_flash_cache is deprecated. Use "
"hpe3par_flash_cache instead.") "hpe3par_flash_cache instead.")
LOG.warning(msg) LOG.warning(msg)
if protocol == 'nfs': if protocol == 'nfs':
@ -425,8 +425,8 @@ class HPE3ParMediator(object):
if opt_value is None: if opt_value is None:
opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt) opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt)
if opt_value: if opt_value:
msg = _LW("hp3par:smb_* is deprecated. Use " msg = ("hp3par:smb_* is deprecated. Use "
"hpe3par:smb_* instead.") "hpe3par:smb_* instead.")
LOG.warning(msg) LOG.warning(msg)
if opt_value: if opt_value:
@ -653,10 +653,10 @@ class HPE3ParMediator(object):
if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username
or not self.hpe3par_cifs_admin_access_password): or not self.hpe3par_cifs_admin_access_password):
LOG.warning(_LW("hpe3par_cifs_admin_access_username and " LOG.warning("hpe3par_cifs_admin_access_username and "
"hpe3par_cifs_admin_access_password must be " "hpe3par_cifs_admin_access_password must be "
"provided in order for CIFS shares created from " "provided in order for CIFS shares created from "
"snapshots to be writable.")) "snapshots to be writable.")
return self.create_share( return self.create_share(
orig_project_id, orig_project_id,
share_id, share_id,
@ -735,8 +735,8 @@ class HPE3ParMediator(object):
protocol, fpg, vfs, fstore, comment) protocol, fpg, vfs, fstore, comment)
except Exception as e: except Exception as e:
msg = _LE('Exception during mount and copy from RO snapshot ' msg = ('Exception during mount and copy from RO snapshot '
'to RW share: %s') 'to RW share: %s')
LOG.error(msg, e) LOG.error(msg, e)
self._delete_share(share_name, protocol, fpg, vfs, fstore) self._delete_share(share_name, protocol, fpg, vfs, fstore)
raise raise
@ -862,8 +862,8 @@ class HPE3ParMediator(object):
self._update_capacity_quotas( self._update_capacity_quotas(
fstore, 0, share_size, fpg, vfs) fstore, 0, share_size, fpg, vfs)
except Exception as e: except Exception as e:
msg = _LW('Exception during cleanup of deleted ' msg = ('Exception during cleanup of deleted '
'share %(share)s in filestore %(fstore)s: %(e)s') 'share %(share)s in filestore %(fstore)s: %(e)s')
data = { data = {
'fstore': fstore, 'fstore': fstore,
'share': share_name, 'share': share_name,
@ -878,10 +878,10 @@ class HPE3ParMediator(object):
# return out and log a warning. # return out and log a warning.
if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username
or not self.hpe3par_cifs_admin_access_password): or not self.hpe3par_cifs_admin_access_password):
LOG.warning(_LW("hpe3par_cifs_admin_access_username and " LOG.warning("hpe3par_cifs_admin_access_username and "
"hpe3par_cifs_admin_access_password must be " "hpe3par_cifs_admin_access_password must be "
"provided in order for the file tree to be " "provided in order for the file tree to be "
"properly deleted.")) "properly deleted.")
return return
mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name) mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name)
@ -978,8 +978,8 @@ class HPE3ParMediator(object):
try: try:
utils.execute('mkdir', mount_location, run_as_root=True) utils.execute('mkdir', mount_location, run_as_root=True)
except Exception as err: except Exception as err:
message = (_LW("There was an error creating mount directory: " message = ("There was an error creating mount directory: "
"%s. The nested file tree will not be deleted."), "%s. The nested file tree will not be deleted.",
six.text_type(err)) six.text_type(err))
LOG.warning(message) LOG.warning(message)
@ -1004,8 +1004,8 @@ class HPE3ParMediator(object):
protocol, fpg, vfs, fstore, share_ip) protocol, fpg, vfs, fstore, share_ip)
self._mount_share(protocol, mount_location, mount_dir) self._mount_share(protocol, mount_location, mount_dir)
except Exception as err: except Exception as err:
message = (_LW("There was an error mounting the super share: " message = ("There was an error mounting the super share: "
"%s. The nested file tree will not be deleted."), "%s. The nested file tree will not be deleted.",
six.text_type(err)) six.text_type(err))
LOG.warning(message) LOG.warning(message)
@ -1013,8 +1013,8 @@ class HPE3ParMediator(object):
try: try:
utils.execute('umount', mount_location, run_as_root=True) utils.execute('umount', mount_location, run_as_root=True)
except Exception as err: except Exception as err:
message = _LW("There was an error unmounting the share at " message = ("There was an error unmounting the share at "
"%(mount_location)s: %(error)s") "%(mount_location)s: %(error)s")
msg_data = { msg_data = {
'mount_location': mount_location, 'mount_location': mount_location,
'error': six.text_type(err), 'error': six.text_type(err),
@ -1025,8 +1025,8 @@ class HPE3ParMediator(object):
try: try:
utils.execute('rm', '-rf', directory, run_as_root=True) utils.execute('rm', '-rf', directory, run_as_root=True)
except Exception as err: except Exception as err:
message = (_LW("There was an error removing the share: " message = ("There was an error removing the share: "
"%s. The nested file tree will not be deleted."), "%s. The nested file tree will not be deleted.",
six.text_type(err)) six.text_type(err))
LOG.warning(message) LOG.warning(message)
@ -1212,8 +1212,8 @@ class HPE3ParMediator(object):
self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed') self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed')
except Exception: except Exception:
# Remove already happened so only log this. # Remove already happened so only log this.
LOG.exception(_LE('Unexpected exception calling startfsnapclean ' LOG.exception('Unexpected exception calling startfsnapclean '
'for FPG %(fpg)s.'), {'fpg': fpg}) 'for FPG %(fpg)s.', {'fpg': fpg})
@staticmethod @staticmethod
def _validate_access_type(protocol, access_type): def _validate_access_type(protocol, access_type):

View File

@ -17,7 +17,6 @@ import copy
from oslo_log import log from oslo_log import log
from manila.i18n import _LE
from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import constants
from manila.share import share_types from manila.share import share_types
@ -66,8 +65,8 @@ def _get_opts_from_specs(specs):
words = value.split() words = value.split()
if not (words and len(words) == 2 and words[0] == '<is>'): if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error(_LE("Extra specs must be specified as " LOG.error("Extra specs must be specified as "
"capabilities:%s='<is> True'."), key) "capabilities:%s='<is> True'.", key)
else: else:
opts[key] = words[1].lower() opts[key] = words[1].lower()

View File

@ -31,7 +31,7 @@ import six
from manila.common import constants as common_constants from manila.common import constants as common_constants
from manila.data import utils as data_utils from manila.data import utils as data_utils
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila import rpc from manila import rpc
from manila.share.drivers.huawei import base as driver from manila.share.drivers.huawei import base as driver
from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import constants
@ -275,7 +275,7 @@ class V3StorageConnection(driver.HuaweiBase):
snapshot_name = "share_snapshot_" + snap_name snapshot_name = "share_snapshot_" + snap_name
snap_id = self.helper._create_snapshot(sharefsid, snap_id = self.helper._create_snapshot(sharefsid,
snapshot_name) snapshot_name)
LOG.info(_LI('Creating snapshot id %s.'), snap_id) LOG.info('Creating snapshot id %s.', snap_id)
return snapshot_name.replace("-", "_") return snapshot_name.replace("-", "_")
def delete_snapshot(self, snapshot, share_server=None): def delete_snapshot(self, snapshot, share_server=None):
@ -286,8 +286,8 @@ class V3StorageConnection(driver.HuaweiBase):
sharefsid = self.helper.get_fsid_by_name(snapshot['share_name']) sharefsid = self.helper.get_fsid_by_name(snapshot['share_name'])
if sharefsid is None: if sharefsid is None:
LOG.warning(_LW('Delete snapshot share id %s fs has been ' LOG.warning('Delete snapshot share id %s fs has been '
'deleted.'), snap_name) 'deleted.', snap_name)
return return
snapshot_id = self.helper._get_snapshot_id(sharefsid, snap_name) snapshot_id = self.helper._get_snapshot_id(sharefsid, snap_name)
@ -297,7 +297,7 @@ class V3StorageConnection(driver.HuaweiBase):
if snapshot_flag: if snapshot_flag:
self.helper._delete_snapshot(snapshot_id) self.helper._delete_snapshot(snapshot_id)
else: else:
LOG.warning(_LW("Can not find snapshot %s on array."), snap_name) LOG.warning("Can not find snapshot %s on array.", snap_name)
def update_share_stats(self, stats_dict): def update_share_stats(self, stats_dict):
"""Retrieve status info from share group.""" """Retrieve status info from share group."""
@ -358,13 +358,13 @@ class V3StorageConnection(driver.HuaweiBase):
share = self.helper._get_share_by_name(share_name, share_url_type) share = self.helper._get_share_by_name(share_name, share_url_type)
if not share: if not share:
LOG.warning(_LW('The share was not found. Share name:%s'), LOG.warning('The share was not found. Share name:%s',
share_name) share_name)
fsid = self.helper.get_fsid_by_name(share_name) fsid = self.helper.get_fsid_by_name(share_name)
if fsid: if fsid:
self.helper._delete_fs(fsid) self.helper._delete_fs(fsid)
return return
LOG.warning(_LW('The filesystem was not found.')) LOG.warning('The filesystem was not found.')
return return
share_id = share['ID'] share_id = share['ID']
@ -452,8 +452,8 @@ class V3StorageConnection(driver.HuaweiBase):
try: try:
os.rmdir(item['mount_src']) os.rmdir(item['mount_src'])
except Exception as err: except Exception as err:
LOG.warning(_LW('Failed to remove temp file. File path: ' LOG.warning('Failed to remove temp file. File path:'
'%(file_path)s. Reason: %(err)s.'), '%(file_path)s. Reason: %(err)s.',
{'file_path': item['mount_src'], {'file_path': item['mount_src'],
'err': err}) 'err': err})
@ -467,8 +467,8 @@ class V3StorageConnection(driver.HuaweiBase):
self.allow_access(old_share, old_access) self.allow_access(old_share, old_access)
except exception.ManilaException as err: except exception.ManilaException as err:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to add access to share %(name)s. ' LOG.error('Failed to add access to share %(name)s. '
'Reason: %(err)s.'), 'Reason: %(err)s.',
{'name': old_share['name'], {'name': old_share['name'],
'err': err}) 'err': err})
@ -478,8 +478,8 @@ class V3StorageConnection(driver.HuaweiBase):
self.mount_share_to_host(old_share, old_access) self.mount_share_to_host(old_share, old_access)
except exception.ShareMountException as err: except exception.ShareMountException as err:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to mount old share %(name)s. ' LOG.error('Failed to mount old share %(name)s. '
'Reason: %(err)s.'), 'Reason: %(err)s.',
{'name': old_share['name'], {'name': old_share['name'],
'err': err}) 'err': err})
@ -489,8 +489,8 @@ class V3StorageConnection(driver.HuaweiBase):
except Exception as err: except Exception as err:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
self.umount_share_from_host(old_share) self.umount_share_from_host(old_share)
LOG.error(_LE('Failed to mount new share %(name)s. ' LOG.error('Failed to mount new share %(name)s. '
'Reason: %(err)s.'), 'Reason: %(err)s.',
{'name': new_share['name'], {'name': new_share['name'],
'err': err}) 'err': err})
@ -500,8 +500,8 @@ class V3StorageConnection(driver.HuaweiBase):
try: try:
self.umount_share_from_host(item) self.umount_share_from_host(item)
except exception.ShareUmountException as err: except exception.ShareUmountException as err:
LOG.warning(_LW('Failed to unmount share %(name)s. ' LOG.warning('Failed to unmount share %(name)s. '
'Reason: %(err)s.'), 'Reason: %(err)s.',
{'name': item['name'], {'name': item['name'],
'err': err}) 'err': err})
@ -573,7 +573,7 @@ class V3StorageConnection(driver.HuaweiBase):
if copy.get_progress()['total_progress'] == 100: if copy.get_progress()['total_progress'] == 100:
copy_finish = True copy_finish = True
except Exception as err: except Exception as err:
LOG.error(_LE("Failed to copy data, reason: %s."), err) LOG.error("Failed to copy data, reason: %s.", err)
return copy_finish return copy_finish
@ -695,12 +695,12 @@ class V3StorageConnection(driver.HuaweiBase):
share_url_type = self.helper._get_share_url_type(share_proto) share_url_type = self.helper._get_share_url_type(share_proto)
access_type = access['access_type'] access_type = access['access_type']
if share_proto == 'NFS' and access_type not in ('ip', 'user'): if share_proto == 'NFS' and access_type not in ('ip', 'user'):
LOG.warning(_LW('Only IP or USER access types are allowed for ' LOG.warning('Only IP or USER access types are allowed for '
'NFS shares.')) 'NFS shares.')
return return
elif share_proto == 'CIFS' and access_type != 'user': elif share_proto == 'CIFS' and access_type != 'user':
LOG.warning(_LW('Only USER access type is allowed for' LOG.warning('Only USER access type is allowed for'
' CIFS shares.')) ' CIFS shares.')
return return
access_to = access['access_to'] access_to = access['access_to']
@ -710,14 +710,14 @@ class V3StorageConnection(driver.HuaweiBase):
access_to = '*' access_to = '*'
share = self.helper._get_share_by_name(share_name, share_url_type) share = self.helper._get_share_by_name(share_name, share_url_type)
if not share: if not share:
LOG.warning(_LW('Can not get share %s.'), share_name) LOG.warning('Can not get share %s.', share_name)
return return
access_id = self.helper._get_access_from_share(share['ID'], access_to, access_id = self.helper._get_access_from_share(share['ID'], access_to,
share_proto) share_proto)
if not access_id: if not access_id:
LOG.warning(_LW('Can not get access id from share. ' LOG.warning('Can not get access id from share. '
'share_name: %s'), share_name) 'share_name: %s', share_name)
return return
self.helper._remove_access_from_share(access_id, share_proto) self.helper._remove_access_from_share(access_id, share_proto)
@ -798,7 +798,7 @@ class V3StorageConnection(driver.HuaweiBase):
share_url_type = self.helper._get_share_url_type(share_proto) share_url_type = self.helper._get_share_url_type(share_proto)
share_stor = self.helper._get_share_by_name(share_name, share_url_type) share_stor = self.helper._get_share_by_name(share_name, share_url_type)
if not share_stor: if not share_stor:
LOG.warning(_LW('Cannot get share %s.'), share_name) LOG.warning('Cannot get share %s.', share_name)
return return
share_id = share_stor['ID'] share_id = share_stor['ID']
all_accesses = self.helper._get_all_access_from_share(share_id, all_accesses = self.helper._get_all_access_from_share(share_id,
@ -920,8 +920,8 @@ class V3StorageConnection(driver.HuaweiBase):
opts['thin_provisioning'] = constants.THICK_PROVISIONING opts['thin_provisioning'] = constants.THICK_PROVISIONING
change_opts = self.check_retype_change_opts(opts, poolinfo, fs) change_opts = self.check_retype_change_opts(opts, poolinfo, fs)
LOG.info(_LI('Retyping share (%(share)s), changed options are : ' LOG.info('Retyping share (%(share)s), changed options are : '
'(%(change_opts)s).'), '(%(change_opts)s).',
{'share': old_share_name, 'change_opts': change_opts}) {'share': old_share_name, 'change_opts': change_opts})
try: try:
self.retype_share(change_opts, fs_id) self.retype_share(change_opts, fs_id)
@ -1198,9 +1198,9 @@ class V3StorageConnection(driver.HuaweiBase):
if wait_interval: if wait_interval:
return int(wait_interval) return int(wait_interval)
else: else:
LOG.info(_LI( LOG.info(
"Wait interval is not configured in huawei " "Wait interval is not configured in huawei "
"conf file. Use default: %(default_wait_interval)d."), "conf file. Use default: %(default_wait_interval)d.",
{"default_wait_interval": constants.DEFAULT_WAIT_INTERVAL}) {"default_wait_interval": constants.DEFAULT_WAIT_INTERVAL})
return constants.DEFAULT_WAIT_INTERVAL return constants.DEFAULT_WAIT_INTERVAL
@ -1211,9 +1211,9 @@ class V3StorageConnection(driver.HuaweiBase):
if timeout: if timeout:
return int(timeout) return int(timeout)
else: else:
LOG.info(_LI( LOG.info(
"Timeout is not configured in huawei conf file. " "Timeout is not configured in huawei conf file. "
"Use default: %(default_timeout)d."), "Use default: %(default_timeout)d.",
{"default_timeout": constants.DEFAULT_TIMEOUT}) {"default_timeout": constants.DEFAULT_TIMEOUT})
return constants.DEFAULT_TIMEOUT return constants.DEFAULT_TIMEOUT
@ -1736,8 +1736,8 @@ class V3StorageConnection(driver.HuaweiBase):
remote_fs_id=self.helper.get_fsid_by_name(new_share_name) remote_fs_id=self.helper.get_fsid_by_name(new_share_name)
) )
except Exception: except Exception:
LOG.exception(_LE('Failed to create a replication pair ' LOG.exception('Failed to create a replication pair '
'with host %s.'), 'with host %s.',
active_replica['host']) active_replica['host'])
raise raise
@ -1760,7 +1760,7 @@ class V3StorageConnection(driver.HuaweiBase):
replica_pair_id = self.private_storage.get(replica['share_id'], replica_pair_id = self.private_storage.get(replica['share_id'],
'replica_pair_id') 'replica_pair_id')
if replica_pair_id is None: if replica_pair_id is None:
msg = _LE("No replication pair ID recorded for share %s.") msg = ("No replication pair ID recorded for share %s.")
LOG.error(msg, replica['share_id']) LOG.error(msg, replica['share_id'])
return common_constants.STATUS_ERROR return common_constants.STATUS_ERROR
@ -1780,7 +1780,7 @@ class V3StorageConnection(driver.HuaweiBase):
try: try:
self.replica_mgr.switch_over(replica_pair_id) self.replica_mgr.switch_over(replica_pair_id)
except Exception: except Exception:
LOG.exception(_LE('Failed to promote replica %s.'), LOG.exception('Failed to promote replica %s.',
replica['id']) replica['id'])
raise raise
@ -1790,8 +1790,8 @@ class V3StorageConnection(driver.HuaweiBase):
try: try:
self.update_access(replica, access_rules, [], [], share_server) self.update_access(replica, access_rules, [], [], share_server)
except Exception: except Exception:
LOG.warning(_LW('Failed to set access rules to ' LOG.warning('Failed to set access rules to '
'new active replica %s.'), 'new active replica %s.',
replica['id']) replica['id'])
updated_new_active_access = False updated_new_active_access = False
@ -1800,8 +1800,8 @@ class V3StorageConnection(driver.HuaweiBase):
try: try:
self.clear_access(old_active_replica, share_server) self.clear_access(old_active_replica, share_server)
except Exception: except Exception:
LOG.warning(_LW("Failed to clear access rules from " LOG.warning("Failed to clear access rules from "
"old active replica %s."), "old active replica %s.",
old_active_replica['id']) old_active_replica['id'])
cleared_old_active_access = False cleared_old_active_access = False
@ -1833,8 +1833,8 @@ class V3StorageConnection(driver.HuaweiBase):
replica_pair_id = self.private_storage.get(replica['share_id'], replica_pair_id = self.private_storage.get(replica['share_id'],
'replica_pair_id') 'replica_pair_id')
if replica_pair_id is None: if replica_pair_id is None:
msg = _LW("No replication pair ID recorded for share %(share)s. " msg = ("No replication pair ID recorded for share %(share)s. "
"Continue to delete replica %(replica)s.") "Continue to delete replica %(replica)s.")
LOG.warning(msg, {'share': replica['share_id'], LOG.warning(msg, {'share': replica['share_id'],
'replica': replica['id']}) 'replica': replica['id']})
else: else:
@ -1844,6 +1844,6 @@ class V3StorageConnection(driver.HuaweiBase):
try: try:
self.delete_share(replica, share_server) self.delete_share(replica, share_server)
except Exception: except Exception:
LOG.exception(_LE('Failed to delete replica %s.'), LOG.exception('Failed to delete replica %s.',
replica['id']) replica['id'])
raise raise

View File

@ -25,7 +25,7 @@ from six.moves import http_cookiejar
from six.moves.urllib import request as urlreq # pylint: disable=E0611 from six.moves.urllib import request as urlreq # pylint: disable=E0611
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import constants
from manila import utils from manila import utils
@ -77,8 +77,8 @@ class RestHelper(object):
LOG.debug('Response Data: %(res)s.', {'res': res}) LOG.debug('Response Data: %(res)s.', {'res': res})
except Exception as err: except Exception as err:
LOG.error(_LE('\nBad response from server: %(url)s.' LOG.error('\nBad response from server: %(url)s.'
' Error: %(err)s'), {'url': url, 'err': err}) ' Error: %(err)s', {'url': url, 'err': err})
res = '{"error":{"code":%s,' \ res = '{"error":{"code":%s,' \
'"description":"Connect server error"}}' \ '"description":"Connect server error"}}' \
% constants.ERROR_CONNECT_TO_SERVER % constants.ERROR_CONNECT_TO_SERVER
@ -110,7 +110,7 @@ class RestHelper(object):
if((result['error']['code'] != 0) if((result['error']['code'] != 0)
or ("data" not in result) or ("data" not in result)
or (result['data']['deviceid'] is None)): or (result['data']['deviceid'] is None)):
LOG.error(_LE("Login to %s failed, try another."), item_url) LOG.error("Login to %s failed, try another.", item_url)
continue continue
LOG.debug('Login success: %(url)s\n', LOG.debug('Login success: %(url)s\n',
@ -139,7 +139,7 @@ class RestHelper(object):
error_code = result['error']['code'] error_code = result['error']['code']
if(error_code == constants.ERROR_CONNECT_TO_SERVER if(error_code == constants.ERROR_CONNECT_TO_SERVER
or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER):
LOG.error(_LE("Can't open the recent url, re-login.")) LOG.error("Can't open the recent url, re-login.")
deviceid = self.login() deviceid = self.login()
if deviceid is not None: if deviceid is not None:
@ -214,7 +214,7 @@ class RestHelper(object):
utils.execute('chmod', '666', filepath, run_as_root=True) utils.execute('chmod', '666', filepath, run_as_root=True)
except Exception as err: except Exception as err:
LOG.error(_LE('Bad response from change file: %s.') % err) LOG.error('Bad response from change file: %s.' % err)
raise raise
def create_share(self, share_name, fs_id, share_proto): def create_share(self, share_name, fs_id, share_proto):
@ -1151,8 +1151,8 @@ class RestHelper(object):
url = "/vlan/" + vlan_id url = "/vlan/" + vlan_id
result = self.call(url, None, 'DELETE') result = self.call(url, None, 'DELETE')
if result['error']['code'] == constants.ERROR_LOGICAL_PORT_EXIST: if result['error']['code'] == constants.ERROR_LOGICAL_PORT_EXIST:
LOG.warning(_LW('Cannot delete vlan because there is ' LOG.warning('Cannot delete vlan because there is '
'a logical port on vlan.')) 'a logical port on vlan.')
return return
self._assert_rest_result(result, _('Delete vlan error.')) self._assert_rest_result(result, _('Delete vlan error.'))
@ -1402,7 +1402,7 @@ class RestHelper(object):
if (result['error']['code'] == if (result['error']['code'] ==
constants.ERROR_REPLICATION_PAIR_NOT_EXIST): constants.ERROR_REPLICATION_PAIR_NOT_EXIST):
LOG.warning(_LW('Replication pair %s was not found.'), LOG.warning('Replication pair %s was not found.',
pair_id) pair_id)
return return

View File

@ -18,7 +18,7 @@ from oslo_utils import strutils
from manila.common import constants as common_constants from manila.common import constants as common_constants
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import constants
@ -56,7 +56,7 @@ class ReplicaPairManager(object):
pair_info = self.helper.create_replication_pair(pair_params) pair_info = self.helper.create_replication_pair(pair_params)
except Exception: except Exception:
msg = _LE("Failed to create replication pair for share %s.") msg = ("Failed to create replication pair for share %s.")
LOG.exception(msg, local_share_name) LOG.exception(msg, local_share_name)
raise raise
@ -69,8 +69,8 @@ class ReplicaPairManager(object):
pair_info = self.helper.get_replication_pair_by_id( pair_info = self.helper.get_replication_pair_by_id(
replica_pair_id) replica_pair_id)
except Exception: except Exception:
LOG.exception(_LE('Failed to get replication pair info for ' LOG.exception('Failed to get replication pair info for '
'%s.'), replica_pair_id) '%s.', replica_pair_id)
raise raise
return pair_info return pair_info
@ -114,7 +114,7 @@ class ReplicaPairManager(object):
pair_info = self._get_replication_pair_info(replica_pair_id) pair_info = self._get_replication_pair_info(replica_pair_id)
except Exception: except Exception:
# if cannot communicate to backend, return error # if cannot communicate to backend, return error
LOG.error(_LE('Cannot get replica state, return %s'), LOG.error('Cannot get replica state, return %s',
common_constants.STATUS_ERROR) common_constants.STATUS_ERROR)
return common_constants.STATUS_ERROR return common_constants.STATUS_ERROR
@ -124,8 +124,8 @@ class ReplicaPairManager(object):
try: try:
self.helper.sync_replication_pair(pair_id) self.helper.sync_replication_pair(pair_id)
except Exception as err: except Exception as err:
LOG.warning(_LW('Failed to sync replication pair %(id)s. ' LOG.warning('Failed to sync replication pair %(id)s. '
'Reason: %(err)s'), 'Reason: %(err)s',
{'id': pair_id, 'err': err}) {'id': pair_id, 'err': err})
def update_replication_pair_state(self, replica_pair_id): def update_replication_pair_state(self, replica_pair_id):
@ -133,8 +133,8 @@ class ReplicaPairManager(object):
health = self._check_replication_health(pair_info) health = self._check_replication_health(pair_info)
if health is not None: if health is not None:
LOG.warning(_LW("Cannot update the replication %s " LOG.warning("Cannot update the replication %s "
"because it's not in normal status."), "because it's not in normal status.",
replica_pair_id) replica_pair_id)
return return
@ -145,9 +145,9 @@ class ReplicaPairManager(object):
try: try:
self.helper.switch_replication_pair(replica_pair_id) self.helper.switch_replication_pair(replica_pair_id)
except Exception: except Exception:
msg = _LE('Replication pair %s primary/secondary ' msg = ('Replication pair %s primary/secondary '
'relationship is not right, try to switch over ' 'relationship is not right, try to switch over '
'again but still failed.') 'again but still failed.')
LOG.exception(msg, replica_pair_id) LOG.exception(msg, replica_pair_id)
return return
@ -158,8 +158,8 @@ class ReplicaPairManager(object):
try: try:
self.helper.set_pair_secondary_write_lock(replica_pair_id) self.helper.set_pair_secondary_write_lock(replica_pair_id)
except Exception: except Exception:
msg = _LE('Replication pair %s secondary access is R/W, ' msg = ('Replication pair %s secondary access is R/W, '
'try to set write lock but still failed.') 'try to set write lock but still failed.')
LOG.exception(msg, replica_pair_id) LOG.exception(msg, replica_pair_id)
return return
@ -173,8 +173,8 @@ class ReplicaPairManager(object):
pair_info = self._get_replication_pair_info(replica_pair_id) pair_info = self._get_replication_pair_info(replica_pair_id)
if strutils.bool_from_string(pair_info['ISPRIMARY']): if strutils.bool_from_string(pair_info['ISPRIMARY']):
LOG.warning(_LW('The replica to promote is already primary, ' LOG.warning('The replica to promote is already primary, '
'no need to switch over.')) 'no need to switch over.')
return return
replica_state = self._check_replica_state(pair_info) replica_state = self._check_replica_state(pair_info)
@ -192,14 +192,14 @@ class ReplicaPairManager(object):
# means replication pair is in an abnormal status, # means replication pair is in an abnormal status,
# ignore this exception, continue to cancel secondary write lock, # ignore this exception, continue to cancel secondary write lock,
# let secondary share accessible for disaster recovery. # let secondary share accessible for disaster recovery.
LOG.exception(_LE('Failed to split replication pair %s while ' LOG.exception('Failed to split replication pair %s while '
'switching over.'), replica_pair_id) 'switching over.', replica_pair_id)
try: try:
self.helper.cancel_pair_secondary_write_lock(replica_pair_id) self.helper.cancel_pair_secondary_write_lock(replica_pair_id)
except Exception: except Exception:
LOG.exception(_LE('Failed to cancel replication pair %s ' LOG.exception('Failed to cancel replication pair %s '
'secondary write lock.'), replica_pair_id) 'secondary write lock.', replica_pair_id)
raise raise
try: try:
@ -207,8 +207,8 @@ class ReplicaPairManager(object):
self.helper.set_pair_secondary_write_lock(replica_pair_id) self.helper.set_pair_secondary_write_lock(replica_pair_id)
self.helper.sync_replication_pair(replica_pair_id) self.helper.sync_replication_pair(replica_pair_id)
except Exception: except Exception:
LOG.exception(_LE('Failed to completely switch over ' LOG.exception('Failed to completely switch over '
'replication pair %s.'), replica_pair_id) 'replication pair %s.', replica_pair_id)
# for all the rest steps, # for all the rest steps,
# because secondary share is accessible now, # because secondary share is accessible now,
@ -222,15 +222,15 @@ class ReplicaPairManager(object):
except Exception: except Exception:
# Ignore this exception because replication pair may at some # Ignore this exception because replication pair may at some
# abnormal status that supports deleting. # abnormal status that supports deleting.
LOG.warning(_LW('Failed to split replication pair %s ' LOG.warning('Failed to split replication pair %s '
'before deleting it. Ignore this exception, ' 'before deleting it. Ignore this exception, '
'and try to delete anyway.'), 'and try to delete anyway.',
replica_pair_id) replica_pair_id)
try: try:
self.helper.delete_replication_pair(replica_pair_id) self.helper.delete_replication_pair(replica_pair_id)
except Exception: except Exception:
LOG.exception(_LE('Failed to delete replication pair %s.'), LOG.exception('Failed to delete replication pair %s.',
replica_pair_id) replica_pair_id)
raise raise

View File

@ -44,7 +44,7 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LI from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.helpers import NFSHelper from manila.share.drivers.helpers import NFSHelper
from manila.share import share_types from manila.share import share_types
@ -684,8 +684,8 @@ class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
msg = _('Failed to set quota for share %s.') % new_share_name msg = _('Failed to set quota for share %s.') % new_share_name
LOG.exception(msg) LOG.exception(msg)
raise exception.GPFSException(msg) raise exception.GPFSException(msg)
LOG.info(_LI('Existing share %(shr)s has size %(size)s KB ' LOG.info('Existing share %(shr)s has size %(size)s KB '
'which is below 1GiB, so extended it to 1GiB.') % 'which is below 1GiB, so extended it to 1GiB.' %
{'shr': new_share_name, 'size': share_size}) {'shr': new_share_name, 'size': share_size})
share_size = 1 share_size = 1
else: else:

View File

@ -28,7 +28,7 @@ from oslo_utils import importutils
import six import six
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers import generic from manila.share.drivers import generic
from manila.share import utils from manila.share import utils
@ -105,9 +105,9 @@ class LVMMixin(driver.ExecuteMixin):
share_name), run_as_root=True) share_name), run_as_root=True)
except exception.ProcessExecutionError as exc: except exception.ProcessExecutionError as exc:
if "not found" not in exc.stderr: if "not found" not in exc.stderr:
LOG.exception(_LE("Error deleting volume")) LOG.exception("Error deleting volume")
raise raise
LOG.warning(_LW("Volume not found: %s") % exc.stderr) LOG.warning("Volume not found: %s" % exc.stderr)
def _create_snapshot(self, context, snapshot): def _create_snapshot(self, context, snapshot):
"""Creates a snapshot.""" """Creates a snapshot."""
@ -253,12 +253,12 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
if 'device is busy' in six.text_type(exc): if 'device is busy' in six.text_type(exc):
raise exception.ShareBusyException(reason=share['name']) raise exception.ShareBusyException(reason=share['name'])
else: else:
LOG.info(_LI('Unable to umount: %s'), exc) LOG.info('Unable to umount: %s', exc)
# remove dir # remove dir
try: try:
os.rmdir(mount_path) os.rmdir(mount_path)
except OSError: except OSError:
LOG.warning(_LW('Unable to delete %s'), mount_path) LOG.warning('Unable to delete %s', mount_path)
def ensure_share(self, ctx, share, share_server=None): def ensure_share(self, ctx, share, share_server=None):
"""Ensure that storage are mounted and exported.""" """Ensure that storage are mounted and exported."""
@ -273,7 +273,7 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
self._get_helper(share).remove_exports( self._get_helper(share).remove_exports(
self.share_server, share['name']) self.share_server, share['name'])
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
LOG.warning(_LW("Can't remove share %r"), share['id']) LOG.warning("Can't remove share %r", share['id'])
except exception.InvalidShare as exc: except exception.InvalidShare as exc:
LOG.warning(exc.message) LOG.warning(exc.message)
@ -326,7 +326,7 @@ class LVMShareDriver(LVMMixin, driver.ShareDriver):
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
out, err = self._execute('mount', '-l', run_as_root=True) out, err = self._execute('mount', '-l', run_as_root=True)
if device_name in out: if device_name in out:
LOG.warning(_LW("%s is already mounted"), device_name) LOG.warning("%s is already mounted", device_name)
else: else:
raise raise
return mount_path return mount_path

View File

@ -27,7 +27,6 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
from manila import utils from manila import utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -71,13 +70,13 @@ class BaseDriverUtil(object):
if self._check_error(e): if self._check_error(e):
raise raise
elif x < len(self.hosts) - 1: elif x < len(self.hosts) - 1:
msg = _LE('Error running SSH command. Trying another host') msg = ('Error running SSH command. Trying another host')
LOG.error(msg) LOG.error(msg)
else: else:
raise raise
except Exception as e: except Exception as e:
if x < len(self.hosts) - 1: if x < len(self.hosts) - 1:
msg = _LE('Error running SSH command. Trying another host') msg = ('Error running SSH command. Trying another host')
LOG.error(msg) LOG.error(msg)
else: else:
raise exception.ProcessExecutionError(six.text_type(e)) raise exception.ProcessExecutionError(six.text_type(e))

View File

@ -26,7 +26,7 @@ from oslo_utils import units
from manila import context from manila import context
from manila import exception from manila import exception
from manila.i18n import _, _LW, _LI from manila.i18n import _
from manila.share import api from manila.share import api
from manila.share import driver from manila.share import driver
@ -258,8 +258,8 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""Deletes share storage.""" """Deletes share storage."""
volume_name = self._get_volume_name(context, share) volume_name = self._get_volume_name(context, share)
if volume_name == "error": if volume_name == "error":
LOG.info(_LI("Skipping deleting share with name %s, as it does not" LOG.info("Skipping deleting share with name %s, as it does not"
" exist on the backend"), share['name']) " exist on the backend", share['name'])
return return
try: try:
self._maprfs_util.delete_volume(volume_name) self._maprfs_util.delete_volume(volume_name)
@ -295,7 +295,7 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
# method shouldn`t raise exception if share does # method shouldn`t raise exception if share does
# not exist actually # not exist actually
if not self._maprfs_util.volume_exists(volume_name): if not self._maprfs_util.volume_exists(volume_name):
LOG.warning(_LW('Can not get share %s.'), share['name']) LOG.warning('Can not get share %s.', share['name'])
return return
# check update # check update
if add_rules or delete_rules: if add_rules or delete_rules:
@ -337,10 +337,10 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
raise exception.MapRFSException(msg=msg) raise exception.MapRFSException(msg=msg)
if not self.configuration.maprfs_cldb_ip: if not self.configuration.maprfs_cldb_ip:
LOG.warning(_LW('CLDB nodes are not specified!')) LOG.warning('CLDB nodes are not specified!')
if not self.configuration.maprfs_zookeeper_ip: if not self.configuration.maprfs_zookeeper_ip:
LOG.warning(_LW('Zookeeper nodes are not specified!')) LOG.warning('Zookeeper nodes are not specified!')
if not self._check_maprfs_state(): if not self._check_maprfs_state():
msg = _('MapR-FS is not in healthy state.') msg = _('MapR-FS is not in healthy state.')
@ -383,7 +383,7 @@ class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
location = self._get_share_export_locations(share, path=share_path) location = self._get_share_export_locations(share, path=share_path)
if size == 0: if size == 0:
size = used size = used
msg = _LW( msg = (
'Share %s has no size quota. Total used value will be' 'Share %s has no size quota. Total used value will be'
' used as share size') ' used as share size')
LOG.warning(msg, share['name']) LOG.warning(msg, share['name'])

View File

@ -21,7 +21,7 @@ from oslo_log import log
from oslo_utils import importutils from oslo_utils import importutils
from manila import exception from manila import exception
from manila.i18n import _, _LI from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.netapp import options from manila.share.drivers.netapp import options
from manila.share.drivers.netapp import utils as na_utils from manila.share.drivers.netapp import utils as na_utils
@ -69,7 +69,7 @@ class NetAppDriver(object):
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config) na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
app_version = na_utils.OpenStackInfo().info() app_version = na_utils.OpenStackInfo().info()
LOG.info(_LI('OpenStack OS Version Info: %s'), app_version) LOG.info('OpenStack OS Version Info: %s', app_version)
kwargs['app_version'] = app_version kwargs['app_version'] = app_version
driver_mode = NetAppDriver._get_driver_mode( driver_mode = NetAppDriver._get_driver_mode(
@ -107,8 +107,8 @@ class NetAppDriver(object):
storage_family = storage_family.lower() storage_family = storage_family.lower()
fmt = {'storage_family': storage_family, 'driver_mode': driver_mode} fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
LOG.info(_LI('Requested unified config: %(storage_family)s and ' LOG.info('Requested unified config: %(storage_family)s and '
'%(driver_mode)s.') % fmt) '%(driver_mode)s.' % fmt)
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family) family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
if family_meta is None: if family_meta is None:
@ -124,6 +124,6 @@ class NetAppDriver(object):
kwargs['netapp_mode'] = 'proxy' kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs) driver = importutils.import_object(driver_loc, *args, **kwargs)
LOG.info(_LI('NetApp driver of family %(storage_family)s and mode ' LOG.info('NetApp driver of family %(storage_family)s and mode '
'%(driver_mode)s loaded.') % fmt) '%(driver_mode)s loaded.' % fmt)
return driver return driver

View File

@ -16,7 +16,6 @@
from oslo_log import log from oslo_log import log
from oslo_utils import excutils from oslo_utils import excutils
from manila.i18n import _LE
from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp import utils as na_utils from manila.share.drivers.netapp import utils as na_utils
@ -89,7 +88,7 @@ class NetAppBaseClient(object):
result = self.send_request('license-v2-list-info') result = self.send_request('license-v2-list-info')
except netapp_api.NaApiError: except netapp_api.NaApiError:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE("Could not get licenses list.")) LOG.exception("Could not get licenses list.")
return sorted( return sorted(
[l.get_child_content('package').lower() [l.get_child_content('package').lower()

View File

@ -26,7 +26,7 @@ from oslo_utils import units
import six import six
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_base from manila.share.drivers.netapp.dataontap.client import client_base
from manila.share.drivers.netapp import utils as na_utils from manila.share.drivers.netapp import utils as na_utils
@ -327,7 +327,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
Offlines and destroys root volumes. Deletes Vserver. Offlines and destroys root volumes. Deletes Vserver.
""" """
if not self.vserver_exists(vserver_name): if not self.vserver_exists(vserver_name):
LOG.error(_LE("Vserver %s does not exist."), vserver_name) LOG.error("Vserver %s does not exist.", vserver_name)
return return
root_volume_name = self.get_vserver_root_volume_name(vserver_name) root_volume_name = self.get_vserver_root_volume_name(vserver_name)
@ -338,7 +338,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
vserver_client.offline_volume(root_volume_name) vserver_client.offline_volume(root_volume_name)
except netapp_api.NaApiError as e: except netapp_api.NaApiError as e:
if e.code == netapp_api.EVOLUMEOFFLINE: if e.code == netapp_api.EVOLUMEOFFLINE:
LOG.error(_LE("Volume %s is already offline."), LOG.error("Volume %s is already offline.",
root_volume_name) root_volume_name)
else: else:
raise raise
@ -367,8 +367,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
vserver_client.send_request('cifs-server-delete', api_args) vserver_client.send_request('cifs-server-delete', api_args)
except netapp_api.NaApiError as e: except netapp_api.NaApiError as e:
if e.code == netapp_api.EOBJECTNOTFOUND: if e.code == netapp_api.EOBJECTNOTFOUND:
LOG.error(_LE('CIFS server does not exist for ' LOG.error('CIFS server does not exist for '
'Vserver %s.'), vserver_name) 'Vserver %s.', vserver_name)
else: else:
vserver_client.send_request('cifs-server-delete') vserver_client.send_request('cifs-server-delete')
@ -1069,7 +1069,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
vserver_aggr_info_list = vserver_aggr_info_element.get_children() vserver_aggr_info_list = vserver_aggr_info_element.get_children()
if not vserver_aggr_info_list: if not vserver_aggr_info_list:
LOG.warning(_LW('No aggregates assigned to Vserver %s.'), LOG.warning('No aggregates assigned to Vserver %s.',
vserver_name) vserver_name)
# Return dict of key-value pair of aggr_name:aggr_size_available. # Return dict of key-value pair of aggr_name:aggr_size_available.
@ -1387,7 +1387,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.send_request('net-dns-create', api_args) self.send_request('net-dns-create', api_args)
except netapp_api.NaApiError as e: except netapp_api.NaApiError as e:
if e.code == netapp_api.EDUPLICATEENTRY: if e.code == netapp_api.EDUPLICATEENTRY:
LOG.error(_LE("DNS exists for Vserver.")) LOG.error("DNS exists for Vserver.")
else: else:
msg = _("Failed to configure DNS. %s") msg = _("Failed to configure DNS. %s")
raise exception.NetAppException(msg % e.message) raise exception.NetAppException(msg % e.message)
@ -2027,8 +2027,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return return
except netapp_api.NaApiError as e: except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPIERROR and 'job ID' in e.message: if e.code == netapp_api.EAPIERROR and 'job ID' in e.message:
msg = _LW('Could not unmount volume %(volume)s due to ' msg = ('Could not unmount volume %(volume)s due to '
'ongoing volume operation: %(exception)s') 'ongoing volume operation: %(exception)s')
msg_args = {'volume': volume_name, 'exception': e} msg_args = {'volume': volume_name, 'exception': e}
LOG.warning(msg, msg_args) LOG.warning(msg, msg_args)
time.sleep(retry_interval) time.sleep(retry_interval)
@ -2642,7 +2642,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
node_client.send_request('ems-autosupport-log', message_dict) node_client.send_request('ems-autosupport-log', message_dict)
LOG.debug('EMS executed successfully.') LOG.debug('EMS executed successfully.')
except netapp_api.NaApiError as e: except netapp_api.NaApiError as e:
LOG.warning(_LW('Failed to invoke EMS. %s') % e) LOG.warning('Failed to invoke EMS. %s' % e)
@na_utils.trace @na_utils.trace
def get_aggregate(self, aggregate_name): def get_aggregate(self, aggregate_name):
@ -3276,8 +3276,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
has_snapmirrors = len(snapmirrors) > 0 has_snapmirrors = len(snapmirrors) > 0
except netapp_api.NaApiError: except netapp_api.NaApiError:
msg = _LE("Could not determine if volume %s is part of " msg = ("Could not determine if volume %s is part of "
"existing snapmirror relationships.") "existing snapmirror relationships.")
LOG.exception(msg, volume['name']) LOG.exception(msg, volume['name'])
has_snapmirrors = False has_snapmirrors = False

View File

@ -25,7 +25,7 @@ from oslo_log import log
from oslo_utils import excutils from oslo_utils import excutils
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI from manila.i18n import _
from manila.share import configuration from manila.share import configuration
from manila.share import driver from manila.share import driver
from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import api as netapp_api
@ -196,7 +196,7 @@ class DataMotionSession(object):
if (e.code == netapp_api.EOBJECTNOTFOUND or if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message): "(entry doesn't exist)" in e.message):
LOG.info(_LI('No snapmirror relationship to delete')) LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False exc_context.reraise = False
if release: if release:
@ -267,7 +267,7 @@ class DataMotionSession(object):
)[0] )[0]
if snapmirror.get('relationship-status') != 'quiesced': if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException( raise exception.ReplicationException(
reason=_LE("Snapmirror relationship is not quiesced.")) reason=("Snapmirror relationship is not quiesced."))
try: try:
wait_for_quiesced() wait_for_quiesced()

View File

@ -33,7 +33,7 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
@ -158,11 +158,11 @@ class NetAppCmodeFileStorageLibrary(object):
'backend': self._backend_name, 'backend': self._backend_name,
'licenses': ', '.join(self._licenses), 'licenses': ', '.join(self._licenses),
} }
LOG.info(_LI('Available licenses on %(backend)s ' LOG.info('Available licenses on %(backend)s '
'are %(licenses)s.'), log_data) 'are %(licenses)s.', log_data)
if 'nfs' not in self._licenses and 'cifs' not in self._licenses: if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
msg = _LE('Neither NFS nor CIFS is licensed on %(backend)s') msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
msg_args = {'backend': self._backend_name} msg_args = {'backend': self._backend_name}
LOG.error(msg % msg_args) LOG.error(msg % msg_args)
@ -657,9 +657,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput, except (exception.InvalidInput,
exception.VserverNotSpecified, exception.VserverNotSpecified,
exception.VserverNotFound) as error: exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for share being " LOG.warning("Could not determine share server for share being "
"deleted: %(share)s. Deletion of share record " "deleted: %(share)s. Deletion of share record "
"will proceed anyway. Error: %(error)s"), "will proceed anyway. Error: %(error)s",
{'share': share['id'], 'error': error}) {'share': share['id'], 'error': error})
return return
@ -668,7 +668,7 @@ class NetAppCmodeFileStorageLibrary(object):
self._remove_export(share, vserver_client) self._remove_export(share, vserver_client)
self._deallocate_container(share_name, vserver_client) self._deallocate_container(share_name, vserver_client)
else: else:
LOG.info(_LI("Share %s does not exist."), share['id']) LOG.info("Share %s does not exist.", share['id'])
@na_utils.trace @na_utils.trace
def _deallocate_container(self, share_name, vserver_client): def _deallocate_container(self, share_name, vserver_client):
@ -812,9 +812,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput, except (exception.InvalidInput,
exception.VserverNotSpecified, exception.VserverNotSpecified,
exception.VserverNotFound) as error: exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for snapshot " LOG.warning("Could not determine share server for snapshot "
"being deleted: %(snap)s. Deletion of snapshot " "being deleted: %(snap)s. Deletion of snapshot "
"record will proceed anyway. Error: %(error)s"), "record will proceed anyway. Error: %(error)s",
{'snap': snapshot['id'], 'error': error}) {'snap': snapshot['id'], 'error': error})
return return
@ -825,7 +825,7 @@ class NetAppCmodeFileStorageLibrary(object):
try: try:
self._delete_snapshot(vserver_client, share_name, snapshot_name) self._delete_snapshot(vserver_client, share_name, snapshot_name)
except exception.SnapshotResourceNotFound: except exception.SnapshotResourceNotFound:
msg = _LI("Snapshot %(snap)s does not exist on share %(share)s.") msg = ("Snapshot %(snap)s does not exist on share %(share)s.")
msg_args = {'snap': snapshot_name, 'share': share_name} msg_args = {'snap': snapshot_name, 'share': share_name}
LOG.info(msg, msg_args) LOG.info(msg, msg_args)
@ -1099,9 +1099,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput, except (exception.InvalidInput,
exception.VserverNotSpecified, exception.VserverNotSpecified,
exception.VserverNotFound) as error: exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for consistency " LOG.warning("Could not determine share server for consistency "
"group being deleted: %(cg)s. Deletion of CG " "group being deleted: %(cg)s. Deletion of CG "
"record will proceed anyway. Error: %(error)s"), "record will proceed anyway. Error: %(error)s",
{'cg': cg_dict['id'], 'error': error}) {'cg': cg_dict['id'], 'error': error})
@na_utils.trace @na_utils.trace
@ -1128,9 +1128,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput, except (exception.InvalidInput,
exception.VserverNotSpecified, exception.VserverNotSpecified,
exception.VserverNotFound) as error: exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for CG snapshot " LOG.warning("Could not determine share server for CG snapshot "
"being deleted: %(snap)s. Deletion of CG snapshot " "being deleted: %(snap)s. Deletion of CG snapshot "
"record will proceed anyway. Error: %(error)s"), "record will proceed anyway. Error: %(error)s",
{'snap': snap_dict['id'], 'error': error}) {'snap': snap_dict['id'], 'error': error})
return None, None return None, None
@ -1143,8 +1143,8 @@ class NetAppCmodeFileStorageLibrary(object):
self._delete_snapshot( self._delete_snapshot(
vserver_client, share_name, snapshot_name) vserver_client, share_name, snapshot_name)
except exception.SnapshotResourceNotFound: except exception.SnapshotResourceNotFound:
msg = _LI("Snapshot %(snap)s does not exist on share " msg = ("Snapshot %(snap)s does not exist on share "
"%(share)s.") "%(share)s.")
msg_args = {'snap': snapshot_name, 'share': share_name} msg_args = {'snap': snapshot_name, 'share': share_name}
LOG.info(msg, msg_args) LOG.info(msg, msg_args)
continue continue
@ -1185,9 +1185,9 @@ class NetAppCmodeFileStorageLibrary(object):
except (exception.InvalidInput, except (exception.InvalidInput,
exception.VserverNotSpecified, exception.VserverNotSpecified,
exception.VserverNotFound) as error: exception.VserverNotFound) as error:
LOG.warning(_LW("Could not determine share server for share " LOG.warning("Could not determine share server for share "
"%(share)s during access rules update. " "%(share)s during access rules update. "
"Error: %(error)s"), "Error: %(error)s",
{'share': share['id'], 'error': error}) {'share': share['id'], 'error': error})
return return
@ -1216,8 +1216,8 @@ class NetAppCmodeFileStorageLibrary(object):
The self._ssc_stats attribute is updated with the following format. The self._ssc_stats attribute is updated with the following format.
{<aggregate_name> : {<ssc_key>: <ssc_value>}} {<aggregate_name> : {<ssc_key>: <ssc_value>}}
""" """
LOG.info(_LI("Updating storage service catalog information for " LOG.info("Updating storage service catalog information for "
"backend '%s'"), self._backend_name) "backend '%s'", self._backend_name)
# Work on a copy and update the ssc data atomically before returning. # Work on a copy and update the ssc data atomically before returning.
ssc_stats = copy.deepcopy(self._ssc_stats) ssc_stats = copy.deepcopy(self._ssc_stats)
@ -1349,7 +1349,7 @@ class NetAppCmodeFileStorageLibrary(object):
try: try:
snapmirrors = dm_session.get_snapmirrors(active_replica, replica) snapmirrors = dm_session.get_snapmirrors(active_replica, replica)
except netapp_api.NaApiError: except netapp_api.NaApiError:
LOG.exception(_LE("Could not get snapmirrors for replica %s."), LOG.exception("Could not get snapmirrors for replica %s.",
replica['id']) replica['id'])
return constants.STATUS_ERROR return constants.STATUS_ERROR
@ -1358,8 +1358,8 @@ class NetAppCmodeFileStorageLibrary(object):
try: try:
dm_session.create_snapmirror(active_replica, replica) dm_session.create_snapmirror(active_replica, replica)
except netapp_api.NaApiError: except netapp_api.NaApiError:
LOG.exception(_LE("Could not create snapmirror for " LOG.exception("Could not create snapmirror for "
"replica %s."), replica['id']) "replica %s.", replica['id'])
return constants.STATUS_ERROR return constants.STATUS_ERROR
return constants.REPLICA_STATE_OUT_OF_SYNC return constants.REPLICA_STATE_OUT_OF_SYNC
@ -1381,7 +1381,7 @@ class NetAppCmodeFileStorageLibrary(object):
share_name) share_name)
return constants.REPLICA_STATE_OUT_OF_SYNC return constants.REPLICA_STATE_OUT_OF_SYNC
except netapp_api.NaApiError: except netapp_api.NaApiError:
LOG.exception(_LE("Could not resync snapmirror.")) LOG.exception("Could not resync snapmirror.")
return constants.STATUS_ERROR return constants.STATUS_ERROR
last_update_timestamp = float( last_update_timestamp = float(
@ -1433,8 +1433,8 @@ class NetAppCmodeFileStorageLibrary(object):
context, dm_session, orig_active_replica, replica, context, dm_session, orig_active_replica, replica,
access_rules, share_server=share_server)) access_rules, share_server=share_server))
except exception.StorageCommunicationException: except exception.StorageCommunicationException:
LOG.exception(_LE("Could not communicate with the backend " LOG.exception("Could not communicate with the backend "
"for replica %s during promotion."), "for replica %s during promotion.",
replica['id']) replica['id'])
new_active_replica = copy.deepcopy(replica) new_active_replica = copy.deepcopy(replica)
new_active_replica['replica_state'] = ( new_active_replica['replica_state'] = (
@ -1524,16 +1524,16 @@ class NetAppCmodeFileStorageLibrary(object):
replica['status'] = constants.STATUS_ERROR replica['status'] = constants.STATUS_ERROR
replica['replica_state'] = constants.STATUS_ERROR replica['replica_state'] = constants.STATUS_ERROR
replica['export_locations'] = [] replica['export_locations'] = []
msg = _LE("Failed to change replica (%s) to a SnapMirror " msg = ("Failed to change replica (%s) to a SnapMirror "
"destination. Replica backend is unreachable.") "destination. Replica backend is unreachable.")
LOG.exception(msg, replica['id']) LOG.exception(msg, replica['id'])
return replica return replica
except netapp_api.NaApiError: except netapp_api.NaApiError:
replica['replica_state'] = constants.STATUS_ERROR replica['replica_state'] = constants.STATUS_ERROR
replica['export_locations'] = [] replica['export_locations'] = []
msg = _LE("Failed to change replica (%s) to a SnapMirror " msg = ("Failed to change replica (%s) to a SnapMirror "
"destination.") "destination.")
LOG.exception(msg, replica['id']) LOG.exception(msg, replica['id'])
return replica return replica
@ -1735,8 +1735,8 @@ class NetAppCmodeFileStorageLibrary(object):
share_volume, source_vserver, destination_aggregate) share_volume, source_vserver, destination_aggregate)
except Exception: except Exception:
msg = _LE("Cannot migrate share %(shr)s efficiently between " msg = ("Cannot migrate share %(shr)s efficiently between "
"%(src)s and %(dest)s.") "%(src)s and %(dest)s.")
msg_args = { msg_args = {
'shr': source_share['id'], 'shr': source_share['id'],
'src': source_share['host'], 'src': source_share['host'],
@ -1746,9 +1746,9 @@ class NetAppCmodeFileStorageLibrary(object):
else: else:
compatible = True compatible = True
else: else:
msg = _LW("Cluster credentials have not been configured " msg = ("Cluster credentials have not been configured "
"with this share driver. Cannot perform volume move " "with this share driver. Cannot perform volume move "
"operations.") "operations.")
LOG.warning(msg) LOG.warning(msg)
compatibility = { compatibility = {
@ -1774,8 +1774,8 @@ class NetAppCmodeFileStorageLibrary(object):
self._client.start_volume_move( self._client.start_volume_move(
share_volume, vserver, destination_aggregate) share_volume, vserver, destination_aggregate)
msg = _LI("Began volume move operation of share %(shr)s from %(src)s " msg = ("Began volume move operation of share %(shr)s from %(src)s "
"to %(dest)s.") "to %(dest)s.")
msg_args = { msg_args = {
'shr': source_share['id'], 'shr': source_share['id'],
'src': source_share['host'], 'src': source_share['host'],
@ -1826,8 +1826,8 @@ class NetAppCmodeFileStorageLibrary(object):
'cutover_soft_deferred'): 'cutover_soft_deferred'):
status['percent-complete'] = 100 status['percent-complete'] = 100
msg = _LI("Volume move status for share %(share)s: (State) %(state)s. " msg = ("Volume move status for share %(share)s: (State) %(state)s. "
"(Phase) %(phase)s. Details: %(details)s") "(Phase) %(phase)s. Details: %(details)s")
msg_args = { msg_args = {
'state': status['state'], 'state': status['state'],
'details': status['details'], 'details': status['details'],
@ -1854,13 +1854,13 @@ class NetAppCmodeFileStorageLibrary(object):
try: try:
self._get_volume_move_status(source_share, share_server) self._get_volume_move_status(source_share, share_server)
except exception.NetAppException: except exception.NetAppException:
LOG.exception(_LE("Could not get volume move status.")) LOG.exception("Could not get volume move status.")
return return
self._client.abort_volume_move(share_volume, vserver) self._client.abort_volume_move(share_volume, vserver)
msg = _LI("Share volume move operation for share %(shr)s from host " msg = ("Share volume move operation for share %(shr)s from host "
"%(src)s to %(dest)s was successfully aborted.") "%(src)s to %(dest)s was successfully aborted.")
msg_args = { msg_args = {
'shr': source_share['id'], 'shr': source_share['id'],
'src': source_share['host'], 'src': source_share['host'],
@ -1903,9 +1903,9 @@ class NetAppCmodeFileStorageLibrary(object):
destination_share['id']) destination_share['id'])
vserver_client.set_volume_name(share_volume, new_share_volume_name) vserver_client.set_volume_name(share_volume, new_share_volume_name)
msg = _LI("Volume move operation for share %(shr)s has completed " msg = ("Volume move operation for share %(shr)s has completed "
"successfully. Share has been moved from %(src)s to " "successfully. Share has been moved from %(src)s to "
"%(dest)s.") "%(dest)s.")
msg_args = { msg_args = {
'shr': source_share['id'], 'shr': source_share['id'],
'src': source_share['host'], 'src': source_share['host'],

View File

@ -26,7 +26,7 @@ from oslo_log import log
from oslo_utils import excutils from oslo_utils import excutils
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW, _LI from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils from manila.share.drivers.netapp import utils as na_utils
@ -47,8 +47,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
if self._have_cluster_creds: if self._have_cluster_creds:
if self.configuration.netapp_vserver: if self.configuration.netapp_vserver:
msg = _LW('Vserver is specified in the configuration. This is ' msg = ('Vserver is specified in the configuration. This is '
'ignored when the driver is managing share servers.') 'ignored when the driver is managing share servers.')
LOG.warning(msg) LOG.warning(msg)
else: # only have vserver creds, which is an error in multi_svm mode else: # only have vserver creds, which is an error in multi_svm mode
@ -191,7 +191,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
vserver_name) vserver_name)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to configure Vserver.")) LOG.error("Failed to configure Vserver.")
self._delete_vserver(vserver_name, self._delete_vserver(vserver_name,
security_services=security_services) security_services=security_services)
@ -243,7 +243,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
network_allocations = network_info.get('admin_network_allocations') network_allocations = network_info.get('admin_network_allocations')
if not network_allocations: if not network_allocations:
LOG.info(_LI('No admin network defined for Vserver %s.') % LOG.info('No admin network defined for Vserver %s.' %
vserver_name) vserver_name)
return return
@ -310,15 +310,15 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'vserver_name') if server_details else None 'vserver_name') if server_details else None
if not vserver: if not vserver:
LOG.warning(_LW("Vserver not specified for share server being " LOG.warning("Vserver not specified for share server being "
"deleted. Deletion of share server record will " "deleted. Deletion of share server record will "
"proceed anyway.")) "proceed anyway.")
return return
elif not self._client.vserver_exists(vserver): elif not self._client.vserver_exists(vserver):
LOG.warning(_LW("Could not find Vserver for share server being " LOG.warning("Could not find Vserver for share server being "
"deleted: %s. Deletion of share server " "deleted: %s. Deletion of share server "
"record will proceed anyway."), vserver) "record will proceed anyway.", vserver)
return return
self._delete_vserver(vserver, security_services=security_services) self._delete_vserver(vserver, security_services=security_services)
@ -362,4 +362,4 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
node = interface['home-node'] node = interface['home-node']
self._client.delete_vlan(node, port, vlan) self._client.delete_vlan(node, port, vlan)
except exception.NetAppException: except exception.NetAppException:
LOG.exception(_LE("Deleting Vserver VLAN failed.")) LOG.exception("Deleting Vserver VLAN failed.")

View File

@ -25,7 +25,7 @@ import re
from oslo_log import log from oslo_log import log
from manila import exception from manila import exception
from manila.i18n import _, _LI from manila.i18n import _
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils from manila.share.drivers.netapp import utils as na_utils
@ -72,8 +72,8 @@ class NetAppCmodeSingleSVMFileStorageLibrary(
'correctly.') % self._vserver 'correctly.') % self._vserver
raise exception.NetAppException(msg) raise exception.NetAppException(msg)
msg = _LI('Using Vserver %(vserver)s for backend %(backend)s with ' msg = ('Using Vserver %(vserver)s for backend %(backend)s with '
'%(creds)s credentials.') '%(creds)s credentials.')
msg_args = {'vserver': self._vserver, 'backend': self._backend_name} msg_args = {'vserver': self._vserver, 'backend': self._backend_name}
msg_args['creds'] = ('cluster' if self._have_cluster_creds msg_args['creds'] = ('cluster' if self._have_cluster_creds
else 'Vserver') else 'Vserver')

View File

@ -21,7 +21,7 @@ import copy
from oslo_log import log as logging from oslo_log import log as logging
from manila import exception from manila import exception
from manila.i18n import _, _LE from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import api as netapp_api
@ -60,9 +60,9 @@ class PerformanceLibrary(object):
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time' self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
else: else:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
LOG.exception(_LE('Could not get performance base counter ' LOG.exception('Could not get performance base counter '
'name. Performance-based scheduler ' 'name. Performance-based scheduler '
'functions may not be available.')) 'functions may not be available.')
def update_performance_cache(self, flexvol_pools, aggregate_pools): def update_performance_cache(self, flexvol_pools, aggregate_pools):
"""Called periodically to update per-pool node utilization metrics.""" """Called periodically to update per-pool node utilization metrics."""
@ -194,8 +194,8 @@ class PerformanceLibrary(object):
return max(min(100.0, node_utilization), 0) return max(min(100.0, node_utilization), 0)
except Exception: except Exception:
LOG.exception(_LE('Could not calculate node utilization for ' LOG.exception('Could not calculate node utilization for '
'node %s.'), node_name) 'node %s.', node_name)
return DEFAULT_UTILIZATION return DEFAULT_UTILIZATION
def _get_kahuna_utilization(self, counters_t1, counters_t2): def _get_kahuna_utilization(self, counters_t1, counters_t2):
@ -343,8 +343,8 @@ class PerformanceLibrary(object):
self._get_node_utilization_wafl_counters(node_name) + self._get_node_utilization_wafl_counters(node_name) +
self._get_node_utilization_processor_counters(node_name)) self._get_node_utilization_processor_counters(node_name))
except netapp_api.NaApiError: except netapp_api.NaApiError:
LOG.exception(_LE('Could not get utilization counters from node ' LOG.exception('Could not get utilization counters from node '
'%s'), node_name) '%s', node_name)
return None return None
def _get_node_utilization_system_counters(self, node_name): def _get_node_utilization_system_counters(self, node_name):

View File

@ -23,7 +23,6 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LI
from manila.share.drivers.netapp.dataontap.protocols import base from manila.share.drivers.netapp.dataontap.protocols import base
from manila.share.drivers.netapp import utils as na_utils from manila.share.drivers.netapp import utils as na_utils
@ -89,15 +88,15 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
self._is_readonly(new_rules[address])) self._is_readonly(new_rules[address]))
# Rename policy currently in force # Rename policy currently in force
LOG.info(_LI('Renaming NFS export policy for share %(share)s to ' LOG.info('Renaming NFS export policy for share %(share)s to '
'%(policy)s.') % '%(policy)s.' %
{'share': share_name, 'policy': temp_old_export_policy_name}) {'share': share_name, 'policy': temp_old_export_policy_name})
self._client.rename_nfs_export_policy(export_policy_name, self._client.rename_nfs_export_policy(export_policy_name,
temp_old_export_policy_name) temp_old_export_policy_name)
# Switch share to the new policy # Switch share to the new policy
LOG.info(_LI('Setting NFS export policy for share %(share)s to ' LOG.info('Setting NFS export policy for share %(share)s to '
'%(policy)s.') % '%(policy)s.' %
{'share': share_name, 'policy': temp_new_export_policy_name}) {'share': share_name, 'policy': temp_new_export_policy_name})
self._client.set_nfs_export_policy_for_volume( self._client.set_nfs_export_policy_for_volume(
share_name, temp_new_export_policy_name) share_name, temp_new_export_policy_name)
@ -106,8 +105,8 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
self._client.soft_delete_nfs_export_policy(temp_old_export_policy_name) self._client.soft_delete_nfs_export_policy(temp_old_export_policy_name)
# Rename new policy to its final name # Rename new policy to its final name
LOG.info(_LI('Renaming NFS export policy for share %(share)s to ' LOG.info('Renaming NFS export policy for share %(share)s to '
'%(policy)s.') % '%(policy)s.' %
{'share': share_name, 'policy': export_policy_name}) {'share': share_name, 'policy': export_policy_name})
self._client.rename_nfs_export_policy(temp_new_export_policy_name, self._client.rename_nfs_export_policy(temp_new_export_policy_name,
export_policy_name) export_policy_name)
@ -117,8 +116,8 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
"""Checks whether access rule type and level are valid.""" """Checks whether access rule type and level are valid."""
if rule['access_type'] != 'ip': if rule['access_type'] != 'ip':
msg = _("Clustered Data ONTAP supports only 'ip' type for share " msg = ("Clustered Data ONTAP supports only 'ip' type for share "
"access rules with NFS protocol.") "access rules with NFS protocol.")
raise exception.InvalidShareAccess(reason=msg) raise exception.InvalidShareAccess(reason=msg)
if rule['access_level'] not in constants.ACCESS_LEVELS: if rule['access_level'] not in constants.ACCESS_LEVELS:

View File

@ -24,7 +24,7 @@ from oslo_log import log
import six import six
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW from manila.i18n import _
from manila import version from manila import version
@ -43,9 +43,9 @@ def validate_driver_instantiation(**kwargs):
""" """
if kwargs and kwargs.get('netapp_mode') == 'proxy': if kwargs and kwargs.get('netapp_mode') == 'proxy':
return return
LOG.warning(_LW('Please use NetAppDriver in the configuration file ' LOG.warning('Please use NetAppDriver in the configuration file '
'to load the driver instead of directly specifying ' 'to load the driver instead of directly specifying '
'the driver module name.')) 'the driver module name.')
def check_flags(required_flags, configuration): def check_flags(required_flags, configuration):
@ -74,7 +74,7 @@ def setup_tracing(trace_flags_string):
flags = trace_flags_string.split(',') flags = trace_flags_string.split(',')
flags = [flag.strip() for flag in flags] flags = [flag.strip() for flag in flags]
for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)): for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)):
LOG.warning(_LW('Invalid trace flag: %s') % invalid_flag) LOG.warning('Invalid trace flag: %s' % invalid_flag)
TRACE_METHOD = 'method' in flags TRACE_METHOD = 'method' in flags
TRACE_API = 'api' in flags TRACE_API = 'api' in flags
@ -164,7 +164,7 @@ class OpenStackInfo(object):
"'%{version}\t%{release}\t%{vendor}'", "'%{version}\t%{release}\t%{vendor}'",
self.PACKAGE_NAME) self.PACKAGE_NAME)
if not out: if not out:
LOG.info(_LI('No rpm info found for %(pkg)s package.') % { LOG.info('No rpm info found for %(pkg)s package.' % {
'pkg': self.PACKAGE_NAME}) 'pkg': self.PACKAGE_NAME})
return False return False
parts = out.split() parts = out.split()
@ -173,7 +173,7 @@ class OpenStackInfo(object):
self._vendor = ' '.join(parts[2::]) self._vendor = ' '.join(parts[2::])
return True return True
except Exception as e: except Exception as e:
LOG.info(_LI('Could not run rpm command: %(msg)s.') % { LOG.info('Could not run rpm command: %(msg)s.' % {
'msg': e}) 'msg': e})
return False return False
@ -185,9 +185,9 @@ class OpenStackInfo(object):
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
self.PACKAGE_NAME) self.PACKAGE_NAME)
if not out: if not out:
LOG.info(_LI( LOG.info(
'No dpkg-query info found for %(pkg)s package.') % { 'No dpkg-query info found for %(pkg)s package.' % {
'pkg': self.PACKAGE_NAME}) 'pkg': self.PACKAGE_NAME})
return False return False
# Debian format: [epoch:]upstream_version[-debian_revision] # Debian format: [epoch:]upstream_version[-debian_revision]
deb_version = out deb_version = out
@ -204,7 +204,7 @@ class OpenStackInfo(object):
self._vendor = _vendor self._vendor = _vendor
return True return True
except Exception as e: except Exception as e:
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % { LOG.info('Could not run dpkg-query command: %(msg)s.' % {
'msg': e}) 'msg': e})
return False return False

View File

@ -16,7 +16,7 @@
from oslo_log import log from oslo_log import log
from manila import exception from manila import exception
from manila.i18n import _, _LI from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.nexenta.ns4 import nexenta_nfs_helper from manila.share.drivers.nexenta.ns4 import nexenta_nfs_helper
from manila.share.drivers.nexenta import options from manila.share.drivers.nexenta import options
@ -98,7 +98,7 @@ class NexentaNasDriver(driver.ShareDriver):
LOG.debug('Creating a snapshot of share %s.', snapshot['share_name']) LOG.debug('Creating a snapshot of share %s.', snapshot['share_name'])
snap_id = self.helper.create_snapshot( snap_id = self.helper.create_snapshot(
snapshot['share_name'], snapshot['name']) snapshot['share_name'], snapshot['name'])
LOG.info(_LI('Created snapshot %s.'), snap_id) LOG.info('Created snapshot %s.', snap_id)
def delete_snapshot(self, context, snapshot, share_server=None): def delete_snapshot(self, context, snapshot, share_server=None):
"""Delete a snapshot.""" """Delete a snapshot."""

View File

@ -18,7 +18,7 @@ from oslo_utils import excutils
from manila.common import constants as common from manila.common import constants as common
from manila import exception from manila import exception
from manila.i18n import _, _LI from manila.i18n import _
from manila.share.drivers.nexenta.ns4 import jsonrpc from manila.share.drivers.nexenta.ns4 import jsonrpc
from manila.share.drivers.nexenta import utils from manila.share.drivers.nexenta import utils
@ -112,8 +112,8 @@ class NFSHelper(object):
except exception.NexentaException as e: except exception.NexentaException as e:
with excutils.save_and_reraise_exception() as exc: with excutils.save_and_reraise_exception() as exc:
if NOT_EXIST in e.args[0]: if NOT_EXIST in e.args[0]:
LOG.info(_LI('Folder %s does not exist, it was ' LOG.info('Folder %s does not exist, it was '
'already deleted.'), folder) 'already deleted.', folder)
exc.reraise = False exc.reraise = False
def _get_share_path(self, share_name): def _get_share_path(self, share_name):
@ -137,20 +137,20 @@ class NFSHelper(object):
except exception.NexentaException as e: except exception.NexentaException as e:
with excutils.save_and_reraise_exception() as exc: with excutils.save_and_reraise_exception() as exc:
if NOT_EXIST in e.args[0]: if NOT_EXIST in e.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not ' LOG.info('Snapshot %(folder)s@%(snapshot)s does not '
'exist, it was already deleted.'), 'exist, it was already deleted.',
{ {
'folder': share_name, 'folder': share_name,
'snapshot': snapshot_name, 'snapshot': snapshot_name,
}) })
exc.reraise = False exc.reraise = False
elif DEP_CLONES in e.args[0]: elif DEP_CLONES in e.args[0]:
LOG.info(_LI( LOG.info(
'Snapshot %(folder)s@%(snapshot)s has dependent ' 'Snapshot %(folder)s@%(snapshot)s has dependent '
'clones, it will be deleted later.'), { 'clones, it will be deleted later.', {
'folder': share_name, 'folder': share_name,
'snapshot': snapshot_name 'snapshot': snapshot_name
}) })
exc.reraise = False exc.reraise = False
def create_share_from_snapshot(self, share, snapshot): def create_share_from_snapshot(self, share, snapshot):

View File

@ -18,7 +18,7 @@ from oslo_utils import units
from manila.common import constants as common from manila.common import constants as common
from manila import exception from manila import exception
from manila.i18n import _, _LW, _LE from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.nexenta.ns5 import jsonrpc from manila.share.drivers.nexenta.ns5 import jsonrpc
from manila.share.drivers.nexenta import options from manila.share.drivers.nexenta import options
@ -156,9 +156,9 @@ class NexentaNasDriver(driver.ShareDriver):
try: try:
self.delete_share(None, share) self.delete_share(None, share)
except exception.NexentaException as exc: except exception.NexentaException as exc:
LOG.warning(_LW( LOG.warning(
"Cannot destroy created filesystem: %(vol)s/%(folder)s, " "Cannot destroy created filesystem: %(vol)s/%(folder)s, "
"exception: %(exc)s"), "exception: %(exc)s",
{'vol': self.pool_name, 'folder': '/'.join( {'vol': self.pool_name, 'folder': '/'.join(
(self.fs_prefix, share['name'])), 'exc': exc}) (self.fs_prefix, share['name'])), 'exc': exc})
raise raise
@ -194,12 +194,12 @@ class NexentaNasDriver(driver.ShareDriver):
self._add_permission(share['name']) self._add_permission(share['name'])
except exception.NexentaException: except exception.NexentaException:
LOG.exception( LOG.exception(
_LE('Failed to add permissions for %s'), share['name']) ('Failed to add permissions for %s'), share['name'])
try: try:
self.delete_share(None, share) self.delete_share(None, share)
except exception.NexentaException: except exception.NexentaException:
LOG.warning(_LW("Cannot destroy cloned filesystem: " LOG.warning("Cannot destroy cloned filesystem: "
"%(vol)s/%(filesystem)s"), "%(vol)s/%(filesystem)s",
{'vol': self.pool_name, {'vol': self.pool_name,
'filesystem': '/'.join( 'filesystem': '/'.join(
(self.fs_prefix, share['name']))}) (self.fs_prefix, share['name']))})
@ -269,7 +269,7 @@ class NexentaNasDriver(driver.ShareDriver):
except exception.NexentaException as e: except exception.NexentaException as e:
if e.kwargs['code'] == 'ENOENT': if e.kwargs['code'] == 'ENOENT':
LOG.warning( LOG.warning(
_LW('snapshot %(name)s not found, response: %(msg)s'), { 'snapshot %(name)s not found, response: %(msg)s', {
'name': snapshot['name'], 'msg': e.msg}) 'name': snapshot['name'], 'msg': e.msg})
else: else:
raise raise

View File

@ -25,8 +25,8 @@ from oslo_utils import units
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _
from manila import share from manila import share
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver from manila.share import driver
from manila.share.drivers.qnap import api from manila.share.drivers.qnap import api
from manila import utils from manila import utils
@ -83,9 +83,9 @@ class QnapShareDriver(driver.ShareDriver):
try: try:
self.api_executor = self._create_api_executor() self.api_executor = self._create_api_executor()
except Exception: except Exception:
LOG.exception(_LE('Failed to create HTTP client. Check IP ' LOG.exception('Failed to create HTTP client. Check IP '
'address, port, username, password and make ' 'address, port, username, password and make '
'sure the array version is compatible.')) 'sure the array version is compatible.')
raise raise
def check_for_setup_error(self): def check_for_setup_error(self):
@ -301,7 +301,7 @@ class QnapShareDriver(driver.ShareDriver):
# Use private_storage to retreive volume ID created in the NAS. # Use private_storage to retreive volume ID created in the NAS.
volID = self.private_storage.get(share['id'], 'volID') volID = self.private_storage.get(share['id'], 'volID')
if not volID: if not volID:
LOG.warning(_LW('volID for Share %s does not exist'), share['id']) LOG.warning('volID for Share %s does not exist', share['id'])
return return
LOG.debug('volID: %s', volID) LOG.debug('volID: %s', volID)
@ -309,7 +309,7 @@ class QnapShareDriver(driver.ShareDriver):
self.configuration.qnap_poolname, self.configuration.qnap_poolname,
vol_no=volID) vol_no=volID)
if del_share is None: if del_share is None:
LOG.warning(_LW('Share %s does not exist'), share['id']) LOG.warning('Share %s does not exist', share['id'])
return return
vol_no = del_share.find('vol_no').text vol_no = del_share.find('vol_no').text
@ -350,7 +350,7 @@ class QnapShareDriver(driver.ShareDriver):
volID = self.private_storage.get(snapshot['share']['id'], 'volID') volID = self.private_storage.get(snapshot['share']['id'], 'volID')
if not volID: if not volID:
LOG.warning( LOG.warning(
_LW('volID for Share %s does not exist'), 'volID for Share %s does not exist',
snapshot['share']['id']) snapshot['share']['id'])
raise exception.ShareResourceNotFound( raise exception.ShareResourceNotFound(
share_id=snapshot['share']['id']) share_id=snapshot['share']['id'])
@ -401,7 +401,7 @@ class QnapShareDriver(driver.ShareDriver):
snapshot_id = (snapshot.get('provider_location') or snapshot_id = (snapshot.get('provider_location') or
self.private_storage.get(snapshot['id'], 'snapshot_id')) self.private_storage.get(snapshot['id'], 'snapshot_id'))
if not snapshot_id: if not snapshot_id:
LOG.warning(_LW('Snapshot %s does not exist'), snapshot['id']) LOG.warning('Snapshot %s does not exist', snapshot['id'])
return return
LOG.debug('snapshot_id: %s', snapshot_id) LOG.debug('snapshot_id: %s', snapshot_id)
@ -421,7 +421,7 @@ class QnapShareDriver(driver.ShareDriver):
snapshot_id = (snapshot.get('provider_location') or snapshot_id = (snapshot.get('provider_location') or
self.private_storage.get(snapshot['id'], 'snapshot_id')) self.private_storage.get(snapshot['id'], 'snapshot_id'))
if not snapshot_id: if not snapshot_id:
LOG.warning(_LW('Snapshot %s does not exist'), snapshot['id']) LOG.warning('Snapshot %s does not exist', snapshot['id'])
raise exception.SnapshotResourceNotFound(name=snapshot['id']) raise exception.SnapshotResourceNotFound(name=snapshot['id'])
LOG.debug('snapshot_id: %s', snapshot_id) LOG.debug('snapshot_id: %s', snapshot_id)
@ -568,7 +568,7 @@ class QnapShareDriver(driver.ShareDriver):
try: try:
self._check_share_access(share_proto, access_type) self._check_share_access(share_proto, access_type)
except exception.InvalidShareAccess: except exception.InvalidShareAccess:
LOG.warning(_LW('The denied rule is invalid and does not exist.')) LOG.warning('The denied rule is invalid and does not exist.')
return return
hostlist = self.api_executor.get_host_list() hostlist = self.api_executor.get_host_list()
@ -603,8 +603,8 @@ class QnapShareDriver(driver.ShareDriver):
"""Manages a share that exists on backend.""" """Manages a share that exists on backend."""
if share['share_proto'].lower() == 'nfs': if share['share_proto'].lower() == 'nfs':
# 10.0.0.1:/share/example # 10.0.0.1:/share/example
LOG.info(_LI("Share %(shr_path)s will be managed with ID " LOG.info("Share %(shr_path)s will be managed with ID"
"%(shr_id)s."), "%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})
@ -646,8 +646,8 @@ class QnapShareDriver(driver.ShareDriver):
volName = self.private_storage.get(share['id'], 'volName') volName = self.private_storage.get(share['id'], 'volName')
LOG.debug('volName: %s', volName) LOG.debug('volName: %s', volName)
LOG.info(_LI("Share %(shr_path)s was successfully managed with ID " LOG.info("Share %(shr_path)s was successfully managed with ID "
"%(shr_id)s."), "%(shr_id)s.",
{'shr_path': share['export_locations'][0]['path'], {'shr_path': share['export_locations'][0]['path'],
'shr_id': share['id']}) 'shr_id': share['id']})

View File

@ -28,7 +28,6 @@ import six
import six.moves.urllib.parse as urlparse import six.moves.urllib.parse as urlparse
from manila import exception from manila import exception
from manila.i18n import _LW
from manila import utils from manila import utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -48,9 +47,9 @@ class JsonRpc(object):
if self._url_scheme == 'https': if self._url_scheme == 'https':
if not self._ca_file: if not self._ca_file:
self._ca_file = False self._ca_file = False
LOG.warning(_LW( LOG.warning(
"Will not verify the server certificate of the API service" "Will not verify the server certificate of the API service"
" because the CA certificate is not available.")) " because the CA certificate is not available.")
self._id = 0 self._id = 0
self._credentials = auth.HTTPBasicAuth( self._credentials = auth.HTTPBasicAuth(
user_credentials[0], user_credentials[1]) user_credentials[0], user_credentials[1])

View File

@ -29,7 +29,7 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.quobyte import jsonrpc from manila.share.drivers.quobyte import jsonrpc
@ -121,7 +121,7 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
try: try:
self.rpc.call('getInformation', {}) self.rpc.call('getInformation', {})
except Exception as exc: except Exception as exc:
LOG.error(_LE("Could not connect to API: %s"), exc) LOG.error("Could not connect to API: %s", exc)
raise exception.QBException( raise exception.QBException(
_('Could not connect to API: %s') % exc) _('Could not connect to API: %s') % exc)
@ -143,8 +143,8 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
total = float(result['total_physical_capacity']) total = float(result['total_physical_capacity'])
used = float(result['total_physical_usage']) used = float(result['total_physical_usage'])
LOG.info(_LI('Read capacity of %(cap)s bytes and ' LOG.info('Read capacity of %(cap)s bytes and '
'usage of %(use)s bytes from backend. '), 'usage of %(use)s bytes from backend. ',
{'cap': total, 'use': used}) {'cap': total, 'use': used})
free = total - used free = total - used
if free < 0: if free < 0:
@ -244,8 +244,8 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
share['name'], share['name'],
self._get_project_name(context, share['project_id'])) self._get_project_name(context, share['project_id']))
if not volume_uuid: if not volume_uuid:
LOG.warning(_LW("No volume found for " LOG.warning("No volume found for "
"share %(project_id)s/%(name)s") "share %(project_id)s/%(name)s"
% {"project_id": share['project_id'], % {"project_id": share['project_id'],
"name": share['name']}) "name": share['name']})
return return
@ -374,7 +374,7 @@ class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,):
self._allow_access(context, share, a_rule) self._allow_access(context, share, a_rule)
else: else:
if not access_rules: if not access_rules:
LOG.warning(_LW("No access rules provided in update_access.")) LOG.warning("No access rules provided in update_access.")
else: else:
# Handling access rule recovery # Handling access rule recovery
existing_rules = self._fetch_existing_access(context, share) existing_rules = self._fetch_existing_access(context, share)

View File

@ -32,7 +32,7 @@ from manila.common import constants as const
from manila import compute from manila import compute
from manila import context from manila import context
from manila import exception from manila import exception
from manila.i18n import _, _LW from manila.i18n import _
from manila.network.linux import ip_lib from manila.network.linux import ip_lib
from manila.network.neutron import api as neutron from manila.network.neutron import api as neutron
from manila import utils from manila import utils
@ -327,8 +327,8 @@ class ServiceInstanceManager(object):
name = name or self.get_config_option( name = name or self.get_config_option(
"service_instance_security_group") "service_instance_security_group")
if not name: if not name:
LOG.warning(_LW("Name for service instance security group is not " LOG.warning("Name for service instance security group is not "
"provided. Skipping security group step.")) "provided. Skipping security group step.")
return None return None
s_groups = [s for s in self.compute_api.security_group_list(context) s_groups = [s for s in self.compute_api.security_group_list(context)
if s.name == name] if s.name == name]
@ -359,15 +359,15 @@ class ServiceInstanceManager(object):
def ensure_service_instance(self, context, server): def ensure_service_instance(self, context, server):
"""Ensures that server exists and active.""" """Ensures that server exists and active."""
if 'instance_id' not in server: if 'instance_id' not in server:
LOG.warning(_LW("Unable to check server existence since " LOG.warning("Unable to check server existence since "
"'instance_id' key is not set in share server " "'instance_id' key is not set in share server "
"backend details.")) "backend details.")
return False return False
try: try:
inst = self.compute_api.server_get(self.admin_context, inst = self.compute_api.server_get(self.admin_context,
server['instance_id']) server['instance_id'])
except exception.InstanceNotFound: except exception.InstanceNotFound:
LOG.warning(_LW("Service instance %s does not exist."), LOG.warning("Service instance %s does not exist.",
server['instance_id']) server['instance_id'])
return False return False
if inst['status'] == 'ACTIVE': if inst['status'] == 'ACTIVE':
@ -510,11 +510,11 @@ class ServiceInstanceManager(object):
raise exception.ServiceInstanceException( raise exception.ServiceInstanceException(
_('Neither service instance password nor key are available.')) _('Neither service instance password nor key are available.'))
if not key_path: if not key_path:
LOG.warning(_LW( LOG.warning(
'No key path is available. May be non-existent key path is ' 'No key path is available. May be non-existent key path is '
'provided. Check path_to_private_key (current value ' 'provided. Check path_to_private_key (current value '
'%(private_path)s) and path_to_public_key (current value ' '%(private_path)s) and path_to_public_key (current value '
'%(public_path)s) in manila configuration file.'), dict( '%(public_path)s) in manila configuration file.', dict(
private_path=self.path_to_private_key, private_path=self.path_to_private_key,
public_path=self.path_to_public_key)) public_path=self.path_to_public_key))
network_data = self.network_helper.setup_network(network_info) network_data = self.network_helper.setup_network(network_info)
@ -965,8 +965,8 @@ class NeutronNetworkHelper(BaseNetworkhelper):
addr_list = device.addr.list() addr_list = device.addr.list()
except Exception as e: except Exception as e:
if 'does not exist' in six.text_type(e): if 'does not exist' in six.text_type(e):
LOG.warning(_LW( LOG.warning(
"Device %s does not exist anymore.") % device.name) "Device %s does not exist anymore." % device.name)
else: else:
raise raise
for addr in addr_list: for addr in addr_list:

View File

@ -23,11 +23,11 @@ import six
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
from manila import utils
from manila.i18n import _, _LI, _LW
from manila import exception from manila import exception
from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share import utils as share_utils from manila.share import utils as share_utils
from manila import utils
tegile_opts = [ tegile_opts = [
cfg.StrOpt('tegile_nas_server', cfg.StrOpt('tegile_nas_server',
@ -201,7 +201,7 @@ class TegileShareDriver(driver.ShareDriver):
# of 'sharename' if inherited share properties are selected. # of 'sharename' if inherited share properties are selected.
ip, real_share_name = self._api('createShare', params).split() ip, real_share_name = self._api('createShare', params).split()
LOG.info(_LI("Created share %(sharename)s, share id %(shid)s."), LOG.info("Created share %(sharename)s, share id %(shid)s.",
{'sharename': share_name, 'shid': share['id']}) {'sharename': share_name, 'shid': share['id']})
return self._get_location_path(real_share_name, share_proto, ip) return self._get_location_path(real_share_name, share_proto, ip)
@ -273,8 +273,8 @@ class TegileShareDriver(driver.ShareDriver):
params = (share, snap_name, False) params = (share, snap_name, False)
LOG.info(_LI('Creating snapshot for share_name=%(shr)s' LOG.info('Creating snapshot for share_name=%(shr)s'
' snap_name=%(name)s'), ' snap_name=%(name)s',
{'shr': share_name, 'name': snap_name}) {'shr': share_name, 'name': snap_name})
self._api('createShareSnapshot', params) self._api('createShareSnapshot', params)
@ -383,18 +383,18 @@ class TegileShareDriver(driver.ShareDriver):
def _check_share_access(self, share_proto, access_type): def _check_share_access(self, share_proto, access_type):
if share_proto == 'CIFS' and access_type != 'user': if share_proto == 'CIFS' and access_type != 'user':
reason = _LW('Only USER access type is allowed for ' reason = ('Only USER access type is allowed for '
'CIFS shares.') 'CIFS shares.')
LOG.warning(reason) LOG.warning(reason)
raise exception.InvalidShareAccess(reason=reason) raise exception.InvalidShareAccess(reason=reason)
elif share_proto == 'NFS' and access_type not in ('ip', 'user'): elif share_proto == 'NFS' and access_type not in ('ip', 'user'):
reason = _LW('Only IP or USER access types are allowed for ' reason = ('Only IP or USER access types are allowed for '
'NFS shares.') 'NFS shares.')
LOG.warning(reason) LOG.warning(reason)
raise exception.InvalidShareAccess(reason=reason) raise exception.InvalidShareAccess(reason=reason)
elif share_proto not in ('NFS', 'CIFS'): elif share_proto not in ('NFS', 'CIFS'):
reason = _LW('Unsupported protocol \"%s\" specified for ' reason = ('Unsupported protocol \"%s\" specified for '
'access rule.') % share_proto 'access rule.') % share_proto
raise exception.InvalidShareAccess(reason=reason) raise exception.InvalidShareAccess(reason=reason)
@debugger @debugger

View File

@ -21,7 +21,7 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW from manila.i18n import _
from manila.share.drivers import service_instance from manila.share.drivers import service_instance
from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import windows_utils
from manila.share.drivers.windows import winrm_helper from manila.share.drivers.windows import winrm_helper
@ -232,21 +232,21 @@ class WindowsServiceInstanceManager(service_instance.ServiceInstanceManager):
def get_valid_security_service(self, security_services): def get_valid_security_service(self, security_services):
if not security_services: if not security_services:
LOG.info(_LI("No security services provided.")) LOG.info("No security services provided.")
elif len(security_services) > 1: elif len(security_services) > 1:
LOG.warning(_LW("Multiple security services provided. Only one " LOG.warning("Multiple security services provided. Only one "
"security service of type 'active_directory' " "security service of type 'active_directory' "
"is supported.")) "is supported.")
else: else:
security_service = security_services[0] security_service = security_services[0]
security_service_type = security_service['type'] security_service_type = security_service['type']
if security_service_type == 'active_directory': if security_service_type == 'active_directory':
return security_service return security_service
else: else:
LOG.warning(_LW("Only security services of type " LOG.warning("Only security services of type "
"'active_directory' are supported. " "'active_directory' are supported. "
"Retrieved security " "Retrieved security "
"service type: %(sec_type)s."), "service type: %(sec_type)s.",
{'sec_type': security_service_type}) {'sec_type': security_service_type})
return None return None

View File

@ -18,7 +18,6 @@ import os
from oslo_log import log from oslo_log import log
from oslo_utils import units from oslo_utils import units
from manila.i18n import _LW
from manila.share import driver as base_driver from manila.share import driver as base_driver
from manila.share.drivers import generic from manila.share.drivers import generic
from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import service_instance
@ -67,9 +66,9 @@ class WindowsSMBDriver(generic.GenericShareDriver):
security_service['user'], security_service['user'],
security_service['password']) security_service['password'])
except Exception as exc: except Exception as exc:
LOG.warning(_LW("Failed to remove service instance " LOG.warning("Failed to remove service instance "
"%(instance_id)s from domain %(domain)s. " "%(instance_id)s from domain %(domain)s. "
"Exception: %(exc)s."), "Exception: %(exc)s.",
dict(instance_id=server_details['instance_id'], dict(instance_id=server_details['instance_id'],
domain=security_service['domain'], domain=security_service['domain'],
exc=exc)) exc=exc))

View File

@ -20,7 +20,6 @@ from oslo_log import log
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _LI, _LW
from manila.share.drivers import helpers from manila.share.drivers import helpers
from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import windows_utils
@ -78,7 +77,7 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
'-ReadAccess', "*%s" % self._NULL_SID] '-ReadAccess', "*%s" % self._NULL_SID]
self._remote_exec(server, cmd) self._remote_exec(server, cmd)
else: else:
LOG.info(_LI("Skipping creating export %s as it already exists."), LOG.info("Skipping creating export %s as it already exists.",
share_name) share_name)
return self.get_exports_for_share(server, export_location) return self.get_exports_for_share(server, export_location)
@ -127,20 +126,20 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
share_name) share_name)
else: else:
LOG.warning( LOG.warning(
_LW("Found explicit deny ACE rule that was not " "Found explicit deny ACE rule that was not "
"created by Manila and will be ignored: %s"), "created by Manila and will be ignored: %s",
raw_acl) raw_acl)
continue continue
if access_level == self._ACCESS_LEVEL_CUSTOM: if access_level == self._ACCESS_LEVEL_CUSTOM:
LOG.warning( LOG.warning(
_LW("Found 'custom' ACE rule that will be ignored: %s"), "Found 'custom' ACE rule that will be ignored: %s",
raw_acl) raw_acl)
continue continue
elif access_right == self._WIN_ACCESS_RIGHT_FULL: elif access_right == self._WIN_ACCESS_RIGHT_FULL:
LOG.warning( LOG.warning(
_LW("Account '%(access_to)s' was given full access " "Account '%(access_to)s' was given full access "
"right on share %(share_name)s. Manila only " "right on share %(share_name)s. Manila only "
"grants 'change' access."), "grants 'change' access.",
{'access_to': access_to, {'access_to': access_to,
'share_name': share_name}) 'share_name': share_name})
@ -159,8 +158,8 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
"-AccountName", "'%s'" % access_to, "-Force"] "-AccountName", "'%s'" % access_to, "-Force"]
self._remote_exec(server, cmd) self._remote_exec(server, cmd)
self._refresh_acl(server, share_name) self._refresh_acl(server, share_name)
LOG.info(_LI("Granted %(access_level)s access to '%(access_to)s' " LOG.info("Granted %(access_level)s access to '%(access_to)s' "
"on share %(share_name)s"), "on share %(share_name)s",
{'access_level': access_level, {'access_level': access_level,
'access_to': access_to, 'access_to': access_to,
'share_name': share_name}) 'share_name': share_name})
@ -174,8 +173,8 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
'-AccountName', '"%s"' % access_to, '-Force'] '-AccountName', '"%s"' % access_to, '-Force']
self._remote_exec(server, cmd) self._remote_exec(server, cmd)
self._refresh_acl(server, share_name) self._refresh_acl(server, share_name)
LOG.info(_LI("Revoked access to '%(access_to)s' " LOG.info("Revoked access to '%(access_to)s' "
"on share %(share_name)s"), "on share %(share_name)s",
{'access_to': access_to, {'access_to': access_to,
'share_name': share_name}) 'share_name': share_name})
@ -207,12 +206,12 @@ class WindowsSMBHelper(helpers.CIFSHelperBase):
except (exception.InvalidShareAccess, except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel): exception.InvalidShareAccessLevel):
# This check will allow invalid rules to be deleted. # This check will allow invalid rules to be deleted.
LOG.warning(_LW( LOG.warning(
"Unsupported access level %(level)s or access type " "Unsupported access level %(level)s or access type "
"%(type)s, skipping removal of access rule to " "%(type)s, skipping removal of access rule to "
"%(to)s.") % {'level': deleted_rule['access_level'], "%(to)s." % {'level': deleted_rule['access_level'],
'type': deleted_rule['access_type'], 'type': deleted_rule['access_type'],
'to': deleted_rule['access_to']}) 'to': deleted_rule['access_to']})
continue continue
self._revoke_share_access(server, share_name, self._revoke_share_access(server, share_name,
deleted_rule['access_to']) deleted_rule['access_to'])

View File

@ -17,7 +17,6 @@ import re
from oslo_log import log from oslo_log import log
from manila.i18n import _LI
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -125,9 +124,9 @@ class WindowsUtils(object):
# NOTE(lpetrut): An instance reboot is needed but this will be # NOTE(lpetrut): An instance reboot is needed but this will be
# performed using Nova so that the instance state can be # performed using Nova so that the instance state can be
# retrieved easier. # retrieved easier.
LOG.info(_LI("Joining server %(ip)s to Active Directory " LOG.info("Joining server %(ip)s to Active Directory "
"domain %(domain)s"), dict(ip=server['ip'], "domain %(domain)s", dict(ip=server['ip'],
domain=domain)) domain=domain))
cmds = [ cmds = [
('$password = "%s" | ' ('$password = "%s" | '
'ConvertTo-SecureString -asPlainText -Force' % admin_password), 'ConvertTo-SecureString -asPlainText -Force' % admin_password),

View File

@ -30,7 +30,7 @@ from oslo_utils import timeutils
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LI, _LW from manila.i18n import _
from manila.share import configuration from manila.share import configuration
from manila.share import driver from manila.share import driver
from manila.share.drivers.zfsonlinux import utils as zfs_utils from manila.share.drivers.zfsonlinux import utils as zfs_utils
@ -279,7 +279,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
self.zfs('destroy', '-f', name) self.zfs('destroy', '-f', name)
return return
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
LOG.info(_LI("Failed to destroy ZFS dataset, retrying one time")) LOG.info("Failed to destroy ZFS dataset, retrying one time")
# NOTE(bswartz): There appears to be a bug in ZFS when creating and # NOTE(bswartz): There appears to be a bug in ZFS when creating and
# destroying datasets concurrently where the filesystem remains mounted # destroying datasets concurrently where the filesystem remains mounted
@ -529,8 +529,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
break break
else: else:
LOG.warning( LOG.warning(
_LW("Share with '%(id)s' ID and '%(name)s' NAME is " "Share with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted."), "absent on backend. Nothing has been deleted.",
{'id': share['id'], 'name': dataset_name}) {'id': share['id'], 'name': dataset_name})
self.private_storage.delete(share['id']) self.private_storage.delete(share['id'])
@ -574,8 +574,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
break break
else: else:
LOG.warning( LOG.warning(
_LW("Snapshot with '%(id)s' ID and '%(name)s' NAME is " "Snapshot with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted."), "absent on backend. Nothing has been deleted.",
{'id': snapshot['id'], 'name': snapshot_name}) {'id': snapshot['id'], 'name': snapshot_name})
@ensure_share_server_not_provided @ensure_share_server_not_provided
@ -972,8 +972,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
break break
else: else:
LOG.warning( LOG.warning(
_LW("Share replica with '%(id)s' ID and '%(name)s' NAME is " "Share replica with '%(id)s' ID and '%(name)s' NAME is "
"absent on backend. Nothing has been deleted."), "absent on backend. Nothing has been deleted.",
{'id': replica['id'], 'name': dataset_name}) {'id': replica['id'], 'name': dataset_name})
self.private_storage.delete(replica['id']) self.private_storage.delete(replica['id'])
@ -1131,7 +1131,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
'sudo', 'zfs', 'receive', '-vF', dataset_name, 'sudo', 'zfs', 'receive', '-vF', dataset_name,
) )
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"), LOG.warning("Failed to sync replica %(id)s. %(e)s",
{'id': repl['id'], 'e': e}) {'id': repl['id'], 'e': e})
replica_dict[repl['id']]['replica_state'] = ( replica_dict[repl['id']]['replica_state'] = (
constants.REPLICA_STATE_OUT_OF_SYNC) constants.REPLICA_STATE_OUT_OF_SYNC)
@ -1153,7 +1153,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
constants.REPLICA_STATE_IN_SYNC) constants.REPLICA_STATE_IN_SYNC)
except Exception as e: except Exception as e:
LOG.warning( LOG.warning(
_LW("Failed to update currently active replica. \n%s"), e) "Failed to update currently active replica. \n%s", e)
replica_dict[active_replica['id']]['replica_state'] = ( replica_dict[active_replica['id']]['replica_state'] = (
constants.REPLICA_STATE_OUT_OF_SYNC) constants.REPLICA_STATE_OUT_OF_SYNC)
@ -1185,7 +1185,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
'sudo', 'zfs', 'receive', '-vF', dataset_name, 'sudo', 'zfs', 'receive', '-vF', dataset_name,
) )
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"), LOG.warning("Failed to sync replica %(id)s. %(e)s",
{'id': repl['id'], 'e': e}) {'id': repl['id'], 'e': e})
replica_dict[repl['id']]['replica_state'] = ( replica_dict[repl['id']]['replica_state'] = (
constants.REPLICA_STATE_OUT_OF_SYNC) constants.REPLICA_STATE_OUT_OF_SYNC)
@ -1274,7 +1274,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
) )
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning( LOG.warning(
_LW("Failed to sync snapshot instance %(id)s. %(e)s"), "Failed to sync snapshot instance %(id)s. %(e)s",
{'id': replica_snapshot['id'], 'e': e}) {'id': replica_snapshot['id'], 'e': e})
replica_snapshots_dict[replica_snapshot['id']]['status'] = ( replica_snapshots_dict[replica_snapshot['id']]['status'] = (
constants.STATUS_ERROR) constants.STATUS_ERROR)
@ -1526,8 +1526,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
x for x in line.strip().split(' ') if x != ''][1] x for x in line.strip().split(' ') if x != ''][1]
self.execute('sudo', 'kill', '-9', migr_pid) self.execute('sudo', 'kill', '-9', migr_pid)
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW( LOG.warning(
"Caught following error trying to kill migration process: %s"), "Caught following error trying to kill migration process: %s",
e) e)
# Sleep couple of seconds before destroying updated objects # Sleep couple of seconds before destroying updated objects
@ -1544,9 +1544,9 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
'sudo', 'zfs', 'destroy', '-r', dst_dataset_name, 'sudo', 'zfs', 'destroy', '-r', dst_dataset_name,
) )
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW( LOG.warning(
"Failed to destroy destination dataset with following error: " "Failed to destroy destination dataset with following error: "
"%s"), "%s",
e) e)
LOG.debug( LOG.debug(

View File

@ -28,7 +28,7 @@ import six
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila.share import driver from manila.share import driver
from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.ganesha import utils as ganesha_utils
from manila import utils from manila import utils
@ -103,7 +103,7 @@ class ExecuteMixin(driver.ExecuteMixin):
try: try:
return self.execute(*cmd, **kwargs) return self.execute(*cmd, **kwargs)
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.warning(_LW("Failed to run command, got error: %s"), e) LOG.warning("Failed to run command, got error: %s", e)
raise raise
def _get_option(self, resource_name, option_name, pool_level=False, def _get_option(self, resource_name, option_name, pool_level=False,
@ -201,8 +201,8 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
self._is_kernel_version = True self._is_kernel_version = True
except exception.ProcessExecutionError as e: except exception.ProcessExecutionError as e:
LOG.info( LOG.info(
_LI("Looks like ZFS kernel module is absent. " "Looks like ZFS kernel module is absent. "
"Assuming FUSE version is installed. Error: %s"), e) "Assuming FUSE version is installed. Error: %s", e)
self._is_kernel_version = False self._is_kernel_version = False
return self._is_kernel_version return self._is_kernel_version
@ -215,7 +215,7 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
try: try:
self.execute('sudo', 'exportfs') self.execute('sudo', 'exportfs')
except exception.ProcessExecutionError: except exception.ProcessExecutionError:
LOG.exception(_LE("Call of 'exportfs' utility returned error.")) LOG.exception("Call of 'exportfs' utility returned error.")
raise raise
# Init that class instance attribute on start of manila-share service # Init that class instance attribute on start of manila-share service
@ -300,8 +300,8 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
break break
else: else:
LOG.warning( LOG.warning(
_LW("Dataset with '%(name)s' NAME is absent on backend. " "Dataset with '%(name)s' NAME is absent on backend. "
"Access rules were not applied."), {'name': dataset_name}) "Access rules were not applied.", {'name': dataset_name})
# NOTE(vponomaryov): Setting of ZFS share options does not remove rules # NOTE(vponomaryov): Setting of ZFS share options does not remove rules
# that were added and then removed. So, remove them explicitly. # that were added and then removed. So, remove them explicitly.

View File

@ -18,7 +18,7 @@ from oslo_log import log
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LW from manila.i18n import _
from manila.share.drivers.zfssa import restclient from manila.share.drivers.zfssa import restclient
@ -233,12 +233,12 @@ class ZFSSAApi(object):
svc = self.share_path % (pool, project, share) svc = self.share_path % (pool, project, share)
ret = self.rclient.delete(svc) ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT: if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_LE('Error deleting ' exception_msg = (('Error deleting '
'share: %(share)s to ' 'share: %(share)s to '
'pool: %(pool)s ' 'pool: %(pool)s '
'project: %(project)s ' 'project: %(project)s '
'return code: %(ret.status)d ' 'return code: %(ret.status)d '
'message: %(ret.data)s.'), 'message: %(ret.data)s.'),
{'share': share, {'share': share,
'pool': pool, 'pool': pool,
'project': project, 'project': project,
@ -391,7 +391,7 @@ class ZFSSAApi(object):
svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']} svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']}
ret = self.rclient.get(svc) ret = self.rclient.get(svc)
if ret.status == restclient.Status.OK: if ret.status == restclient.Status.OK:
LOG.warning(_LW('Property %s already exists.'), schema['property']) LOG.warning('Property %s already exists.', schema['property'])
return return
ret = self.rclient.post(base, schema) ret = self.rclient.post(base, schema)
if ret.status != restclient.Status.CREATED: if ret.status != restclient.Status.CREATED:

View File

@ -24,7 +24,6 @@ import six
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
from manila.share import driver from manila.share import driver
from manila.share.drivers.zfssa import zfssarest from manila.share.drivers.zfssa import zfssarest
@ -278,7 +277,7 @@ class ZFSSAShareDriver(driver.ShareDriver):
snapshot['share_id'], snapshot['share_id'],
snapshot['id']) snapshot['id'])
if has_clones: if has_clones:
LOG.error(_LE("snapshot %s: has clones"), snapshot['id']) LOG.error("snapshot %s: has clones", snapshot['id'])
raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot['id']) raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot['id'])
self.zfssa.delete_snapshot(lcfg.zfssa_pool, self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project, lcfg.zfssa_project,
@ -303,7 +302,7 @@ class ZFSSAShareDriver(driver.ShareDriver):
try: try:
details = self._get_share_details(name) details = self._get_share_details(name)
except Exception: except Exception:
LOG.error(_LE('Cannot manage share %s'), name) LOG.error('Cannot manage share %s', name)
raise raise
lcfg = self.configuration lcfg = self.configuration
@ -438,8 +437,8 @@ class ZFSSAShareDriver(driver.ShareDriver):
new_size_byte = int(new_size) * units.Gi new_size_byte = int(new_size) * units.Gi
if used_space > new_size_byte: if used_space > new_size_byte:
LOG.error(_LE('%(used).1fGB of share %(id)s is already used. ' LOG.error('%(used).1fGB of share %(id)s is already used. '
'Cannot shrink to %(newsize)dGB.'), 'Cannot shrink to %(newsize)dGB.',
{'used': float(used_space) / units.Gi, {'used': float(used_space) / units.Gi,
'id': share['id'], 'id': share['id'],
'newsize': new_size}) 'newsize': new_size})

View File

@ -29,7 +29,6 @@ from oslo_log import log
import six import six
from manila import context as ctxt from manila import context as ctxt
from manila.i18n import _LW
hook_options = [ hook_options = [
@ -110,7 +109,7 @@ class HookBase(object):
*args, **kwargs) *args, **kwargs)
except Exception as e: except Exception as e:
if self.suppress_pre_hooks_errors: if self.suppress_pre_hooks_errors:
LOG.warning(_LW("\nSuppressed exception in pre hook. %s\n"), e) LOG.warning("\nSuppressed exception in pre hook. %s\n", e)
pre_data = e pre_data = e
else: else:
raise raise
@ -135,7 +134,7 @@ class HookBase(object):
except Exception as e: except Exception as e:
if self.suppress_post_hooks_errors: if self.suppress_post_hooks_errors:
LOG.warning( LOG.warning(
_LW("\nSuppressed exception in post hook. %s\n"), e) "\nSuppressed exception in post hook. %s\n", e)
post_data = e post_data = e
else: else:
raise raise

View File

@ -38,7 +38,7 @@ from manila import context
from manila import coordination from manila import coordination
from manila.data import rpcapi as data_rpcapi from manila.data import rpcapi as data_rpcapi
from manila import exception from manila import exception
from manila.i18n import _, _LE, _LI, _LW from manila.i18n import _
from manila import manager from manila import manager
from manila import quota from manila import quota
from manila.share import access from manila.share import access
@ -204,8 +204,8 @@ class ShareManager(manager.SchedulerDependentManager):
share_driver = self.configuration.share_driver share_driver = self.configuration.share_driver
if share_driver in MAPPING: if share_driver in MAPPING:
msg_args = {'old': share_driver, 'new': MAPPING[share_driver]} msg_args = {'old': share_driver, 'new': MAPPING[share_driver]}
LOG.warning(_LW("Driver path %(old)s is deprecated, update your " LOG.warning("Driver path %(old)s is deprecated, update your "
"configuration to the new path %(new)s"), "configuration to the new path %(new)s",
msg_args) msg_args)
share_driver = MAPPING[share_driver] share_driver = MAPPING[share_driver]
@ -250,8 +250,8 @@ class ShareManager(manager.SchedulerDependentManager):
try: try:
pool = self.driver.get_pool(share_instance) pool = self.driver.get_pool(share_instance)
except Exception: except Exception:
LOG.exception(_LE("Failed to fetch pool name for share: " LOG.exception("Failed to fetch pool name for share: "
"%(share)s."), "%(share)s.",
{'share': share_instance['id']}) {'share': share_instance['id']})
return return
@ -277,10 +277,10 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.check_for_setup_error() self.driver.check_for_setup_error()
except Exception: except Exception:
LOG.exception( LOG.exception(
_LE("Error encountered during initialization of driver " ("Error encountered during initialization of driver "
"'%(name)s' on '%(host)s' host."), { "'%(name)s' on '%(host)s' host."), {
"name": self.driver.__class__.__name__, "name": self.driver.__class__.__name__,
"host": self.host, "host": self.host,
} }
) )
self.driver.initialized = False self.driver.initialized = False
@ -298,8 +298,8 @@ class ShareManager(manager.SchedulerDependentManager):
if share_ref.is_busy: if share_ref.is_busy:
LOG.info( LOG.info(
_LI("Share instance %(id)s: skipping export, " "Share instance %(id)s: skipping export, "
"because it is busy with an active task: %(task)s."), "because it is busy with an active task: %(task)s.",
{'id': share_instance['id'], {'id': share_instance['id'],
'task': share_ref['task_state']}, 'task': share_ref['task_state']},
) )
@ -307,8 +307,8 @@ class ShareManager(manager.SchedulerDependentManager):
if share_instance['status'] != constants.STATUS_AVAILABLE: if share_instance['status'] != constants.STATUS_AVAILABLE:
LOG.info( LOG.info(
_LI("Share instance %(id)s: skipping export, " "Share instance %(id)s: skipping export, "
"because it has '%(status)s' status."), "because it has '%(status)s' status.",
{'id': share_instance['id'], {'id': share_instance['id'],
'status': share_instance['status']}, 'status': share_instance['status']},
) )
@ -322,9 +322,9 @@ class ShareManager(manager.SchedulerDependentManager):
export_locations = self.driver.ensure_share( export_locations = self.driver.ensure_share(
ctxt, share_instance, share_server=share_server) ctxt, share_instance, share_server=share_server)
except Exception: except Exception:
LOG.exception(_LE("Caught exception trying ensure " LOG.exception("Caught exception trying ensure "
"share '%(s_id)s'."), {'s_id': "share '%(s_id)s'.",
share_instance['id']}) {'s_id': share_instance['id']})
continue continue
if export_locations: if export_locations:
@ -341,8 +341,8 @@ class ShareManager(manager.SchedulerDependentManager):
ctxt, share_instance['id'], share_server=share_server) ctxt, share_instance['id'], share_server=share_server)
except Exception: except Exception:
LOG.exception( LOG.exception(
_LE("Unexpected error occurred while updating access " ("Unexpected error occurred while updating access "
"rules for share instance %(s_id)s."), "rules for share instance %(s_id)s."),
{'s_id': share_instance['id']}, {'s_id': share_instance['id']},
) )
@ -369,14 +369,14 @@ class ShareManager(manager.SchedulerDependentManager):
self.snapshot_access_helper.update_access_rules( self.snapshot_access_helper.update_access_rules(
ctxt, snap_instance['id'], share_server) ctxt, snap_instance['id'], share_server)
except Exception: except Exception:
LOG.exception(_LE( LOG.exception(
"Unexpected error occurred while updating " "Unexpected error occurred while updating "
"access rules for snapshot instance %s."), "access rules for snapshot instance %s.",
snap_instance['id']) snap_instance['id'])
self.publish_service_capabilities(ctxt) self.publish_service_capabilities(ctxt)
LOG.info(_LI("Finished initialization of driver: '%(driver)s" LOG.info("Finished initialization of driver: '%(driver)s"
"@%(host)s'"), "@%(host)s'",
{"driver": self.driver.__class__.__name__, {"driver": self.driver.__class__.__name__,
"host": self.host}) "host": self.host})
@ -431,7 +431,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, parent_share_server_id) context, parent_share_server_id)
except exception.ShareServerNotFound: except exception.ShareServerNotFound:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
error(_LE("Parent share server %s does not exist."), error("Parent share server %s does not exist.",
parent_share_server_id) parent_share_server_id)
if parent_share_server['status'] != constants.STATUS_ACTIVE: if parent_share_server['status'] != constants.STATUS_ACTIVE:
@ -439,8 +439,8 @@ class ShareManager(manager.SchedulerDependentManager):
'id': parent_share_server_id, 'id': parent_share_server_id,
'status': parent_share_server['status'], 'status': parent_share_server['status'],
} }
error(_LE("Parent share server %(id)s has invalid status " error("Parent share server %(id)s has invalid status "
"'%(status)s'."), error_params) "'%(status)s'.", error_params)
raise exception.InvalidShareServer( raise exception.InvalidShareServer(
share_server_id=parent_share_server share_server_id=parent_share_server
) )
@ -478,7 +478,7 @@ class ShareManager(manager.SchedulerDependentManager):
) )
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
error(_LE("Cannot choose compatible share server: %s"), error("Cannot choose compatible share server: %s",
e) e)
if not compatible_share_server: if not compatible_share_server:
@ -526,10 +526,10 @@ class ShareManager(manager.SchedulerDependentManager):
# Create share server on backend with data from db. # Create share server on backend with data from db.
share_server = self._setup_server(context, share_server, share_server = self._setup_server(context, share_server,
metadata=metadata) metadata=metadata)
LOG.info(_LI("Share server created successfully.")) LOG.info("Share server created successfully.")
else: else:
LOG.info(_LI("Using preexisting share server: " LOG.info("Using preexisting share server: "
"'%(share_server_id)s'"), "'%(share_server_id)s'",
{'share_server_id': share_server['id']}) {'share_server_id': share_server['id']})
return share_server return share_server
@ -640,7 +640,7 @@ class ShareManager(manager.SchedulerDependentManager):
) )
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
error(_LE("Cannot choose compatible share-server: %s"), error("Cannot choose compatible share-server: %s",
e) e)
if not compatible_share_server: if not compatible_share_server:
@ -670,10 +670,10 @@ class ShareManager(manager.SchedulerDependentManager):
# Create share server on backend with data from db. # Create share server on backend with data from db.
compatible_share_server = self._setup_server( compatible_share_server = self._setup_server(
context, compatible_share_server) context, compatible_share_server)
LOG.info(_LI("Share server created successfully.")) LOG.info("Share server created successfully.")
else: else:
LOG.info(_LI("Used preexisting share server " LOG.info("Used preexisting share server "
"'%(share_server_id)s'"), "'%(share_server_id)s'",
{'share_server_id': compatible_share_server['id']}) {'share_server_id': compatible_share_server['id']})
return compatible_share_server, updated_share_group return compatible_share_server, updated_share_group
@ -963,8 +963,8 @@ class ShareManager(manager.SchedulerDependentManager):
(constants. (constants.
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)}) TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)})
LOG.info(_LI("Share Migration for share %s completed " LOG.info("Share Migration for share %s completed "
"first phase successfully."), "first phase successfully.",
share['id']) share['id'])
else: else:
share = self.db.share_get( share = self.db.share_get(
@ -972,8 +972,8 @@ class ShareManager(manager.SchedulerDependentManager):
if (share['task_state'] == if (share['task_state'] ==
constants.TASK_STATE_MIGRATION_CANCELLED): constants.TASK_STATE_MIGRATION_CANCELLED):
LOG.warning(_LW( LOG.warning(
"Share Migration for share %s was cancelled."), "Share Migration for share %s was cancelled.",
share['id']) share['id'])
except Exception: except Exception:
@ -1074,7 +1074,7 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception as e: except Exception as e:
if not isinstance(e, NotImplementedError): if not isinstance(e, NotImplementedError):
LOG.exception( LOG.exception(
_LE("The driver could not migrate the share %(shr)s"), ("The driver could not migrate the share %(shr)s"),
{'shr': share_id}) {'shr': share_id})
try: try:
@ -1265,7 +1265,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_snapshot_instance_delete(context, instance['id']) self.db.share_snapshot_instance_delete(context, instance['id'])
self.db.share_instance_delete(context, instance_id) self.db.share_instance_delete(context, instance_id)
LOG.info(_LI("Share instance %s: deleted successfully."), LOG.info("Share instance %s: deleted successfully.",
instance_id) instance_id)
self._check_delete_share_server(context, share_instance) self._check_delete_share_server(context, share_instance)
@ -1280,8 +1280,8 @@ class ShareManager(manager.SchedulerDependentManager):
share_ref = self.db.share_get(context, src_share_instance['share_id']) share_ref = self.db.share_get(context, src_share_instance['share_id'])
LOG.info(_LI("Received request to finish Share Migration for " LOG.info("Received request to finish Share Migration for "
"share %s."), share_ref['id']) "share %s.", share_ref['id'])
if share_ref['task_state'] == ( if share_ref['task_state'] == (
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):
@ -1341,8 +1341,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_update( self.db.share_update(
context, dest_share_instance['share_id'], model_update) context, dest_share_instance['share_id'], model_update)
LOG.info(_LI("Share Migration for share %s" LOG.info("Share Migration for share %s"
" completed successfully."), share_ref['id']) " completed successfully.", share_ref['id'])
def _get_extra_specs_from_share_type(self, context, share_type_id): def _get_extra_specs_from_share_type(self, context, share_type_id):
@ -1386,8 +1386,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_ref['id'], context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
LOG.info(_LI("Share Migration for share %s" LOG.info("Share Migration for share %s"
" was cancelled."), share_ref['id']) " was cancelled.", share_ref['id'])
return return
else: else:
raise exception.ShareMigrationFailed(reason=msg) raise exception.ShareMigrationFailed(reason=msg)
@ -1489,8 +1489,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_ref['id'], context, share_ref['id'],
{'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
LOG.info(_LI("Share Migration for share %s" LOG.info("Share Migration for share %s"
" was cancelled."), share_ref['id']) " was cancelled.", share_ref['id'])
@utils.require_driver_initialized @utils.require_driver_initialized
def migration_get_progress(self, context, src_instance_id, def migration_get_progress(self, context, src_instance_id,
@ -1586,8 +1586,8 @@ class ShareManager(manager.SchedulerDependentManager):
) )
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
error = _LE("Creation of share instance %s failed: " error = ("Creation of share instance %s failed: "
"failed to get share server.") "failed to get share server.")
LOG.error(error, share_instance_id) LOG.error(error, share_instance_id)
self.db.share_instance_update( self.db.share_instance_update(
context, share_instance_id, context, share_instance_id,
@ -1610,7 +1610,7 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("Share instance %s failed on creation."), LOG.error("Share instance %s failed on creation.",
share_instance_id) share_instance_id)
detail_data = getattr(e, 'detail_data', {}) detail_data = getattr(e, 'detail_data', {})
@ -1626,16 +1626,16 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_export_locations_update( self.db.share_export_locations_update(
context, share_instance['id'], export_locations) context, share_instance['id'], export_locations)
else: else:
LOG.warning(_LW('Share instance information in exception ' LOG.warning('Share instance information in exception '
'can not be written to db because it ' 'can not be written to db because it '
'contains %s and it is not a dictionary.'), 'contains %s and it is not a dictionary.',
detail_data) detail_data)
self.db.share_instance_update( self.db.share_instance_update(
context, share_instance_id, context, share_instance_id,
{'status': constants.STATUS_ERROR} {'status': constants.STATUS_ERROR}
) )
else: else:
LOG.info(_LI("Share instance %s created successfully."), LOG.info("Share instance %s created successfully.",
share_instance_id) share_instance_id)
share = self.db.share_get(context, share_instance['share_id']) share = self.db.share_get(context, share_instance['share_id'])
updates = { updates = {
@ -1742,8 +1742,8 @@ class ShareManager(manager.SchedulerDependentManager):
) )
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to get share server " LOG.error("Failed to get share server "
"for share replica creation.")) "for share replica creation.")
self.db.share_replica_update( self.db.share_replica_update(
context, share_replica['id'], context, share_replica['id'],
{'status': constants.STATUS_ERROR, {'status': constants.STATUS_ERROR,
@ -1785,7 +1785,7 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("Share replica %s failed on creation."), LOG.error("Share replica %s failed on creation.",
share_replica['id']) share_replica['id'])
self.db.share_replica_update( self.db.share_replica_update(
context, share_replica['id'], context, share_replica['id'],
@ -1800,8 +1800,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_replica['id'], context, share_replica['id'],
replica_ref.get('export_locations')) replica_ref.get('export_locations'))
else: else:
msg = _LW('Invalid export locations passed to the share ' msg = ('Invalid export locations passed to the share '
'manager.') 'manager.')
LOG.warning(msg) LOG.warning(msg)
if replica_ref.get('replica_state'): if replica_ref.get('replica_state'):
@ -1819,7 +1819,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_replica['id'], context, share_replica['id'],
constants.STATUS_ACTIVE) constants.STATUS_ACTIVE)
LOG.info(_LI("Share replica %s created successfully."), LOG.info("Share replica %s created successfully.",
share_replica['id']) share_replica['id'])
@add_hooks @add_hooks
@ -1899,7 +1899,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, replica_snapshot['id']) context, replica_snapshot['id'])
self.db.share_replica_delete(context, share_replica['id']) self.db.share_replica_delete(context, share_replica['id'])
LOG.info(_LI("Share replica %s deleted successfully."), LOG.info("Share replica %s deleted successfully.",
share_replica['id']) share_replica['id'])
@add_hooks @add_hooks
@ -1980,10 +1980,10 @@ class ShareManager(manager.SchedulerDependentManager):
for instance in active_replica_snapshot_instances: for instance in active_replica_snapshot_instances:
if instance['status'] in (constants.STATUS_CREATING, if instance['status'] in (constants.STATUS_CREATING,
constants.STATUS_DELETING): constants.STATUS_DELETING):
msg = _LI("The replica snapshot instance %(instance)s was " msg = ("The replica snapshot instance %(instance)s was "
"in %(state)s. Since it was not in %(available)s " "in %(state)s. Since it was not in %(available)s "
"state when the replica was promoted, it will be " "state when the replica was promoted, it will be "
"set to %(error)s.") "set to %(error)s.")
payload = { payload = {
'instance': instance['id'], 'instance': instance['id'],
'state': instance['status'], 'state': instance['status'],
@ -2048,8 +2048,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, share_replica['id'], context, share_replica['id'],
updated_replica.get('access_rules_status')) updated_replica.get('access_rules_status'))
LOG.info(_LI("Share replica %s: promoted to active state " LOG.info("Share replica %s: promoted to active state "
"successfully."), share_replica['id']) "successfully.", share_replica['id'])
@periodic_task.periodic_task(spacing=CONF.replica_state_update_interval) @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
@utils.require_driver_initialized @utils.require_driver_initialized
@ -2135,8 +2135,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, replica_list, share_replica, access_rules, context, replica_list, share_replica, access_rules,
available_share_snapshots, share_server=share_server) available_share_snapshots, share_server=share_server)
except Exception: except Exception:
msg = _LE("Driver error when updating replica " msg = ("Driver error when updating replica "
"state for replica %s.") "state for replica %s.")
LOG.exception(msg, share_replica['id']) LOG.exception(msg, share_replica['id'])
self.db.share_replica_update( self.db.share_replica_update(
context, share_replica['id'], context, share_replica['id'],
@ -2150,8 +2150,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_replica_update(context, share_replica['id'], self.db.share_replica_update(context, share_replica['id'],
{'replica_state': replica_state}) {'replica_state': replica_state})
elif replica_state: elif replica_state:
msg = (_LW("Replica %(id)s cannot be set to %(state)s " msg = (("Replica %(id)s cannot be set to %(state)s "
"through update call.") % "through update call.") %
{'id': share_replica['id'], 'state': replica_state}) {'id': share_replica['id'], 'state': replica_state})
LOG.warning(msg) LOG.warning(msg)
@ -2272,9 +2272,9 @@ class ShareManager(manager.SchedulerDependentManager):
if not snapshot_update.get('size'): if not snapshot_update.get('size'):
snapshot_update['size'] = snapshot_ref['share']['size'] snapshot_update['size'] = snapshot_ref['share']['size']
LOG.warning(_LW("Cannot get the size of the snapshot " LOG.warning("Cannot get the size of the snapshot "
"%(snapshot_id)s. Using the size of " "%(snapshot_id)s. Using the size of "
"the share instead."), "the share instead.",
{'snapshot_id': snapshot_id}) {'snapshot_id': snapshot_id})
self._update_quota_usages(context, project_id, { self._update_quota_usages(context, project_id, {
@ -2356,7 +2356,7 @@ class ShareManager(manager.SchedulerDependentManager):
except exception.InvalidShare as e: except exception.InvalidShare as e:
share_manage_set_error_status( share_manage_set_error_status(
_LE("Share can not be unmanaged: %s."), e) ("Share can not be unmanaged: %s."), e)
return return
try: try:
@ -2370,7 +2370,7 @@ class ShareManager(manager.SchedulerDependentManager):
# Quota reservation errors here are not fatal, because # Quota reservation errors here are not fatal, because
# unmanage is administrator API and he/she could update user # unmanage is administrator API and he/she could update user
# quota usages later if it's required. # quota usages later if it's required.
LOG.warning(_LW("Failed to update quota usages: %s."), e) LOG.warning("Failed to update quota usages: %s.", e)
if self.configuration.safe_get('unmanage_remove_access_rules'): if self.configuration.safe_get('unmanage_remove_access_rules'):
try: try:
@ -2382,11 +2382,11 @@ class ShareManager(manager.SchedulerDependentManager):
) )
except Exception as e: except Exception as e:
share_manage_set_error_status( share_manage_set_error_status(
_LE("Can not remove access rules of share: %s."), e) ("Can not remove access rules of share: %s."), e)
return return
self.db.share_instance_delete(context, share_instance['id']) self.db.share_instance_delete(context, share_instance['id'])
LOG.info(_LI("Share %s: unmanaged successfully."), share_id) LOG.info("Share %s: unmanaged successfully.", share_id)
@add_hooks @add_hooks
@utils.require_driver_initialized @utils.require_driver_initialized
@ -2396,7 +2396,7 @@ class ShareManager(manager.SchedulerDependentManager):
msg = _("Unmanage snapshot is not supported for " msg = _("Unmanage snapshot is not supported for "
"driver_handles_share_servers=True mode.") "driver_handles_share_servers=True mode.")
self.db.share_snapshot_update(context, snapshot_id, status) self.db.share_snapshot_update(context, snapshot_id, status)
LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), LOG.error("Share snapshot cannot be unmanaged: %s.",
msg) msg)
return return
@ -2415,7 +2415,7 @@ class ShareManager(manager.SchedulerDependentManager):
msg = _("Unmanage snapshot is not supported for " msg = _("Unmanage snapshot is not supported for "
"share snapshots with share servers.") "share snapshots with share servers.")
self.db.share_snapshot_update(context, snapshot_id, status) self.db.share_snapshot_update(context, snapshot_id, status)
LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), LOG.error("Share snapshot cannot be unmanaged: %s.",
msg) msg)
return return
@ -2428,7 +2428,7 @@ class ShareManager(manager.SchedulerDependentManager):
share_server=share_server) share_server=share_server)
except Exception: except Exception:
LOG.exception( LOG.exception(
_LE("Cannot remove access rules of snapshot %s."), ("Cannot remove access rules of snapshot %s."),
snapshot_id) snapshot_id)
self.db.share_snapshot_update(context, snapshot_id, status) self.db.share_snapshot_update(context, snapshot_id, status)
return return
@ -2437,7 +2437,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.unmanage_snapshot(snapshot_instance) self.driver.unmanage_snapshot(snapshot_instance)
except exception.UnmanageInvalidShareSnapshot as e: except exception.UnmanageInvalidShareSnapshot as e:
self.db.share_snapshot_update(context, snapshot_id, status) self.db.share_snapshot_update(context, snapshot_id, status)
LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), e) LOG.error("Share snapshot cannot be unmanaged: %s.", e)
return return
try: try:
@ -2452,7 +2452,7 @@ class ShareManager(manager.SchedulerDependentManager):
# Quota reservation errors here are not fatal, because # Quota reservation errors here are not fatal, because
# unmanage is administrator API and he/she could update user # unmanage is administrator API and he/she could update user
# quota usages later if it's required. # quota usages later if it's required.
LOG.warning(_LW("Failed to update quota usages: %s."), e) LOG.warning("Failed to update quota usages: %s.", e)
self.db.share_snapshot_instance_delete( self.db.share_snapshot_instance_delete(
context, snapshot_instance['id']) context, snapshot_instance['id'])
@ -2502,8 +2502,8 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
msg = _LE('Share %(share)s could not be reverted ' msg = ('Share %(share)s could not be reverted '
'to snapshot %(snap)s.') 'to snapshot %(snap)s.')
msg_args = {'share': share_id, 'snap': snapshot_id} msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.exception(msg, msg_args) LOG.exception(msg, msg_args)
@ -2529,8 +2529,8 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_snapshot_update( self.db.share_snapshot_update(
context, snapshot_id, {'status': constants.STATUS_AVAILABLE}) context, snapshot_id, {'status': constants.STATUS_AVAILABLE})
msg = _LI('Share %(share)s reverted to snapshot %(snap)s ' msg = ('Share %(share)s reverted to snapshot %(snap)s '
'successfully.') 'successfully.')
msg_args = {'share': share_id, 'snap': snapshot_id} msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.info(msg, msg_args) LOG.info(msg, msg_args)
@ -2550,14 +2550,14 @@ class ShareManager(manager.SchedulerDependentManager):
share_server=share_server share_server=share_server
) )
except exception.ShareResourceNotFound: except exception.ShareResourceNotFound:
LOG.warning(_LW("Share instance %s does not exist in the " LOG.warning("Share instance %s does not exist in the "
"backend."), share_instance_id) "backend.", share_instance_id)
except Exception: except Exception:
with excutils.save_and_reraise_exception() as exc_context: with excutils.save_and_reraise_exception() as exc_context:
if force: if force:
msg = _LE("The driver was unable to delete access rules " msg = ("The driver was unable to delete access rules "
"for the instance: %s. Will attempt to delete " "for the instance: %s. Will attempt to delete "
"the instance anyway.") "the instance anyway.")
LOG.error(msg, share_instance_id) LOG.error(msg, share_instance_id)
exc_context.reraise = False exc_context.reraise = False
else: else:
@ -2570,16 +2570,16 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.delete_share(context, share_instance, self.driver.delete_share(context, share_instance,
share_server=share_server) share_server=share_server)
except exception.ShareResourceNotFound: except exception.ShareResourceNotFound:
LOG.warning(_LW("Share instance %s does not exist in the " LOG.warning("Share instance %s does not exist in the "
"backend."), share_instance_id) "backend.", share_instance_id)
except Exception: except Exception:
with excutils.save_and_reraise_exception() as exc_context: with excutils.save_and_reraise_exception() as exc_context:
if force: if force:
msg = _LE("The driver was unable to delete the share " msg = ("The driver was unable to delete the share "
"instance: %s on the backend. Since this " "instance: %s on the backend. Since this "
"operation is forced, the instance will be " "operation is forced, the instance will be "
"deleted from Manila's database. A cleanup on " "deleted from Manila's database. A cleanup on "
"the backend may be necessary.") "the backend may be necessary.")
LOG.error(msg, share_instance_id) LOG.error(msg, share_instance_id)
exc_context.reraise = False exc_context.reraise = False
else: else:
@ -2589,7 +2589,7 @@ class ShareManager(manager.SchedulerDependentManager):
{'status': constants.STATUS_ERROR_DELETING}) {'status': constants.STATUS_ERROR_DELETING})
self.db.share_instance_delete(context, share_instance_id) self.db.share_instance_delete(context, share_instance_id)
LOG.info(_LI("Share instance %s: deleted successfully."), LOG.info("Share instance %s: deleted successfully.",
share_instance_id) share_instance_id)
self._check_delete_share_server(context, share_instance) self._check_delete_share_server(context, share_instance)
@ -2610,7 +2610,7 @@ class ShareManager(manager.SchedulerDependentManager):
if not (self.driver.driver_handles_share_servers and if not (self.driver.driver_handles_share_servers and
self.configuration.automatic_share_server_cleanup): self.configuration.automatic_share_server_cleanup):
return return
LOG.info(_LI("Check for unused share servers to delete.")) LOG.info("Check for unused share servers to delete.")
updated_before = timeutils.utcnow() - datetime.timedelta( updated_before = timeutils.utcnow() - datetime.timedelta(
minutes=self.configuration.unused_share_server_cleanup_interval) minutes=self.configuration.unused_share_server_cleanup_interval)
servers = self.db.share_server_get_all_unused_deletable(ctxt, servers = self.db.share_server_get_all_unused_deletable(ctxt,
@ -2697,10 +2697,10 @@ class ShareManager(manager.SchedulerDependentManager):
share_server=share_server) share_server=share_server)
except Exception: except Exception:
LOG.exception( LOG.exception(
_LE("Failed to remove access rules for snapshot %s."), ("Failed to remove access rules for snapshot %s."),
snapshot_instance['id']) snapshot_instance['id'])
LOG.warning(_LW("The driver was unable to remove access rules " LOG.warning("The driver was unable to remove access rules "
"for snapshot %s. Moving on."), "for snapshot %s. Moving on.",
snapshot_instance['snapshot_id']) snapshot_instance['snapshot_id'])
try: try:
@ -2731,8 +2731,8 @@ class ShareManager(manager.SchedulerDependentManager):
user_id=snapshot_ref['user_id']) user_id=snapshot_ref['user_id'])
except Exception: except Exception:
reservations = None reservations = None
LOG.exception(_LE("Failed to update quota usages while deleting " LOG.exception("Failed to update quota usages while deleting "
"snapshot %s."), snapshot_id) "snapshot %s.", snapshot_id)
if reservations: if reservations:
QUOTAS.commit(context, reservations, project_id=project_id, QUOTAS.commit(context, reservations, project_id=project_id,
@ -2833,8 +2833,8 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
msg = _LE('Share %(share)s could not be reverted ' msg = ('Share %(share)s could not be reverted '
'to snapshot %(snap)s.') 'to snapshot %(snap)s.')
msg_args = {'share': share_id, 'snap': snapshot_id} msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.exception(msg, msg_args) LOG.exception(msg, msg_args)
@ -2862,8 +2862,8 @@ class ShareManager(manager.SchedulerDependentManager):
context, active_replica_snapshot['id'], context, active_replica_snapshot['id'],
{'status': constants.STATUS_AVAILABLE}) {'status': constants.STATUS_AVAILABLE})
msg = _LI('Share %(share)s reverted to snapshot %(snap)s ' msg = ('Share %(share)s reverted to snapshot %(snap)s '
'successfully.') 'successfully.')
msg_args = {'share': share_id, 'snap': snapshot_id} msg_args = {'share': share_id, 'snap': snapshot_id}
LOG.info(msg, msg_args) LOG.info(msg, msg_args)
@ -3039,19 +3039,19 @@ class ShareManager(manager.SchedulerDependentManager):
except exception.SnapshotResourceNotFound: except exception.SnapshotResourceNotFound:
if replica_snapshot['status'] == constants.STATUS_DELETING: if replica_snapshot['status'] == constants.STATUS_DELETING:
LOG.info(_LI('Snapshot %(snapshot_instance)s on replica ' LOG.info('Snapshot %(snapshot_instance)s on replica '
'%(replica)s has been deleted.'), msg_payload) '%(replica)s has been deleted.', msg_payload)
self.db.share_snapshot_instance_delete( self.db.share_snapshot_instance_delete(
context, replica_snapshot['id']) context, replica_snapshot['id'])
else: else:
LOG.exception(_LE("Replica snapshot %s was not found on " LOG.exception("Replica snapshot %s was not found on "
"the backend."), replica_snapshot['id']) "the backend.", replica_snapshot['id'])
self.db.share_snapshot_instance_update( self.db.share_snapshot_instance_update(
context, replica_snapshot['id'], context, replica_snapshot['id'],
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
except Exception: except Exception:
LOG.exception(_LE("Driver error while updating replica snapshot: " LOG.exception("Driver error while updating replica snapshot: "
"%s"), replica_snapshot['id']) "%s", replica_snapshot['id'])
self.db.share_snapshot_instance_update( self.db.share_snapshot_instance_update(
context, replica_snapshot['id'], context, replica_snapshot['id'],
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
@ -3081,7 +3081,7 @@ class ShareManager(manager.SchedulerDependentManager):
@periodic_task.periodic_task(spacing=CONF.periodic_interval) @periodic_task.periodic_task(spacing=CONF.periodic_interval)
@utils.require_driver_initialized @utils.require_driver_initialized
def _report_driver_status(self, context): def _report_driver_status(self, context):
LOG.info(_LI('Updating share status')) LOG.info('Updating share status')
share_stats = self.driver.get_share_stats(refresh=True) share_stats = self.driver.get_share_stats(refresh=True)
if not share_stats: if not share_stats:
return return
@ -3309,7 +3309,7 @@ class ShareManager(manager.SchedulerDependentManager):
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error( LOG.error(
_LE("Share server '%s' failed on deletion."), "Share server '%s' failed on deletion.",
server_id) server_id)
self.db.share_server_update( self.db.share_server_update(
context, server_id, {'status': constants.STATUS_ERROR}) context, server_id, {'status': constants.STATUS_ERROR})
@ -3318,7 +3318,7 @@ class ShareManager(manager.SchedulerDependentManager):
_wrapped_delete_share_server() _wrapped_delete_share_server()
LOG.info( LOG.info(
_LI("Share server '%s' has been deleted successfully."), "Share server '%s' has been deleted successfully.",
share_server['id']) share_server['id'])
self.driver.deallocate_network(context, share_server['id']) self.driver.deallocate_network(context, share_server['id'])
@ -3336,7 +3336,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.driver.extend_share( self.driver.extend_share(
share_instance, new_size, share_server=share_server) share_instance, new_size, share_server=share_server)
except Exception as e: except Exception as e:
LOG.exception(_LE("Extend share failed."), resource=share) LOG.exception("Extend share failed.", resource=share)
try: try:
self.db.share_update( self.db.share_update(
@ -3363,7 +3363,7 @@ class ShareManager(manager.SchedulerDependentManager):
} }
share = self.db.share_update(context, share['id'], share_update) share = self.db.share_update(context, share['id'], share_update)
LOG.info(_LI("Extend share completed successfully."), resource=share) LOG.info("Extend share completed successfully.", resource=share)
@add_hooks @add_hooks
@utils.require_driver_initialized @utils.require_driver_initialized
@ -3396,7 +3396,7 @@ class ShareManager(manager.SchedulerDependentManager):
gigabytes=-size_decrease) gigabytes=-size_decrease)
except Exception as e: except Exception as e:
error_occurred( error_occurred(
e, _LE("Failed to update quota on share shrinking.")) e, ("Failed to update quota on share shrinking."))
try: try:
self.driver.shrink_share( self.driver.shrink_share(
@ -3406,11 +3406,11 @@ class ShareManager(manager.SchedulerDependentManager):
# shouldn't shrink share when this validation error occurs. # shouldn't shrink share when this validation error occurs.
except Exception as e: except Exception as e:
if isinstance(e, exception.ShareShrinkingPossibleDataLoss): if isinstance(e, exception.ShareShrinkingPossibleDataLoss):
msg = _LE("Shrink share failed due to possible data loss.") msg = ("Shrink share failed due to possible data loss.")
status = constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR status = constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR
error_params = {'msg': msg, 'status': status} error_params = {'msg': msg, 'status': status}
else: else:
error_params = {'msg': _LE("Shrink share failed.")} error_params = {'msg': ("Shrink share failed.")}
try: try:
error_occurred(e, **error_params) error_occurred(e, **error_params)
@ -3427,7 +3427,7 @@ class ShareManager(manager.SchedulerDependentManager):
} }
share = self.db.share_update(context, share['id'], share_update) share = self.db.share_update(context, share['id'], share_update)
LOG.info(_LI("Shrink share completed successfully."), resource=share) LOG.info("Shrink share completed successfully.", resource=share)
@utils.require_driver_initialized @utils.require_driver_initialized
def create_share_group(self, context, share_group_id): def create_share_group(self, context, share_group_id):
@ -3479,15 +3479,15 @@ class ShareManager(manager.SchedulerDependentManager):
) )
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to get share server" LOG.error("Failed to get share server"
" for share group creation.")) " for share group creation.")
self.db.share_group_update( self.db.share_group_update(
context, share_group_id, context, share_group_id,
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
try: try:
# TODO(ameade): Add notification for create.start # TODO(ameade): Add notification for create.start
LOG.info(_LI("Share group %s: creating"), share_group_id) LOG.info("Share group %s: creating", share_group_id)
model_update, share_update_list = None, None model_update, share_update_list = None, None
@ -3525,7 +3525,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_instance_update( self.db.share_instance_update(
context, share['id'], context, share['id'],
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
LOG.error(_LE("Share group %s: create failed"), share_group_id) LOG.error("Share group %s: create failed", share_group_id)
now = timeutils.utcnow() now = timeutils.utcnow()
for share in shares: for share in shares:
@ -3535,7 +3535,7 @@ class ShareManager(manager.SchedulerDependentManager):
share_group_ref['id'], share_group_ref['id'],
{'status': status, {'status': status,
'created_at': now}) 'created_at': now})
LOG.info(_LI("Share group %s: created successfully"), share_group_id) LOG.info("Share group %s: created successfully", share_group_id)
# TODO(ameade): Add notification for create.end # TODO(ameade): Add notification for create.end
@ -3553,7 +3553,7 @@ class ShareManager(manager.SchedulerDependentManager):
# TODO(ameade): Add notification for delete.start # TODO(ameade): Add notification for delete.start
try: try:
LOG.info(_LI("Share group %s: deleting"), share_group_id) LOG.info("Share group %s: deleting", share_group_id)
share_server = None share_server = None
if share_group_ref.get('share_server_id'): if share_group_ref.get('share_server_id'):
share_server = self.db.share_server_get( share_server = self.db.share_server_get(
@ -3571,11 +3571,11 @@ class ShareManager(manager.SchedulerDependentManager):
context, context,
share_group_ref['id'], share_group_ref['id'],
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
LOG.error(_LE("Share group %s: delete failed"), LOG.error("Share group %s: delete failed",
share_group_ref['id']) share_group_ref['id'])
self.db.share_group_destroy(context, share_group_id) self.db.share_group_destroy(context, share_group_id)
LOG.info(_LI("Share group %s: deleted successfully"), share_group_id) LOG.info("Share group %s: deleted successfully", share_group_id)
# TODO(ameade): Add notification for delete.end # TODO(ameade): Add notification for delete.end
@ -3594,7 +3594,7 @@ class ShareManager(manager.SchedulerDependentManager):
updated_members_ids = [] updated_members_ids = []
try: try:
LOG.info(_LI("Share group snapshot %s: creating"), LOG.info("Share group snapshot %s: creating",
share_group_snapshot_id) share_group_snapshot_id)
share_server = None share_server = None
if snap_ref['share_group'].get('share_server_id'): if snap_ref['share_group'].get('share_server_id'):
@ -3611,9 +3611,9 @@ class ShareManager(manager.SchedulerDependentManager):
# to have here also 'export_locations' when it is supported. # to have here also 'export_locations' when it is supported.
member_id = update.pop('id', None) member_id = update.pop('id', None)
if not member_id: if not member_id:
LOG.warning(_LW( LOG.warning(
"One of share group snapshot '%s' members does not " "One of share group snapshot '%s' members does not "
"have reference ID. Its update was skipped."), "have reference ID. Its update was skipped.",
share_group_snapshot_id) share_group_snapshot_id)
continue continue
# TODO(vponomaryov): remove following condition when # TODO(vponomaryov): remove following condition when
@ -3660,7 +3660,7 @@ class ShareManager(manager.SchedulerDependentManager):
context, context,
snap_ref['id'], snap_ref['id'],
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
LOG.error(_LE("Share group snapshot %s: create failed"), LOG.error("Share group snapshot %s: create failed",
share_group_snapshot_id) share_group_snapshot_id)
for member in (snap_ref.get('share_group_snapshot_members') or []): for member in (snap_ref.get('share_group_snapshot_members') or []):
@ -3673,7 +3673,7 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_group_snapshot_update( self.db.share_group_snapshot_update(
context, snap_ref['id'], context, snap_ref['id'],
{'status': status, 'updated_at': now}) {'status': status, 'updated_at': now})
LOG.info(_LI("Share group snapshot %s: created successfully"), LOG.info("Share group snapshot %s: created successfully",
share_group_snapshot_id) share_group_snapshot_id)
return snap_ref['id'] return snap_ref['id']
@ -3691,7 +3691,7 @@ class ShareManager(manager.SchedulerDependentManager):
snapshot_update = False snapshot_update = False
try: try:
LOG.info(_LI("Share group snapshot %s: deleting"), LOG.info("Share group snapshot %s: deleting",
share_group_snapshot_id) share_group_snapshot_id)
share_server = None share_server = None
@ -3719,12 +3719,12 @@ class ShareManager(manager.SchedulerDependentManager):
context, context,
snap_ref['id'], snap_ref['id'],
{'status': constants.STATUS_ERROR}) {'status': constants.STATUS_ERROR})
LOG.error(_LE("Share group snapshot %s: delete failed"), LOG.error("Share group snapshot %s: delete failed",
snap_ref['name']) snap_ref['name'])
self.db.share_group_snapshot_destroy(context, share_group_snapshot_id) self.db.share_group_snapshot_destroy(context, share_group_snapshot_id)
LOG.info(_LI("Share group snapshot %s: deleted successfully"), LOG.info("Share group snapshot %s: deleted successfully",
share_group_snapshot_id) share_group_snapshot_id)
def _get_share_replica_dict(self, context, share_replica): def _get_share_replica_dict(self, context, share_replica):

View File

@ -21,7 +21,7 @@ from oslo_log import log
from manila.common import constants from manila.common import constants
from manila import exception from manila import exception
from manila.i18n import _, _LW from manila.i18n import _
from manila.share import api as share_api from manila.share import api as share_api
import manila.utils as utils import manila.utils as utils
@ -130,16 +130,16 @@ class ShareMigrationHelper(object):
try: try:
self.delete_instance_and_wait(new_instance) self.delete_instance_and_wait(new_instance)
except Exception: except Exception:
LOG.warning(_LW("Failed to cleanup new instance during generic" LOG.warning("Failed to cleanup new instance during generic"
" migration for share %s."), self.share['id']) " migration for share %s.", self.share['id'])
def cleanup_access_rules(self, share_instance, share_server): def cleanup_access_rules(self, share_instance, share_server):
try: try:
self.revert_access_rules(share_instance, share_server) self.revert_access_rules(share_instance, share_server)
except Exception: except Exception:
LOG.warning(_LW("Failed to cleanup access rules during generic" LOG.warning("Failed to cleanup access rules during generic"
" migration for share %s."), self.share['id']) " migration for share %s.", self.share['id'])
def revert_access_rules(self, share_instance, share_server): def revert_access_rules(self, share_instance, share_server):

View File

@ -29,7 +29,6 @@ from manila import context
from manila import db from manila import db
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
CONF = cfg.CONF CONF = cfg.CONF
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -53,7 +52,7 @@ def create(context, name, extra_specs=None, is_public=True, projects=None):
is_public=is_public), is_public=is_public),
projects=projects) projects=projects)
except db_exception.DBError: except db_exception.DBError:
LOG.exception(_LE('DB error.')) LOG.exception('DB error.')
raise exception.ShareTypeCreateFailed(name=name, raise exception.ShareTypeCreateFailed(name=name,
extra_specs=extra_specs) extra_specs=extra_specs)
return type_ref return type_ref
@ -86,8 +85,8 @@ def get_all_types(context, inactive=0, search_opts=None):
required_extra_specs = get_valid_required_extra_specs( required_extra_specs = get_valid_required_extra_specs(
type_args['extra_specs']) type_args['extra_specs'])
except exception.InvalidExtraSpec: except exception.InvalidExtraSpec:
LOG.exception(_LE('Share type %(share_type)s has invalid required' LOG.exception('Share type %(share_type)s has invalid required'
' extra specs.'), {'share_type': type_name}) ' extra specs.', {'share_type': type_name})
type_args['required_extra_specs'] = required_extra_specs type_args['required_extra_specs'] = required_extra_specs
@ -172,8 +171,8 @@ def get_default_share_type(ctxt=None):
# Couldn't find share type with the name in default_share_type # Couldn't find share type with the name in default_share_type
# flag, record this issue and move on # flag, record this issue and move on
# TODO(zhiteng) consider add notification to warn admin # TODO(zhiteng) consider add notification to warn admin
LOG.exception(_LE('Default share type is not found, ' LOG.exception('Default share type is not found, '
'please check default_share_type config: %s'), 'please check default_share_type config: %s',
e) e)

View File

@ -16,7 +16,6 @@
from oslo_log import log from oslo_log import log
from manila.common import constants from manila.common import constants
from manila.i18n import _LI
from manila import utils from manila import utils
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -147,8 +146,8 @@ class ShareSnapshotInstanceAccess(object):
self._update_access_rules(context, snapshot_instance, self._update_access_rules(context, snapshot_instance,
share_server=share_server) share_server=share_server)
else: else:
LOG.info(_LI("Access rules were successfully applied for " LOG.info("Access rules were successfully applied for "
"snapshot instance: %s"), snapshot_instance['id']) "snapshot instance: %s", snapshot_instance['id'])
def _check_needs_refresh(self, context, snapshot_instance_id): def _check_needs_refresh(self, context, snapshot_instance_id):

View File

@ -20,7 +20,6 @@ from manila import context
from manila import db from manila import db
from manila import exception from manila import exception
from manila.i18n import _ from manila.i18n import _
from manila.i18n import _LE
CONF = cfg.CONF CONF = cfg.CONF
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -38,7 +37,7 @@ def create(context, name, share_types, group_specs=None, is_public=True,
"share_types": share_types}, "share_types": share_types},
projects=projects) projects=projects)
except db_exception.DBError: except db_exception.DBError:
LOG.exception(_LE('DB error')) LOG.exception('DB error')
raise exception.ShareGroupTypeCreateFailed( raise exception.ShareGroupTypeCreateFailed(
name=name, group_specs=group_specs) name=name, group_specs=group_specs)
return type_ref return type_ref
@ -142,8 +141,8 @@ def get_default(ctxt=None):
return get_by_name(ctxt, name) return get_by_name(ctxt, name)
except exception.ShareGroupTypeNotFoundByName: except exception.ShareGroupTypeNotFoundByName:
LOG.exception( LOG.exception(
_LE("Default share group type '%s' is not found, " "Default share group type '%s' is not found, "
"please check 'default_share_group_type' config."), "please check 'default_share_group_type' config.",
name, name,
) )