debug level logs should not be translated

According to the OpenStack translation policy, available at
https://wiki.openstack.org/wiki/LoggingStandards, debug messages
should not be translated. As mentioned in several changes to
Nova by garyk, this is meant to help prioritize log translation.

This patch adds a new hacking check, N319, which ensures that
debug log messages are not marked for translation.
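
For illustration, the first call below is the pattern N319 now
flags, while the second passes; translated non-debug logs such
as LOG.audit are untouched (all three lines appear verbatim in
the hunks below):

    LOG.debug(_('Creating new backup %s'), body)   # flagged: translated debug log
    LOG.debug('Creating new backup %s', body)      # passes: plain debug message
    LOG.audit(_('Delete backup with id: %s'), id)  # untouched: not a debug log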

Change-Id: Id9c2715f25c8f2ea52235aba4bd1583655391584
Implements: blueprint debug-translation-removal
Closes-Bug: #1318713
Authored by Christian Berendt on 2014-05-12 18:28:58 +02:00; committed by John Griffith
parent a398e1b5be
commit 5061ab9586
97 changed files with 1065 additions and 1011 deletions

View File

@ -8,6 +8,8 @@ Cinder Style Commandments
Cinder Specific Commandments
----------------------------
- [N319] Validate that debug level logs are not translated
General
-------
- Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised::
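
For context, the pattern that guideline describes looks like the
following sketch (illustrative only; the actual example block in
HACKING.rst sits outside this hunk):

    try:
        do_something()
    except Exception as e:
        cleanup()
        raise  # bare 'raise' keeps the original traceback; 'raise e' would not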

View File

@ -115,9 +115,9 @@ if __name__ == '__main__':
print(_("Found %d volumes") % len(volumes))
for volume_ref in volumes:
try:
LOG.debug(_("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>") %
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>" %
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
@ -138,9 +138,9 @@ if __name__ == '__main__':
'audit_period_beginning': str(volume_ref.created_at),
'audit_period_ending': str(volume_ref.created_at),
}
LOG.debug(_("Send create notification for "
LOG.debug("Send create notification for "
"<volume_id: %(volume_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>") %
"<project_id %(project_id)s> <%(extra_info)s>" %
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': local_extra_info})
@ -165,9 +165,9 @@ if __name__ == '__main__':
'audit_period_beginning': str(volume_ref.deleted_at),
'audit_period_ending': str(volume_ref.deleted_at),
}
LOG.debug(_("Send delete notification for "
LOG.debug("Send delete notification for "
"<volume_id: %(volume_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>") %
"<project_id %(project_id)s> <%(extra_info)s>" %
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': local_extra_info})
@ -190,8 +190,8 @@ if __name__ == '__main__':
print(_("Found %d snapshots") % len(snapshots))
for snapshot_ref in snapshots:
try:
LOG.debug(_("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>") %
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>" %
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
@ -212,9 +212,9 @@ if __name__ == '__main__':
'audit_period_beginning': str(snapshot_ref.created_at),
'audit_period_ending': str(snapshot_ref.created_at),
}
LOG.debug(_("Send create notification for "
LOG.debug("Send create notification for "
"<snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>") %
"<project_id %(project_id)s> <%(extra_info)s>" %
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': local_extra_info})
@ -239,9 +239,9 @@ if __name__ == '__main__':
'audit_period_beginning': str(snapshot_ref.deleted_at),
'audit_period_ending': str(snapshot_ref.deleted_at),
}
LOG.debug(_("Send delete notification for "
LOG.debug("Send delete notification for "
"<snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>") %
"<project_id %(project_id)s> <%(extra_info)s>" %
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': local_extra_info})

View File

@ -186,7 +186,7 @@ def remove_version_from_href(href):
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = _('href %s does not contain version') % href
msg = 'href %s does not contain version' % href
LOG.debug(msg)
raise ValueError(msg)

View File

@ -158,7 +158,7 @@ class BackupsController(wsgi.Controller):
@wsgi.serializers(xml=BackupTemplate)
def show(self, req, id):
"""Return data about the given backup."""
LOG.debug(_('show called for member %s'), id)
LOG.debug('show called for member %s', id)
context = req.environ['cinder.context']
try:
@ -170,7 +170,7 @@ class BackupsController(wsgi.Controller):
def delete(self, req, id):
"""Delete a backup."""
LOG.debug(_('delete called for member %s'), id)
LOG.debug('delete called for member %s', id)
context = req.environ['cinder.context']
LOG.audit(_('Delete backup with id: %s'), id, context=context)
@ -215,7 +215,7 @@ class BackupsController(wsgi.Controller):
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Create a new backup."""
LOG.debug(_('Creating new backup %s'), body)
LOG.debug('Creating new backup %s', body)
if not self.is_valid_body(body, 'backup'):
raise exc.HTTPBadRequest()
@ -254,7 +254,7 @@ class BackupsController(wsgi.Controller):
@wsgi.deserializers(xml=RestoreDeserializer)
def restore(self, req, id, body):
"""Restore an existing backup to a volume."""
LOG.debug(_('Restoring backup %(backup_id)s (%(body)s)'),
LOG.debug('Restoring backup %(backup_id)s (%(body)s)',
{'backup_id': id, 'body': body})
if not self.is_valid_body(body, 'restore'):
msg = _("Incorrect request body format")
@ -297,7 +297,7 @@ class BackupsController(wsgi.Controller):
@wsgi.serializers(xml=BackupExportImportTemplate)
def export_record(self, req, id):
"""Export a backup."""
LOG.debug(_('export record called for member %s.'), id)
LOG.debug('export record called for member %s.', id)
context = req.environ['cinder.context']
try:
@ -309,7 +309,7 @@ class BackupsController(wsgi.Controller):
retval = self._view_builder.export_summary(
req, dict(backup_info.iteritems()))
LOG.debug(_('export record output: %s.'), retval)
LOG.debug('export record output: %s.', retval)
return retval
@wsgi.response(201)
@ -317,7 +317,7 @@ class BackupsController(wsgi.Controller):
@wsgi.deserializers(xml=BackupImportDeserializer)
def import_record(self, req, body):
"""Import a backup."""
LOG.debug(_('Importing record from %s.'), body)
LOG.debug('Importing record from %s.', body)
if not self.is_valid_body(body, 'backup-record'):
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
@ -330,7 +330,7 @@ class BackupsController(wsgi.Controller):
except KeyError:
msg = _("Incorrect request body format.")
raise exc.HTTPBadRequest(explanation=msg)
LOG.debug(_('Importing backup using %(service)s and url %(url)s.'),
LOG.debug('Importing backup using %(service)s and url %(url)s.',
{'service': backup_service, 'url': backup_url})
try:
@ -345,7 +345,7 @@ class BackupsController(wsgi.Controller):
raise exc.HTTPInternalServerError(explanation=error.msg)
retval = self._view_builder.summary(req, dict(new_backup.iteritems()))
LOG.debug(_('import record output: %s.'), retval)
LOG.debug('import record output: %s.', retval)
return retval

View File

@ -128,7 +128,7 @@ class VolumeTransferController(wsgi.Controller):
def _get_transfers(self, req, is_detail):
"""Returns a list of transfers, transformed through view builder."""
context = req.environ['cinder.context']
LOG.debug(_('Listing volume transfers'))
LOG.debug('Listing volume transfers')
transfers = self.transfer_api.get_all(context)
limited_list = common.limited(transfers, req)
@ -144,7 +144,7 @@ class VolumeTransferController(wsgi.Controller):
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Create a new volume transfer."""
LOG.debug(_('Creating new volume transfer %s'), body)
LOG.debug('Creating new volume transfer %s', body)
if not self.is_valid_body(body, 'transfer'):
raise exc.HTTPBadRequest()
@ -180,7 +180,7 @@ class VolumeTransferController(wsgi.Controller):
def accept(self, req, id, body):
"""Accept a new volume transfer."""
transfer_id = id
LOG.debug(_('Accepting volume transfer %s'), transfer_id)
LOG.debug('Accepting volume transfer %s', transfer_id)
if not self.is_valid_body(body, 'accept'):
raise exc.HTTPBadRequest()

View File

@ -233,12 +233,12 @@ class ExtensionManager(object):
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug(_('Ext name: %s'), extension.name)
LOG.debug(_('Ext alias: %s'), extension.alias)
LOG.debug(_('Ext description: %s'),
LOG.debug('Ext name: %s', extension.name)
LOG.debug('Ext alias: %s', extension.alias)
LOG.debug('Ext description: %s',
' '.join(extension.__doc__.strip().split()))
LOG.debug(_('Ext namespace: %s'), extension.namespace)
LOG.debug(_('Ext updated: %s'), extension.updated)
LOG.debug('Ext namespace: %s', extension.namespace)
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
@ -254,13 +254,13 @@ class ExtensionManager(object):
expected to call the register() method at least once.
"""
LOG.debug(_("Loading extension %s"), ext_factory)
LOG.debug("Loading extension %s", ext_factory)
# Load the factory
factory = importutils.import_class(ext_factory)
# Call it
LOG.debug(_("Calling extension factory %s"), ext_factory)
LOG.debug("Calling extension factory %s", ext_factory)
factory(self)
def _load_extensions(self):

View File

@ -86,7 +86,7 @@ class APIRouter(base_wsgi.Router):
def _setup_ext_routes(self, mapper, ext_mgr):
for resource in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'),
LOG.debug('Extended resource: %s',
resource.collection)
wsgi_resource = wsgi.Resource(resource.controller)
@ -116,8 +116,8 @@ class APIRouter(base_wsgi.Router):
'collection': collection})
continue
LOG.debug(_('Extension %(ext_name)s extending resource: '
'%(collection)s'),
LOG.debug('Extension %(ext_name)s extending resource: '
'%(collection)s',
{'ext_name': extension.extension.name,
'collection': collection})

View File

@ -784,17 +784,17 @@ class Resource(wsgi.Application):
def get_body(self, request):
if len(request.body) == 0:
LOG.debug(_("Empty body provided in request"))
LOG.debug("Empty body provided in request")
return None, ''
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
LOG.debug("Unrecognized Content-Type provided in request")
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
LOG.debug("No Content-Type provided in request")
return None, ''
return content_type, request.body

View File

@ -63,7 +63,7 @@ class BackupMetadataAPI(base.Base):
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug(_("Getting metadata type '%s'") % type_tag)
LOG.debug("Getting metadata type '%s'" % type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
@ -75,9 +75,9 @@ class BackupMetadataAPI(base.Base):
continue
container[type_tag][key] = value
LOG.debug(_("Completed fetching metadata type '%s'") % type_tag)
LOG.debug("Completed fetching metadata type '%s'" % type_tag)
else:
LOG.debug(_("No metadata type '%s' available") % type_tag)
LOG.debug("No metadata type '%s' available" % type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
@ -86,7 +86,7 @@ class BackupMetadataAPI(base.Base):
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug(_("Getting metadata type '%s'") % type_tag)
LOG.debug("Getting metadata type '%s'" % type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
@ -98,9 +98,9 @@ class BackupMetadataAPI(base.Base):
continue
container[type_tag][entry] = meta[entry]
LOG.debug(_("Completed fetching metadata type '%s'") % type_tag)
LOG.debug("Completed fetching metadata type '%s'" % type_tag)
else:
LOG.debug(_("No metadata type '%s' available") % type_tag)
LOG.debug("No metadata type '%s' available" % type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
@ -109,7 +109,7 @@ class BackupMetadataAPI(base.Base):
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug(_("Getting metadata type '%s'") % type_tag)
LOG.debug("Getting metadata type '%s'" % type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
@ -122,9 +122,9 @@ class BackupMetadataAPI(base.Base):
continue
container[type_tag][entry.key] = entry.value
LOG.debug(_("Completed fetching metadata type '%s'") % type_tag)
LOG.debug("Completed fetching metadata type '%s'" % type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug(_("No metadata type '%s' available") % type_tag)
LOG.debug("No metadata type '%s' available" % type_tag)
@staticmethod
def _filter(metadata, fields):
@ -140,13 +140,13 @@ class BackupMetadataAPI(base.Base):
if field in metadata:
subset[field] = metadata[field]
else:
LOG.debug(_("Excluding field '%s'") % (field))
LOG.debug("Excluding field '%s'" % (field))
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug(_("Restoring volume base metadata"))
LOG.debug("Restoring volume base metadata")
# Only set the display_name if it was not None since the
# restore action will have set a name which is more useful than
# None.
@ -159,7 +159,7 @@ class BackupMetadataAPI(base.Base):
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug(_("Restoring volume metadata"))
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
@ -168,7 +168,7 @@ class BackupMetadataAPI(base.Base):
First delete any existing metadata then save new values.
"""
LOG.debug(_("Restoring volume glance metadata"))
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
@ -235,7 +235,7 @@ class BackupMetadataAPI(base.Base):
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
msg = _("No metadata of type '%s' to restore") % (type)
msg = "No metadata of type '%s' to restore" % (type)
LOG.debug(msg)

View File

@ -136,7 +136,7 @@ class VolumeMetadataBackup(object):
"""
meta_obj = rados.Object(self._client.ioctx, self.name)
if not self._exists(meta_obj):
msg = _("Metadata backup object %s does not exist") % self.name
msg = "Metadata backup object %s does not exist" % self.name
LOG.debug(msg)
return None
@ -147,7 +147,7 @@ class VolumeMetadataBackup(object):
try:
meta_obj.remove()
except rados.ObjectNotFound:
msg = (_("Metadata backup object '%s' not found - ignoring") %
msg = ("Metadata backup object '%s' not found - ignoring" %
self.name)
LOG.debug(msg)
@ -276,7 +276,7 @@ class CephBackupDriver(BackupDriver):
and pad with zeroes.
"""
if length:
LOG.debug(_("Discarding %(length)s bytes from offset %(offset)s") %
LOG.debug("Discarding %(length)s bytes from offset %(offset)s" %
{'length': length, 'offset': offset})
if self._file_is_rbd(volume):
volume.rbd_image.discard(offset, length)
@ -284,7 +284,7 @@ class CephBackupDriver(BackupDriver):
zeroes = '\0' * length
chunks = int(length / self.chunk_size)
for chunk in xrange(0, chunks):
LOG.debug(_("Writing zeroes chunk %d") % chunk)
LOG.debug("Writing zeroes chunk %d" % chunk)
volume.write(zeroes)
volume.flush()
# yield to any other pending backups
@ -298,11 +298,11 @@ class CephBackupDriver(BackupDriver):
def _transfer_data(self, src, src_name, dest, dest_name, length):
"""Transfer data between files (Python IO objects)."""
LOG.debug(_("Transferring data between '%(src)s' and '%(dest)s'") %
LOG.debug("Transferring data between '%(src)s' and '%(dest)s'" %
{'src': src_name, 'dest': dest_name})
chunks = int(length / self.chunk_size)
LOG.debug(_("%(chunks)s chunks of %(bytes)s bytes to be transferred") %
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred" %
{'chunks': chunks, 'bytes': self.chunk_size})
for chunk in xrange(0, chunks):
@ -331,7 +331,7 @@ class CephBackupDriver(BackupDriver):
rem = int(length % self.chunk_size)
if rem:
LOG.debug(_("Transferring remaining %s bytes") % rem)
LOG.debug("Transferring remaining %s bytes" % rem)
data = src.read(rem)
if data == '':
if CONF.restore_discard_excess_bytes:
@ -347,7 +347,7 @@ class CephBackupDriver(BackupDriver):
This will be the base image used for storing differential exports.
"""
LOG.debug(_("Creating base image '%s'") % name)
LOG.debug("Creating base image '%s'" % name)
old_format, features = self._get_rbd_support()
self.rbd.RBD().create(ioctx=rados_client.ioctx,
name=name,
@ -374,10 +374,10 @@ class CephBackupDriver(BackupDriver):
snap_name = self._get_backup_snap_name(base_rbd, base_name,
backup_id)
if snap_name:
LOG.debug(_("Deleting backup snapshot='%s'") % snap_name)
LOG.debug("Deleting backup snapshot='%s'" % snap_name)
base_rbd.remove_snap(snap_name)
else:
LOG.debug(_("No backup snapshot to delete"))
LOG.debug("No backup snapshot to delete")
# Now check whether any snapshots remain on the base image
backup_snaps = self.get_backup_snaps(base_rbd)
@ -413,8 +413,8 @@ class CephBackupDriver(BackupDriver):
try_diff_format = True
base_name = self._get_backup_base_name(volume_id, backup_id)
LOG.debug(_("Trying diff format basename='%(basename)s' for "
"backup base image of volume %(volume)s.") %
LOG.debug("Trying diff format basename='%(basename)s' for "
"backup base image of volume %(volume)s." %
{'basename': base_name, 'volume': volume_id})
with rbd_driver.RADOSClient(self) as client:
@ -460,8 +460,8 @@ class CephBackupDriver(BackupDriver):
'basename': base_name})
raise exc
else:
LOG.debug(_("Base backup image='%(basename)s' of volume "
"%(volume)s deleted.") %
LOG.debug("Base backup image='%(basename)s' of volume "
"%(volume)s deleted." %
{'basename': base_name, 'volume': volume_id})
retries = 0
finally:
@ -471,8 +471,8 @@ class CephBackupDriver(BackupDriver):
# volume backup snapshot.
src_name = strutils.safe_encode(volume_id)
if src_name in self.rbd.RBD().list(client.ioctx):
LOG.debug(_("Deleting source volume snapshot '%(snapshot)s' "
"for backup %(basename)s.") %
LOG.debug("Deleting source volume snapshot '%(snapshot)s' "
"for backup %(basename)s." %
{'snapshot': snap, 'basename': base_name})
src_rbd = self.rbd.Image(client.ioctx, src_name)
try:
@ -482,8 +482,8 @@ class CephBackupDriver(BackupDriver):
def _piped_execute(self, cmd1, cmd2):
"""Pipe output of cmd1 into cmd2."""
LOG.debug(_("Piping cmd1='%s' into...") % ' '.join(cmd1))
LOG.debug(_("cmd2='%s'") % ' '.join(cmd2))
LOG.debug("Piping cmd1='%s' into..." % ' '.join(cmd1))
LOG.debug("cmd2='%s'" % ' '.join(cmd2))
try:
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
@ -519,8 +519,8 @@ class CephBackupDriver(BackupDriver):
since the rbd volume/base was created, otherwise it will be those
changed since the snapshot was created.
"""
LOG.debug(_("Performing differential transfer from '%(src)s' to "
"'%(dest)s'") %
LOG.debug("Performing differential transfer from '%(src)s' to "
"'%(dest)s'" %
{'src': src_name, 'dest': dest_name})
# NOTE(dosaboy): Need to be tolerant of clusters/clients that do
@ -557,12 +557,12 @@ class CephBackupDriver(BackupDriver):
"""Return tuple (exists, name)."""
rbds = self.rbd.RBD().list(client.ioctx)
if name not in rbds:
msg = _("Image '%s' not found - trying diff format name") % name
msg = "Image '%s' not found - trying diff format name" % name
LOG.debug(msg)
if try_diff_format:
name = self._get_backup_base_name(volume_id, diff_format=True)
if name not in rbds:
msg = _("Diff format image '%s' not found") % name
msg = "Diff format image '%s' not found" % name
LOG.debug(msg)
return False, name
else:
@ -597,8 +597,8 @@ class CephBackupDriver(BackupDriver):
# Identify our --from-snap point (if one exists)
from_snap = self._get_most_recent_snap(source_rbd_image)
LOG.debug(_("Using --from-snap '%(snap)s' for incremental backup of "
"volume %(volume)s.") %
LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
"volume %(volume)s." %
{'snap': from_snap, 'volume': volume_id})
base_name = self._get_backup_base_name(volume_id, diff_format=True)
@ -614,8 +614,8 @@ class CephBackupDriver(BackupDriver):
# If a from_snap is defined but the base does not exist, we
# ignore it since it is stale and waiting to be cleaned up.
if from_snap:
LOG.debug(_("Source snapshot '%(snapshot)s' of volume "
"%(volume)s is stale so deleting.") %
LOG.debug("Source snapshot '%(snapshot)s' of volume "
"%(volume)s is stale so deleting." %
{'snapshot': from_snap, 'volume': volume_id})
source_rbd_image.remove_snap(from_snap)
from_snap = None
@ -638,7 +638,7 @@ class CephBackupDriver(BackupDriver):
# Snapshot source volume so that we have a new point-in-time
new_snap = self._get_new_snap_name(backup_id)
LOG.debug(_("Creating backup snapshot='%s'") % new_snap)
LOG.debug("Creating backup snapshot='%s'" % new_snap)
source_rbd_image.create_snap(new_snap)
# Attempt differential backup. If this fails, perhaps because librbd
@ -658,7 +658,7 @@ class CephBackupDriver(BackupDriver):
src_snap=new_snap,
from_snap=from_snap)
LOG.debug(_("Differential backup transfer completed in %.4fs") %
LOG.debug("Differential backup transfer completed in %.4fs" %
(time.time() - before))
# We don't need the previous snapshot (if there was one) anymore so
@ -667,7 +667,7 @@ class CephBackupDriver(BackupDriver):
source_rbd_image.remove_snap(from_snap)
except exception.BackupRBDOperationFailed:
LOG.debug(_("Differential backup transfer failed"))
LOG.debug("Differential backup transfer failed")
# Clean up if image was created as part of this operation
if image_created:
@ -675,8 +675,8 @@ class CephBackupDriver(BackupDriver):
base_name=base_name)
# Delete snapshot
LOG.debug(_("Deleting diff backup snapshot='%(snapshot)s' of "
"source volume='%(volume)s'.") %
LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
"source volume='%(volume)s'." %
{'snapshot': new_snap, 'volume': volume_id})
source_rbd_image.remove_snap(new_snap)
@ -699,8 +699,8 @@ class CephBackupDriver(BackupDriver):
with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
# First create base backup image
old_format, features = self._get_rbd_support()
LOG.debug(_("Creating backup base image='%(name)s' for volume "
"%(volume)s.")
LOG.debug("Creating backup base image='%(name)s' for volume "
"%(volume)s."
% {'name': backup_name, 'volume': volume_id})
self.rbd.RBD().create(ioctx=client.ioctx,
name=backup_name,
@ -710,7 +710,7 @@ class CephBackupDriver(BackupDriver):
stripe_unit=self.rbd_stripe_unit,
stripe_count=self.rbd_stripe_count)
LOG.debug(_("Copying data from volume %s.") % volume_id)
LOG.debug("Copying data from volume %s." % volume_id)
dest_rbd = self.rbd.Image(client.ioctx, backup_name)
try:
rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
@ -771,17 +771,17 @@ class CephBackupDriver(BackupDriver):
"""
snaps = self.get_backup_snaps(rbd_image)
LOG.debug(_("Looking for snapshot of backup base '%s'") % name)
LOG.debug("Looking for snapshot of backup base '%s'" % name)
if not snaps:
LOG.debug(_("Backup base '%s' has no snapshots") % name)
LOG.debug("Backup base '%s' has no snapshots" % name)
return None
snaps = [snap['name'] for snap in snaps
if snap['backup_id'] == backup_id]
if not snaps:
LOG.debug(_("Backup '%s' has no snapshot") % backup_id)
LOG.debug("Backup '%s' has no snapshot" % backup_id)
return None
if len(snaps) > 1:
@ -790,7 +790,7 @@ class CephBackupDriver(BackupDriver):
LOG.error(msg)
raise exception.BackupOperationError(msg)
LOG.debug(_("Found snapshot '%s'") % (snaps[0]))
LOG.debug("Found snapshot '%s'" % (snaps[0]))
return snaps[0]
def _get_most_recent_snap(self, rbd_image):
@ -826,11 +826,11 @@ class CephBackupDriver(BackupDriver):
"""
json_meta = self.get_metadata(backup['volume_id'])
if not json_meta:
LOG.debug(_("No metadata to backup for volume %s.") %
LOG.debug("No metadata to backup for volume %s." %
backup['volume_id'])
return
LOG.debug(_("Backing up metadata for volume %s.") %
LOG.debug("Backing up metadata for volume %s." %
backup['volume_id'])
try:
with rbd_driver.RADOSClient(self) as client:
@ -852,7 +852,7 @@ class CephBackupDriver(BackupDriver):
volume_id = volume['id']
volume_name = volume['name']
LOG.debug(_("Starting backup of volume='%s'.") % volume_id)
LOG.debug("Starting backup of volume='%s'." % volume_id)
# Ensure we are at the beginning of the volume
volume_file.seek(0)
@ -865,7 +865,7 @@ class CephBackupDriver(BackupDriver):
self._backup_rbd(backup_id, volume_id, volume_file,
volume_name, length)
except exception.BackupRBDOperationFailed:
LOG.debug(_("Forcing full backup of volume %s.") % volume_id)
LOG.debug("Forcing full backup of volume %s." % volume_id)
do_full_backup = True
else:
do_full_backup = True
@ -885,7 +885,7 @@ class CephBackupDriver(BackupDriver):
self.delete(backup)
raise
LOG.debug(_("Backup '%(backup_id)s' of volume %(volume_id)s finished.")
LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished."
% {'backup_id': backup_id, 'volume_id': volume_id})
def _full_restore(self, backup_id, volume_id, dest_file, dest_name,
@ -945,7 +945,7 @@ class CephBackupDriver(BackupDriver):
dest_image = self.rbd.Image(client.ioctx,
strutils.safe_encode(restore_vol))
try:
LOG.debug(_("Adjusting restore vol size"))
LOG.debug("Adjusting restore vol size")
dest_image.resize(adjust_size)
finally:
dest_image.close()
@ -957,8 +957,8 @@ class CephBackupDriver(BackupDriver):
rbd_pool = restore_file.rbd_pool
rbd_conf = restore_file.rbd_conf
LOG.debug(_("Attempting incremental restore from base='%(base)s' "
"snap='%(snap)s'") %
LOG.debug("Attempting incremental restore from base='%(base)s' "
"snap='%(snap)s'" %
{'base': base_name, 'snap': restore_point})
before = time.time()
try:
@ -980,7 +980,7 @@ class CephBackupDriver(BackupDriver):
self._check_restore_vol_size(base_name, restore_name, restore_length,
rbd_pool)
LOG.debug(_("Restore transfer completed in %.4fs") %
LOG.debug("Restore transfer completed in %.4fs" %
(time.time() - before))
def _num_backup_snaps(self, backup_base_name):
@ -1029,7 +1029,7 @@ class CephBackupDriver(BackupDriver):
rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb)
if extents:
LOG.debug(_("RBD has %s extents") % sum(extents))
LOG.debug("RBD has %s extents" % sum(extents))
return True
return False
@ -1066,8 +1066,8 @@ class CephBackupDriver(BackupDriver):
# made from, force a full restore since a diff will not work in
# this case.
if volume['id'] == backup['volume_id']:
msg = (_("Destination volume is same as backup source volume "
"%s - forcing full copy.") % volume['id'])
msg = ("Destination volume is same as backup source volume "
"%s - forcing full copy." % volume['id'])
LOG.debug(msg)
return False, restore_point
@ -1077,7 +1077,7 @@ class CephBackupDriver(BackupDriver):
if self._rbd_has_extents(volume_file.rbd_image):
# We return the restore point so that a full copy is done
# from snapshot.
LOG.debug(_("Destination has extents - forcing full copy"))
LOG.debug("Destination has extents - forcing full copy")
return False, restore_point
return True, restore_point
@ -1115,7 +1115,7 @@ class CephBackupDriver(BackupDriver):
restore_point, length)
do_full_restore = False
except exception.BackupRBDOperationFailed:
LOG.debug(_("Forcing full restore to volume %s.") %
LOG.debug("Forcing full restore to volume %s." %
volume['id'])
if do_full_restore:
@ -1136,7 +1136,7 @@ class CephBackupDriver(BackupDriver):
if meta is not None:
self.put_metadata(volume_id, meta)
else:
LOG.debug(_("Volume %s has no backed up metadata.") %
LOG.debug("Volume %s has no backed up metadata." %
backup['volume_id'])
except exception.BackupMetadataUnsupportedVersion:
msg = _("Metadata restore failed due to incompatible version")
@ -1149,8 +1149,8 @@ class CephBackupDriver(BackupDriver):
If volume metadata is available this will also be restored.
"""
target_volume = self.db.volume_get(self.context, volume_id)
LOG.debug(_('Starting restore from Ceph backup=%(src)s to '
'volume=%(dest)s') %
LOG.debug('Starting restore from Ceph backup=%(src)s to '
'volume=%(dest)s' %
{'src': backup['id'], 'dest': target_volume['name']})
try:
@ -1160,14 +1160,14 @@ class CephBackupDriver(BackupDriver):
try:
fileno = volume_file.fileno()
except IOError:
LOG.debug(_("Restore target I/O object does not support "
"fileno() - skipping call to fsync()."))
LOG.debug("Restore target I/O object does not support "
"fileno() - skipping call to fsync().")
else:
os.fsync(fileno)
self._restore_metadata(backup, volume_id)
LOG.debug(_('Restore to volume %s finished successfully.') %
LOG.debug('Restore to volume %s finished successfully.' %
volume_id)
except exception.BackupOperationError as e:
LOG.error(_('Restore to volume %(volume)s finished with error - '
@ -1176,7 +1176,7 @@ class CephBackupDriver(BackupDriver):
def delete(self, backup):
"""Delete the given backup from Ceph object store."""
LOG.debug(_('Delete started for backup=%s') % backup['id'])
LOG.debug('Delete started for backup=%s' % backup['id'])
delete_failed = False
try:
@ -1196,8 +1196,8 @@ class CephBackupDriver(BackupDriver):
"finished with warning.") %
{'backup': backup['id'], 'volume': backup['volume_id']})
else:
LOG.debug(_("Delete of backup '%(backup)s' for volume "
"'%(volume)s' finished.") %
LOG.debug("Delete of backup '%(backup)s' for volume "
"'%(volume)s' finished." %
{'backup': backup['id'], 'volume': backup['volume_id']})

View File

@ -137,8 +137,8 @@ class SwiftBackupDriver(BackupDriver):
def _create_container(self, context, backup):
backup_id = backup['id']
container = backup['container']
LOG.debug(_('_create_container started, container: %(container)s,'
'backup: %(backup_id)s') %
LOG.debug('_create_container started, container: %(container)s,'
'backup: %(backup_id)s' %
{'container': container, 'backup_id': backup_id})
if container is None:
container = CONF.backup_swift_container
@ -156,7 +156,7 @@ class SwiftBackupDriver(BackupDriver):
volume = 'volume_%s' % (backup['volume_id'])
timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
prefix = volume + '/' + timestamp + '/' + backup_name
LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix)
LOG.debug('_generate_swift_object_name_prefix: %s' % prefix)
return prefix
def _generate_object_names(self, backup):
@ -165,7 +165,7 @@ class SwiftBackupDriver(BackupDriver):
prefix=prefix,
full_listing=True)[1]
swift_object_names = [swift_obj['name'] for swift_obj in swift_objects]
LOG.debug(_('generated object list: %s') % swift_object_names)
LOG.debug('generated object list: %s' % swift_object_names)
return swift_object_names
def _metadata_filename(self, backup):
@ -176,8 +176,8 @@ class SwiftBackupDriver(BackupDriver):
def _write_metadata(self, backup, volume_id, container, object_list,
volume_meta):
filename = self._metadata_filename(backup)
LOG.debug(_('_write_metadata started, container name: %(container)s,'
' metadata filename: %(filename)s') %
LOG.debug('_write_metadata started, container name: %(container)s,'
' metadata filename: %(filename)s' %
{'container': container, 'filename': filename})
metadata = {}
metadata['version'] = self.DRIVER_VERSION
@ -199,17 +199,17 @@ class SwiftBackupDriver(BackupDriver):
'metadata file sent to swift [%(md5)s]') % {'etag': etag,
'md5': md5}
raise exception.InvalidBackup(reason=err)
LOG.debug(_('_write_metadata finished'))
LOG.debug('_write_metadata finished')
def _read_metadata(self, backup):
container = backup['container']
filename = self._metadata_filename(backup)
LOG.debug(_('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s') %
LOG.debug('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s' %
{'container': container, 'filename': filename})
(resp, body) = self.conn.get_object(container, filename)
metadata = json.loads(body)
LOG.debug(_('_read_metadata finished (%s)') % metadata)
LOG.debug('_read_metadata finished (%s)' % metadata)
return metadata
def _prepare_backup(self, backup):
@ -233,10 +233,10 @@ class SwiftBackupDriver(BackupDriver):
object_prefix})
volume_size_bytes = volume['size'] * units.GiB
availability_zone = self.az
LOG.debug(_('starting backup of volume: %(volume_id)s to swift,'
' volume size: %(volume_size_bytes)d, swift object names'
' prefix %(object_prefix)s, availability zone:'
' %(availability_zone)s') %
LOG.debug('starting backup of volume: %(volume_id)s to swift,'
' volume size: %(volume_size_bytes)d, swift object names'
' prefix %(object_prefix)s, availability zone:'
' %(availability_zone)s' %
{
'volume_id': volume_id,
'volume_size_bytes': volume_size_bytes,
@ -257,37 +257,37 @@ class SwiftBackupDriver(BackupDriver):
obj[object_name] = {}
obj[object_name]['offset'] = data_offset
obj[object_name]['length'] = len(data)
LOG.debug(_('reading chunk of data from volume'))
LOG.debug('reading chunk of data from volume')
if self.compressor is not None:
algorithm = CONF.backup_compression_algorithm.lower()
obj[object_name]['compression'] = algorithm
data_size_bytes = len(data)
data = self.compressor.compress(data)
comp_size_bytes = len(data)
LOG.debug(_('compressed %(data_size_bytes)d bytes of data '
'to %(comp_size_bytes)d bytes using '
'%(algorithm)s') %
LOG.debug('compressed %(data_size_bytes)d bytes of data '
'to %(comp_size_bytes)d bytes using '
'%(algorithm)s' %
{
'data_size_bytes': data_size_bytes,
'comp_size_bytes': comp_size_bytes,
'algorithm': algorithm,
})
else:
LOG.debug(_('not compressing data'))
LOG.debug('not compressing data')
obj[object_name]['compression'] = 'none'
reader = six.StringIO(data)
LOG.debug(_('About to put_object'))
LOG.debug('About to put_object')
try:
etag = self.conn.put_object(container, object_name, reader,
content_length=len(data))
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') %
LOG.debug('swift MD5 for %(object_name)s: %(etag)s' %
{'object_name': object_name, 'etag': etag, })
md5 = hashlib.md5(data).hexdigest()
obj[object_name]['md5'] = md5
LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') %
LOG.debug('backup MD5 for %(object_name)s: %(md5)s' %
{'object_name': object_name, 'md5': md5})
if etag != md5:
err = _('error writing object to swift, MD5 of object in '
@ -298,7 +298,7 @@ class SwiftBackupDriver(BackupDriver):
object_id += 1
object_meta['list'] = object_list
object_meta['id'] = object_id
LOG.debug(_('Calling eventlet.sleep(0)'))
LOG.debug('Calling eventlet.sleep(0)')
eventlet.sleep(0)
def _finalize_backup(self, backup, container, object_meta):
@ -316,7 +316,7 @@ class SwiftBackupDriver(BackupDriver):
raise exception.SwiftConnectionFailed(reason=err)
self.db.backup_update(self.context, backup['id'],
{'object_count': object_id})
LOG.debug(_('backup %s finished.') % backup['id'])
LOG.debug('backup %s finished.' % backup['id'])
def _backup_metadata(self, backup, object_meta):
"""Backup volume metadata.
@ -359,12 +359,12 @@ class SwiftBackupDriver(BackupDriver):
def _restore_v1(self, backup, volume_id, metadata, volume_file):
"""Restore a v1 swift volume backup from swift."""
backup_id = backup['id']
LOG.debug(_('v1 swift volume backup restore of %s started'), backup_id)
LOG.debug('v1 swift volume backup restore of %s started', backup_id)
container = backup['container']
metadata_objects = metadata['objects']
metadata_object_names = sum((obj.keys() for obj in metadata_objects),
[])
LOG.debug(_('metadata_object_names = %s') % metadata_object_names)
LOG.debug('metadata_object_names = %s' % metadata_object_names)
prune_list = [self._metadata_filename(backup)]
swift_object_names = [swift_object_name for swift_object_name in
self._generate_object_names(backup)
@ -376,9 +376,9 @@ class SwiftBackupDriver(BackupDriver):
for metadata_object in metadata_objects:
object_name = metadata_object.keys()[0]
LOG.debug(_('restoring object from swift. backup: %(backup_id)s, '
'container: %(container)s, swift object name: '
'%(object_name)s, volume: %(volume_id)s') %
LOG.debug('restoring object from swift. backup: %(backup_id)s, '
'container: %(container)s, swift object name: '
'%(object_name)s, volume: %(volume_id)s' %
{
'backup_id': backup_id,
'container': container,
@ -392,7 +392,7 @@ class SwiftBackupDriver(BackupDriver):
compression_algorithm = metadata_object[object_name]['compression']
decompressor = self._get_compressor(compression_algorithm)
if decompressor is not None:
LOG.debug(_('decompressing data using %s algorithm') %
LOG.debug('decompressing data using %s algorithm' %
compression_algorithm)
decompressed = decompressor.decompress(body)
volume_file.write(decompressed)
@ -415,7 +415,7 @@ class SwiftBackupDriver(BackupDriver):
# threads can run, allowing for among other things the service
# status to be updated
eventlet.sleep(0)
LOG.debug(_('v1 swift volume backup restore of %s finished'),
LOG.debug('v1 swift volume backup restore of %s finished',
backup_id)
def restore(self, backup, volume_id, volume_file):
@ -423,9 +423,9 @@ class SwiftBackupDriver(BackupDriver):
backup_id = backup['id']
container = backup['container']
object_prefix = backup['service_metadata']
LOG.debug(_('starting restore of backup %(object_prefix)s from swift'
' container: %(container)s, to volume %(volume_id)s, '
'backup: %(backup_id)s') %
LOG.debug('starting restore of backup %(object_prefix)s from swift'
' container: %(container)s, to volume %(volume_id)s, '
'backup: %(backup_id)s' %
{
'object_prefix': object_prefix,
'container': container,
@ -437,7 +437,7 @@ class SwiftBackupDriver(BackupDriver):
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
metadata_version = metadata['version']
LOG.debug(_('Restoring swift backup version %s'), metadata_version)
LOG.debug('Restoring swift backup version %s', metadata_version)
try:
restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
metadata_version))
@ -458,7 +458,7 @@ class SwiftBackupDriver(BackupDriver):
LOG.error(msg)
raise exception.BackupOperationError(msg)
LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.') %
LOG.debug('restore %(backup_id)s to %(volume_id)s finished.' %
{'backup_id': backup_id, 'volume_id': volume_id})
def delete(self, backup):
@ -484,8 +484,8 @@ class SwiftBackupDriver(BackupDriver):
LOG.warn(_('swift error while deleting object %s, '
'continuing with delete') % swift_object_name)
else:
LOG.debug(_('deleted swift object: %(swift_object_name)s'
' in container: %(container)s') %
LOG.debug('deleted swift object: %(swift_object_name)s'
' in container: %(container)s' %
{
'swift_object_name': swift_object_name,
'container': container
@ -494,7 +494,7 @@ class SwiftBackupDriver(BackupDriver):
# Yield so other threads can run
eventlet.sleep(0)
LOG.debug(_('delete %s finished') % backup['id'])
LOG.debug('delete %s finished' % backup['id'])
def get_backup_driver(context):

View File

@ -367,8 +367,8 @@ class TSMBackupDriver(BackupDriver):
volume_id = backup['volume_id']
volume_path, backup_mode = _get_volume_realpath(volume_file,
volume_id)
LOG.debug(_('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.')
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.'
% {'volume_id': volume_id,
'volume_path': volume_path,
'mode': backup_mode})
@ -412,7 +412,7 @@ class TSMBackupDriver(BackupDriver):
finally:
_cleanup_device_hardlink(backup_path, volume_path, volume_id)
LOG.debug(_('Backup %s finished.') % backup_id)
LOG.debug('Backup %s finished.' % backup_id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
@ -428,10 +428,10 @@ class TSMBackupDriver(BackupDriver):
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug(_('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
'mode: %(mode)s.') %
LOG.debug('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
'mode: %(mode)s.' %
{'volume_id': volume_id,
'backup_id': backup_id,
'mode': backup_mode})
@ -473,7 +473,7 @@ class TSMBackupDriver(BackupDriver):
finally:
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug(_('Restore %(backup_id)s to %(volume_id)s finished.')
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.'
% {'backup_id': backup_id,
'volume_id': volume_id})
@ -488,7 +488,7 @@ class TSMBackupDriver(BackupDriver):
delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
volume_id = backup['volume_id']
LOG.debug(_('Delete started for backup: %(backup)s, mode: %(mode)s.'),
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
{'backup': backup['id'],
'mode': backup_mode})
@ -534,7 +534,7 @@ class TSMBackupDriver(BackupDriver):
'err': err})
LOG.error(err)
LOG.debug(_('Delete %s finished.') % backup['id'])
LOG.debug('Delete %s finished.' % backup['id'])
def get_backup_driver(context):

View File

@ -102,7 +102,7 @@ class BackupManager(manager.SchedulerDependentManager):
msg = _("NULL host not allowed for volume backend lookup.")
raise exception.BackupFailedToGetVolumeBackend(msg)
else:
LOG.debug(_("Checking hostname '%s' for backend info.") % (host))
LOG.debug("Checking hostname '%s' for backend info." % (host))
part = host.partition('@')
if (part[1] == '@') and (part[2] != ''):
backend = part[2]
@ -119,10 +119,10 @@ class BackupManager(manager.SchedulerDependentManager):
return 'default'
def _get_manager(self, backend):
LOG.debug(_("Manager requested for volume_backend '%s'.") %
LOG.debug("Manager requested for volume_backend '%s'." %
(backend))
if backend is None:
LOG.debug(_("Fetching default backend."))
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
if backend not in self.volume_managers:
msg = (_("Volume manager for backend '%s' does not exist.") %
@ -131,10 +131,10 @@ class BackupManager(manager.SchedulerDependentManager):
return self.volume_managers[backend]
def _get_driver(self, backend=None):
LOG.debug(_("Driver requested for volume_backend '%s'.") %
LOG.debug("Driver requested for volume_backend '%s'." %
(backend))
if backend is None:
LOG.debug(_("Fetching default backend."))
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
mgr = self._get_manager(backend)
mgr.driver.db = self.db
@ -149,14 +149,14 @@ class BackupManager(manager.SchedulerDependentManager):
service_name=backend)
config = mgr.configuration
backend_name = config.safe_get('volume_backend_name')
LOG.debug(_("Registering backend %(backend)s (host=%(host)s "
"backend_name=%(backend_name)s).") %
LOG.debug("Registering backend %(backend)s (host=%(host)s "
"backend_name=%(backend_name)s)." %
{'backend': backend, 'host': host,
'backend_name': backend_name})
self.volume_managers[backend] = mgr
else:
default = importutils.import_object(CONF.volume_manager)
LOG.debug(_("Registering default backend %s.") % (default))
LOG.debug("Registering default backend %s." % (default))
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):

View File

@ -239,8 +239,8 @@ class ISCSIConnector(InitiatorConnector):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(host_device)s "
"(after %(tries)s rescans)"),
LOG.debug("Found iSCSI node %(host_device)s "
"(after %(tries)s rescans)",
{'host_device': host_device, 'tries': tries})
if self.use_multipath:
@ -616,7 +616,7 @@ class FibreChannelConnector(InitiatorConnector):
def _wait_for_device_discovery(host_devices):
tries = self.tries
for device in host_devices:
LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
LOG.debug("Looking for Fibre Channel dev %(device)s",
{'device': device})
if os.path.exists(device):
self.host_device = device
@ -646,8 +646,8 @@ class FibreChannelConnector(InitiatorConnector):
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug(_("Found Fibre Channel volume %(name)s "
"(after %(tries)s rescans)"),
LOG.debug("Found Fibre Channel volume %(name)s "
"(after %(tries)s rescans)",
{'name': self.device_name, 'tries': tries})
# see if the new drive is part of a multipath
@ -655,7 +655,7 @@ class FibreChannelConnector(InitiatorConnector):
if self.use_multipath:
mdev_info = self._linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug(_("Multipath device discovered %(device)s")
LOG.debug("Multipath device discovered %(device)s"
% {'device': mdev_info['device']})
device_path = mdev_info['device']
devices = mdev_info['devices']
@ -787,8 +787,8 @@ class AoEConnector(InitiatorConnector):
timer.start(interval=2).wait()
if waiting_status['tries']:
LOG.debug(_("Found AoE device %(path)s "
"(after %(tries)s rediscover)"),
LOG.debug("Found AoE device %(path)s "
"(after %(tries)s rediscover)",
{'path': aoe_path,
'tries': waiting_status['tries']})
@ -813,7 +813,7 @@ class AoEConnector(InitiatorConnector):
root_helper=self._root_helper,
check_exit_code=0)
LOG.debug(_('aoe-discover: stdout=%(out)s stderr%(err)s') %
LOG.debug('aoe-discover: stdout=%(out)s stderr%(err)s' %
{'out': out, 'err': err})
def _aoe_revalidate(self, aoe_device):
@ -823,7 +823,7 @@ class AoEConnector(InitiatorConnector):
root_helper=self._root_helper,
check_exit_code=0)
LOG.debug(_('aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s') %
LOG.debug('aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s' %
{'dev': aoe_device, 'out': out, 'err': err})
def _aoe_flush(self, aoe_device):
@ -832,7 +832,7 @@ class AoEConnector(InitiatorConnector):
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=0)
LOG.debug(_('aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s') %
LOG.debug('aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s' %
{'dev': aoe_device, 'out': out, 'err': err})

View File

@ -161,7 +161,7 @@ class LinuxSCSI(executor.Executor):
% {'line': line})
return None
LOG.debug(_("Found multipath device = %(mdev)s")
LOG.debug("Found multipath device = %(mdev)s"
% {'mdev': mdev})
device_lines = lines[3:]
for dev_line in device_lines:

View File

@ -178,8 +178,8 @@ class TgtAdm(TargetAdmin):
f = open(volume_path, 'w+')
f.write(volume_conf)
f.close()
LOG.debug(_('Created volume path %(vp)s,\n'
'content: %(vc)s')
LOG.debug('Created volume path %(vp)s,\n'
'content: %(vc)s'
% {'vp': volume_path, 'vc': volume_conf})
old_persist_file = None

View File

@ -431,10 +431,10 @@ class LVM(executor.Executor):
size_str = self._calculate_thin_pool_size()
cmd = ['lvcreate', '-T', '-L', size_str, vg_pool_name]
LOG.debug(_('Created thin pool \'%(pool)s\' with size %(size)s of '
'total %(free)sg') % {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
LOG.debug('Created thin pool \'%(pool)s\' with size %(size)s of '
'total %(free)sg' % {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._execute(*cmd,
root_helper=self._root_helper,
@ -601,7 +601,7 @@ class LVM(executor.Executor):
{'command': err.cmd, 'response': err.stderr})
LOG.debug(mesg)
LOG.debug(_('Attempting udev settle and retry of lvremove...'))
LOG.debug('Attempting udev settle and retry of lvremove...')
run_udevadm_settle()
self._execute('lvremove',

View File

@ -120,12 +120,12 @@ class RemoteFsClient(object):
options = self._nfs_mount_type_opts[mnt_type]
try:
self._do_mount('nfs', nfs_share, mount_path, options, flags)
LOG.debug(_('Mounted %(sh)s using %(mnt_type)s.')
LOG.debug('Mounted %(sh)s using %(mnt_type)s.'
% {'sh': nfs_share, 'mnt_type': mnt_type})
return
except Exception as e:
mnt_errors[mnt_type] = six.text_type(e)
LOG.debug(_('Failed to do %s mount.'), mnt_type)
LOG.debug('Failed to do %s mount.', mnt_type)
raise exception.BrickException(_("NFS mount failed for share %(sh)s."
"Error - %(error)s")
% {'sh': nfs_share,

View File

@ -95,7 +95,7 @@ def novaclient(context, admin=False):
service_name=service_name,
endpoint_type=endpoint_type)
LOG.debug(_('Novaclient connection created using URL: %s') % url)
LOG.debug('Novaclient connection created using URL: %s' % url)
extensions = [assisted_volume_snapshots]

View File

cinder/hacking/checks.py (new file, 49 lines)
View File

@ -0,0 +1,49 @@
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Guidelines for writing new hacking checks
- Use only for Cinder specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
cinder/tests/unit/test_hacking.py
"""
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def factory(register):
register(no_translate_debug_logs)
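
Per the guidelines above, each new rule should come with test
cases. A minimal sketch of what such a test could look like
(hypothetical test name; assumes the module imports as
cinder.hacking.checks):

    from cinder.hacking import checks

    def test_no_translate_debug_logs():
        # A translated debug log yields exactly one N319 warning.
        assert len(list(checks.no_translate_debug_logs(
            "LOG.debug(_('foo'))", "cinder/volume/api.py"))) == 1
        # An untranslated debug log yields nothing.
        assert len(list(checks.no_translate_debug_logs(
            "LOG.debug('foo')", "cinder/volume/api.py"))) == 0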

View File

@ -175,9 +175,9 @@ def fetch_to_volume_format(context, image_service,
# qemu-img is not installed but we do have a RAW image. As a
# result we only need to copy the image to the destination and then
# return.
LOG.debug(_('Copying image from %(tmp)s to volume %(dest)s - '
'size: %(size)s') % {'tmp': tmp, 'dest': dest,
'size': image_meta['size']})
LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
'size: %(size)s' % {'tmp': tmp, 'dest': dest,
'size': image_meta['size']})
volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
return

View File

@ -125,7 +125,7 @@ class SchedulerDependentManager(Manager):
def _publish_service_capabilities(self, context):
"""Pass data back to the scheduler at a periodic interval."""
if self.last_capabilities:
LOG.debug(_('Notifying Schedulers of capabilities ...'))
LOG.debug('Notifying Schedulers of capabilities ...')
self.scheduler_rpcapi.update_service_capabilities(
context,
self.service_name,

View File

@ -746,7 +746,7 @@ class QuotaEngine(object):
expire=expire,
project_id=project_id)
LOG.debug(_("Created reservations %s") % reservations)
LOG.debug("Created reservations %s" % reservations)
return reservations

View File

@ -256,7 +256,7 @@ class FilterScheduler(driver.Scheduler):
if not hosts:
return []
LOG.debug(_("Filtered %s") % hosts)
LOG.debug("Filtered %s" % hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
@ -273,7 +273,7 @@ class FilterScheduler(driver.Scheduler):
def _choose_top_host(self, weighed_hosts, request_spec):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug(_("Choosing %s") % host_state.host)
LOG.debug("Choosing %s" % host_state.host)
volume_properties = request_spec['volume_properties']
host_state.consume_from_volume(volume_properties)
return top_host

View File

@ -263,13 +263,13 @@ class HostManager(object):
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
if service_name != 'volume':
LOG.debug(_('Ignoring %(service_name)s service update '
'from %(host)s'),
LOG.debug('Ignoring %(service_name)s service update '
'from %(host)s',
{'service_name': service_name, 'host': host})
return
LOG.debug(_("Received %(service_name)s service update from "
"%(host)s.") %
LOG.debug("Received %(service_name)s service update from "
"%(host)s." %
{'service_name': service_name, 'host': host})
# Copy the capabilities, so we don't modify the original dict

View File

@ -109,7 +109,7 @@ class Service(service.Service):
except exception.NotFound:
self._create_service_ref(ctxt)
LOG.debug(_("Creating RPC server for service %s") % self.topic)
LOG.debug("Creating RPC server for service %s" % self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
@ -248,8 +248,8 @@ class Service(service.Service):
try:
service_ref = db.service_get(ctxt, self.service_id)
except exception.NotFound:
LOG.debug(_('The service database object disappeared, '
'Recreating it.'))
LOG.debug('The service database object disappeared, '
'Recreating it.')
self._create_service_ref(ctxt)
service_ref = db.service_get(ctxt, self.service_id)
@ -372,7 +372,7 @@ def serve(server, workers=None):
def wait():
LOG.debug(_('Full set of CONF:'))
LOG.debug('Full set of CONF:')
for flag in CONF:
flag_get = CONF.get(flag, None)
# hide flag contents from log if contains a password
@ -380,7 +380,7 @@ def wait():
if ("_password" in flag or "_key" in flag or
(flag == "sql_connection" and
("mysql:" in flag_get or "postgresql:" in flag_get))):
LOG.debug(_('%s : FLAG SET ') % flag)
LOG.debug('%s : FLAG SET ' % flag)
else:
LOG.debug('%(flag)s : %(flag_get)s' %
{'flag': flag, 'flag_get': flag_get})

View File

@ -54,7 +54,7 @@ class FakeISCSIDriver(lvm.LVMISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
LOG.debug(_("FAKE ISCSI: %s"), cmd)
LOG.debug("FAKE ISCSI: %s", cmd)
return (None, None)
@ -73,7 +73,7 @@ class FakeISERDriver(FakeISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
LOG.debug(_("FAKE ISER: %s"), cmd)
LOG.debug("FAKE ISER: %s", cmd)
return (None, None)
@ -121,12 +121,12 @@ class LoggingVolumeDriver(driver.VolumeDriver):
@staticmethod
def log_action(action, parameters):
"""Logs the command."""
LOG.debug(_("LoggingVolumeDriver: %s") % (action))
LOG.debug("LoggingVolumeDriver: %s" % (action))
log_dictionary = {}
if parameters:
log_dictionary = dict(parameters)
log_dictionary['action'] = action
LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
LOG.debug("LoggingVolumeDriver: %s" % (log_dictionary))
LoggingVolumeDriver._LOGS.append(log_dictionary)
@staticmethod

View File

@ -67,7 +67,7 @@ def fake_execute(*cmd_parts, **kwargs):
run_as_root = kwargs.get('run_as_root', False)
cmd_str = ' '.join(str(part) for part in cmd_parts)
LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str)
LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str)
_fake_execute_log.append(cmd_str)
reply_handler = fake_execute_default_reply_handler
@ -75,7 +75,7 @@ def fake_execute(*cmd_parts, **kwargs):
for fake_replier in _fake_execute_repliers:
if re.match(fake_replier[0], cmd_str):
reply_handler = fake_replier[1]
LOG.debug(_('Faked command matched %s') % fake_replier[0])
LOG.debug('Faked command matched %s' % fake_replier[0])
break
if isinstance(reply_handler, basestring):
@ -91,12 +91,12 @@ def fake_execute(*cmd_parts, **kwargs):
run_as_root=run_as_root,
check_exit_code=check_exit_code)
except processutils.ProcessExecutionError as e:
LOG.debug(_('Faked command raised an exception %s'), e)
LOG.debug('Faked command raised an exception %s', e)
raise
LOG.debug(_("Reply to faked command is stdout='%(stdout)s' "
"stderr='%(stderr)s'") % {'stdout': reply[0],
'stderr': reply[1]})
LOG.debug("Reply to faked command is stdout='%(stdout)s' "
"stderr='%(stderr)s'" % {'stdout': reply[0],
'stderr': reply[1]})
# Replicate the sleep call in the real function
greenthread.sleep(0)

View File

@ -121,7 +121,7 @@ class TestOpenStackClient(object):
headers=headers)
http_status = response.status_code
LOG.debug(_("%(auth_uri)s => code %(http_status)s"),
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
@ -144,7 +144,7 @@ class TestOpenStackClient(object):
response = self.request(full_uri, **kwargs)
http_status = response.status_code
LOG.debug(_("%(relative_uri)s => code %(http_status)s"),
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
@ -162,7 +162,7 @@ class TestOpenStackClient(object):
def _decode_json(self, response):
body = response.text
LOG.debug(_("Decoding JSON: %s") % (body))
LOG.debug("Decoding JSON: %s" % (body))
if body:
return jsonutils.loads(body)
else:

@ -26,4 +26,4 @@ class LoginTest(integrated_helpers._IntegratedTestBase):
"""Simple check - we list volumes - so we know we're logged in."""
volumes = self.api.get_volumes()
for volume in volumes:
LOG.debug(_("volume: %s") % volume)
LOG.debug("volume: %s" % volume)

@ -1438,18 +1438,18 @@ class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
try:
LOG.debug(_('Run CLI command: %s') % cmd)
LOG.debug('Run CLI command: %s' % cmd)
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
(stdout, stderr) = ret
LOG.debug(_('CLI output:\n stdout: %(stdout)s\n stderr: '
'%(stderr)s') % {'stdout': stdout, 'stderr': stderr})
LOG.debug('CLI output:\n stdout: %(stdout)s\n stderr: '
'%(stderr)s' % {'stdout': stdout, 'stderr': stderr})
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s') % {'out': e.stdout,
'err': e.stderr})
LOG.debug('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s' % {'out': e.stdout,
'err': e.stderr})
return ret
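
The Storwize hunk above also shows the common pattern of emitting debug details from inside excutils.save_and_reraise_exception(): the message loses its translation marker, but the context manager still re-raises the original exception, traceback intact, when the block exits. A stripped-down sketch of the pattern; the excutils import path is an assumption based on the openstack.common incubator layout of the time:

```python
import logging

from cinder.openstack.common import excutils  # import path assumed

LOG = logging.getLogger(__name__)

def run_cli(cmd):
    # Stand-in for the fake driver's SSH call; fails for the illustration.
    raise RuntimeError('CMMVC5753E object not found')

try:
    run_cli('lsvdisk 42')
except RuntimeError as e:
    with excutils.save_and_reraise_exception():
        # Untranslated debug message; the exception propagates afterwards.
        LOG.debug('CLI Exception output: %s', e)
```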

@ -182,7 +182,7 @@ class FakeBrcdFCZoneClientCLI(object):
raise paramiko.SSHException("Unable to connect to fabric")
def get_active_zone_set(self):
LOG.debug(_("Inside get_active_zone_set %s"), GlobalVars._active_cfg)
LOG.debug("Inside get_active_zone_set %s", GlobalVars._active_cfg)
return GlobalVars._active_cfg
def add_zones(self, zones, isActivate):

@ -433,7 +433,7 @@ class LazyPluggable(object):
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug(_('backend %s'), self.__backend)
LOG.debug('backend %s', self.__backend)
return self.__backend
def __getattr__(self, key):
@ -708,7 +708,7 @@ def tempdir(**kwargs):
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug(_('Could not remove tmpdir: %s'), e)
LOG.debug('Could not remove tmpdir: %s', e)
def walk_class_hierarchy(clazz, encountered=None):

@ -295,7 +295,7 @@ class API(base.Base):
filters['no_migration_targets'] = True
if filters:
LOG.debug(_("Searching by: %s") % str(filters))
LOG.debug("Searching by: %s" % str(filters))
if (context.is_admin and 'all_tenants' in filters):
# Need to remove all_tenants to pass the filtering below.
@ -337,7 +337,7 @@ class API(base.Base):
context, context.project_id)
if search_opts:
LOG.debug(_("Searching by: %s") % search_opts)
LOG.debug("Searching by: %s" % search_opts)
results = []
not_found = object()

@ -275,7 +275,7 @@ class VolumeDriver(object):
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug(_('copy_data_between_volumes %(src)s -> %(dest)s.')
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.'
% {'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
@ -332,7 +332,7 @@ class VolumeDriver(object):
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.debug(_('copy_image_to_volume %s.') % volume['name'])
LOG.debug('copy_image_to_volume %s.' % volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
@ -349,7 +349,7 @@ class VolumeDriver(object):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.debug(_('copy_volume_to_image %s.') % volume['name'])
LOG.debug('copy_volume_to_image %s.' % volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
@ -375,7 +375,7 @@ class VolumeDriver(object):
# clean this up in the future.
model_update = None
try:
LOG.debug(_("Volume %s: creating export"), volume['id'])
LOG.debug("Volume %s: creating export", volume['id'])
model_update = self.create_export(context, volume)
if model_update:
volume = self.db.volume_update(context, volume['id'],
@ -453,7 +453,7 @@ class VolumeDriver(object):
raise exception.VolumeBackendAPIException(data=err_msg)
try:
LOG.debug(_("volume %s: removing export"), volume['id'])
LOG.debug("volume %s: removing export", volume['id'])
self.remove_export(context, volume)
except Exception as ex:
LOG.exception(_("Error detaching volume %(volume)s, "
@ -487,7 +487,7 @@ class VolumeDriver(object):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
LOG.debug(_('Creating a new backup for volume %s.') %
LOG.debug('Creating a new backup for volume %s.' %
volume['name'])
properties = utils.brick_get_connector_properties()
@ -504,8 +504,8 @@ class VolumeDriver(object):
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(_('Restoring backup %(backup)s to '
'volume %(volume)s.') %
LOG.debug('Restoring backup %(backup)s to '
'volume %(volume)s.' %
{'backup': backup['id'],
'volume': volume['name']})
@ -651,7 +651,7 @@ class ISCSIDriver(VolumeDriver):
except processutils.ProcessExecutionError as ex:
LOG.error(_("ISCSI discovery attempt failed for:%s") %
volume['host'].split('@')[0])
LOG.debug(_("Error from iscsiadm -m discovery: %s") % ex.stderr)
LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
return None
for target in out.splitlines():
@ -702,7 +702,7 @@ class ISCSIDriver(VolumeDriver):
(volume['name']))
raise exception.InvalidVolume(reason=msg)
LOG.debug(_("ISCSI Discovery: Found %s") % (location))
LOG.debug("ISCSI Discovery: Found %s" % (location))
properties['target_discovered'] = True
results = location.split(" ")
@ -820,7 +820,7 @@ class ISCSIDriver(VolumeDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
@ -880,7 +880,7 @@ class FakeISCSIDriver(ISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
LOG.debug(_("FAKE ISCSI: %s"), cmd)
LOG.debug("FAKE ISCSI: %s", cmd)
return (None, None)
def create_volume_from_snapshot(self, volume, snapshot):
@ -978,7 +978,7 @@ class ISERDriver(ISCSIDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'Generic_iSER'
@ -1017,7 +1017,7 @@ class FakeISERDriver(FakeISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
LOG.debug(_("FAKE ISER: %s"), cmd)
LOG.debug("FAKE ISER: %s", cmd)
return (None, None)
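
Note that the LOG.error(_(...)) call in the ISCSI discovery hunk above keeps its translation marker; only the debug level is exempt under the policy. In modules of this era, _ is the gettext marker, commonly imported from cinder.openstack.common.gettextutils (the exact import location is an assumption here), so the policy reduces to:

```python
import logging

# Illustrative only; this import location is an assumption.
from cinder.openstack.common.gettextutils import _

LOG = logging.getLogger(__name__)
host = 'cinder@lvm'
stderr = 'iscsiadm: No portals found'

# Operator-facing levels stay translatable...
LOG.error(_('ISCSI discovery attempt failed for:%s') % host)
# ...while developer-facing debug output is left untranslated.
LOG.debug('Error from iscsiadm -m discovery: %s' % stderr)
```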

@ -221,7 +221,7 @@ class CoraidAppliance(object):
relogin_attempts -= 1
if relogin_attempts <= 0:
raise exception.CoraidESMReloginFailed()
LOG.debug(_('Session is expired. Relogin on ESM.'))
LOG.debug('Session is expired. Relogin on ESM.')
self._relogin()
else:
return reply
@ -281,7 +281,7 @@ class CoraidAppliance(object):
try:
self.rpc('fetch', {}, None, allow_empty_response=True)
except Exception as e:
LOG.debug(_('Coraid Appliance ping failed: %s'), e)
LOG.debug('Coraid Appliance ping failed: %s', e)
raise exception.CoraidESMNotAvailable(reason=e)
def create_lun(self, repository_name, volume_name, volume_size_in_gb):
@ -294,7 +294,7 @@ class CoraidAppliance(object):
'op': 'orchStrLun',
'args': 'add'}
esm_result = self.esm_command(request)
LOG.debug(_('Volume "%(name)s" created with VSX LUN "%(lun)s"') %
LOG.debug('Volume "%(name)s" created with VSX LUN "%(lun)s"' %
{'name': volume_name,
'lun': esm_result['firstParam']})
return esm_result
@ -308,15 +308,15 @@ class CoraidAppliance(object):
'op': 'orchStrLun/verified',
'args': 'delete'}
esm_result = self.esm_command(request)
LOG.debug(_('Volume "%s" deleted.'), volume_name)
LOG.debug('Volume "%s" deleted.', volume_name)
return esm_result
def resize_volume(self, volume_name, new_volume_size_in_gb):
LOG.debug(_('Resize volume "%(name)s" to %(size)s GB.') %
LOG.debug('Resize volume "%(name)s" to %(size)s GB.' %
{'name': volume_name,
'size': new_volume_size_in_gb})
repository = self.get_volume_repository(volume_name)
LOG.debug(_('Repository for volume "%(name)s" found: "%(repo)s"') %
LOG.debug('Repository for volume "%(name)s" found: "%(repo)s"' %
{'name': volume_name,
'repo': repository})
@ -330,7 +330,7 @@ class CoraidAppliance(object):
'args': 'resize'}
esm_result = self.esm_command(request)
LOG.debug(_('Volume "%(name)s" resized. New size is %(size)s GB.') %
LOG.debug('Volume "%(name)s" resized. New size is %(size)s GB.' %
{'name': volume_name,
'size': new_volume_size_in_gb})
return esm_result
@ -502,7 +502,7 @@ class CoraidDriver(driver.VolumeDriver):
shelf = volume_info['shelf']
lun = volume_info['lun']
LOG.debug(_('Initialize connection %(shelf)s/%(lun)s for %(name)s') %
LOG.debug('Initialize connection %(shelf)s/%(lun)s for %(name)s' %
{'shelf': shelf,
'lun': lun,
'name': volume['name']})

@ -166,7 +166,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
" for volume %s") %
(volume['name']))
LOG.debug(_("ISCSI Discovery: Found %s") % (location))
LOG.debug("ISCSI Discovery: Found %s" % (location))
properties['target_discovered'] = True
hostname = connector['host']
@ -199,7 +199,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
break
for endpoint in endpoints:
if properties['target_iqn'] == endpoint:
LOG.debug(_("Found iSCSI endpoint: %s") % endpoint)
LOG.debug("Found iSCSI endpoint: %s" % endpoint)
foundEndpoint = True
break
if foundEndpoint:
@ -244,7 +244,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
def update_volume_stats(self):
"""Retrieve status info from volume group."""
LOG.debug(_("Updating volume status"))
LOG.debug("Updating volume status")
# retrieving the volume update from the VNX
data = self.cli.update_volume_status()
backend_name = self.configuration.safe_get('volume_backend_name')

@ -79,7 +79,7 @@ class EMCSMISCommon():
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
LOG.debug(_('Entering create_volume.'))
LOG.debug('Entering create_volume.')
volumesize = int(volume['size']) * units.GiB
volumename = volume['name']
@ -91,15 +91,15 @@ class EMCSMISCommon():
storage_type = self._get_storage_type(volume)
LOG.debug(_('Create Volume: %(volume)s '
'Storage type: %(storage_type)s')
LOG.debug('Create Volume: %(volume)s '
'Storage type: %(storage_type)s'
% {'volume': volumename,
'storage_type': storage_type})
pool, storage_system = self._find_pool(storage_type[POOL])
LOG.debug(_('Create Volume: %(volume)s Pool: %(pool)s '
'Storage System: %(storage_system)s')
LOG.debug('Create Volume: %(volume)s Pool: %(pool)s '
'Storage System: %(storage_system)s'
% {'volume': volumename,
'pool': pool,
'storage_system': storage_system})
@ -117,10 +117,10 @@ class EMCSMISCommon():
provisioning = self._get_provisioning(storage_type)
LOG.debug(_('Create Volume: %(name)s Method: '
LOG.debug('Create Volume: %(name)s Method: '
'CreateOrModifyElementFromStoragePool ConfigServicie: '
'%(service)s ElementName: %(name)s InPool: %(pool)s '
'ElementType: %(provisioning)s Size: %(size)lu')
'ElementType: %(provisioning)s Size: %(size)lu'
% {'service': configservice,
'name': volumename,
'pool': pool,
@ -133,7 +133,7 @@ class EMCSMISCommon():
ElementType=self._getnum(provisioning, '16'),
Size=self._getnum(volumesize, '64'))
LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)lu')
LOG.debug('Create Volume: %(volumename)s Return code: %(rc)lu'
% {'volumename': volumename,
'rc': rc})
@ -161,9 +161,9 @@ class EMCSMISCommon():
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
name['keybindings'] = keys
LOG.debug(_('Leaving create_volume: %(volumename)s '
LOG.debug('Leaving create_volume: %(volumename)s '
'Return code: %(rc)lu '
'volume instance: %(name)s')
'volume instance: %(name)s'
% {'volumename': volumename,
'rc': rc,
'name': name})
@ -173,7 +173,7 @@ class EMCSMISCommon():
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(_('Entering create_volume_from_snapshot.'))
LOG.debug('Entering create_volume_from_snapshot.')
snapshotname = snapshot['name']
volumename = volume['name']
@ -188,9 +188,9 @@ class EMCSMISCommon():
snapshot_instance = self._find_lun(snapshot)
storage_system = snapshot_instance['SystemName']
LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Snapshot Instance: '
'%(snapshotinstance)s Storage System: %(storage_system)s.')
'%(snapshotinstance)s Storage System: %(storage_system)s.'
% {'volumename': volumename,
'snapshotname': snapshotname,
'snapshotinstance': snapshot_instance.path,
@ -218,11 +218,11 @@ class EMCSMISCommon():
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Method: CreateElementReplica '
'ReplicationService: %(service)s ElementName: '
'%(elementname)s SyncType: 8 SourceElement: '
'%(sourceelement)s')
'%(sourceelement)s'
% {'volumename': volumename,
'snapshotname': snapshotname,
'service': repservice,
@ -265,9 +265,9 @@ class EMCSMISCommon():
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
name['keybindings'] = keys
LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s. Successfully clone volume '
'from snapshot. Finding the clone relationship.')
'from snapshot. Finding the clone relationship.'
% {'volumename': volumename,
'snapshotname': snapshotname})
@ -277,11 +277,11 @@ class EMCSMISCommon():
# Remove the Clone relationshop so it can be used as a regular lun
# 8 - Detach operation
LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s. Remove the clone '
'relationship. Method: ModifyReplicaSynchronization '
'ReplicationService: %(service)s Operation: 8 '
'Synchronization: %(sync_name)s')
'Synchronization: %(sync_name)s'
% {'volumename': volumename,
'snapshotname': snapshotname,
'service': repservice,
@ -293,8 +293,8 @@ class EMCSMISCommon():
Operation=self._getnum(8, '16'),
Synchronization=sync_name)
LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu')
LOG.debug('Create Volume from Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu'
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc})
@ -314,9 +314,9 @@ class EMCSMISCommon():
raise exception.VolumeBackendAPIException(
data=exception_message)
LOG.debug(_('Leaving create_volume_from_snapshot: Volume: '
LOG.debug('Leaving create_volume_from_snapshot: Volume: '
'%(volumename)s Snapshot: %(snapshotname)s '
'Return code: %(rc)lu.')
'Return code: %(rc)lu.'
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc})
@ -325,7 +325,7 @@ class EMCSMISCommon():
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(_('Entering create_cloned_volume.'))
LOG.debug('Entering create_cloned_volume.')
srcname = src_vref['name']
volumename = volume['name']
@ -340,9 +340,9 @@ class EMCSMISCommon():
src_instance = self._find_lun(src_vref)
storage_system = src_instance['SystemName']
LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
LOG.debug('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s Source Instance: '
'%(src_instance)s Storage System: %(storage_system)s.')
'%(src_instance)s Storage System: %(storage_system)s.'
% {'volumename': volumename,
'srcname': srcname,
'src_instance': src_instance.path,
@ -359,11 +359,11 @@ class EMCSMISCommon():
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
LOG.debug('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s Method: CreateElementReplica '
'ReplicationService: %(service)s ElementName: '
'%(elementname)s SyncType: 8 SourceElement: '
'%(sourceelement)s')
'%(sourceelement)s'
% {'volumename': volumename,
'srcname': srcname,
'service': repservice,
@ -406,9 +406,9 @@ class EMCSMISCommon():
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
name['keybindings'] = keys
LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
LOG.debug('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s. Successfully cloned volume '
'from source volume. Finding the clone relationship.')
'from source volume. Finding the clone relationship.'
% {'volumename': volumename,
'srcname': srcname})
@ -418,11 +418,11 @@ class EMCSMISCommon():
# Remove the Clone relationshop so it can be used as a regular lun
# 8 - Detach operation
LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
LOG.debug('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s. Remove the clone '
'relationship. Method: ModifyReplicaSynchronization '
'ReplicationService: %(service)s Operation: 8 '
'Synchronization: %(sync_name)s')
'Synchronization: %(sync_name)s'
% {'volumename': volumename,
'srcname': srcname,
'service': repservice,
@ -434,8 +434,8 @@ class EMCSMISCommon():
Operation=self._getnum(8, '16'),
Synchronization=sync_name)
LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s Return code: %(rc)lu')
LOG.debug('Create Cloned Volume: Volume: %(volumename)s '
'Source Volume: %(srcname)s Return code: %(rc)lu'
% {'volumename': volumename,
'srcname': srcname,
'rc': rc})
@ -455,9 +455,9 @@ class EMCSMISCommon():
raise exception.VolumeBackendAPIException(
data=exception_message)
LOG.debug(_('Leaving create_cloned_volume: Volume: '
LOG.debug('Leaving create_cloned_volume: Volume: '
'%(volumename)s Source Volume: %(srcname)s '
'Return code: %(rc)lu.')
'Return code: %(rc)lu.'
% {'volumename': volumename,
'srcname': srcname,
'rc': rc})
@ -466,7 +466,7 @@ class EMCSMISCommon():
def delete_volume(self, volume):
"""Deletes an EMC volume."""
LOG.debug(_('Entering delete_volume.'))
LOG.debug('Entering delete_volume.')
volumename = volume['name']
LOG.info(_('Delete Volume: %(volume)s')
% {'volume': volumename})
@ -493,12 +493,12 @@ class EMCSMISCommon():
device_id = vol_instance['DeviceID']
LOG.debug(_('Delete Volume: %(name)s DeviceID: %(deviceid)s')
LOG.debug('Delete Volume: %(name)s DeviceID: %(deviceid)s'
% {'name': volumename,
'deviceid': device_id})
LOG.debug(_('Delete Volume: %(name)s Method: EMCReturnToStoragePool '
'ConfigServic: %(service)s TheElement: %(vol_instance)s')
LOG.debug('Delete Volume: %(name)s Method: EMCReturnToStoragePool '
'ConfigServic: %(service)s TheElement: %(vol_instance)s'
% {'service': configservice,
'name': volumename,
'vol_instance': vol_instance.path})
@ -520,14 +520,14 @@ class EMCSMISCommon():
raise exception.VolumeBackendAPIException(
data=exception_message)
LOG.debug(_('Leaving delete_volume: %(volumename)s Return code: '
'%(rc)lu')
LOG.debug('Leaving delete_volume: %(volumename)s Return code: '
'%(rc)lu'
% {'volumename': volumename,
'rc': rc})
def create_snapshot(self, snapshot, volume):
"""Creates a snapshot."""
LOG.debug(_('Entering create_snapshot.'))
LOG.debug('Entering create_snapshot.')
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
@ -541,8 +541,8 @@ class EMCSMISCommon():
device_id = vol_instance['DeviceID']
storage_system = vol_instance['SystemName']
LOG.debug(_('Device ID: %(deviceid)s: Storage System: '
'%(storagesystem)s')
LOG.debug('Device ID: %(deviceid)s: Storage System: '
'%(storagesystem)s'
% {'deviceid': device_id,
'storagesystem': storage_system})
@ -555,10 +555,10 @@ class EMCSMISCommon():
% volumename)
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug(_("Create Snapshot: Method: CreateElementReplica: "
LOG.debug("Create Snapshot: Method: CreateElementReplica: "
"Target: %(snapshot)s Source: %(volume)s Replication "
"Service: %(service)s ElementName: %(elementname)s Sync "
"Type: 7 SourceElement: %(sourceelement)s.")
"Type: 7 SourceElement: %(sourceelement)s."
% {'snapshot': snapshotname,
'volume': volumename,
'service': repservice,
@ -571,8 +571,8 @@ class EMCSMISCommon():
SyncType=self._getnum(7, '16'),
SourceElement=vol_instance.path)
LOG.debug(_('Create Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu')
LOG.debug('Create Snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu'
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc})
@ -602,15 +602,15 @@ class EMCSMISCommon():
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
name['keybindings'] = keys
LOG.debug(_('Leaving create_snapshot: Snapshot: %(snapshot)s '
'Volume: %(volume)s Return code: %(rc)lu.') %
LOG.debug('Leaving create_snapshot: Snapshot: %(snapshot)s '
'Volume: %(volume)s Return code: %(rc)lu.' %
{'snapshot': snapshotname, 'volume': volumename, 'rc': rc})
return name
def delete_snapshot(self, snapshot, volume):
"""Deletes a snapshot."""
LOG.debug(_('Entering delete_snapshot.'))
LOG.debug('Entering delete_snapshot.')
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
@ -620,8 +620,8 @@ class EMCSMISCommon():
self.conn = self._get_ecom_connection()
LOG.debug(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s. '
'Finding StorageSychronization_SV_SV.')
LOG.debug('Delete Snapshot: %(snapshot)s: volume: %(volume)s. '
'Finding StorageSychronization_SV_SV.'
% {'snapshot': snapshotname,
'volume': volumename})
@ -643,11 +643,11 @@ class EMCSMISCommon():
# Delete snapshot - deletes both the target element
# and the snap session
LOG.debug(_("Delete Snapshot: Target: %(snapshot)s "
LOG.debug("Delete Snapshot: Target: %(snapshot)s "
"Source: %(volume)s. Method: "
"ModifyReplicaSynchronization: "
"Replication Service: %(service)s Operation: 19 "
"Synchronization: %(sync_name)s.")
"Synchronization: %(sync_name)s."
% {'snapshot': snapshotname,
'volume': volumename,
'service': repservice,
@ -659,8 +659,8 @@ class EMCSMISCommon():
Operation=self._getnum(19, '16'),
Synchronization=sync_name)
LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: '
'%(snapshotname)s Return code: %(rc)lu')
LOG.debug('Delete Snapshot: Volume: %(volumename)s Snapshot: '
'%(snapshotname)s Return code: %(rc)lu'
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc})
@ -721,8 +721,8 @@ class EMCSMISCommon():
'error': str(ex.args)})
break
LOG.debug(_('Leaving delete_snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu.')
LOG.debug('Leaving delete_snapshot: Volume: %(volumename)s '
'Snapshot: %(snapshotname)s Return code: %(rc)lu.'
% {'volumename': volumename,
'snapshotname': snapshotname,
'rc': rc})
@ -742,9 +742,9 @@ class EMCSMISCommon():
lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller(
storage_system, connector)
LOG.debug(_('ExposePaths: %(vol)s ConfigServicie: %(service)s '
LOG.debug('ExposePaths: %(vol)s ConfigServicie: %(service)s '
'LUNames: %(lun_name)s InitiatorPortIDs: %(initiator)s '
'DeviceAccesses: 2')
'DeviceAccesses: 2'
% {'vol': vol_instance.path,
'service': configservice,
'lun_name': lun_name,
@ -757,9 +757,9 @@ class EMCSMISCommon():
InitiatorPortIDs=initiators,
DeviceAccesses=[self._getnum(2, '16')])
else:
LOG.debug(_('ExposePaths parameter '
LOG.debug('ExposePaths parameter '
'LunMaskingSCSIProtocolController: '
'%(lunmasking)s')
'%(lunmasking)s'
% {'lunmasking': lunmask_ctrl})
rc, controller =\
self.conn.InvokeMethod('ExposePaths',
@ -772,7 +772,7 @@ class EMCSMISCommon():
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_('ExposePaths for volume %s completed successfully.')
LOG.debug('ExposePaths for volume %s completed successfully.'
% volumename)
# Unmapping method for VNX
@ -788,9 +788,9 @@ class EMCSMISCommon():
lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller_for_vol(
vol_instance, connector)
LOG.debug(_('HidePaths: %(vol)s ConfigServicie: %(service)s '
LOG.debug('HidePaths: %(vol)s ConfigServicie: %(service)s '
'LUNames: %(device_id)s LunMaskingSCSIProtocolController: '
'%(lunmasking)s')
'%(lunmasking)s'
% {'vol': vol_instance.path,
'service': configservice,
'device_id': device_id,
@ -805,7 +805,7 @@ class EMCSMISCommon():
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_('HidePaths for volume %s completed successfully.')
LOG.debug('HidePaths for volume %s completed successfully.'
% volumename)
# Mapping method for VMAX
@ -818,8 +818,8 @@ class EMCSMISCommon():
volumename = vol_instance['ElementName']
masking_group = self._find_device_masking_group()
LOG.debug(_('AddMembers: ConfigServicie: %(service)s MaskingGroup: '
'%(masking_group)s Members: %(vol)s')
LOG.debug('AddMembers: ConfigServicie: %(service)s MaskingGroup: '
'%(masking_group)s Members: %(vol)s'
% {'service': configservice,
'masking_group': masking_group,
'vol': vol_instance.path})
@ -838,7 +838,7 @@ class EMCSMISCommon():
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_('AddMembers for volume %s completed successfully.')
LOG.debug('AddMembers for volume %s completed successfully.'
% volumename)
# Unmapping method for VMAX
@ -851,8 +851,8 @@ class EMCSMISCommon():
volumename = vol_instance['ElementName']
masking_group = self._find_device_masking_group()
LOG.debug(_('RemoveMembers: ConfigServicie: %(service)s '
'MaskingGroup: %(masking_group)s Members: %(vol)s')
LOG.debug('RemoveMembers: ConfigServicie: %(service)s '
'MaskingGroup: %(masking_group)s Members: %(vol)s'
% {'service': configservice,
'masking_group': masking_group,
'vol': vol_instance.path})
@ -869,7 +869,7 @@ class EMCSMISCommon():
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_('RemoveMembers for volume %s completed successfully.')
LOG.debug('RemoveMembers for volume %s completed successfully.'
% volumename)
def _map_lun(self, volume, connector):
@ -953,7 +953,7 @@ class EMCSMISCommon():
def extend_volume(self, volume, new_size):
"""Extends an existing volume."""
LOG.debug(_('Entering extend_volume.'))
LOG.debug('Entering extend_volume.')
volumesize = int(new_size) * units.GiB
volumename = volume['name']
@ -969,8 +969,8 @@ class EMCSMISCommon():
device_id = vol_instance['DeviceID']
storage_system = vol_instance['SystemName']
LOG.debug(_('Device ID: %(deviceid)s: Storage System: '
'%(storagesystem)s')
LOG.debug('Device ID: %(deviceid)s: Storage System: '
'%(storagesystem)s'
% {'deviceid': device_id,
'storagesystem': storage_system})
@ -985,10 +985,10 @@ class EMCSMISCommon():
provisioning = self._get_provisioning(storage_type)
LOG.debug(_('Extend Volume: %(name)s Method: '
LOG.debug('Extend Volume: %(name)s Method: '
'CreateOrModifyElementFromStoragePool ConfigServicie: '
'%(service)s ElementType: %(provisioning)s Size: %(size)lu'
'Volume path: %(volumepath)s')
'Volume path: %(volumepath)s'
% {'service': configservice,
'name': volumename,
'provisioning': provisioning,
@ -1001,7 +1001,7 @@ class EMCSMISCommon():
Size=self._getnum(volumesize, '64'),
TheElement=vol_instance.path)
LOG.debug(_('Extend Volume: %(volumename)s Return code: %(rc)lu')
LOG.debug('Extend Volume: %(volumename)s Return code: %(rc)lu'
% {'volumename': volumename,
'rc': rc})
@ -1015,14 +1015,14 @@ class EMCSMISCommon():
'error': errordesc})
raise exception.VolumeBackendAPIException(data=errordesc)
LOG.debug(_('Leaving extend_volume: %(volumename)s '
'Return code: %(rc)lu ')
LOG.debug('Leaving extend_volume: %(volumename)s '
'Return code: %(rc)lu '
% {'volumename': volumename,
'rc': rc})
def update_volume_stats(self):
"""Retrieve stats info."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
self.stats['total_capacity_gb'] = 'unknown'
self.stats['free_capacity_gb'] = 'unknown'
@ -1037,7 +1037,7 @@ class EMCSMISCommon():
specs = self._get_volumetype_extraspecs(volume)
if not specs:
specs = self._get_storage_type_conffile()
LOG.debug(_("Storage Type: %s") % (specs))
LOG.debug("Storage Type: %s" % (specs))
return specs
def _get_storage_type_conffile(self, filename=None):
@ -1054,7 +1054,7 @@ class EMCSMISCommon():
storageType = storageTypes[0].toxml()
storageType = storageType.replace('<StorageType>', '')
storageType = storageType.replace('</StorageType>', '')
LOG.debug(_("Found Storage Type in config file: %s")
LOG.debug("Found Storage Type in config file: %s"
% (storageType))
specs = {}
specs[POOL] = storageType
@ -1076,10 +1076,10 @@ class EMCSMISCommon():
if views is not None and len(views) > 0:
view = views[0].toxml().replace('<MaskingView>', '')
view = view.replace('</MaskingView>', '')
LOG.debug(_("Found Masking View: %s") % (view))
LOG.debug("Found Masking View: %s" % (view))
return view
else:
LOG.debug(_("Masking View not found."))
LOG.debug("Masking View not found.")
return None
def _get_timeout(self, filename=None):
@ -1094,10 +1094,10 @@ class EMCSMISCommon():
if timeouts is not None and len(timeouts) > 0:
timeout = timeouts[0].toxml().replace('<Timeout>', '')
timeout = timeout.replace('</Timeout>', '')
LOG.debug(_("Found Timeout: %s") % (timeout))
LOG.debug("Found Timeout: %s" % (timeout))
return timeout
else:
LOG.debug(_("Timeout not specified."))
LOG.debug("Timeout not specified.")
return 10
def _get_ecom_cred(self, filename=None):
@ -1119,7 +1119,7 @@ class EMCSMISCommon():
if ecomUser is not None and ecomPasswd is not None:
return ecomUser, ecomPasswd
else:
LOG.debug(_("Ecom user not found."))
LOG.debug("Ecom user not found.")
return None
def _get_ecom_server(self, filename=None):
@ -1139,11 +1139,11 @@ class EMCSMISCommon():
ecomPort = ecomPorts[0].toxml().replace('<EcomServerPort>', '')
ecomPort = ecomPort.replace('</EcomServerPort>', '')
if ecomIp is not None and ecomPort is not None:
LOG.debug(_("Ecom IP: %(ecomIp)s Port: %(ecomPort)s"),
LOG.debug("Ecom IP: %(ecomIp)s Port: %(ecomPort)s",
{'ecomIp': ecomIp, 'ecomPort': ecomPort})
return ecomIp, ecomPort
else:
LOG.debug(_("Ecom server not found."))
LOG.debug("Ecom server not found.")
return None
def _get_ecom_connection(self, filename=None):
@ -1162,7 +1162,7 @@ class EMCSMISCommon():
for repservice in repservices:
if storage_system == repservice['SystemName']:
foundRepService = repservice
LOG.debug(_("Found Replication Service: %s")
LOG.debug("Found Replication Service: %s"
% (repservice))
break
@ -1175,7 +1175,7 @@ class EMCSMISCommon():
for configservice in configservices:
if storage_system == configservice['SystemName']:
foundConfigService = configservice
LOG.debug(_("Found Storage Configuration Service: %s")
LOG.debug("Found Storage Configuration Service: %s"
% (configservice))
break
@ -1188,7 +1188,7 @@ class EMCSMISCommon():
for configservice in configservices:
if storage_system == configservice['SystemName']:
foundConfigService = configservice
LOG.debug(_("Found Controller Configuration Service: %s")
LOG.debug("Found Controller Configuration Service: %s"
% (configservice))
break
@ -1201,7 +1201,7 @@ class EMCSMISCommon():
for configservice in configservices:
if storage_system == configservice['SystemName']:
foundConfigService = configservice
LOG.debug(_("Found Storage Hardware ID Management Service: %s")
LOG.debug("Found Storage Hardware ID Management Service: %s"
% (configservice))
break
@ -1257,7 +1257,7 @@ class EMCSMISCommon():
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug(_("Pool: %(pool)s SystemName: %(systemname)s.")
LOG.debug("Pool: %(pool)s SystemName: %(systemname)s."
% {'pool': foundPool,
'systemname': systemname})
return foundPool, systemname
@ -1274,7 +1274,7 @@ class EMCSMISCommon():
if len(idarray) > 2:
systemname = idarray[0] + '+' + idarray[1]
LOG.debug(_("Pool name: %(poolname)s System name: %(systemname)s.")
LOG.debug("Pool name: %(poolname)s System name: %(systemname)s."
% {'poolname': poolname, 'systemname': systemname})
return poolname, systemname
@ -1289,11 +1289,11 @@ class EMCSMISCommon():
foundinstance = self.conn.GetInstance(instancename)
if foundinstance is None:
LOG.debug(_("Volume %(volumename)s not found on the array.")
LOG.debug("Volume %(volumename)s not found on the array."
% {'volumename': volumename})
else:
LOG.debug(_("Volume name: %(volumename)s Volume instance: "
"%(vol_instance)s.")
LOG.debug("Volume name: %(volumename)s Volume instance: "
"%(vol_instance)s."
% {'volumename': volumename,
'vol_instance': foundinstance.path})
@ -1307,7 +1307,7 @@ class EMCSMISCommon():
snapshotname = snapshot['name']
volumename = volume['name']
LOG.debug(_("Source: %(volumename)s Target: %(snapshotname)s.")
LOG.debug("Source: %(volumename)s Target: %(snapshotname)s."
% {'volumename': volumename, 'snapshotname': snapshotname})
snapshot_instance = self._find_lun(snapshot)
@ -1319,13 +1319,13 @@ class EMCSMISCommon():
foundsyncname = self._getinstancename(classname, bindings)
if foundsyncname is None:
LOG.debug(_("Source: %(volumename)s Target: %(snapshotname)s. "
"Storage Synchronized not found. ")
LOG.debug("Source: %(volumename)s Target: %(snapshotname)s. "
"Storage Synchronized not found. "
% {'volumename': volumename,
'snapshotname': snapshotname})
else:
LOG.debug(_("Storage system: %(storage_system)s "
"Storage Synchronized instance: %(sync)s.")
LOG.debug("Storage system: %(storage_system)s "
"Storage Synchronized instance: %(sync)s."
% {'storage_system': storage_system,
'sync': foundsyncname})
# Wait for SE_StorageSynchronized_SV_SV to be fully synced
@ -1354,7 +1354,7 @@ class EMCSMISCommon():
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_("Found %(name)s: %(initiator)s.")
LOG.debug("Found %(name)s: %(initiator)s."
% {'name': name,
'initiator': foundinitiatornames})
return foundinitiatornames
@ -1414,9 +1414,9 @@ class EMCSMISCommon():
if foundCtrl is not None:
break
LOG.debug(_("LunMaskingSCSIProtocolController for storage system "
LOG.debug("LunMaskingSCSIProtocolController for storage system "
"%(storage_system)s and initiator %(initiator)s is "
"%(ctrl)s.")
"%(ctrl)s."
% {'storage_system': storage_system,
'initiator': initiators,
'ctrl': foundCtrl})
@ -1455,8 +1455,8 @@ class EMCSMISCommon():
if foundCtrl is not None:
break
LOG.debug(_("LunMaskingSCSIProtocolController for storage volume "
"%(vol)s and initiator %(initiator)s is %(ctrl)s.")
LOG.debug("LunMaskingSCSIProtocolController for storage volume "
"%(vol)s and initiator %(initiator)s is %(ctrl)s."
% {'vol': vol_instance.path,
'initiator': initiators,
'ctrl': foundCtrl})
@ -1481,8 +1481,8 @@ class EMCSMISCommon():
storage_system,
connector)
LOG.debug(_("LunMaskingSCSIProtocolController for storage system "
"%(storage)s and %(connector)s is %(ctrl)s.")
LOG.debug("LunMaskingSCSIProtocolController for storage system "
"%(storage)s and %(connector)s is %(ctrl)s."
% {'storage': storage_system,
'connector': connector,
'ctrl': ctrl})
@ -1493,8 +1493,8 @@ class EMCSMISCommon():
numVolumesMapped = len(associators)
LOG.debug(_("Found %(numVolumesMapped)d volumes on storage system "
"%(storage)s mapped to %(initiator)s.")
LOG.debug("Found %(numVolumesMapped)d volumes on storage system "
"%(storage)s mapped to %(connector)s."
% {'numVolumesMapped': numVolumesMapped,
'storage': storage_system,
'connector': connector})
@ -1528,7 +1528,7 @@ class EMCSMISCommon():
out_device_number = '%06d' % out_num_device_number
LOG.debug(_("Available device number on %(storage)s: %(device)s.")
LOG.debug("Available device number on %(storage)s: %(device)s."
% {'storage': storage_system, 'device': out_device_number})
return out_device_number
@ -1553,9 +1553,9 @@ class EMCSMISCommon():
vol_instance,
connector)
LOG.debug(_("LunMaskingSCSIProtocolController for "
LOG.debug("LunMaskingSCSIProtocolController for "
"volume %(vol)s and connector %(connector)s "
"is %(ctrl)s.")
"is %(ctrl)s."
% {'vol': vol_instance.path,
'connector': connector,
'ctrl': ctrl})
@ -1594,8 +1594,8 @@ class EMCSMISCommon():
{'volumename': volumename,
'vol_instance': vol_instance.path})
else:
LOG.debug(_("Found device number %(device)d for volume "
"%(volumename)s %(vol_instance)s.") %
LOG.debug("Found device number %(device)d for volume "
"%(volumename)s %(vol_instance)s." %
{'device': out_num_device_number,
'volumename': volumename,
'vol_instance': vol_instance.path})
@ -1604,7 +1604,7 @@ class EMCSMISCommon():
'storagesystem': storage_system,
'owningsp': sp}
LOG.debug(_("Device info: %(data)s.") % {'data': data})
LOG.debug("Device info: %(data)s." % {'data': data})
return data
@ -1626,7 +1626,7 @@ class EMCSMISCommon():
ResultClass='SE_DeviceMaskingGroup')
foundMaskingGroup = groups[0]
LOG.debug(_("Masking view: %(view)s DeviceMaskingGroup: %(masking)s.")
LOG.debug("Masking view: %(view)s DeviceMaskingGroup: %(masking)s."
% {'view': maskingview_name,
'masking': foundMaskingGroup})
@ -1648,7 +1648,7 @@ class EMCSMISCommon():
if (storage_system == storsystemname and
owningsp == sp):
foundSystem = system
LOG.debug(_("Found Storage Processor System: %s")
LOG.debug("Found Storage Processor System: %s"
% (system))
break
@ -1676,9 +1676,9 @@ class EMCSMISCommon():
if len(arr2) > 1:
foundEndpoints.append(arr2[0])
LOG.debug(_("iSCSIProtocolEndpoint for storage system "
LOG.debug("iSCSIProtocolEndpoint for storage system "
"%(storage_system)s and SP %(sp)s is "
"%(endpoint)s.")
"%(endpoint)s."
% {'storage_system': storage_system,
'sp': owningsp,
'endpoint': foundEndpoints})
@ -1723,8 +1723,8 @@ class EMCSMISCommon():
hardwareids = self._find_storage_hardwareids(connector)
LOG.debug(_('EMCGetTargetEndpoints: Service: %(service)s '
'Storage HardwareIDs: %(hardwareids)s.')
LOG.debug('EMCGetTargetEndpoints: Service: %(service)s '
'Storage HardwareIDs: %(hardwareids)s.'
% {'service': configservice,
'hardwareids': hardwareids})
@ -1745,9 +1745,9 @@ class EMCSMISCommon():
# Add target wwn to the list if it is not already there
if not any(d == wwn for d in target_wwns):
target_wwns.append(wwn)
LOG.debug(_('Add target WWN: %s.') % wwn)
LOG.debug('Add target WWN: %s.' % wwn)
LOG.debug(_('Target WWNs: %s.') % target_wwns)
LOG.debug('Target WWNs: %s.' % target_wwns)
return target_wwns
@ -1763,8 +1763,8 @@ class EMCSMISCommon():
if wwpn.lower() == storid.lower():
foundInstances.append(hardwareid.path)
LOG.debug(_("Storage Hardware IDs for %(wwpns)s is "
"%(foundInstances)s.")
LOG.debug("Storage Hardware IDs for %(wwpns)s is "
"%(foundInstances)s."
% {'wwpns': wwpns,
'foundInstances': foundInstances})

@ -163,7 +163,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug(_('Return FC data: %(data)s.')
LOG.debug('Return FC data: %(data)s.'
% {'data': data})
return data
@ -181,7 +181,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
'data': {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug(_('Return FC data: %(data)s.')
LOG.debug('Return FC data: %(data)s.'
% {'data': data})
return data
@ -215,7 +215,7 @@ class EMCSMISFCDriver(driver.FibreChannelDriver):
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCSMISFCDriver'

@ -191,7 +191,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
" for volume %s") %
(volume['name']))
LOG.debug(_("ISCSI Discovery: Found %s") % (location))
LOG.debug("ISCSI Discovery: Found %s" % (location))
properties['target_discovered'] = True
device_info = self.common.find_device_number(volume, connector)
@ -226,7 +226,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
break
for endpoint in endpoints:
if properties['target_iqn'] == endpoint:
LOG.debug(_("Found iSCSI endpoint: %s") % endpoint)
LOG.debug("Found iSCSI endpoint: %s" % endpoint)
foundEndpoint = True
break
if foundEndpoint:
@ -242,7 +242,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
properties['volume_id'] = volume['id']
LOG.debug(_("ISCSI properties: %s") % (properties))
LOG.debug("ISCSI properties: %s" % (properties))
auth = volume['provider_auth']
if auth:
@ -274,7 +274,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCSMISISCSIDriver'

@ -122,7 +122,7 @@ class EMCVnxCli(object):
def create_volume(self, volume):
"""Creates a EMC volume."""
LOG.debug(_('Entering create_volume.'))
LOG.debug('Entering create_volume.')
volumesize = volume['size']
volumename = volume['name']
@ -134,7 +134,7 @@ class EMCVnxCli(object):
thinness = self._get_provisioning_by_volume(volume)
# executing CLI command to create volume
LOG.debug(_('Create Volume: %(volumename)s')
LOG.debug('Create Volume: %(volumename)s'
% {'volumename': volumename})
lun_create = ('lun', '-create',
@ -144,7 +144,7 @@ class EMCVnxCli(object):
'-poolName', self.pool_name,
'-name', volumename)
out, rc = self._cli_execute(*lun_create)
LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)s')
LOG.debug('Create Volume: %(volumename)s Return code: %(rc)s'
% {'volumename': volumename,
'rc': rc})
if rc == 4:
@ -175,7 +175,7 @@ class EMCVnxCli(object):
def delete_volume(self, volume):
"""Deletes an EMC volume."""
LOG.debug(_('Entering delete_volume.'))
LOG.debug('Entering delete_volume.')
volumename = volume['name']
# defining CLI command
lun_destroy = ('lun', '-destroy',
@ -184,7 +184,7 @@ class EMCVnxCli(object):
# executing CLI command to delete volume
out, rc = self._cli_execute(*lun_destroy)
LOG.debug(_('Delete Volume: %(volumename)s Output: %(out)s')
LOG.debug('Delete Volume: %(volumename)s Output: %(out)s'
% {'volumename': volumename, 'out': out})
if rc not in (0, 9):
msg = (_('Failed to destroy %s'), volumename)
@ -194,7 +194,7 @@ class EMCVnxCli(object):
def extend_volume(self, volume, new_size):
"""Extends an EMC volume."""
LOG.debug(_('Entering extend_volume.'))
LOG.debug('Entering extend_volume.')
volumename = volume['name']
# defining CLI command
@ -207,7 +207,7 @@ class EMCVnxCli(object):
# executing CLI command to extend volume
out, rc = self._cli_execute(*lun_expand)
LOG.debug(_('Extend Volume: %(volumename)s Output: %(out)s')
LOG.debug('Extend Volume: %(volumename)s Output: %(out)s'
% {'volumename': volumename,
'out': out})
if rc == 97:
@ -223,7 +223,7 @@ class EMCVnxCli(object):
def update_volume_status(self):
"""Retrieve status info."""
LOG.debug(_("Updating volume status"))
LOG.debug("Updating volume status")
poolname = self.pool_name
pool_list = ('storagepool', '-list',
@ -248,8 +248,8 @@ class EMCVnxCli(object):
device_id = self._find_lun_id(volumename)
LOG.debug(_('create_export: Volume: %(volume)s Device ID: '
'%(device_id)s')
LOG.debug('create_export: Volume: %(volume)s Device ID: '
'%(device_id)s'
% {'volume': volumename,
'device_id': device_id})
@ -272,7 +272,7 @@ class EMCVnxCli(object):
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(_('Entering create_snapshot.'))
LOG.debug('Entering create_snapshot.')
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
@ -289,7 +289,7 @@ class EMCVnxCli(object):
# executing CLI command to create snapshot
out, rc = self._cli_execute(*snap_create)
LOG.debug(_('Create Snapshot: %(snapshotname)s Unity: %(out)s')
LOG.debug('Create Snapshot: %(snapshotname)s Unity: %(out)s'
% {'snapshotname': snapshotname,
'out': out})
if rc != 0:
@ -299,7 +299,7 @@ class EMCVnxCli(object):
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(_('Entering delete_snapshot.'))
LOG.debug('Entering delete_snapshot.')
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
@ -315,8 +315,8 @@ class EMCVnxCli(object):
# executing CLI command
out, rc = self._cli_execute(*snap_destroy)
LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: '
'%(snapshotname)s Output: %(out)s')
LOG.debug('Delete Snapshot: Volume: %(volumename)s Snapshot: '
'%(snapshotname)s Output: %(out)s'
% {'volumename': volumename,
'snapshotname': snapshotname,
'out': out})
@ -345,7 +345,7 @@ class EMCVnxCli(object):
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(_('Entering create_volume_from_snapshot.'))
LOG.debug('Entering create_volume_from_snapshot.')
snapshotname = snapshot['name']
source_volume_name = snapshot['volume_name']
@ -369,8 +369,8 @@ class EMCVnxCli(object):
# executing CLI command
out, rc = self._cli_execute(*lun_create)
LOG.debug(_('Create temporary Volume: %(volumename)s '
'Output : %(out)s')
LOG.debug('Create temporary Volume: %(volumename)s '
'Output : %(out)s'
% {'volumename': destvolumename, 'out': out})
if rc != 0:
@ -385,8 +385,8 @@ class EMCVnxCli(object):
# executing CLI command
out, rc = self._cli_execute(*smp_create)
LOG.debug(_('Create mount point : Volume: %(volumename)s '
'Source Volume: %(sourcevolumename)s Output: %(out)s')
LOG.debug('Create mount point : Volume: %(volumename)s '
'Source Volume: %(sourcevolumename)s Output: %(out)s'
% {'volumename': volumename,
'sourcevolumename': source_volume_name,
'out': out})
@ -403,8 +403,8 @@ class EMCVnxCli(object):
# executing CLI command
out, rc = self._cli_execute(*lun_attach)
LOG.debug(_('Attaching mount point Volume: %(volumename)s '
'with Snapshot: %(snapshotname)s Output: %(out)s')
LOG.debug('Attaching mount point Volume: %(volumename)s '
'with Snapshot: %(snapshotname)s Output: %(out)s'
% {'volumename': volumename,
'snapshotname': snapshotname,
'out': out})
@ -428,8 +428,8 @@ class EMCVnxCli(object):
# executing CLI command
out, rc = self._cli_execute(*migrate_start)
LOG.debug(_('Migrate Mount Point Volume: %(volumename)s '
'Output : %(out)s')
LOG.debug('Migrate Mount Point Volume: %(volumename)s '
'Output : %(out)s'
% {'volumename': volumename,
'out': out})
@ -496,13 +496,13 @@ class EMCVnxCli(object):
out, rc = self._cli_execute(*sg_list)
if rc != 0:
LOG.debug(_('creating new storage group %s'), storage_groupname)
LOG.debug('creating new storage group %s', storage_groupname)
sg_create = ('storagegroup', '-create',
'-gname', storage_groupname)
out, rc = self._cli_execute(*sg_create)
LOG.debug(_('Create new storage group : %(storage_groupname)s, '
'Output: %(out)s')
LOG.debug('Create new storage group : %(storage_groupname)s, '
'Output: %(out)s'
% {'storage_groupname': storage_groupname,
'out': out})
@ -518,8 +518,8 @@ class EMCVnxCli(object):
'-o')
out, rc = self._cli_execute(*connect_host)
LOG.debug(_('Connect storage group : %(storage_groupname)s ,'
'To Host : %(hostname)s, Output : %(out)s')
LOG.debug('Connect storage group : %(storage_groupname)s ,'
'To Host : %(hostname)s, Output : %(out)s'
% {'storage_groupname': storage_groupname,
'hostname': hostname,
'out': out})
@ -558,7 +558,7 @@ class EMCVnxCli(object):
for lun in lun_map.iterkeys():
if lun == int(allocated_lun_id):
host_lun_id = lun_map[lun]
LOG.debug(_('Host Lun Id : %s') % (host_lun_id))
LOG.debug('Host Lun Id : %s' % (host_lun_id))
break
# finding the owner SP for the LUN
@ -567,7 +567,7 @@ class EMCVnxCli(object):
if rc == 0:
output = out.split('\n')
owner_sp = output[2].split('Current Owner: SP ')[1]
LOG.debug(_('Owner SP : %s') % (owner_sp))
LOG.debug('Owner SP : %s' % (owner_sp))
device = {
'hostlunid': host_lun_id,
@ -625,8 +625,8 @@ class EMCVnxCli(object):
'-hlu', host_lun_id,
'-alu', allocated_lun_id)
out, rc = self._cli_execute(*addhlu)
LOG.debug(_('Add ALU %(alu)s to SG %(sg)s as %(hlu)s. '
'Output: %(out)s')
LOG.debug('Add ALU %(alu)s to SG %(sg)s as %(hlu)s. '
'Output: %(out)s'
% {'alu': allocated_lun_id,
'sg': storage_groupname,
'hlu': host_lun_id,
@ -655,7 +655,7 @@ class EMCVnxCli(object):
out, rc = self._cli_execute(*removehlu)
LOG.debug(_('Remove %(hlu)s from SG %(sg)s. Output: %(out)s')
LOG.debug('Remove %(hlu)s from SG %(sg)s. Output: %(out)s'
% {'hlu': device_number,
'sg': storage_groupname,
'out': out})
@ -700,8 +700,8 @@ class EMCVnxCli(object):
port_wwn = port_info[2].split('Port WWN:')[1].strip()
initiator_address.append(port_wwn)
LOG.debug(_('WWNs found for SP %(devicesp)s '
'are: %(initiator_address)s')
LOG.debug('WWNs found for SP %(devicesp)s '
'are: %(initiator_address)s'
% {'devicesp': device_sp,
'initiator_address': initiator_address})

@ -136,7 +136,7 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
while not out.endswith(ending):
out += chan.recv(102400)
LOG.debug(_("CLI output\n%s"), out)
LOG.debug("CLI output\n%s", out)
return out.splitlines()
def _get_prefixed_value(self, lines, prefix):
@ -151,15 +151,15 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
chan = transport.open_session()
chan.invoke_shell()
LOG.debug(_("Reading CLI MOTD"))
LOG.debug("Reading CLI MOTD")
self._get_output(chan)
cmd = 'stty columns 255'
LOG.debug(_("Setting CLI terminal width: '%s'"), cmd)
LOG.debug("Setting CLI terminal width: '%s'", cmd)
chan.send(cmd + '\r')
out = self._get_output(chan)
LOG.debug(_("Sending CLI command: '%s'"), command)
LOG.debug("Sending CLI command: '%s'", command)
chan.send(command + '\r')
out = self._get_output(chan)
@ -244,7 +244,7 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
def _update_volume_stats(self):
"""Retrieve stats info from eqlx group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
backend_name = "eqlx"
if self.configuration:
@ -279,8 +279,8 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
if err.stdout.find('does not exist.\n') > -1:
LOG.debug(_('Volume %s does not exist, '
'it may have already been deleted'),
LOG.debug('Volume %s does not exist, '
'it may have already been deleted',
volume['name'])
raise exception.VolumeNotFound(volume_id=volume['id'])

@ -251,8 +251,8 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
qcow2.
"""
LOG.debug(_("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s")
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s"
% {'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
@ -270,7 +270,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
path_to_new_vol = self._local_path_volume(volume)
LOG.debug(_("will copy from snapshot at %s") % path_to_snap_img)
LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
if self.configuration.glusterfs_qcow2_volumes:
out_format = 'qcow2'
@ -421,7 +421,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
context,
snapshot['volume_id'],
connection_info)
LOG.debug(_('nova call result: %s') % result)
LOG.debug('nova call result: %s' % result)
except Exception as e:
LOG.error(_('Call to Nova to create snapshot failed'))
LOG.exception(e)
@ -449,7 +449,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
'while creating snapshot.')
raise exception.GlusterfsException(msg)
LOG.debug(_('Status of snapshot %(id)s is now %(status)s') % {
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
'id': snapshot['id'],
'status': s['status']
})
@ -474,8 +474,8 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
return
LOG.debug(_('create snapshot: %s') % snapshot)
LOG.debug(_('volume id: %s') % snapshot['volume_id'])
LOG.debug('create snapshot: %s' % snapshot)
LOG.debug('volume id: %s' % snapshot['volume_id'])
path_to_disk = self._local_path_volume(snapshot['volume'])
self._create_snapshot_offline(snapshot, path_to_disk)
@ -582,7 +582,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
"""
LOG.debug(_('deleting snapshot %s') % snapshot['id'])
LOG.debug('deleting snapshot %s' % snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
@ -607,7 +607,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug(_('snapshot_file for this snap is %s') % snapshot_file)
LOG.debug('snapshot_file for this snap is %s' % snapshot_file)
snapshot_path = '%s/%s' % (self._local_volume_dir(snapshot['volume']),
snapshot_file)
@ -645,7 +645,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
break
if base_id is None:
# This means we are deleting the oldest snapshot
msg = _('No %(base_id)s found for %(file)s') % {
msg = 'No %(base_id)s found for %(file)s' % {
'base_id': 'base_id',
'file': snapshot_file}
LOG.debug(msg)
@ -721,7 +721,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
higher_file),
None)
if highest_file is None:
msg = _('No file depends on %s.') % higher_file
msg = 'No file depends on %s.' % higher_file
LOG.debug(msg)
# Committing higher_file into snapshot_file
@ -816,8 +816,8 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
# Nova tasks completed successfully
break
else:
msg = _('status of snapshot %s is '
'still "deleting"... waiting') % snapshot['id']
msg = ('status of snapshot %s is '
'still "deleting"... waiting') % snapshot['id']
LOG.debug(msg)
time.sleep(increment)
seconds_elapsed += increment
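
The GlusterFS hunks above also expose a limitation of a prefix-based check: a translated string first assigned to a msg variable and only then passed to LOG.debug(msg) never begins with LOG.debug(_(, so N319 cannot flag it. Such sites are converted by hand in this change. A stand-alone illustration, with a stub translation marker:

```python
import logging

LOG = logging.getLogger(__name__)
_ = lambda s: s  # stand-in translation marker, for illustration only
higher_file = 'volume-abc.snap1'

# Before: a startswith('LOG.debug(_(') check cannot flag this line...
msg = _('No file depends on %s.') % higher_file
LOG.debug(msg)

# ...so the change converts such call sites manually:
msg = 'No file depends on %s.' % higher_file
LOG.debug(msg)
```
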
@ -1011,7 +1011,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug(_("creating new volume at %s") % volume_path)
LOG.debug("creating new volume at %s" % volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
@ -1042,7 +1042,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
except Exception as exc:
LOG.error(_('Exception during mounting %s') % (exc,))
LOG.debug(_('Available shares: %s') % self._mounted_shares)
LOG.debug('Available shares: %s' % self._mounted_shares)
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.

@ -68,7 +68,7 @@ def _loc_info(loc):
def _do_lu_range_check(start, end, maxlun):
"""Validate array allocation range."""
LOG.debug(_("Range: start LU: %(start)s, end LU: %(end)s")
LOG.debug("Range: start LU: %(start)s, end LU: %(end)s"
% {'start': start,
'end': end})
if int(start) < 0:
@ -82,7 +82,7 @@ def _do_lu_range_check(start, end, maxlun):
raise exception.InvalidInput(reason=msg)
if int(end) > int(maxlun):
end = maxlun
LOG.debug(_("setting LU upper (end) limit to %s") % maxlun)
LOG.debug("setting LU upper (end) limit to %s" % maxlun)
return (start, end)
@ -325,7 +325,7 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (int(volume['size']) * 1024))
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(sz)s MB is created.")
LOG.debug("LUN %(lun)s of size %(sz)s MB is created."
% {'lun': lun,
'sz': sz})
return {'provider_location': lun}
@ -353,7 +353,7 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(size)s MB is cloned.")
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
% {'lun': lun,
'size': size})
return {'provider_location': lun}
@ -370,7 +370,7 @@ class HUSDriver(driver.ISCSIDriver):
self.config['password'],
arid, lun,
'%s' % (new_size * 1024))
LOG.debug(_("LUN %(lun)s extended to %(size)s GB.")
LOG.debug("LUN %(lun)s extended to %(size)s GB."
% {'lun': lun,
'size': new_size})
@ -393,7 +393,7 @@ class HUSDriver(driver.ISCSIDriver):
arid, lun, ctl, port, iqn,
'')
name = self.hus_name
LOG.debug(_("delete lun %(lun)s on %(name)s")
LOG.debug("delete lun %(lun)s on %(name)s"
% {'lun': lun,
'name': name})
self.bend.delete_lu(self.config['hus_cmd'],
@ -478,7 +478,7 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(sz)s MB is created from snapshot.")
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
% {'lun': lun,
'sz': sz})
return {'provider_location': lun}
@ -501,7 +501,7 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(size)s MB is created as snapshot.")
LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot."
% {'lun': lun,
'size': size})
return {'provider_location': lun}
@ -520,7 +520,7 @@ class HUSDriver(driver.ISCSIDriver):
self.config['username'],
self.config['password'],
arid, lun)
LOG.debug(_("LUN %s is deleted.") % lun)
LOG.debug("LUN %s is deleted." % lun)
return
@utils.synchronized('hds_hus', external=True)


@ -177,7 +177,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
conf[ip]['ctl'] = ctl
conf[ip]['port'] = port
conf[ip]['iscsi_port'] = ipp
msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
msg = ('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
LOG.debug(msg
% {'ip': ip,
'ipp': ipp,
@ -285,7 +285,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
for line in out.split('\n'):
if 'HDP' in line:
(hdp, size, _ign, used) = line.split()[1:5] # in MB
LOG.debug(_("stats: looking for: %s") % hdp)
LOG.debug("stats: looking for: %s", hdp)
if int(hdp) >= units.KiB: # HNAS fsid
hdp = line.split()[11]
if hdp in self.config['hdp'].keys():
@ -404,7 +404,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
"""
name = volume['name']
LOG.debug(_("create_export %(name)s") % {'name': name})
LOG.debug("create_export %(name)s" % {'name': name})
pass
@ -416,7 +416,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
provider = volume['provider_location']
name = volume['name']
LOG.debug(_("remove_export provider %(provider)s on %(name)s")
LOG.debug("remove_export provider %(provider)s on %(name)s"
% {'provider': provider,
'name': name})
@ -471,7 +471,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(size)s MB is cloned.")
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
% {'lun': lun,
'size': size})
return {'provider_location': lun}
@ -520,7 +520,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
name = self.hnas_name
LOG.debug(_("delete lun %(lun)s on %(name)s")
LOG.debug("delete lun %(lun)s on %(name)s"
% {'lun': lun,
'name': name})
@ -628,7 +628,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(sz)s MB is created from snapshot.")
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
% {'lun': lun, 'sz': sz})
return {'provider_location': lun}
@ -652,7 +652,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug(_("LUN %(lun)s of size %(size)s MB is created.")
LOG.debug("LUN %(lun)s of size %(size)s MB is created."
% {'lun': lun, 'size': size})
return {'provider_location': lun}
@ -687,7 +687,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
self.config['password'],
hdp, lun)
LOG.debug(_("LUN %s is deleted.") % lun)
LOG.debug("LUN %s is deleted.", lun)
return
def get_volume_stats(self, refresh=False):
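A few of the hunks also replace the eager % interpolation with logging's deferred argument form (for example the "stats: looking for: %s" and "LUN %s is deleted." changes above). The two forms log the same text, but only the comma form skips the string formatting when debug output is filtered out, as this sketch with the stdlib logger shows:

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out
LOG = logging.getLogger(__name__)

lun = '0.42'  # hypothetical value

# Eager: the message is built with % even though the record is discarded.
LOG.debug("LUN %s is deleted." % lun)

# Deferred: logging formats the message only if the record is emitted,
# so the interpolation cost disappears when debug logging is off.
LOG.debug("LUN %s is deleted.", lun)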


@ -207,7 +207,7 @@ class HDSNFSDriver(nfs.NfsDriver):
path = self._get_volume_path(nfs_mount, volume['name'])
# Resize the image file on share to new size.
LOG.debug(_('Checking file for resize'))
LOG.debug('Checking file for resize')
if self._is_file_size_equal(path, new_size):
return
@ -236,7 +236,7 @@ class HDSNFSDriver(nfs.NfsDriver):
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(_('create_volume_from %s') % volume)
LOG.debug('create_volume_from %s', volume)
vol_size = volume['size']
snap_size = snapshot['volume_size']
@ -263,7 +263,7 @@ class HDSNFSDriver(nfs.NfsDriver):
snapshot['name'],
snapshot['volume_id'])
share = self._get_volume_location(snapshot['volume_id'])
LOG.debug(_('Share: %s'), share)
LOG.debug('Share: %s', share)
# returns the mount point (not path)
return {'provider_location': share}


@ -62,8 +62,8 @@ class HuaweiVolumeDriver(object):
conf_file = self.configuration.cinder_huawei_conf_file
(product, protocol) = self._get_conf_info(conf_file)
LOG.debug(_('_instantiate_driver: Loading %(protocol)s driver for '
'Huawei OceanStor %(product)s series storage arrays.')
LOG.debug('_instantiate_driver: Loading %(protocol)s driver for '
'Huawei OceanStor %(product)s series storage arrays.'
% {'protocol': protocol,
'product': product})


@ -71,8 +71,8 @@ class HuaweiDoradoFCDriver(huawei_t.HuaweiTFCDriver):
def initialize_connection(self, volume, connector):
"""Create FC connection between a volume and a host."""
LOG.debug(_('initialize_connection: volume name: %(vol)s '
'host: %(host)s initiator: %(wwn)s')
LOG.debug('initialize_connection: volume name: %(vol)s '
'host: %(host)s initiator: %(wwn)s'
% {'vol': volume['name'],
'host': connector['host'],
'wwn': connector['wwpns']})
@ -89,7 +89,7 @@ class HuaweiDoradoFCDriver(huawei_t.HuaweiTFCDriver):
fc_port_details = self._get_host_port_details(host_id)
tgt_wwns = self._get_tgt_fc_port_wwns(fc_port_details)
LOG.debug(_('initialize_connection: Target FC ports WWNS: %s')
LOG.debug('initialize_connection: Target FC ports WWNS: %s'
% tgt_wwns)
# Finally, map the volume to the host.

View File

@ -98,8 +98,8 @@ class HuaweiTISCSIDriver(driver.ISCSIDriver):
def initialize_connection(self, volume, connector):
"""Map a volume to a host and return target iSCSI information."""
LOG.debug(_('initialize_connection: volume name: %(vol)s, '
'host: %(host)s, initiator: %(ini)s')
LOG.debug('initialize_connection: volume name: %(vol)s, '
'host: %(host)s, initiator: %(ini)s'
% {'vol': volume['name'],
'host': connector['host'],
'ini': connector['initiator']})
@ -203,7 +203,7 @@ class HuaweiTISCSIDriver(driver.ISCSIDriver):
"""
LOG.debug(_('_get_tgt_iqn: iSCSI IP is %s.') % port_ip)
LOG.debug('_get_tgt_iqn: iSCSI IP is %s.' % port_ip)
cli_cmd = 'showiscsitgtname'
out = self.common._execute_cli(cli_cmd)
@ -231,7 +231,7 @@ class HuaweiTISCSIDriver(driver.ISCSIDriver):
iqn = iqn_prefix + ':' + iqn_suffix + ':' + port_info[3]
LOG.debug(_('_get_tgt_iqn: iSCSI target iqn is %s.') % iqn)
LOG.debug('_get_tgt_iqn: iSCSI target iqn is %s.' % iqn)
return (iqn, port_info[0])
@ -320,8 +320,8 @@ class HuaweiTISCSIDriver(driver.ISCSIDriver):
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate the map."""
LOG.debug(_('terminate_connection: volume: %(vol)s, host: %(host)s, '
'connector: %(initiator)s')
LOG.debug('terminate_connection: volume: %(vol)s, host: %(host)s, '
'connector: %(initiator)s'
% {'vol': volume['name'],
'host': connector['host'],
'initiator': connector['initiator']})
@ -440,8 +440,8 @@ class HuaweiTFCDriver(driver.FibreChannelDriver):
def initialize_connection(self, volume, connector):
"""Create FC connection between a volume and a host."""
LOG.debug(_('initialize_connection: volume name: %(vol)s, '
'host: %(host)s, initiator: %(wwn)s')
LOG.debug('initialize_connection: volume name: %(vol)s, '
'host: %(host)s, initiator: %(wwn)s'
% {'vol': volume['name'],
'host': connector['host'],
'wwn': connector['wwpns']})
@ -458,7 +458,7 @@ class HuaweiTFCDriver(driver.FibreChannelDriver):
fc_port_details = self._get_host_port_details(host_id)
tgt_wwns = self._get_tgt_fc_port_wwns(fc_port_details)
LOG.debug(_('initialize_connection: Target FC ports WWNS: %s')
LOG.debug('initialize_connection: Target FC ports WWNS: %s'
% tgt_wwns)
# Finally, map the volume to the host.
@ -549,8 +549,8 @@ class HuaweiTFCDriver(driver.FibreChannelDriver):
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate the map."""
LOG.debug(_('terminate_connection: volume: %(vol)s, host: %(host)s, '
'connector: %(initiator)s')
LOG.debug('terminate_connection: volume: %(vol)s, host: %(host)s, '
'connector: %(initiator)s'
% {'vol': volume['name'],
'host': connector['host'],
'initiator': connector['initiator']})


@ -125,7 +125,7 @@ def get_conf_host_os_type(host_ip, config):
if not host_os:
host_os = os_type['Linux'] # default os type
LOG.debug(_('_get_host_os_type: Host %(ip)s OS type is %(os)s.')
LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.'
% {'ip': host_ip, 'os': host_os})
return host_os


@ -56,8 +56,8 @@ class HVSCommon():
Convert response into Python Object and return it.
"""
LOG.debug(_('HVS Request URL: %(url)s') % {'url': url})
LOG.debug(_('HVS Request Data: %(data)s') % {'data': data})
LOG.debug('HVS Request URL: %(url)s' % {'url': url})
LOG.debug('HVS Request Data: %(data)s' % {'data': data})
headers = {"Connection": "keep-alive",
"Content-Type": "application/json"}
@ -70,7 +70,7 @@ class HVSCommon():
if method:
req.get_method = lambda: method
res = urllib2.urlopen(req).read().decode("utf-8")
LOG.debug(_('HVS Response Data: %(res)s') % {'res': res})
LOG.debug('HVS Response Data: %(res)s' % {'res': res})
except Exception as err:
err_msg = _('Bad response from server: %s') % err
LOG.error(err_msg)
@ -324,8 +324,8 @@ class HVSCommon():
snapshot_name = self._encode_name(snapshot['id'])
volume_name = self._encode_name(snapshot['volume_id'])
LOG.debug(_('create_snapshot:snapshot name:%(snapshot)s, '
'volume name:%(volume)s.')
LOG.debug('create_snapshot:snapshot name:%(snapshot)s, '
'volume name:%(volume)s.'
% {'snapshot': snapshot_name,
'volume': volume_name})
@ -351,8 +351,8 @@ class HVSCommon():
snapshot_name = self._encode_name(snapshot['id'])
volume_name = self._encode_name(snapshot['volume_id'])
LOG.debug(_('_stop_snapshot:snapshot name:%(snapshot)s, '
'volume name:%(volume)s.')
LOG.debug('_stop_snapshot:snapshot name:%(snapshot)s, '
'volume name:%(volume)s.'
% {'snapshot': snapshot_name,
'volume': volume_name})
@ -471,8 +471,8 @@ class HVSCommon():
lun_id = self._get_volume_by_name(volume_name)
view_id = self._find_mapping_view(volume_name)
LOG.debug(_('_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s'
'view_id: %(view_id)s')
LOG.debug('_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s'
'view_id: %(view_id)s'
% {'lun_group': lungroup_id,
'view_id': view_id})
@ -508,8 +508,8 @@ class HVSCommon():
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
LOG.debug(_('initiator name:%(initiator_name)s, '
'volume name:%(volume)s.')
LOG.debug('initiator name:%(initiator_name)s, '
'volume name:%(volume)s.'
% {'initiator_name': initiator_name,
'volume': volume_name})
@ -524,7 +524,7 @@ class HVSCommon():
lun_id = self._mapping_hostgroup_and_lungroup(volume_name,
hostgroup_id, hostid)
hostlunid = self._find_host_lun_id(hostid, lun_id)
LOG.debug(_("host lun id is %s") % hostlunid)
LOG.debug("host lun id is %s" % hostlunid)
# Return iSCSI properties.
properties = {}
@ -540,8 +540,8 @@ class HVSCommon():
wwns = connector['wwpns']
volume_name = self._encode_name(volume['id'])
LOG.debug(_('initiator name:%(initiator_name)s, '
'volume name:%(volume)s.')
LOG.debug('initiator name:%(initiator_name)s, '
'volume name:%(volume)s.'
% {'initiator_name': wwns,
'volume': volume_name})
@ -550,7 +550,7 @@ class HVSCommon():
connector['ip'])
free_wwns = self._get_connected_free_wwns()
LOG.debug(_("the free wwns %s") % free_wwns)
LOG.debug("the free wwns %s" % free_wwns)
for wwn in wwns:
if wwn in free_wwns:
self._add_fc_port_to_host(hostid, wwn)
@ -571,7 +571,7 @@ class HVSCommon():
properties['target_wwn'] = tgt_port_wwns
properties['target_lun'] = int(host_lun_id)
properties['volume_id'] = volume['id']
LOG.debug(_("the fc server properties is:%s") % properties)
LOG.debug("the fc server properties is:%s" % properties)
return {'driver_volume_type': 'fibre_channel',
'data': properties}
@ -871,8 +871,8 @@ class HVSCommon():
volume_name = self._encode_name(volume['id'])
host_name = connector['host']
LOG.debug(_('terminate_connection:volume name: %(volume)s, '
'initiator name: %(ini)s.')
LOG.debug('terminate_connection:volume name: %(volume)s, '
'initiator name: %(ini)s.'
% {'volume': volume_name,
'ini': initiator_name})
@ -967,8 +967,8 @@ class HVSCommon():
LOG.error(err_msg)
raise exception.CinderException(err_msg)
else:
LOG.debug(_('Use default prefetch fetchtype. '
'Prefetch fetchtype:Intelligent.'))
LOG.debug('Use default prefetch fetchtype. '
'Prefetch fetchtype:Intelligent.')
return lunsetinfo
@ -1080,7 +1080,7 @@ class HVSCommon():
def _get_tgt_iqn(self, iscsiip):
"""Get target iSCSI iqn."""
LOG.debug(_('_get_tgt_iqn: iSCSI IP is %s.') % iscsiip)
LOG.debug('_get_tgt_iqn: iSCSI IP is %s.' % iscsiip)
ip_info = self._get_iscsi_port_info(iscsiip)
iqn_prefix = self._get_iscsi_tgt_port()
@ -1098,7 +1098,7 @@ class HVSCommon():
iqn_suffix = iqn_suffix[i:]
break
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
LOG.debug(_('_get_tgt_iqn: iSCSI target iqn is %s') % iqn)
LOG.debug('_get_tgt_iqn: iSCSI target iqn is %s' % iqn)
return iqn
def _get_fc_target_wwpns(self, wwn):
@ -1121,8 +1121,8 @@ class HVSCommon():
def _parse_volume_type(self, volume):
type_id = volume['volume_type_id']
params = self._get_lun_conf_params()
LOG.debug(_('_parse_volume_type: type id: %(type_id)s '
'config parameter is: %(params)s')
LOG.debug('_parse_volume_type: type id: %(type_id)s '
'config parameter is: %(params)s'
% {'type_id': type_id,
'params': params})
@ -1159,7 +1159,7 @@ class HVSCommon():
'and make it consistent with the configuration '
'file %(conf)s.') % {'key': key, 'conf': conf})
LOG.debug(_("The config parameters are: %s") % params)
LOG.debug("The config parameters are: %s" % params)
return params
def update_volume_stats(self, refresh=False):


@ -98,7 +98,7 @@ class TseriesCommon():
def do_setup(self, context):
"""Check config file."""
LOG.debug(_('do_setup'))
LOG.debug('do_setup')
self._check_conf_file()
self.login_info = self._get_login_info()
@ -221,7 +221,7 @@ class TseriesCommon():
"""Create a new volume."""
volume_name = self._name_translate(volume['name'])
LOG.debug(_('create_volume: volume name: %s') % volume_name)
LOG.debug('create_volume: volume name: %s' % volume_name)
self._update_login_info()
if int(volume['size']) == 0:
@ -239,8 +239,8 @@ class TseriesCommon():
"""
newname = VOL_AND_SNAP_NAME_PREFIX + str(hash(name))
LOG.debug(_('_name_translate: Name in cinder: %(old)s, new name in '
'storage system: %(new)s') % {'old': name, 'new': newname})
LOG.debug('_name_translate: Name in cinder: %(old)s, new name in '
'storage system: %(new)s' % {'old': name, 'new': newname})
return newname
@ -392,8 +392,8 @@ class TseriesCommon():
elif conf_params['PrefetchType'] == '2':
conf_params['PrefetchTimes'] = prefetch.attrib['Value'].strip()
else:
LOG.debug(_('_parse_conf_lun_params: Use default prefetch type. '
'Prefetch type: Intelligent'))
LOG.debug('_parse_conf_lun_params: Use default prefetch type. '
'Prefetch type: Intelligent')
pools_conf = root.findall('LUN/StoragePool')
for pool in pools_conf:
@ -431,7 +431,7 @@ class TseriesCommon():
"""
LOG.debug(_('CLI command: %s') % cmd)
LOG.debug('CLI command: %s' % cmd)
connect_times = 1
ip0 = self.login_info['ControllerIP0']
ip1 = self.login_info['ControllerIP1']
@ -506,7 +506,7 @@ class TseriesCommon():
def delete_volume(self, volume):
volume_name = self._name_translate(volume['name'])
LOG.debug(_('delete_volume: volume name: %s') % volume_name)
LOG.debug('delete_volume: volume name: %s' % volume_name)
self._update_login_info()
volume_id = volume.get('provider_location', None)
@ -565,8 +565,8 @@ class TseriesCommon():
snapshot_name = self._name_translate(snapshot['name'])
volume_name = self._name_translate(volume['name'])
LOG.debug(_('create_volume_from_snapshot: snapshot '
'name: %(snapshot)s, volume name: %(volume)s')
LOG.debug('create_volume_from_snapshot: snapshot '
'name: %(snapshot)s, volume name: %(volume)s'
% {'snapshot': snapshot_name,
'volume': volume_name})
@ -683,9 +683,9 @@ class TseriesCommon():
src_vol_name = self._name_translate(src_volume['name'])
tgt_vol_name = self._name_translate(tgt_volume['name'])
LOG.debug(_('create_cloned_volume: src volume: %(src)s, '
'tgt volume: %(tgt)s') % {'src': src_vol_name,
'tgt': tgt_vol_name})
LOG.debug('create_cloned_volume: src volume: %(src)s, '
'tgt volume: %(tgt)s' % {'src': src_vol_name,
'tgt': tgt_vol_name})
self._update_login_info()
src_vol_id = src_volume.get('provider_location', None)
@ -734,9 +734,9 @@ class TseriesCommon():
str(len(added_vol_ids)))
added_vol_size = str(int(new_size) - int(volume['size'])) + 'G'
LOG.debug(_('extend_volume: extended volume name: %(extended_name)s '
'new added volume name: %(added_name)s '
'new added volume size: %(added_size)s')
LOG.debug('extend_volume: extended volume name: %(extended_name)s '
'new added volume name: %(added_name)s '
'new added volume size: %(added_size)s'
% {'extended_name': extended_vol_name,
'added_name': added_vol_name,
'added_size': added_vol_size})
@ -774,8 +774,8 @@ class TseriesCommon():
snapshot_name = self._name_translate(snapshot['name'])
volume_name = self._name_translate(snapshot['volume_name'])
LOG.debug(_('create_snapshot: snapshot name: %(snapshot)s, '
'volume name: %(volume)s')
LOG.debug('create_snapshot: snapshot name: %(snapshot)s, '
'volume name: %(volume)s'
% {'snapshot': snapshot_name,
'volume': volume_name})
@ -850,9 +850,9 @@ class TseriesCommon():
snapshot_name = self._name_translate(snapshot['name'])
volume_name = self._name_translate(snapshot['volume_name'])
LOG.debug(_('delete_snapshot: snapshot name: %(snapshot)s, '
'volume name: %(volume)s') % {'snapshot': snapshot_name,
'volume': volume_name})
LOG.debug('delete_snapshot: snapshot name: %(snapshot)s, '
'volume name: %(volume)s' % {'snapshot': snapshot_name,
'volume': volume_name})
self._update_login_info()
snapshot_id = snapshot.get('provider_location', None)
@ -1074,7 +1074,7 @@ class TseriesCommon():
return lun_details
def change_lun_ctr(self, lun_id, ctr):
LOG.debug(_('change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s.')
LOG.debug('change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s.'
% {'lun': lun_id, 'ctr': ctr})
cli_cmd = 'chglun -lun %s -c %s' % (lun_id, ctr)
@ -1133,9 +1133,9 @@ class TseriesCommon():
if (re.search('there are IOs accessing the system', out) and
(attempts > 0)):
LOG.debug(_('_delete_map: There are IOs accessing '
'the system. Retry to delete host map '
'%(mapid)s 10s later.') % {'mapid': mapid})
LOG.debug('_delete_map: There are IOs accessing '
'the system. Retry to delete host map '
'%(mapid)s 10s later.' % {'mapid': mapid})
time.sleep(10)
attempts -= 1
@ -1180,7 +1180,7 @@ class TseriesCommon():
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("_update_volume_stats: Updating volume stats."))
LOG.debug("_update_volume_stats: Updating volume stats.")
data = {}
data['vendor_name'] = 'Huawei'
data['total_capacity_gb'] = 'infinite'
@ -1244,7 +1244,7 @@ class DoradoCommon(TseriesCommon):
def do_setup(self, context):
"""Check config file."""
LOG.debug(_('do_setup'))
LOG.debug('do_setup')
self._check_conf_file()
exist_luns = self._get_all_luns_info()


@ -818,7 +818,7 @@ class GPFSDriver(driver.VolumeDriver):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
LOG.debug(_('Begin backup of volume %s.') % volume['name'])
LOG.debug('Begin backup of volume %s.' % volume['name'])
# create a snapshot that will be used as the backup source
backup_path = '%s_%s' % (volume_path, backup['id'])
@ -838,7 +838,7 @@ class GPFSDriver(driver.VolumeDriver):
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(_('Begin restore of backup %s.') % backup['id'])
LOG.debug('Begin restore of backup %s.' % backup['id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):


@ -96,20 +96,20 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
LOG.debug(_("Enter _get_provider_location: volume_id %s") % volume_id)
LOG.debug("Enter _get_provider_location: volume_id %s" % volume_id)
volume = self.db.volume_get(self._context, volume_id)
LOG.debug("Exit _get_provider_location")
return volume['provider_location']
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume."""
LOG.debug(_("Enter _get_export_path: volume_id %s") % volume_id)
LOG.debug("Enter _get_export_path: volume_id %s" % volume_id)
return self._get_provider_location(volume_id).split(':')[1]
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Enter _update_volume_stats"))
LOG.debug("Enter _update_volume_stats")
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'IBMNAS_NFS'
@ -135,7 +135,7 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
def _create_ibmnas_snap(self, src, dest, mount_path):
"""Create volume clones and snapshots."""
LOG.debug(_("Enter _create_ibmnas_snap: src %(src)s, dest %(dest)s")
LOG.debug("Enter _create_ibmnas_snap: src %(src)s, dest %(dest)s"
% {'src': src, 'dest': dest})
if mount_path is not None:
tmp_file_path = dest + '.snap'
@ -165,10 +165,10 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
def _create_ibmnas_copy(self, src, dest, snap):
"""Create a cloned volume, parent & the clone both remain writable."""
LOG.debug(_('Enter _create_ibmnas_copy: src %(src)s, dest %(dest)s, '
'snap %(snap)s') % {'src': src,
'dest': dest,
'snap': snap})
LOG.debug('Enter _create_ibmnas_copy: src %(src)s, dest %(dest)s, '
'snap %(snap)s' % {'src': src,
'dest': dest,
'snap': snap})
ssh_cmd = ['mkclone', '-p', snap, '-s', src, '-t', dest]
try:
self._run_ssh(ssh_cmd)
@ -199,8 +199,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
self._resize_volume_file(path, new_size)
def _delete_snapfiles(self, fchild, mount_point):
LOG.debug(_('Enter _delete_snapfiles: fchild %(fchild)s, '
'mount_point %(mount_point)s')
LOG.debug('Enter _delete_snapfiles: fchild %(fchild)s, '
'mount_point %(mount_point)s'
% {'fchild': fchild,
'mount_point': mount_point})
ssh_cmd = ['lsclone', fchild]


@ -140,7 +140,7 @@ class StorwizeSVCDriver(san.SanDriver):
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug(_('enter: do_setup'))
LOG.debug('enter: do_setup')
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
@ -211,11 +211,11 @@ class StorwizeSVCDriver(san.SanDriver):
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
LOG.debug(_('leave: do_setup'))
LOG.debug('leave: do_setup')
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug(_('enter: check_for_setup_error'))
LOG.debug('enter: check_for_setup_error')
# Check that we have the system ID information
if self._state['system_name'] is None:
@ -251,7 +251,7 @@ class StorwizeSVCDriver(san.SanDriver):
opts = self._helpers.build_default_opts(self.configuration)
self._helpers.check_vdisk_opts(self._state, opts)
LOG.debug(_('leave: check_for_setup_error'))
LOG.debug('leave: check_for_setup_error')
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
@ -304,8 +304,8 @@ class StorwizeSVCDriver(san.SanDriver):
"""
LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': volume, 'conn': connector})
LOG.debug('enter: initialize_connection: volume %(vol)s with '
'connector %(conn)s' % {'vol': volume, 'conn': connector})
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
volume_name = volume['name']
@ -443,8 +443,8 @@ class StorwizeSVCDriver(san.SanDriver):
'%(conn)s.\n') % {'vol': volume,
'conn': connector})
LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s')
LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s'
% {'vol': volume, 'conn': connector, 'prop': properties})
return {'driver_volume_type': type_str, 'data': properties, }
@ -470,8 +470,8 @@ class StorwizeSVCDriver(san.SanDriver):
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': volume, 'conn': connector})
LOG.debug('enter: terminate_connection: volume %(vol)s with '
'connector %(conn)s' % {'vol': volume, 'conn': connector})
vol_name = volume['name']
if 'host' in connector:
@ -505,8 +505,8 @@ class StorwizeSVCDriver(san.SanDriver):
self._helpers.unmap_vol_from_host(vol_name, host_name)
LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': volume, 'conn': connector})
LOG.debug('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s' % {'vol': volume, 'conn': connector})
return info
@ -567,7 +567,7 @@ class StorwizeSVCDriver(san.SanDriver):
opts, True)
def extend_volume(self, volume, new_size):
LOG.debug(_('enter: extend_volume: volume %s') % volume['id'])
LOG.debug('enter: extend_volume: volume %s' % volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
@ -578,7 +578,7 @@ class StorwizeSVCDriver(san.SanDriver):
extend_amt = int(new_size) - volume['size']
self._helpers.extend_vdisk(volume['name'], extend_amt)
LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])
LOG.debug('leave: extend_volume: volume %s' % volume['id'])
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
@ -657,7 +657,7 @@ class StorwizeSVCDriver(san.SanDriver):
'vdiskcopyops')
def _check_volume_copy_ops(self):
LOG.debug(_("enter: update volume copy status"))
LOG.debug("enter: update volume copy status")
ctxt = context.get_admin_context()
copy_items = self._vdiskcopyops.items()
for vol_id, copy_ops in copy_items:
@ -687,7 +687,7 @@ class StorwizeSVCDriver(san.SanDriver):
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug(_("exit: update volume copy status"))
LOG.debug("exit: update volume copy status")
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage.
@ -702,7 +702,7 @@ class StorwizeSVCDriver(san.SanDriver):
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s') %
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
@ -722,7 +722,7 @@ class StorwizeSVCDriver(san.SanDriver):
vol_type, self._state,
self.configuration)
self._add_vdisk_copy_op(ctxt, volume, new_op)
LOG.debug(_('leave: migrate_volume: id=%(id)s, host=%(host)s') %
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
return (True, None)
@ -744,11 +744,11 @@ class StorwizeSVCDriver(san.SanDriver):
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug(_('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
@ -798,11 +798,11 @@ class StorwizeSVCDriver(san.SanDriver):
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
LOG.debug(_('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True
def manage_existing(self, volume, ref):
@ -869,7 +869,7 @@ class StorwizeSVCDriver(san.SanDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
data['vendor_name'] = 'IBM'


@ -176,7 +176,7 @@ class StorwizeHelpers(object):
def get_host_from_connector(self, connector):
"""Return the Storwize host described by the connector."""
LOG.debug(_('enter: get_host_from_connector: %s') % connector)
LOG.debug('enter: get_host_from_connector: %s' % connector)
# If we have FC information, we have a faster lookup option
host_name = None
@ -209,7 +209,7 @@ class StorwizeHelpers(object):
[str(x).lower() for x in connector['wwpns']]):
host_name = name
LOG.debug(_('leave: get_host_from_connector: host %s') % host_name)
LOG.debug('leave: get_host_from_connector: host %s' % host_name)
return host_name
def create_host(self, connector):
@ -220,7 +220,7 @@ class StorwizeHelpers(object):
host name (at most 55 characters), plus a random 8-character suffix to
avoid collisions. The total length should be at most 63 characters.
"""
LOG.debug(_('enter: create_host: host %s') % connector['host'])
LOG.debug('enter: create_host: host %s' % connector['host'])
# Before we start, make sure host name is a string and that we have at
# least one port.
@ -267,7 +267,7 @@ class StorwizeHelpers(object):
for port in ports:
self.ssh.addhostport(host_name, port[0], port[1])
LOG.debug(_('leave: create_host: host %(host)s - %(host_name)s') %
LOG.debug('leave: create_host: host %(host)s - %(host_name)s' %
{'host': connector['host'], 'host_name': host_name})
return host_name
@ -277,8 +277,8 @@ class StorwizeHelpers(object):
def map_vol_to_host(self, volume_name, host_name, multihostmap):
"""Create a mapping between a volume to a host."""
LOG.debug(_('enter: map_vol_to_host: volume %(volume_name)s to '
'host %(host_name)s')
LOG.debug('enter: map_vol_to_host: volume %(volume_name)s to '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
# Check if this volume is already mapped to this host
@ -303,8 +303,8 @@ class StorwizeHelpers(object):
self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun,
multihostmap)
LOG.debug(_('leave: map_vol_to_host: LUN %(result_lun)s, volume '
'%(volume_name)s, host %(host_name)s') %
LOG.debug('leave: map_vol_to_host: LUN %(result_lun)s, volume '
'%(volume_name)s, host %(host_name)s' %
{'result_lun': result_lun,
'volume_name': volume_name,
'host_name': host_name})
@ -313,8 +313,8 @@ class StorwizeHelpers(object):
def unmap_vol_from_host(self, volume_name, host_name):
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug(_('enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s')
LOG.debug('enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
@ -350,8 +350,8 @@ class StorwizeHelpers(object):
if not len(resp):
self.delete_host(host_name)
LOG.debug(_('leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s')
LOG.debug('leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
@staticmethod
@ -498,10 +498,10 @@ class StorwizeHelpers(object):
return params
def create_vdisk(self, name, size, units, pool, opts):
LOG.debug(_('enter: create_vdisk: vdisk %s ') % name)
LOG.debug('enter: create_vdisk: vdisk %s ' % name)
params = self._get_vdisk_create_params(opts)
self.ssh.mkvdisk(name, size, units, pool, opts, params)
LOG.debug(_('leave: _create_vdisk: volume %s ') % name)
LOG.debug('leave: _create_vdisk: volume %s ' % name)
def get_vdisk_attributes(self, vdisk):
attrs = self.ssh.lsvdisk(vdisk)
@ -547,16 +547,16 @@ class StorwizeHelpers(object):
def run_flashcopy(self, source, target, timeout, full_copy=True):
"""Create a FlashCopy mapping from the source to the target."""
LOG.debug(_('enter: run_flashcopy: execute FlashCopy from source '
'%(source)s to target %(target)s') %
LOG.debug('enter: run_flashcopy: execute FlashCopy from source '
'%(source)s to target %(target)s' %
{'source': source, 'target': target})
fc_map_id = self.ssh.mkfcmap(source, target, full_copy)
self._prepare_fc_map(fc_map_id, timeout)
self.ssh.startfcmap(fc_map_id)
LOG.debug(_('leave: run_flashcopy: FlashCopy started from '
'%(source)s to %(target)s') %
LOG.debug('leave: run_flashcopy: FlashCopy started from '
'%(source)s to %(target)s' %
{'source': source, 'target': target})
def _get_vdisk_fc_mappings(self, vdisk):
@ -575,7 +575,7 @@ class StorwizeHelpers(object):
def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
"""FlashCopy mapping check helper."""
LOG.debug(_('Loopcall: _check_vdisk_fc_mappings(), vdisk %s') % name)
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s' % name)
mapping_ids = self._get_vdisk_fc_mappings(name)
wait_for_copy = False
for map_id in mapping_ids:
@ -634,7 +634,7 @@ class StorwizeHelpers(object):
# before it finishes. Don't set the sleep interval shorter
# than the heartbeat. Otherwise volume service heartbeat
# will not be serviced.
LOG.debug(_('Calling _ensure_vdisk_no_fc_mappings: vdisk %s')
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s'
% name)
ret = timer.start(interval=self.check_fcmapping_interval).wait()
timer.stop()
@ -642,17 +642,17 @@ class StorwizeHelpers(object):
def delete_vdisk(self, vdisk, force):
"""Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug(_('enter: delete_vdisk: vdisk %s') % vdisk)
LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
if not self.is_vdisk_defined(vdisk):
LOG.info(_('Tried to delete non-existant vdisk %s.') % vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk)
self.ssh.rmvdisk(vdisk, force=force)
LOG.debug(_('leave: delete_vdisk: vdisk %s') % vdisk)
LOG.debug('leave: delete_vdisk: vdisk %s' % vdisk)
def create_copy(self, src, tgt, src_id, config, opts, full_copy):
"""Create a new snapshot using FlashCopy."""
LOG.debug(_('enter: create_copy: snapshot %(src)s to %(tgt)s') %
LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' %
{'tgt': tgt, 'src': src})
src_attrs = self.get_vdisk_attributes(src)
@ -672,8 +672,8 @@ class StorwizeHelpers(object):
with excutils.save_and_reraise_exception():
self.delete_vdisk(tgt, True)
LOG.debug(_('leave: _create_copy: snapshot %(tgt)s from '
'vdisk %(src)s') %
LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
'vdisk %(src)s' %
{'tgt': tgt, 'src': src})
def extend_vdisk(self, vdisk, amount):
@ -739,8 +739,8 @@ class StorwizeHelpers(object):
def change_vdisk_iogrp(self, vdisk, state, iogrp):
if state['code_level'] < (6, 4, 0, 0):
LOG.debug(_('Ignore change IO group as storage code level is '
'%(code_level)s, below the required 6.4.0.0') %
LOG.debug('Ignore change IO group as storage code level is '
'%(code_level)s, below the required 6.4.0.0' %
{'code_level': state['code_level']})
else:
self.ssh.movevdisk(vdisk, str(iogrp[0]))
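Only debug-level messages lose the marker. In the hunks above and below, LOG.info(_('Tried to delete non-existant vdisk %s.') % vdisk) and the LOG.warning/LOG.error calls keep _(), since messages at info level and above remain translatable. A short sketch of the resulting convention, again with a stand-in _():

import logging

LOG = logging.getLogger(__name__)

def _(msg):
    # Stand-in for the gettext marker; illustration only.
    return msg

vdisk = 'vdisk7'  # hypothetical value

LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)  # debug: not translated
LOG.info(_('Tried to delete non-existant vdisk %s.') % vdisk)  # translated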


@ -342,7 +342,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_('Unable to update stats on non-initialized '
'Volume Group: %s'), self.configuration.volume_group)


@ -118,9 +118,9 @@ class RestClient(WebserviceClient):
"""Invokes end point for resource on path."""
params = {'m': method, 'p': path, 'd': data, 'sys': use_system,
't': timeout, 'v': verify, 'k': kwargs}
LOG.debug(_("Invoking rest with method: %(m)s, path: %(p)s,"
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
" verify: %(v)s, kwargs: %(k)s.") % (params))
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
" verify: %(v)s, kwargs: %(k)s." % (params))
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
headers = {'Accept': 'application/json',


@ -223,7 +223,7 @@ class Driver(driver.ISCSIDriver):
self._objects['volumes']['ref_vol'].pop(vol_id, True)
self._objects['volumes']['label_ref'].pop(label)
else:
LOG.debug(_("Volume %s not cached."), label)
LOG.debug("Volume %s not cached.", label)
def _del_snapshot_frm_cache(self, obj_name):
"""Deletes snapshot group from cache."""
@ -232,7 +232,7 @@ class Driver(driver.ISCSIDriver):
self._objects['snapshots']['ref_snap'].pop(snap_id, True)
self._objects['snapshots']['label_ref'].pop(obj_name)
else:
LOG.debug(_("Snapshot %s not cached."), obj_name)
LOG.debug("Snapshot %s not cached.", obj_name)
def _del_vol_mapping_frm_cache(self, mapping):
"""Deletes volume mapping under cached volume."""
@ -242,7 +242,7 @@ class Driver(driver.ISCSIDriver):
try:
mappings.remove(mapping)
except ValueError:
LOG.debug(_("Mapping with id %s already removed."),
LOG.debug("Mapping with id %s already removed.",
mapping['lunMappingRef'])
def _get_volume(self, uid):
@ -347,7 +347,7 @@ class Driver(driver.ISCSIDriver):
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_cached_snapshot_grp(snapshot_id)
LOG.debug(_("Creating snap vol for group %s"), group['label'])
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_cached_snap_grp_image(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.GiB
@ -622,7 +622,7 @@ class Driver(driver.ISCSIDriver):
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug(_("Updating volume stats."))
LOG.debug("Updating volume stats.")
self._stats = self._stats or {}
netapp_backend = 'NetApp_ESeries'
backend_name = self.configuration.safe_get('volume_backend_name')
@ -686,7 +686,7 @@ class Driver(driver.ISCSIDriver):
try:
self._delete_volume(label)
except exception.NetAppDriverException:
LOG.debug(_("Error deleting vol with label %s."),
LOG.debug("Error deleting vol with label %s.",
label)
finally:
utils.set_safe_attr(self, 'clean_job_running', False)


@ -102,7 +102,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
"""
host_filer = kwargs['hostname']
LOG.debug(_('Using NetApp filer: %s') % host_filer)
LOG.debug('Using NetApp filer: %s' % host_filer)
self.client = NaServer(host=host_filer,
server_type=NaServer.SERVER_TYPE_FILER,
transport_type=kwargs['transport_type'],
@ -147,7 +147,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
self.lun_table = {}
self._get_lun_list()
LOG.debug(_("Success getting LUN list from server"))
LOG.debug("Success getting LUN list from server")
def create_volume(self, volume):
"""Driver entry point for creating a new volume."""
@ -163,7 +163,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
metadata['SpaceReserved'] = 'true'
extra_specs = get_volume_extra_specs(volume)
self._create_lun_on_eligible_vol(name, size, metadata, extra_specs)
LOG.debug(_("Created LUN with name %s") % name)
LOG.debug("Created LUN with name %s" % name)
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, name, size, metadata))
@ -188,7 +188,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
lun_destroy.add_new_child('force', 'true')
self.client.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug(_("Destroyed LUN %s") % seg[-1])
LOG.debug("Destroyed LUN %s" % seg[-1])
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
@ -287,7 +287,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug(_("Snapshot %s deletion successful") % snapshot['name'])
LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for creating a new volume from a snapshot.
@ -652,7 +652,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
"""Moves the lun at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug(_("Moving lun %(name)s to %(new_name)s.")
LOG.debug("Moving lun %(name)s to %(new_name)s."
% {'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = NaElement("lun-move")
lun_move.add_new_child("path", path)
@ -726,7 +726,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
def _get_lun_block_count(self, path):
"""Gets block counts for the lun."""
LOG.debug(_("Getting lun block count."))
LOG.debug("Getting lun block count.")
block_count = 0
lun_infos = self._get_lun_by_args(path=path)
if not lun_infos:
@ -838,7 +838,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
attr_list = result.get_child_by_name('attributes-list')
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
return iscsi_service.get_child_content('node-name')
LOG.debug(_('No iscsi service found for vserver %s') % (self.vserver))
LOG.debug('No iscsi service found for vserver %s' % (self.vserver))
return None
def _create_lun_handle(self, metadata):
@ -1005,7 +1005,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
dest_block += int(block_count)
clone_create.add_child_elem(block_ranges)
self.client.invoke_successfully(clone_create, True)
LOG.debug(_("Cloned LUN with new name %s") % new_name)
LOG.debug("Cloned LUN with new name %s" % new_name)
lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
% (volume, new_name))
if len(lun) == 0:
@ -1055,7 +1055,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
netapp_backend = 'NetApp_iSCSI_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
@ -1440,11 +1440,11 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
fmt = {'name': name, 'new_name': new_name}
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug(_("Clone operation with src %(name)s"
" and dest %(new_name)s completed") % fmt)
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s completed" % fmt)
else:
LOG.debug(_("Clone operation with src %(name)s"
" and dest %(new_name)s failed") % fmt)
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s failed" % fmt)
raise NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
@ -1469,7 +1469,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
def _update_volume_stats(self):
"""Retrieve status info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
netapp_backend = 'NetApp_iSCSI_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')


@ -250,8 +250,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
file_name = 'img-cache-%s' % image_id
file_path = '%s/%s' % (dir, file_name)
if os.path.exists(file_path):
LOG.debug(_('Found cache file for image %(image_id)s'
' on share %(share)s')
LOG.debug('Found cache file for image %(image_id)s'
' on share %(share)s'
% {'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
@ -271,7 +271,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _spawn_clean_cache_job(self):
"""Spawns a clean task if not running."""
if getattr(self, 'cleaning', None):
LOG.debug(_('Image cache cleaning in progress. Returning... '))
LOG.debug('Image cache cleaning in progress. Returning... ')
return
else:
#set cleaning to True
@ -282,7 +282,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
try:
LOG.debug(_('Image cache cleaning in progress.'))
LOG.debug('Image cache cleaning in progress.')
thres_size_perc_start =\
self.configuration.thres_avl_size_perc_start
thres_size_perc_stop =\
@ -298,7 +298,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
threshold_size = int(
(thres_size_perc_stop * total_size) / 100)
bytes_to_free = int(threshold_size - total_avl)
LOG.debug(_('Files to be queued for deletion %s'),
LOG.debug('Files to be queued for deletion %s',
eligible_files)
self._delete_files_till_bytes_free(
eligible_files, share, bytes_to_free)
@ -311,7 +311,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
% {'share': share, 'ex': e.__str__()})
continue
finally:
LOG.debug(_('Image cache cleaning done.'))
LOG.debug('Image cache cleaning done.')
self.cleaning = False
def _shortlist_del_eligible_files(self, share, old_files):
@ -336,14 +336,14 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
"""Delete files from disk till bytes are freed or list exhausted."""
LOG.debug(_('Bytes to free %s'), bytes_to_free)
LOG.debug('Bytes to free %s', bytes_to_free)
if file_list and bytes_to_free > 0:
sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
mount_fs = self._get_mount_point_for_share(share)
for f in sorted_files:
if f:
file_path = '%s/%s' % (mount_fs, f[0])
LOG.debug(_('Delete file path %s'), file_path)
LOG.debug('Delete file path %s', file_path)
@utils.synchronized(f[0], external=True)
def _do_delete():
@ -358,7 +358,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _delete_file(self, path):
"""Delete file from disk and return result as boolean."""
try:
LOG.debug(_('Deleting file at path %s'), path)
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=True)
return True
@ -415,7 +415,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
for res in cache_result:
# Repeat tries in other shares if failed in some
(share, file_name) = res
LOG.debug(_('Cache share: %s'), share)
LOG.debug('Cache share: %s', share)
if (share and
self._is_share_vol_compatible(volume, share)):
try:
@ -436,14 +436,14 @@ class NetAppNFSDriver(nfs.NfsDriver):
image_location = self._construct_image_nfs_url(image_location)
share = self._is_cloneable_share(image_location)
if share and self._is_share_vol_compatible(volume, share):
LOG.debug(_('Share is cloneable %s'), share)
LOG.debug('Share is cloneable %s', share)
volume['provider_location'] = share
(__, ___, img_file) = image_location.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path)
if img_info.file_format == 'raw':
LOG.debug(_('Image is raw %s'), image_id)
LOG.debug('Image is raw %s', image_id)
self._clone_volume(
img_file, volume['name'],
volume_id=None, share=share)
@ -478,7 +478,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
def _resize_image_file(self, path, new_size):
"""Resize the image file on share to new size."""
LOG.debug(_('Checking file for resize'))
LOG.debug('Checking file for resize')
if self._is_file_size_equal(path, new_size):
return
else:
@ -536,8 +536,8 @@ class NetAppNFSDriver(nfs.NfsDriver):
'*(/[^\/\\\\]+)$)')
matched = re.match(nfs_loc_pattern, image_location, flags=0)
if not matched:
LOG.debug(_('Image location not in the'
' expected format %s'), image_location)
LOG.debug('Image location not in the'
' expected format %s', image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
@ -564,7 +564,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
if sh_exp == dir:
share_candidates.append(sh)
if share_candidates:
LOG.debug(_('Found possible share matches %s'),
LOG.debug('Found possible share matches %s',
share_candidates)
return self._share_match_for_ip(ip, share_candidates)
except Exception:
@ -1030,17 +1030,17 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self._get_cluster_file_usage(path, vserver)
file_list.append((file, u_bytes))
LOG.debug(_('Shortlisted del elg files %s'), file_list)
LOG.debug('Shortlisted del elg files %s', file_list)
return file_list
def _get_cluster_file_usage(self, path, vserver):
"""Gets the file unique bytes."""
LOG.debug(_('Getting file usage for %s'), path)
LOG.debug('Getting file usage for %s', path)
file_use = NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self._invoke_successfully(file_use, vserver)
bytes = res.get_child_content('unique-bytes')
LOG.debug(_('file-usage for path %(path)s is %(bytes)s')
LOG.debug('file-usage for path %(path)s is %(bytes)s'
% {'path': path, 'bytes': bytes})
return bytes
@ -1057,9 +1057,9 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
ip_sh = share.split(':')[0]
sh_vserver = self._get_vserver_for_ip(ip_sh)
if sh_vserver == ip_vserver:
LOG.debug(_('Share match found for ip %s'), ip)
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug(_('No share match found for ip %s'), ip)
LOG.debug('No share match found for ip %s', ip)
return None
def _get_vserver_for_ip(self, ip):
@ -1090,7 +1090,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug(_("Found volume %(vol)s for share %(share)s.")
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
extra_specs = get_volume_extra_specs(volume)
vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
@ -1127,8 +1127,8 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
' offload workflow.')
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug(_("Copy offload either not configured or"
" unsupported."))
LOG.debug("Copy offload either not configured or"
" unsupported.")
except Exception as e:
LOG.exception(_('Copy offload workflow unsuccessful. %s'), e)
finally:
@ -1159,12 +1159,12 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file_name."""
LOG.debug(_("Trying copy from cache using copy offload."))
LOG.debug("Trying copy from cache using copy offload.")
copied = False
for res in cache_result:
try:
(share, file_name) = res
LOG.debug(_("Found cache file_name on share %s."), share)
LOG.debug("Found cache file_name on share %s.", share)
if share != self._get_provider_location(volume['id']):
col_path = self.configuration.netapp_copyoffload_tool_path
src_ip = self._get_ip_verify_on_cluster(
@ -1178,14 +1178,14 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
src_path, dst_path, run_as_root=False,
check_exit_code=0)
self._register_image_in_cache(volume, image_id)
LOG.debug(_("Copied image from cache to volume %s using"
" copy offload."), volume['id'])
LOG.debug("Copied image from cache to volume %s using"
" copy offload.", volume['id'])
else:
self._clone_file_dst_exists(share, file_name,
volume['name'],
dest_exists=True)
LOG.debug(_("Copied image from cache to volume %s using"
" cloning."), volume['id'])
LOG.debug("Copied image from cache to volume %s using"
" cloning.", volume['id'])
self._post_clone_image(volume)
copied = True
break
@ -1203,7 +1203,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
def _copy_from_img_service(self, context, volume, image_service,
image_id):
"""Copies from the image service using copy offload."""
LOG.debug(_("Trying copy from image service using copy offload."))
LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
image_loc = self._construct_image_nfs_url(image_loc)
conn, dr = self._check_get_nfs_path_segs(image_loc)
@ -1236,19 +1236,19 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug(_('Copied image %(img)s to tmp file %(tmp)s.')
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
% {'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % (image_id))
if img_info['disk_format'] == 'raw':
LOG.debug(_('Image is raw %s.'), image_id)
LOG.debug('Image is raw %s.', image_id)
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
LOG.debug(_('Copied raw image %(img)s to volume %(vol)s.')
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug(_('Image will be converted to raw %s.'), image_id)
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = str(uuid.uuid4())
dst_img_conv_local = os.path.join(dst_dir, img_conv)
@ -1268,8 +1268,8 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
dest_exists=True)
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug(_('Copied locally converted raw image'
' %(img)s to volume %(vol)s.')
LOG.debug('Copied locally converted raw image'
' %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
@ -1356,7 +1356,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
"""
msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""")
LOG.debug("""Cloning with src %(src_path)s, dest %(dest_path)s"""
% msg_fmt)
clone_start = NaElement.create_node_with_children(
'clone-start',
@ -1438,17 +1438,17 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self._get_filer_file_usage(path)
file_list.append((file, u_bytes))
LOG.debug(_('Shortlisted del elg files %s'), file_list)
LOG.debug('Shortlisted del elg files %s', file_list)
return file_list
def _get_filer_file_usage(self, path):
"""Gets the file unique bytes."""
LOG.debug(_('Getting file usage for %s'), path)
LOG.debug('Getting file usage for %s', path)
file_use = NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self._invoke_successfully(file_use)
bytes = res.get_child_content('unique-bytes')
LOG.debug(_('file-usage for path %(path)s is %(bytes)s')
LOG.debug('file-usage for path %(path)s is %(bytes)s'
% {'path': path, 'bytes': bytes})
return bytes
@ -1485,9 +1485,9 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
for share in shares:
ip_sh = share.split(':')[0]
if self._is_filer_ip(ip_sh):
LOG.debug(_('Share match found for ip %s'), ip)
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug(_('No share match found for ip %s'), ip)
LOG.debug('No share match found for ip %s', ip)
return None
def _is_share_vol_compatible(self, volume, share):


@ -238,8 +238,8 @@ def create_vol_list(vol_attrs):
vol.qos['qos_policy_group'] = None
vols.add(vol)
except KeyError as e:
LOG.debug(_('Unexpected error while creating'
' ssc vol list. Message - %s') % (e.message))
LOG.debug('Unexpected error while creating'
' ssc vol list. Message - %s' % (e.message))
continue
return vols
@ -269,7 +269,7 @@ def query_aggr_options(na_server, aggr_name):
if op.get_child_content('name') == 'raidtype':
attrs['raid_type'] = op.get_child_content('value')
except Exception as e:
LOG.debug(_("Exception querying aggr options. %s"), e)
LOG.debug("Exception querying aggr options. %s", e)
return attrs
@ -310,7 +310,7 @@ def get_sis_vol_dict(na_server, vserver, volume=None):
sis.get_child_content('state'))
sis_vols[vol] = v_sis
except Exception as e:
LOG.debug(_("Exception querying sis information. %s"), e)
LOG.debug("Exception querying sis information. %s", e)
return sis_vols
@ -344,7 +344,7 @@ def get_snapmirror_vol_dict(na_server, vserver, volume=None):
else:
mirrored_vols[src_volume] = [v_snap]
except Exception as e:
LOG.debug(_("Exception querying mirror information. %s"), e)
LOG.debug("Exception querying mirror information. %s", e)
return mirrored_vols
@ -376,7 +376,7 @@ def query_aggr_storage_disk(na_server, aggr):
else:
continue
except Exception as e:
LOG.debug(_("Exception querying storage disk. %s"), e)
LOG.debug("Exception querying storage disk. %s", e)
return 'unknown'
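The pattern is regular enough to be enforced automatically. Below is a minimal sketch, in the style of an OpenStack hacking (flake8 plugin) check, of how a translated debug log could be flagged; the actual check this commit adds may be implemented differently:

import re

_TRANSLATED_DEBUG = re.compile(r"LOG\.debug\(\s*_\(")

def check_no_translated_debug_logs(logical_line):
    """Yield an N319-style offense for debug logs wrapped in _()."""
    if _TRANSLATED_DEBUG.search(logical_line):
        yield (0, "N319 Don't translate debug level logs")

# Usage sketch:
#   list(check_no_translated_debug_logs("LOG.debug(_('foo %s') % x)"))
#   -> [(0, "N319 Don't translate debug level logs")]
#   list(check_no_translated_debug_logs("LOG.debug('foo %s', x)")) -> []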


@ -122,7 +122,7 @@ def provide_ems(requester, server, stats, netapp_backend,
else:
na_server.set_vfiler(None)
na_server.invoke_successfully(ems, True)
LOG.debug(_("ems executed successfully."))
LOG.debug("ems executed successfully.")
except NaApiError as e:
LOG.warn(_("Failed to invoke ems. Message : %s") % e)
finally:


@ -188,8 +188,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""
snapshot = {'volume_name': src_vref['name'],
'name': self._get_clone_snapshot_name(volume)}
LOG.debug(_('Creating temp snapshot of the original volume: '
'%(volume_name)s@%(name)s'), snapshot)
LOG.debug('Creating temp snapshot of the original volume: '
'%(volume_name)s@%(name)s', snapshot)
# We don't delete this snapshot, because this snapshot will be origin
# of new volume. This snapshot will be automatically promoted by NMS
# when user will delete origin volume. But when cloned volume deleted
@ -229,7 +229,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921
:param volume: a dictionary describing the volume to migrate
:param host: a dictionary describing the host to migrate to
"""
LOG.debug(_('Enter: migrate_volume: id=%(id)s, host=%(host)s') %
LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host})
false_ret = (False, None)
@ -572,7 +572,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921
def _update_volume_stats(self):
"""Retrieve stats info for NexentaStor appliance."""
LOG.debug(_('Updating volume stats'))
LOG.debug('Updating volume stats')
stats = self.nms.volume.get_child_props(
self.configuration.nexenta_volume, 'health|size|used|available')
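The Nexenta hunks above pass a single dict as the argument backing %(key)s placeholders. The standard logging module supports this directly: a lone mapping argument is kept for the eventual 'msg % args' step, so the call remains lazy. A self-contained sketch:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    snapshot = {'volume_name': 'volume-1234', 'name': 'clone-snap-5678'}
    # One mapping argument feeds the %(key)s placeholders, but only
    # when the record is actually emitted.
    LOG.debug('Creating temp snapshot: %(volume_name)s@%(name)s', snapshot)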


@ -80,7 +80,7 @@ class NexentaJSONProxy(object):
'Content-Type': 'application/json',
'Authorization': 'Basic %s' % auth
}
LOG.debug(_('Sending JSON data: %s'), data)
LOG.debug('Sending JSON data: %s', data)
request = urllib2.Request(self.url, data, headers)
response_obj = urllib2.urlopen(request)
if response_obj.info().status == 'EOF in headers':
@ -93,7 +93,7 @@ class NexentaJSONProxy(object):
response_obj = urllib2.urlopen(request)
response_data = response_obj.read()
LOG.debug(_('Got response: %s'), response_data)
LOG.debug('Got response: %s', response_data)
response = jsonutils.loads(response_data)
if response.get('error') is not None:
raise NexentaJSONException(response['error'].get('message', ''))


@ -111,7 +111,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s' % (dataset, volume['name'])
LOG.debug(_('Creating folder on Nexenta Store %s'), folder)
LOG.debug('Creating folder on Nexenta Store %s', folder)
nms.folder.create_with_props(
vol, folder,
{'compression': self.configuration.nexenta_volume_compression}
@ -362,7 +362,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
'recursive': 'true',
'anonymous_rw': 'true',
}
LOG.debug(_('Sharing folder %s on Nexenta Store'), folder)
LOG.debug('Sharing folder %s on Nexenta Store', folder)
nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
share_opts)
@ -390,7 +390,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
self.shares[share_address] = share_opts
self.share2nms[share_address] = self._get_nms_for_url(nms_url)
LOG.debug(_('Shares loaded: %s') % self.shares)
LOG.debug('Shares loaded: %s' % self.shares)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.


@ -125,8 +125,8 @@ class RemoteFsDriver(driver.VolumeDriver):
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug(_("Driver specific implementation needs to return"
" mount_point_base."))
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
def create_volume(self, volume):
@ -501,7 +501,7 @@ class NfsDriver(RemoteFsDriver):
raise exception.NfsNoSuitableShareFound(
volume_size=volume_size_in_gib)
LOG.debug(_('Selected %s as target nfs share.'), target_share)
LOG.debug('Selected %s as target nfs share.', target_share)
return target_share
@ -534,13 +534,13 @@ class NfsDriver(RemoteFsDriver):
# available space but be within our oversubscription limit
# therefore allowing this share to still be selected as a valid
# target.
LOG.debug(_('%s is above nfs_used_ratio'), nfs_share)
LOG.debug('%s is above nfs_used_ratio', nfs_share)
return False
if apparent_available <= requested_volume_size:
LOG.debug(_('%s is above nfs_oversub_ratio'), nfs_share)
LOG.debug('%s is above nfs_oversub_ratio', nfs_share)
return False
if total_allocated / total_size >= oversub_ratio:
LOG.debug(_('%s reserved space is above nfs_oversub_ratio'),
LOG.debug('%s reserved space is above nfs_oversub_ratio',
nfs_share)
return False
return True
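The three guards above encode the share-selection policy: reject a share whose real usage exceeds nfs_used_ratio, whose apparent free space (total scaled by nfs_oversub_ratio, minus what is already provisioned) cannot hold the request, or whose provisioned total has reached the oversubscription cap. A hedged paraphrase with made-up numbers (the helper name and figures are illustrative; pass floats under Python 2 to avoid integer division):

    def share_is_eligible(total_size, total_available, total_allocated,
                          requested_gib, used_ratio=0.95, oversub_ratio=1.0):
        used = (total_size - total_available) / total_size
        if used > used_ratio:
            return False                       # above nfs_used_ratio
        apparent_available = total_size * oversub_ratio - total_allocated
        if apparent_available <= requested_gib:
            return False                       # above nfs_oversub_ratio
        if total_allocated / total_size >= oversub_ratio:
            return False                       # reserved space above the ratio
        return True

    # A 100 GiB share with 40 GiB free and 55 GiB provisioned can take 10 GiB:
    assert share_is_eligible(100.0, 40.0, 55.0, 10.0)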


@ -415,8 +415,8 @@ class RBDDriver(driver.VolumeDriver):
# flatten the source before cloning. Zero rbd_max_clone_depth means
# infinite is allowed.
if depth == CONF.rbd_max_clone_depth:
LOG.debug(_("maximum clone depth (%d) has been reached - "
"flattening source volume") %
LOG.debug("maximum clone depth (%d) has been reached - "
"flattening source volume" %
(CONF.rbd_max_clone_depth))
flatten_parent = True
@ -427,7 +427,7 @@ class RBDDriver(driver.VolumeDriver):
pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
LOG.debug(_("flattening source volume %s") % (src_name))
LOG.debug("flattening source volume %s" % (src_name))
src_volume.flatten()
# Delete parent clone snap
parent_volume = self.rbd.Image(client.ioctx, parent)
@ -439,7 +439,7 @@ class RBDDriver(driver.VolumeDriver):
# Create new snapshot of source volume
clone_snap = "%s.clone_snap" % dest_name
LOG.debug(_("creating snapshot='%s'") % (clone_snap))
LOG.debug("creating snapshot='%s'" % (clone_snap))
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception as exc:
@ -449,8 +449,8 @@ class RBDDriver(driver.VolumeDriver):
# Now clone source volume snapshot
try:
LOG.debug(_("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'") %
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'" %
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.rbd.RBD().clone(client.ioctx, src_name, clone_snap,
@ -463,7 +463,7 @@ class RBDDriver(driver.VolumeDriver):
finally:
src_volume.close()
LOG.debug(_("clone created successfully"))
LOG.debug("clone created successfully")
def create_volume(self, volume):
"""Creates a logical volume."""
@ -472,7 +472,7 @@ class RBDDriver(driver.VolumeDriver):
else:
size = int(volume['size']) * units.GiB
LOG.debug(_("creating volume '%s'") % (volume['name']))
LOG.debug("creating volume '%s'" % (volume['name']))
old_format = True
features = 0
@ -491,13 +491,13 @@ class RBDDriver(driver.VolumeDriver):
features=features)
def _flatten(self, pool, volume_name):
LOG.debug(_('flattening %(pool)s/%(img)s') %
LOG.debug('flattening %(pool)s/%(img)s' %
dict(pool=pool, img=volume_name))
with RBDVolumeProxy(self, volume_name, pool) as vol:
vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
LOG.debug(_('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s') %
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s' %
dict(pool=src_pool, img=src_image, snap=src_snap,
dst=volume['name']))
with RADOSClient(self, src_pool) as src_client:
@ -532,7 +532,7 @@ class RBDDriver(driver.VolumeDriver):
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug(_("volume has no backup snaps"))
LOG.debug("volume has no backup snaps")
def _get_clone_info(self, volume, volume_name, snap=None):
"""If volume is a clone, return its parent info.
@ -553,7 +553,7 @@ class RBDDriver(driver.VolumeDriver):
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
LOG.debug(_("volume %s is not a clone") % volume_name)
LOG.debug("volume %s is not a clone" % volume_name)
volume.set_snap(None)
return (None, None, None)
@ -571,7 +571,7 @@ class RBDDriver(driver.VolumeDriver):
parent_name,
parent_snap)
LOG.debug(_("deleting parent snapshot %s") % (parent_snap))
LOG.debug("deleting parent snapshot %s" % (parent_snap))
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
@ -582,7 +582,7 @@ class RBDDriver(driver.VolumeDriver):
# If parent has been deleted in Cinder, delete the silent reference and
# keep walking up the chain if it is itself a clone.
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
LOG.debug(_("deleting parent %s") % (parent_name))
LOG.debug("deleting parent %s" % (parent_name))
self.rbd.RBD().remove(client.ioctx, parent_name)
# Now move up to grandparent if there is one
@ -614,7 +614,7 @@ class RBDDriver(driver.VolumeDriver):
snaps = rbd_image.list_snaps()
for snap in snaps:
if snap['name'].endswith('.clone_snap'):
LOG.debug(_("volume has clone snapshot(s)"))
LOG.debug("volume has clone snapshot(s)")
# We grab one of these and use it when fetching parent
# info in case the volume has been flattened.
clone_snap = snap['name']
@ -630,7 +630,7 @@ class RBDDriver(driver.VolumeDriver):
rbd_image.close()
if clone_snap is None:
LOG.debug(_("deleting rbd volume %s") % (volume_name))
LOG.debug("deleting rbd volume %s" % (volume_name))
try:
self.rbd.RBD().remove(client.ioctx, volume_name)
except self.rbd.ImageBusy:
@ -647,7 +647,7 @@ class RBDDriver(driver.VolumeDriver):
# If it is a clone, walk back up the parent chain deleting
# references.
if parent:
LOG.debug(_("volume is a clone so cleaning references"))
LOG.debug("volume is a clone so cleaning references")
self._delete_clone_parent_refs(client, parent, parent_snap)
else:
# If the volume has copy-on-write clones we will not be able to
@ -704,7 +704,7 @@ class RBDDriver(driver.VolumeDriver):
'secret_type': 'ceph',
'secret_uuid': self.configuration.rbd_secret_uuid, }
}
LOG.debug(_('connection data: %s'), data)
LOG.debug('connection data: %s', data)
return data
def terminate_connection(self, volume, connector, **kwargs):
@ -732,7 +732,7 @@ class RBDDriver(driver.VolumeDriver):
try:
fsid, pool, image, snapshot = self._parse_location(image_location)
except exception.ImageUnacceptable as e:
LOG.debug(_('not cloneable: %s'), e)
LOG.debug('not cloneable: %s', e)
return False
if self._get_fsid() != fsid:
@ -755,7 +755,7 @@ class RBDDriver(driver.VolumeDriver):
read_only=True):
return True
except self.rbd.Error as e:
LOG.debug(_('Unable to open image %(loc)s: %(err)s') %
LOG.debug('Unable to open image %(loc)s: %(err)s' %
dict(loc=image_location, err=e))
return False
@ -828,7 +828,7 @@ class RBDDriver(driver.VolumeDriver):
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.backup(backup, rbd_fd)
LOG.debug(_("volume backup complete."))
LOG.debug("volume backup complete.")
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
@ -841,7 +841,7 @@ class RBDDriver(driver.VolumeDriver):
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.restore(backup, volume['id'], rbd_fd)
LOG.debug(_("volume restore complete."))
LOG.debug("volume restore complete.")
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
@ -856,5 +856,5 @@ class RBDDriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})


@ -260,8 +260,8 @@ class HP3PARCommon(object):
volume_name = self._get_3par_vol_name(volume['id'])
old_size = volume['size']
growth_size = int(new_size) - old_size
LOG.debug(_("Extending Volume %(vol)s from %(old)s to %(new)s, "
" by %(diff)s GB.") %
LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, "
" by %(diff)s GB." %
{'vol': volume_name, 'old': old_size, 'new': new_size,
'diff': growth_size})
growth_size_mib = growth_size * units.KiB
@ -271,7 +271,7 @@ class HP3PARCommon(object):
_convert_to_base=False):
try:
if _convert_to_base:
LOG.debug(_("Converting to base volume prior to growing."))
LOG.debug("Converting to base volume prior to growing.")
self._convert_to_base_volume(volume)
self.client.growVolume(volume_name, growth_size_mib)
except Exception as ex:
@ -842,7 +842,7 @@ class HP3PARCommon(object):
def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
tpvv=True):
# Virtual volume sets are not supported with the -online option
LOG.debug(_('Creating clone of a volume %(src)s to %(dest)s.') %
LOG.debug('Creating clone of a volume %(src)s to %(dest)s.' %
{'src': src_name, 'dest': dest_name})
optional = {'tpvv': tpvv, 'online': True}
@ -899,7 +899,7 @@ class HP3PARCommon(object):
except hpexceptions.HTTPBadRequest as ex:
if ex.get_code() == 29:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug(_("Found an online copy for %(volume)s")
LOG.debug("Found an online copy for %(volume)s"
% {'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
@ -999,11 +999,11 @@ class HP3PARCommon(object):
growth_size = volume['size'] - snapshot['volume_size']
if growth_size > 0:
try:
LOG.debug(_('Converting to base volume type: %s.') %
LOG.debug('Converting to base volume type: %s.' %
volume['id'])
self._convert_to_base_volume(volume)
growth_size_mib = growth_size * units.GiB / units.MiB
LOG.debug(_('Growing volume: %(id)s by %(size)s GiB.') %
LOG.debug('Growing volume: %(id)s by %(size)s GiB.' %
{'id': volume['id'], 'size': growth_size})
self.client.growVolume(volume_name, growth_size_mib)
except Exception as ex:
@ -1140,14 +1140,14 @@ class HP3PARCommon(object):
"""
dbg = {'id': volume['id'], 'host': host['host']}
LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s.') % dbg)
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s.' % dbg)
false_ret = (False, None)
# Make sure volume is not attached
if volume['status'] != 'available':
LOG.debug(_('Volume is attached: migrate_volume: '
'id=%(id)s, host=%(host)s.') % dbg)
LOG.debug('Volume is attached: migrate_volume: '
'id=%(id)s, host=%(host)s.' % dbg)
return false_ret
if 'location_info' not in host['capabilities']:
@ -1162,30 +1162,30 @@ class HP3PARCommon(object):
sys_info = self.client.getStorageSystemInfo()
if not (dest_type == 'HP3PARDriver' and
dest_id == sys_info['serialNumber']):
LOG.debug(_('Dest does not match: migrate_volume: '
'id=%(id)s, host=%(host)s.') % dbg)
LOG.debug('Dest does not match: migrate_volume: '
'id=%(id)s, host=%(host)s.' % dbg)
return false_ret
type_info = self.get_volume_settings_from_type(volume)
if dest_cpg == type_info['cpg']:
LOG.debug(_('CPGs are the same: migrate_volume: '
'id=%(id)s, host=%(host)s.') % dbg)
LOG.debug('CPGs are the same: migrate_volume: '
'id=%(id)s, host=%(host)s.' % dbg)
return false_ret
# Check to make sure CPGs are in the same domain
src_domain = self.get_domain(type_info['cpg'])
dst_domain = self.get_domain(dest_cpg)
if src_domain != dst_domain:
LOG.debug(_('CPGs in different domains: migrate_volume: '
'id=%(id)s, host=%(host)s.') % dbg)
LOG.debug('CPGs in different domains: migrate_volume: '
'id=%(id)s, host=%(host)s.' % dbg)
return false_ret
self._convert_to_base_volume(volume, new_cpg=dest_cpg)
# TODO(Ramy) When volume retype is available,
# use that to change the type
LOG.debug(_('leave: migrate_volume: id=%(id)s, host=%(host)s.') % dbg)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s.' % dbg)
return (True, None)
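Every bail-out path above returns false_ret = (False, None), and the success path returns (True, None). My reading of this contract, stated as an assumption rather than from the manager code, is that a False first element tells Cinder to fall back to generic host-assisted migration, while True means the backend moved the data itself (the second element being an optional model update). A schematic skeleton under that assumption:

    def migrate_volume(volume, host):
        """Illustrative skeleton of the driver-assisted migration contract."""
        false_ret = (False, None)       # decline; let Cinder copy generically
        if volume['status'] != 'available':
            return false_ret
        if 'location_info' not in host['capabilities']:
            return false_ret
        # ... backend-specific compatibility checks and the actual move ...
        return (True, None)             # moved; no model update needed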
def _convert_to_base_volume(self, volume, new_cpg=None):
@ -1205,8 +1205,8 @@ class HP3PARCommon(object):
task_id = self._copy_volume(volume_name, temp_vol_name,
cpg, cpg, type_info['tpvv'])
LOG.debug(_('Copy volume scheduled: convert_to_base_volume: '
'id=%s.') % volume['id'])
LOG.debug('Copy volume scheduled: convert_to_base_volume: '
'id=%s.' % volume['id'])
# Wait for the physical copy task to complete
def _wait_for_task(task_id):
@ -1229,19 +1229,19 @@ class HP3PARCommon(object):
'id=%(id)s, status=%(status)s.') % dbg
raise exception.CinderException(msg)
else:
LOG.debug(_('Copy volume completed: convert_to_base_volume: '
'id=%s.') % volume['id'])
LOG.debug('Copy volume completed: convert_to_base_volume: '
'id=%s.' % volume['id'])
comment = self._get_3par_vol_comment(volume_name)
if comment:
self.client.modifyVolume(temp_vol_name, {'comment': comment})
LOG.debug(_('Volume rename completed: convert_to_base_volume: '
'id=%s.') % volume['id'])
LOG.debug('Volume rename completed: convert_to_base_volume: '
'id=%s.' % volume['id'])
# Delete source volume after the copy is complete
self.client.deleteVolume(volume_name)
LOG.debug(_('Delete src volume completed: convert_to_base_volume: '
'id=%s.') % volume['id'])
LOG.debug('Delete src volume completed: convert_to_base_volume: '
'id=%s.' % volume['id'])
# Rename the new volume to the original name
self.client.modifyVolume(temp_vol_name, {'newName': volume_name})


@ -105,7 +105,7 @@ class HPLeftHandCLIQProxy(SanISCSIDriver):
cliq_args['output'] = 'XML'
(out, _err) = self._cliq_run(verb, cliq_args, check_cliq_result)
LOG.debug(_("CLIQ command returned %s"), out)
LOG.debug("CLIQ command returned %s", out)
result_xml = etree.fromstring(out.encode('utf8'))
if check_cliq_result:
@ -202,7 +202,7 @@ class HPLeftHandCLIQProxy(SanISCSIDriver):
for k, v in status_node.attrib.items():
volume_attributes["permission." + k] = v
LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s" %
{'volume_name': volume_name,
'volume_attributes': volume_attributes})
return volume_attributes
@ -258,7 +258,7 @@ class HPLeftHandCLIQProxy(SanISCSIDriver):
for k, v in status_node.attrib.items():
snapshot_attributes["permission." + k] = v
LOG.debug(_("Snapshot info: %(name)s => %(attributes)s") %
LOG.debug("Snapshot info: %(name)s => %(attributes)s" %
{'name': snapshot_name, 'attributes': snapshot_attributes})
return snapshot_attributes


@ -403,11 +403,11 @@ class HPLeftHandRESTProxy(ISCSIDriver):
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(_('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
try:
volume_info = self.client.getVolumeByName(volume['name'])
except hpexceptions.HTTPNotFound:
@ -420,7 +420,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
new_extra_specs,
extra_specs_key_map.keys())
LOG.debug(_('LH specs=%(specs)s') % {'specs': lh_extra_specs})
LOG.debug('LH specs=%(specs)s' % {'specs': lh_extra_specs})
# only set the ones that have changed
changed_extra_specs = {}
@ -461,11 +461,11 @@ class HPLeftHandRESTProxy(ISCSIDriver):
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'cluster=%(cluster)s') % {
'id': volume['id'],
'host': host,
'cluster': self.configuration.hplefthand_clustername})
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'cluster=%(cluster)s' % {
'id': volume['id'],
'host': host,
'cluster': self.configuration.hplefthand_clustername})
false_ret = (False, None)
if 'location_info' not in host['capabilities']:
@ -476,7 +476,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
try:
# get the cluster info, if it exists and compare
cluster_info = self.client.getClusterByName(cluster)
LOG.debug(_('Clister info: %s') % cluster_info)
LOG.debug('Clister info: %s' % cluster_info)
virtual_ips = cluster_info['virtualIPAddresses']
if driver != self.__class__.__name__:
@ -498,7 +498,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
try:
volume_info = self.client.getVolumeByName(volume['name'])
LOG.debug(_('Volume info: %s') % volume_info)
LOG.debug('Volume info: %s' % volume_info)
# can't migrate if server is attached
if volume_info['iscsiSessions'] is not None:
@ -511,7 +511,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
snap_info = self.client.getVolume(
volume_info['id'],
'fields=snapshots,snapshots[resource[members[name]]]')
LOG.debug(_('Snapshot info: %s') % snap_info)
LOG.debug('Snapshot info: %s' % snap_info)
if snap_info['snapshots']['resource'] is not None:
LOG.info(_("Cannot provide backend assisted migration "
"for volume: %s because the volume has "


@ -59,7 +59,7 @@ class HPMSACommon(object):
self.client_logout()
def client_login(self):
LOG.debug(_("Connecting to MSA"))
LOG.debug("Connecting to MSA")
try:
self.client.login()
except msa.HPMSAConnectionError as ex:
@ -79,7 +79,7 @@ class HPMSACommon(object):
def client_logout(self):
self.client.logout()
LOG.debug(_("Disconnected from MSA Array"))
LOG.debug("Disconnected from MSA Array")
def _get_vol_name(self, volume_id):
volume_name = self._encode_name(volume_id)
@ -128,7 +128,7 @@ class HPMSACommon(object):
def create_volume(self, volume):
volume_id = self._get_vol_name(volume['id'])
LOG.debug(_("Create Volume (%(display_name)s: %(name)s %(id)s)") %
LOG.debug("Create Volume (%(display_name)s: %(name)s %(id)s)" %
{'display_name': volume['display_name'],
'name': volume['name'], 'id': volume_id})
@ -172,7 +172,7 @@ class HPMSACommon(object):
self._assert_enough_space_for_copy(volume['size'])
self._assert_source_detached(src_vref)
LOG.debug(_("Cloning Volume %(source_id)s (%(dest_id)s)") %
LOG.debug("Cloning Volume %(source_id)s (%(dest_id)s)" %
{'source_id': volume['source_volid'],
'dest_id': volume['id']})
@ -191,8 +191,8 @@ class HPMSACommon(object):
self.get_volume_stats(True)
self._assert_enough_space_for_copy(volume['size'])
LOG.debug(_("Creating Volume from snapshot %(source_id)s "
"(%(dest_id)s)") %
LOG.debug("Creating Volume from snapshot %(source_id)s "
"(%(dest_id)s)" %
{'source_id': snapshot['id'], 'dest_id': volume['id']})
orig_name = self._get_snap_name(snapshot['id'])
@ -207,7 +207,7 @@ class HPMSACommon(object):
return None
def delete_volume(self, volume):
LOG.debug(_("Deleting Volume (%s)") % volume['id'])
LOG.debug("Deleting Volume (%s)" % volume['id'])
volume_name = self._get_vol_name(volume['id'])
try:
self.client.delete_volume(volume_name)
@ -276,7 +276,7 @@ class HPMSACommon(object):
return self.client.get_active_fc_target_ports()
def create_snapshot(self, snapshot):
LOG.debug(_("Creating Snapshot from %(volume_id)s (%(snap_id)s)") %
LOG.debug("Creating Snapshot from %(volume_id)s (%(snap_id)s)" %
{'volume_id': snapshot['volume_id'],
'snap_id': snapshot['id']})
snap_name = self._get_snap_name(snapshot['id'])
@ -289,7 +289,7 @@ class HPMSACommon(object):
def delete_snapshot(self, snapshot):
snap_name = self._get_snap_name(snapshot['id'])
LOG.debug(_("Deleting Snapshot (%s)") % snapshot['id'])
LOG.debug("Deleting Snapshot (%s)" % snapshot['id'])
try:
self.client.delete_snapshot(snap_name)
@ -304,8 +304,8 @@ class HPMSACommon(object):
volume_name = self._get_vol_name(volume['id'])
old_size = volume['size']
growth_size = int(new_size) - old_size
LOG.debug(_("Extending Volume %(volume_name)s from %(old_size)s to "
"%(new_size)s, by %(growth_size)s GB.") %
LOG.debug("Extending Volume %(volume_name)s from %(old_size)s to "
"%(new_size)s, by %(growth_size)s GB." %
{'volume_name': volume_name, 'old_size': old_size,
'new_size': new_size, 'growth_size': growth_size})
try:


@ -197,7 +197,7 @@ class SheepdogDriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})
def backup_volume(self, context, backup, backup_service):


@ -139,7 +139,7 @@ class SolidFireDriver(SanISCSIDriver):
cluster_password))[:-1]
header['Authorization'] = 'Basic %s' % auth_key
LOG.debug(_("Payload for SolidFire API call: %s"), payload)
LOG.debug("Payload for SolidFire API call: %s", payload)
api_endpoint = '/json-rpc/%s' % version
connection = httplib.HTTPSConnection(host, port)
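The [:-1] a few lines up strips the trailing newline that base64.encodestring() appends, which suggests how auth_key is built. A minimal Python 2 sketch of the same header construction (the credentials are placeholders):

    import base64

    user, password = 'admin', 'secret'   # placeholders
    auth_key = base64.encodestring('%s:%s' % (user, password))[:-1]
    header = {'Content-Type': 'application/json',
              'Authorization': 'Basic %s' % auth_key}
    # base64.b64encode() emits no newline and would make the slice unnecessary.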
@ -178,7 +178,7 @@ class SolidFireDriver(SanISCSIDriver):
connection.close()
LOG.debug(_("Results of SolidFire API call: %s"), data)
LOG.debug("Results of SolidFire API call: %s", data)
if 'error' in data:
if data['error']['name'] in max_simultaneous_clones:
@ -217,7 +217,7 @@ class SolidFireDriver(SanISCSIDriver):
params = {'username': sf_account_name}
data = self._issue_api_request('GetAccountByName', params)
if 'result' in data and 'account' in data['result']:
LOG.debug(_('Found solidfire account: %s'), sf_account_name)
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
return sfaccount
@ -248,7 +248,7 @@ class SolidFireDriver(SanISCSIDriver):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
LOG.debug(_('solidfire account: %s does not exist, create it...'),
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
@ -464,8 +464,8 @@ class SolidFireDriver(SanISCSIDriver):
if uuid in v['name']:
found_count += 1
sf_volref = v
LOG.debug(_("Mapped SolidFire volumeID %(sfid)s "
"to cinder ID %(uuid)s.") %
LOG.debug("Mapped SolidFire volumeID %(sfid)s "
"to cinder ID %(uuid)s." %
{'sfid': v['volumeID'],
'uuid': uuid})
@ -545,7 +545,7 @@ class SolidFireDriver(SanISCSIDriver):
"""
LOG.debug(_("Enter SolidFire delete_volume..."))
LOG.debug("Enter SolidFire delete_volume...")
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
@ -570,11 +570,11 @@ class SolidFireDriver(SanISCSIDriver):
LOG.error(_("Volume ID %s was not found on "
"the SolidFire Cluster!"), volume['id'])
LOG.debug(_("Leaving SolidFire delete_volume"))
LOG.debug("Leaving SolidFire delete_volume")
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
LOG.debug(_("Executing SolidFire ensure_export..."))
LOG.debug("Executing SolidFire ensure_export...")
try:
return self._do_export(volume)
except exception.SolidFireAPIException:
@ -582,7 +582,7 @@ class SolidFireDriver(SanISCSIDriver):
def create_export(self, context, volume):
"""Setup the iscsi export info."""
LOG.debug(_("Executing SolidFire create_export..."))
LOG.debug("Executing SolidFire create_export...")
return self._do_export(volume)
def delete_snapshot(self, snapshot):
@ -633,7 +633,7 @@ class SolidFireDriver(SanISCSIDriver):
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
LOG.debug(_("Entering SolidFire extend_volume..."))
LOG.debug("Entering SolidFire extend_volume...")
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
@ -655,12 +655,12 @@ class SolidFireDriver(SanISCSIDriver):
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
LOG.debug(_("Leaving SolidFire extend_volume"))
LOG.debug("Leaving SolidFire extend_volume")
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
LOG.debug(_("Updating cluster status info"))
LOG.debug("Updating cluster status info")
params = {}
@ -699,7 +699,7 @@ class SolidFireDriver(SanISCSIDriver):
instance_uuid, host_name,
mountpoint):
LOG.debug(_("Entering SolidFire attach_volume..."))
LOG.debug("Entering SolidFire attach_volume...")
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
@ -724,7 +724,7 @@ class SolidFireDriver(SanISCSIDriver):
def detach_volume(self, context, volume):
LOG.debug(_("Entering SolidFire attach_volume..."))
LOG.debug("Entering SolidFire attach_volume...")
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
@ -769,7 +769,7 @@ class SolidFireDriver(SanISCSIDriver):
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
LOG.debug(_("Leaving SolidFire transfer volume"))
LOG.debug("Leaving SolidFire transfer volume")
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.


@ -234,8 +234,8 @@ class VMwareAPISession(object):
# case of an inactive session. Therefore, we need a way to
# differentiate between these two cases.
if self._is_current_session_active():
LOG.debug(_("Returning empty response for "
"%(module)s.%(method)s invocation."),
LOG.debug("Returning empty response for "
"%(module)s.%(method)s invocation.",
{'module': module,
'method': method})
return []
@ -257,7 +257,7 @@ class VMwareAPISession(object):
:returns: True if the session is active; False otherwise
"""
LOG.debug(_("Checking if the current session: %s is active."),
LOG.debug("Checking if the current session: %s is active.",
self._session_id)
is_active = False
@ -302,11 +302,11 @@ class VMwareAPISession(object):
# If task already completed on server, it will not return
# the progress.
if hasattr(task_info, 'progress'):
LOG.debug(_("Task: %(task)s progress: %(prog)s.") %
LOG.debug("Task: %(task)s progress: %(prog)s." %
{'task': task, 'prog': task_info.progress})
return
elif task_info.state == 'success':
LOG.debug(_("Task %s status: success.") % task)
LOG.debug("Task %s status: success." % task)
else:
error_msg = str(task_info.error.localizedMessage)
LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
@ -329,9 +329,9 @@ class VMwareAPISession(object):
self.vim, lease, 'state')
if state == 'ready':
# done
LOG.debug(_("Lease is ready."))
LOG.debug("Lease is ready.")
elif state == 'initializing':
LOG.debug(_("Lease initializing..."))
LOG.debug("Lease initializing...")
return
elif state == 'error':
error_msg = self.invoke_api(vim_util, 'get_object_property',


@ -48,12 +48,12 @@ class ThreadSafePipe(queue.LightQueue):
if self.transferred < self.max_transfer_size:
data_item = self.get()
self.transferred += len(data_item)
LOG.debug(_("Read %(bytes)s out of %(max)s from ThreadSafePipe.") %
LOG.debug("Read %(bytes)s out of %(max)s from ThreadSafePipe." %
{'bytes': self.transferred,
'max': self.max_transfer_size})
return data_item
else:
LOG.debug(_("Completed transfer of size %s.") % self.transferred)
LOG.debug("Completed transfer of size %s." % self.transferred)
return ""
def write(self, data):
@ -99,9 +99,9 @@ class GlanceWriteThread(object):
Function to do the image data transfer through an update
and thereon checks if the state is 'active'.
"""
LOG.debug(_("Initiating image service update on image: %(image)s "
"with meta: %(meta)s") % {'image': self.image_id,
'meta': self.image_meta})
LOG.debug("Initiating image service update on image: %(image)s "
"with meta: %(meta)s" % {'image': self.image_id,
'meta': self.image_meta})
self.image_service.update(self.context,
self.image_id,
self.image_meta,
@ -114,7 +114,7 @@ class GlanceWriteThread(object):
image_status = image_meta.get('status')
if image_status == 'active':
self.stop()
LOG.debug(_("Glance image: %s is now active.") %
LOG.debug("Glance image: %s is now active." %
self.image_id)
self.done.send(True)
# If the state is killed, then raise an exception.


@ -169,8 +169,8 @@ class VMwareHTTPWriteFile(VMwareHTTPFile):
try:
self.conn.getresponse()
except Exception as excep:
LOG.debug(_("Exception during HTTP connection close in "
"VMwareHTTPWrite. Exception is %s.") % excep)
LOG.debug("Exception during HTTP connection close in "
"VMwareHTTPWrite. Exception is %s." % excep)
super(VMwareHTTPWriteFile, self).close()
@ -229,7 +229,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile):
def write(self, data):
"""Write to the file."""
self._progress += len(data)
LOG.debug(_("Written %s bytes to vmdk.") % self._progress)
LOG.debug("Written %s bytes to vmdk." % self._progress)
self.file_handle.send(data)
def update_progress(self):
@ -240,7 +240,7 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile):
"""
percent = int(float(self._progress) / self._vmdk_size * 100)
try:
LOG.debug(_("Updating progress to %s percent.") % percent)
LOG.debug("Updating progress to %s percent." % percent)
self._session.invoke_api(self._session.vim,
'HttpNfcLeaseProgress',
self._lease, percent=percent)
@ -256,9 +256,9 @@ class VMwareHTTPWriteVmdk(VMwareHTTPFile):
if state == 'ready':
self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete',
self._lease)
LOG.debug(_("Lease released."))
LOG.debug("Lease released.")
else:
LOG.debug(_("Lease is already in state: %s.") % state)
LOG.debug("Lease is already in state: %s." % state)
super(VMwareHTTPWriteVmdk, self).close()
@ -305,7 +305,7 @@ class VMwareHTTPReadVmdk(VMwareHTTPFile):
def read(self, chunk_size):
"""Read a chunk from file."""
self._progress += READ_CHUNKSIZE
LOG.debug(_("Read %s bytes from vmdk.") % self._progress)
LOG.debug("Read %s bytes from vmdk." % self._progress)
return self.file_handle.read(READ_CHUNKSIZE)
def update_progress(self):
@ -316,7 +316,7 @@ class VMwareHTTPReadVmdk(VMwareHTTPFile):
"""
percent = int(float(self._progress) / self._vmdk_size * 100)
try:
LOG.debug(_("Updating progress to %s percent.") % percent)
LOG.debug("Updating progress to %s percent." % percent)
self._session.invoke_api(self._session.vim,
'HttpNfcLeaseProgress',
self._lease, percent=percent)
@ -332,7 +332,7 @@ class VMwareHTTPReadVmdk(VMwareHTTPFile):
if state == 'ready':
self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete',
self._lease)
LOG.debug(_("Lease released."))
LOG.debug("Lease released.")
else:
LOG.debug(_("Lease is already in state: %s.") % state)
LOG.debug("Lease is already in state: %s." % state)
super(VMwareHTTPReadVmdk, self).close()


@ -121,17 +121,17 @@ def _get_volume_type_extra_spec(type_id, spec_key, possible_values=None,
spec_value = volume_types.get_volume_type_extra_specs(type_id,
spec_key)
if not spec_value:
LOG.debug(_("Returning default spec value: %s.") % default_value)
LOG.debug("Returning default spec value: %s." % default_value)
return default_value
if possible_values is None:
return spec_value
if spec_value in possible_values:
LOG.debug(_("Returning spec value %s") % spec_value)
LOG.debug("Returning spec value %s" % spec_value)
return spec_value
LOG.debug(_("Invalid spec value: %s specified.") % spec_value)
LOG.debug("Invalid spec value: %s specified." % spec_value)
class VMwareEsxVmdkDriver(driver.VolumeDriver):
@ -249,7 +249,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
"%s.") % volume['name']
LOG.exception(msg)
raise error_util.VimFaultException([excep], msg)
LOG.debug(_("Verified volume %s can be created."), volume['name'])
LOG.debug("Verified volume %s can be created.", volume['name'])
def create_volume(self, volume):
"""Creates a volume.
@ -342,8 +342,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
LOG.error(msg)
raise error_util.VimException(msg)
LOG.debug(_("Selected datastore: %(datastore)s with %(host_count)d "
"connected host(s) for the volume.") %
LOG.debug("Selected datastore: %(datastore)s with %(host_count)d "
"connected host(s) for the volume." %
{'datastore': best_summary, 'host_count': max_host_count})
return best_summary
@ -367,8 +367,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
:return: subset of datastores that match storage_profile, or empty list
if none of the datastores match
"""
LOG.debug(_("Filter datastores matching storage profile %(profile)s: "
"%(dss)s."),
LOG.debug("Filter datastores matching storage profile %(profile)s: "
"%(dss)s.",
{'profile': storage_profile, 'dss': datastores})
profileId = self.volumeops.retrieve_profile_id(storage_profile)
if not profileId:
@ -394,7 +394,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
folder = self._get_volume_group_folder(datacenter)
storage_profile = self._get_storage_profile(volume)
if self._storage_policy_enabled and storage_profile:
LOG.debug(_("Storage profile required for this volume: %s."),
LOG.debug("Storage profile required for this volume: %s.",
storage_profile)
datastores = self._filter_ds_by_profile(datastores,
storage_profile)
@ -546,8 +546,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
if 'instance' in connector:
# The instance exists
instance = vim.get_moref(connector['instance'], 'VirtualMachine')
LOG.debug(_("The instance: %s for which initialize connection "
"is called, exists.") % instance)
LOG.debug("The instance: %s for which initialize connection "
"is called, exists." % instance)
# Get host managing the instance
host = self.volumeops.get_host(instance)
if not backing:
@ -561,8 +561,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self._relocate_backing(volume, backing, host)
else:
# The instance does not exist
LOG.debug(_("The instance for which initialize connection "
"is called, does not exist."))
LOG.debug("The instance for which initialize connection "
"is called, does not exist.")
if not backing:
# Create a backing in case it does not exist. It is a bad use
# case to boot from an empty volume.
@ -833,7 +833,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
timeout = self.configuration.vmware_image_transfer_timeout_secs
host_ip = self.configuration.vmware_host_ip
cookies = self.session.vim.client.options.transport.cookiejar
LOG.debug(_("Fetching glance image: %(id)s to server: %(host)s.") %
LOG.debug("Fetching glance image: %(id)s to server: %(host)s." %
{'id': image_id, 'host': host_ip})
vmware_images.fetch_flat_image(context, timeout, image_service,
image_id, image_size=image_size,
@ -870,8 +870,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
raise exception.VolumeBackendAPIException(data=err_msg)
size_gb = volume['size']
LOG.debug(_("Selected datastore %(ds)s for new volume of size "
"%(size)s GB.") % {'ds': summary.name, 'size': size_gb})
LOG.debug("Selected datastore %(ds)s for new volume of size "
"%(size)s GB." % {'ds': summary.name, 'size': size_gb})
# prepare create spec for backing vm
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
@ -892,7 +892,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
# fetching image from glance will also create the backing
timeout = self.configuration.vmware_image_transfer_timeout_secs
host_ip = self.configuration.vmware_host_ip
LOG.debug(_("Fetching glance image: %(id)s to server: %(host)s.") %
LOG.debug("Fetching glance image: %(id)s to server: %(host)s." %
{'id': image_id, 'host': host_ip})
vmware_images.fetch_stream_optimized_image(context, timeout,
image_service,
@ -964,7 +964,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
:param image_service: Glance image service
:param image_id: Glance image id
"""
LOG.debug(_("Copy glance image: %s to create new volume.") % image_id)
LOG.debug("Copy glance image: %s to create new volume." % image_id)
# Record the volume size specified by the user, if the size is input
# from the API.
volume_size_in_gb = volume['size']
@ -1023,7 +1023,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
raise exception.InvalidVolume(msg)
# validate disk format is vmdk
LOG.debug(_("Copy Volume: %s to new image.") % volume['name'])
LOG.debug("Copy Volume: %s to new image." % volume['name'])
VMwareEsxVmdkDriver._validate_disk_format(image_meta['disk_format'])
# get backing vm of volume and its vmdk path


@ -91,8 +91,8 @@ def start_transfer(context, timeout_secs, read_file_handle, max_data_size,
def fetch_flat_image(context, timeout_secs, image_service, image_id, **kwargs):
"""Download flat image from the glance image server."""
LOG.debug(_("Downloading image: %s from glance image server as a flat vmdk"
" file.") % image_id)
LOG.debug("Downloading image: %s from glance image server as a flat vmdk"
" file." % image_id)
file_size = int(kwargs.get('image_size'))
read_iter = image_service.download(context, image_id)
read_handle = rw_util.GlanceFileRead(read_iter)
@ -110,8 +110,8 @@ def fetch_flat_image(context, timeout_secs, image_service, image_id, **kwargs):
def fetch_stream_optimized_image(context, timeout_secs, image_service,
image_id, **kwargs):
"""Download stream optimized image from glance image server."""
LOG.debug(_("Downloading image: %s from glance image server using HttpNfc"
" import.") % image_id)
LOG.debug("Downloading image: %s from glance image server using HttpNfc"
" import." % image_id)
file_size = int(kwargs.get('image_size'))
read_iter = image_service.download(context, image_id)
read_handle = rw_util.GlanceFileRead(read_iter)
@ -129,8 +129,8 @@ def fetch_stream_optimized_image(context, timeout_secs, image_service,
def upload_image(context, timeout_secs, image_service, image_id, owner_id,
**kwargs):
"""Upload the vm's disk file to Glance image server."""
LOG.debug(_("Uploading image: %s to the Glance image server using HttpNfc"
" export.") % image_id)
LOG.debug("Uploading image: %s to the Glance image server using HttpNfc"
" export." % image_id)
file_size = kwargs.get('vmdk_size')
read_handle = rw_util.VMwareHTTPReadVmdk(kwargs.get('session'),
kwargs.get('host'),


@ -85,17 +85,17 @@ class VMwareVolumeOps(object):
# Result not obtained, continue retrieving results.
retrieve_result = self.continue_retrieval(retrieve_result)
LOG.debug(_("Did not find any backing with name: %s") % name)
LOG.debug("Did not find any backing with name: %s" % name)
def delete_backing(self, backing):
"""Delete the backing.
:param backing: Managed object reference to the backing
"""
LOG.debug(_("Deleting the VM backing: %s.") % backing)
LOG.debug("Deleting the VM backing: %s." % backing)
task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
backing)
LOG.debug(_("Initiated deletion of VM backing: %s.") % backing)
LOG.debug("Initiated deletion of VM backing: %s." % backing)
self._session.wait_for_task(task)
LOG.info(_("Deleted the VM backing: %s.") % backing)
@ -223,7 +223,7 @@ class VMwareVolumeOps(object):
datastores = prop.val.ManagedObjectReference
elif prop.name == 'parent':
compute_resource = prop.val
LOG.debug(_("Datastores attached to host %(host)s are: %(ds)s."),
LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
{'host': host, 'ds': datastores})
# Filter datastores based on if it is accessible, mounted and writable
valid_dss = []
@ -241,7 +241,7 @@ class VMwareVolumeOps(object):
LOG.error(msg)
raise error_util.VimException(msg)
else:
LOG.debug(_("Valid datastores are: %s"), valid_dss)
LOG.debug("Valid datastores are: %s", valid_dss)
return (valid_dss, resource_pool)
def _get_parent(self, child, parent_type):
@ -291,8 +291,8 @@ class VMwareVolumeOps(object):
:return: Reference to the child folder with input name if it already
exists, else create one and return the reference
"""
LOG.debug(_("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s.") %
LOG.debug("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s." %
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
@ -308,7 +308,7 @@ class VMwareVolumeOps(object):
continue
child_entity_name = self.get_entity_name(child_entity)
if child_entity_name == child_folder_name:
LOG.debug(_("Child folder already present: %s.") %
LOG.debug("Child folder already present: %s." %
child_entity)
return child_entity
@ -316,7 +316,7 @@ class VMwareVolumeOps(object):
child_folder = self._session.invoke_api(self._session.vim,
'CreateFolder', parent_folder,
name=child_folder_name)
LOG.debug(_("Created child folder: %s.") % child_folder)
LOG.debug("Created child folder: %s." % child_folder)
return child_folder
def extend_virtual_disk(self, requested_size_in_gb, name, dc_ref,
@ -329,7 +329,7 @@ class VMwareVolumeOps(object):
:param eager_zero: Boolean determining if the free space
is zeroed out
"""
LOG.debug(_("Extending the volume %(name)s to %(size)s GB."),
LOG.debug("Extending the volume %(name)s to %(size)s GB.",
{'name': name, 'size': requested_size_in_gb})
diskMgr = self._session.vim.service_content.virtualDiskManager
@ -403,7 +403,7 @@ class VMwareVolumeOps(object):
vmProfile.profileId = profileId
create_spec.vmProfile = [vmProfile]
LOG.debug(_("Spec for creating the backing: %s.") % create_spec)
LOG.debug("Spec for creating the backing: %s." % create_spec)
return create_spec
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
@ -422,10 +422,10 @@ class VMwareVolumeOps(object):
:param profileId: storage profile ID to be associated with backing
:return: Reference to the created backing entity
"""
LOG.debug(_("Creating volume backing name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s at "
"folder: %(folder)s resourse pool: %(resource_pool)s "
"datastore name: %(ds_name)s profileId: %(profile)s.") %
LOG.debug("Creating volume backing name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s at "
"folder: %(folder)s resourse pool: %(resource_pool)s "
"datastore name: %(ds_name)s profileId: %(profile)s." %
{'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
'folder': folder, 'resource_pool': resource_pool,
'ds_name': ds_name, 'profile': profileId})
@ -435,7 +435,7 @@ class VMwareVolumeOps(object):
task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
folder, config=create_spec,
pool=resource_pool, host=host)
LOG.debug(_("Initiated creation of volume backing: %s.") % name)
LOG.debug("Initiated creation of volume backing: %s." % name)
task_info = self._session.wait_for_task(task)
backing = task_info.result
LOG.info(_("Successfully created volume backing: %s.") % backing)
@ -478,7 +478,7 @@ class VMwareVolumeOps(object):
relocate_spec.host = host
relocate_spec.diskMoveType = disk_move_type
LOG.debug(_("Spec for relocating the backing: %s.") % relocate_spec)
LOG.debug("Spec for relocating the backing: %s." % relocate_spec)
return relocate_spec
def relocate_backing(self, backing, datastore, resource_pool, host):
@ -492,8 +492,8 @@ class VMwareVolumeOps(object):
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
"""
LOG.debug(_("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s.") %
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s." %
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
# Relocate the volume backing
@ -502,7 +502,7 @@ class VMwareVolumeOps(object):
disk_move_type)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug(_("Initiated relocation of volume backing: %s.") % backing)
LOG.debug("Initiated relocation of volume backing: %s." % backing)
self._session.wait_for_task(task)
LOG.info(_("Successfully relocated volume backing: %(backing)s "
"to datastore: %(ds)s and resource pool: %(rp)s.") %
@ -514,13 +514,13 @@ class VMwareVolumeOps(object):
:param backing: Reference to the backing
:param folder: Reference to the folder
"""
LOG.debug(_("Moving backing: %(backing)s to folder: %(fol)s.") %
LOG.debug("Moving backing: %(backing)s to folder: %(fol)s." %
{'backing': backing, 'fol': folder})
task = self._session.invoke_api(self._session.vim,
'MoveIntoFolder_Task', folder,
list=[backing])
LOG.debug(_("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s.") % {'backing': backing, 'fol': folder})
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s." % {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
LOG.info(_("Successfully moved volume backing: %(backing)s into the "
"folder: %(fol)s.") % {'backing': backing, 'fol': folder})
@ -534,15 +534,15 @@ class VMwareVolumeOps(object):
:param quiesce: Whether to quiesce the backing when taking snapshot
:return: Created snapshot entity reference
"""
LOG.debug(_("Snapshoting backing: %(backing)s with name: %(name)s.") %
LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s." %
{'backing': backing, 'name': name})
task = self._session.invoke_api(self._session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=description,
memory=False, quiesce=quiesce)
LOG.debug(_("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s.") % {'backing': backing, 'name': name})
LOG.debug("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s." % {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
LOG.info(_("Successfully created snapshot: %(snap)s for volume "
@ -593,8 +593,8 @@ class VMwareVolumeOps(object):
:param backing: Reference to the backing entity
:param name: Snapshot name
"""
LOG.debug(_("Deleting the snapshot: %(name)s from backing: "
"%(backing)s.") %
LOG.debug("Deleting the snapshot: %(name)s from backing: "
"%(backing)s." %
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
@ -605,8 +605,8 @@ class VMwareVolumeOps(object):
task = self._session.invoke_api(self._session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
LOG.debug(_("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s.") %
LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s." %
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
LOG.info(_("Successfully deleted snapshot: %(name)s of backing: "
@ -637,7 +637,7 @@ class VMwareVolumeOps(object):
clone_spec.template = False
clone_spec.snapshot = snapshot
LOG.debug(_("Spec for cloning the backing: %s.") % clone_spec)
LOG.debug("Spec for cloning the backing: %s." % clone_spec)
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore):
@ -653,9 +653,9 @@ class VMwareVolumeOps(object):
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
"""
LOG.debug(_("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"datastore: %(ds)s") %
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"datastore: %(ds)s" %
{'back': backing, 'name': name, 'type': clone_type,
'snap': snapshot, 'ds': datastore})
folder = self._get_folder(backing)
@ -667,7 +667,7 @@ class VMwareVolumeOps(object):
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)
LOG.debug(_("Initiated clone of backing: %s.") % name)
LOG.debug("Initiated clone of backing: %s." % name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
LOG.info(_("Successfully created clone: %s.") % new_backing)
@ -678,7 +678,7 @@ class VMwareVolumeOps(object):
:param file_path: Datastore path of the file or folder
"""
LOG.debug(_("Deleting file: %(file)s under datacenter: %(dc)s.") %
LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s." %
{'file': file_path, 'dc': datacenter})
fileManager = self._session.vim.service_content.fileManager
task = self._session.invoke_api(self._session.vim,
@ -686,7 +686,7 @@ class VMwareVolumeOps(object):
fileManager,
name=file_path,
datacenter=datacenter)
LOG.debug(_("Initiated deletion via task: %s.") % task)
LOG.debug("Initiated deletion via task: %s." % task)
self._session.wait_for_task(task)
LOG.info(_("Successfully deleted file: %s.") % file_path)
@ -741,7 +741,7 @@ class VMwareVolumeOps(object):
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
"""
LOG.debug(_('Copying disk data before snapshot of the VM'))
LOG.debug('Copying disk data before snapshot of the VM')
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CopyVirtualDisk_Task',
@ -751,7 +751,7 @@ class VMwareVolumeOps(object):
destName=dest_vmdk_file_path,
destDatacenter=dc_ref,
force=True)
LOG.debug(_("Initiated copying disk data via task: %s.") % task)
LOG.debug("Initiated copying disk data via task: %s." % task)
self._session.wait_for_task(task)
LOG.info(_("Successfully copied disk at: %(src)s to: %(dest)s.") %
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
@ -762,14 +762,14 @@ class VMwareVolumeOps(object):
:param vmdk_file_path: VMDK file path to be deleted
:param dc_ref: Reference to datacenter that contains this VMDK file
"""
LOG.debug(_("Deleting vmdk file: %s.") % vmdk_file_path)
LOG.debug("Deleting vmdk file: %s." % vmdk_file_path)
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'DeleteVirtualDisk_Task',
diskMgr,
name=vmdk_file_path,
datacenter=dc_ref)
LOG.debug(_("Initiated deleting vmdk file via task: %s.") % task)
LOG.debug("Initiated deleting vmdk file via task: %s." % task)
self._session.wait_for_task(task)
LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path)
@ -778,7 +778,7 @@ class VMwareVolumeOps(object):
:return: PbmProfile data objects from VC
"""
LOG.debug(_("Get all profiles defined in current VC."))
LOG.debug("Get all profiles defined in current VC.")
pbm = self._session.pbm
profile_manager = pbm.service_content.profileManager
res_type = pbm.client.factory.create('ns0:PbmProfileResourceType')
@ -786,7 +786,7 @@ class VMwareVolumeOps(object):
profileIds = self._session.invoke_api(pbm, 'PbmQueryProfile',
profile_manager,
resourceType=res_type)
LOG.debug(_("Got profile IDs: %s"), profileIds)
LOG.debug("Got profile IDs: %s", profileIds)
return self._session.invoke_api(pbm, 'PbmRetrieveContent',
profile_manager,
profileIds=profileIds)
@ -797,11 +797,11 @@ class VMwareVolumeOps(object):
:param profile_name: profile name as string
:return: profile id as string
"""
LOG.debug(_("Trying to retrieve profile id for %s"), profile_name)
LOG.debug("Trying to retrieve profile id for %s", profile_name)
for profile in self.get_all_profiles():
if profile.name == profile_name:
profileId = profile.profileId
LOG.debug(_("Got profile id %(id)s for profile %(name)s."),
LOG.debug("Got profile id %(id)s for profile %(name)s.",
{'id': profileId, 'name': profile_name})
return profileId
@ -812,13 +812,13 @@ class VMwareVolumeOps(object):
:param profile_id: profile id string
:return: subset of hubs that match given profile_id
"""
LOG.debug(_("Filtering hubs %(hubs)s that match profile "
"%(profile)s."), {'hubs': hubs, 'profile': profile_id})
LOG.debug("Filtering hubs %(hubs)s that match profile "
"%(profile)s.", {'hubs': hubs, 'profile': profile_id})
pbm = self._session.pbm
placement_solver = pbm.service_content.placementSolver
filtered_hubs = self._session.invoke_api(pbm, 'PbmQueryMatchingHub',
placement_solver,
hubsToSearch=hubs,
profile=profile_id)
LOG.debug(_("Filtered hubs: %s"), filtered_hubs)
LOG.debug("Filtered hubs: %s", filtered_hubs)
return filtered_hubs


@ -99,7 +99,7 @@ class WindowsDriver(driver.ISCSIDriver):
def local_path(self, volume):
base_vhd_folder = self.configuration.windows_iscsi_lun_path
if not os.path.exists(base_vhd_folder):
LOG.debug(_('Creating folder %s '), base_vhd_folder)
LOG.debug('Creating folder %s ', base_vhd_folder)
os.makedirs(base_vhd_folder)
return os.path.join(base_vhd_folder, str(volume['name']) + ".vhd")
@ -202,7 +202,7 @@ class WindowsDriver(driver.ISCSIDriver):
def _update_volume_stats(self):
"""Retrieve stats info for Windows device."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
backend_name = self.__class__.__name__
if self.configuration:
@ -220,7 +220,7 @@ class WindowsDriver(driver.ISCSIDriver):
def extend_volume(self, volume, new_size):
"""Extend an Existing Volume."""
old_size = volume['size']
LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})
additional_size = (new_size - old_size) * 1024
self.utils.extend(volume['name'], additional_size)


@ -150,8 +150,8 @@ class WindowsUtils(object):
try:
disk = self._conn_wmi.WT_Disk(Description=vol_name)
if not disk:
LOG.debug(_('Skipping deleting disk %s as it does not '
'exist.') % vol_name)
LOG.debug('Skipping deleting disk %s as it does not '
'exist.' % vol_name)
return
wt_disk = disk[0]
wt_disk.Delete_()
@ -242,8 +242,8 @@ class WindowsUtils(object):
try:
host = self._conn_wmi.WT_Host(HostName=target_name)
if not host:
LOG.debug(_('Skipping removing target %s as it does not '
'exist.') % target_name)
LOG.debug('Skipping removing target %s as it does not '
'exist.' % target_name)
return
wt_host = host[0]
wt_host.RemoveAllWTDisks()


@ -233,7 +233,7 @@ class ZadaraVPSAConnection(object):
self.ensure_connection(cmd)
(method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs)
LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"'),
LOG.debug('Sending %(method)s to %(url)s. Body "%(body)s"',
{'method': method, 'url': url, 'body': body})
if self.conf.zadara_vpsa_use_ssl:
@ -257,7 +257,7 @@ class ZadaraVPSAConnection(object):
raise exception.FailedCmdWithDump(status=status, data=data)
if method in ['POST', 'DELETE']:
LOG.debug(_('Operation completed. %(data)s'), {'data': data})
LOG.debug('Operation completed. %(data)s', {'data': data})
return xml_tree
@ -354,7 +354,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
if pool is not None:
total = int(pool.findtext('capacity'))
free = int(float(pool.findtext('available-capacity')))
LOG.debug(_('Pool %(name)s: %(total)sGB total, %(free)sGB free'),
LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free',
{'name': pool_name, 'total': total, 'free': free})
return (total, free)
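The capacity figures above come from findtext() calls on the parsed XML response. A self-contained sketch with a made-up response snippet (every element name other than capacity and available-capacity is an assumption):

    from xml.etree import ElementTree as etree

    xml_doc = ('<response><pool><name>pool-1</name>'
               '<capacity>200</capacity>'
               '<available-capacity>120.5</available-capacity>'
               '</pool></response>')
    pool = etree.fromstring(xml_doc).find('pool')
    total = int(pool.findtext('capacity'))                  # 200
    free = int(float(pool.findtext('available-capacity')))  # 120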
@ -435,7 +435,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(_('Create snapshot: %s'), snapshot['name'])
LOG.debug('Create snapshot: %s', snapshot['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
@ -453,7 +453,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(_('Delete snapshot: %s'), snapshot['name'])
LOG.debug('Delete snapshot: %s', snapshot['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
@ -480,7 +480,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(_('Creating volume from snapshot: %s') % snapshot['name'])
LOG.debug('Creating volume from snapshot: %s' % snapshot['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
@ -506,7 +506,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(_('Creating clone of volume: %s') % src_vref['name'])
LOG.debug('Creating clone of volume: %s' % src_vref['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
@ -611,7 +611,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
properties['auth_username'] = ctrl['chap_user']
properties['auth_password'] = ctrl['chap_passwd']
LOG.debug(_('Attach properties: %(properties)s'),
LOG.debug('Attach properties: %(properties)s',
{'properties': properties})
return {'driver_volume_type': 'iscsi',
'data': properties}
@ -647,7 +647,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
LOG.debug("Updating volume stats")
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')

@ -50,7 +50,7 @@ def restore_source_status(context, db, volume_spec):
source_volid = volume_spec['source_volid']
source_status = volume_spec['source_volstatus']
try:
LOG.debug(_('Restoring source %(source_volid)s status to %(status)s') %
LOG.debug('Restoring source %(source_volid)s status to %(status)s' %
{'status': source_status, 'source_volid': source_volid})
db.volume_update(context, source_volid, {'status': source_status})
except exception.CinderException:
@ -81,10 +81,10 @@ def error_out_volume(context, db, volume_id, reason=None):
# if reason:
# status['details'] = reason
try:
LOG.debug(_('Updating volume: %(volume_id)s with %(update)s'
' due to: %(reason)s') % {'volume_id': volume_id,
'reason': reason,
'update': update})
LOG.debug('Updating volume: %(volume_id)s with %(update)s'
' due to: %(reason)s' % {'volume_id': volume_id,
'reason': reason,
'update': update})
db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let this cause further exceptions.

@ -98,8 +98,8 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume_id
LOG.debug(_("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s") %
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s" %
{'volume_id': volume_id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
@ -117,7 +117,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
def _post_reschedule(self, context, volume_id):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug(_("Volume %s: re-scheduled"), volume_id)
LOG.debug("Volume %s: re-scheduled", volume_id)
def _pre_reschedule(self, context, volume_id):
"""Actions that happen before the rescheduling attempt occur here."""
@ -134,7 +134,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
'status': 'creating',
'scheduled_at': timeutils.utcnow(),
}
LOG.debug(_("Updating volume %(volume_id)s with %(update)s.") %
LOG.debug("Updating volume %(volume_id)s with %(update)s." %
{'update': update, 'volume_id': volume_id})
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
@ -406,7 +406,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
def _enable_bootable_flag(self, context, volume_id):
try:
LOG.debug(_('Marking volume %s as bootable.'), volume_id)
LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
LOG.exception(_("Failed updating volume %(volume_id)s bootable"
@ -436,8 +436,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Downloads Glance image to the specified volume."""
copy_image_to_volume = self.driver.copy_image_to_volume
volume_id = volume_ref['id']
LOG.debug(_("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s.") %
LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s." %
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
try:
@ -463,8 +463,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
else:
raise
LOG.debug(_("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully.") %
LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully." %
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
@ -500,8 +500,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
# which means we can have partial create/update failure.
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug(_("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.") %
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s." %
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
for (key, value) in volume_metadata.items():
@ -514,8 +514,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug(_("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.") %
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s." %
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.

@ -98,7 +98,7 @@ class _ExportMixin(object):
volume['name'] not in volume['provider_location']):
msg = _('Detected inconsistency in provider_location id')
LOG.debug(_('%s'), msg)
LOG.debug('%s', msg)
old_name = self._fix_id_migration(context, volume)
if 'in-use' in volume['status']:
old_name = None
@ -181,7 +181,7 @@ class _ExportMixin(object):
except putils.ProcessExecutionError:
link_path = '/dev/%s/%s' % (CONF.volume_group,
old_name)
LOG.debug(_('Symbolic link %s not found') % link_path)
LOG.debug('Symbolic link %s not found' % link_path)
os.chdir(start)
return
@ -248,7 +248,7 @@ class LioAdm(_ExportMixin, iscsi.LioAdm):
auth_user,
auth_pass)
except exception.NotFound:
LOG.debug(_("volume_info:%s"), volume_info)
LOG.debug("volume_info:%s", volume_info)
LOG.info(_("Skipping ensure_export. No iscsi_target "
"provision for volume: %s"), volume_id)

@ -223,7 +223,7 @@ class VolumeManager(manager.SchedulerDependentManager):
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
LOG.debug("Re-exporting %s volumes", len(volumes))
try:
sum = 0
@ -262,7 +262,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# at this point the driver is considered initialized.
self.driver.set_initialized()
LOG.debug(_('Resuming any in progress delete operations'))
LOG.debug('Resuming any in progress delete operations')
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_('Resuming delete on volume: %s') % volume['id'])
@ -376,9 +376,9 @@ class VolumeManager(manager.SchedulerDependentManager):
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("volume %s: removing export"), volume_ref['id'])
LOG.debug("volume %s: removing export", volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['id'])
LOG.debug("volume %s: deleting", volume_ref['id'])
if unmanage_only:
self.driver.unmanage(volume_ref)
else:
@ -445,7 +445,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %(snap_id)s: creating"),
LOG.debug("snapshot %(snap_id)s: creating",
{'snap_id': snapshot_ref['id']})
# Pass context so that drivers that want to use it, can,
@ -505,7 +505,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
LOG.debug("snapshot %s: deleting", snapshot_ref['id'])
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
@ -697,8 +697,8 @@ class VolumeManager(manager.SchedulerDependentManager):
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
image_meta)
LOG.debug(_("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully"),
LOG.debug("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully",
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
LOG.error(_("Error occurred while uploading volume %(volume_id)s "
@ -786,7 +786,7 @@ class VolumeManager(manager.SchedulerDependentManager):
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
LOG.debug(_("Volume %s: creating export"), volume_id)
LOG.debug("Volume %s: creating export", volume_id)
model_update = self.driver.create_export(context.elevated(),
volume)
if model_update:
@ -840,7 +840,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# FCZoneManager to add access control via FC zoning.
vol_type = conn_info.get('driver_volume_type', None)
mode = self.configuration.zoning_mode
LOG.debug(_("Zoning Mode: %s"), mode)
LOG.debug("Zoning Mode: %s", mode)
if vol_type == 'fibre_channel' and self.zonemanager:
self._add_or_delete_fc_connection(conn_info, 1)
return conn_info
@ -865,7 +865,7 @@ class VolumeManager(manager.SchedulerDependentManager):
if conn_info:
vol_type = conn_info.get('driver_volume_type', None)
mode = self.configuration.zoning_mode
LOG.debug(_("Zoning Mode: %s"), mode)
LOG.debug("Zoning Mode: %s", mode)
if vol_type == 'fibre_channel' and self.zonemanager:
self._add_or_delete_fc_connection(conn_info, 0)
except Exception as err:
@ -875,7 +875,7 @@ class VolumeManager(manager.SchedulerDependentManager):
raise exception.VolumeBackendAPIException(data=err_msg)
try:
LOG.debug(_("volume %s: removing export"), volume_id)
LOG.debug("volume %s: removing export", volume_id)
self.driver.remove_export(context.elevated(), volume_ref)
except Exception as ex:
LOG.exception(_("Error detaching volume %(volume)s, "
@ -1047,7 +1047,7 @@ class VolumeManager(manager.SchedulerDependentManager):
{'migration_status': 'migrating'})
if not force_host_copy and new_type_id is None:
try:
LOG.debug(_("volume %s: calling driver migrate_volume"),
LOG.debug("volume %s: calling driver migrate_volume",
volume_ref['id'])
moved, model_update = self.driver.migrate_volume(ctxt,
volume_ref,
@ -1326,12 +1326,12 @@ class VolumeManager(manager.SchedulerDependentManager):
_initiator_target_map = None
if 'initiator_target_map' in conn_info['data']:
_initiator_target_map = conn_info['data']['initiator_target_map']
LOG.debug(_("Initiator Target map:%s"), _initiator_target_map)
LOG.debug("Initiator Target map:%s", _initiator_target_map)
# NOTE(skolathur): Invoke Zonemanager to handle automated FC zone
# management when vol_type is fibre_channel and zoning_mode is fabric
# Initiator_target map associating each initiator WWN to one or more
# target WWN is passed to ZoneManager to add or update zone config.
LOG.debug(_("Zoning op: %s"), zone_op)
LOG.debug("Zoning op: %s", zone_op)
if _initiator_target_map is not None:
try:
if zone_op == 1:

@ -237,7 +237,7 @@ def get_all_specs(context, inactive=False, search_opts={}):
qos_specs = db.qos_specs_get_all(context, inactive)
if search_opts:
LOG.debug(_("Searching by: %s") % search_opts)
LOG.debug("Searching by: %s" % search_opts)
def _check_specs_match(qos_specs, searchdict):
for k, v in searchdict.iteritems():

@ -64,7 +64,7 @@ def get_all_types(context, inactive=0, search_opts={}):
vol_types = db.volume_type_get_all(context, inactive)
if search_opts:
LOG.debug(_("Searching by: %s") % search_opts)
LOG.debug("Searching by: %s" % search_opts)
def _check_extra_specs_match(vol_type, searchdict):
for k, v in searchdict.iteritems():

@ -52,7 +52,7 @@ class BrcdFCSanLookupService(FCSanLookupService):
config = self.configuration
fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
LOG.debug(_('Fabric Names: %s'), fabric_names)
LOG.debug('Fabric Names: %s', fabric_names)
# There can be more than one SAN in the network and we need to
# get credentials for each for SAN context lookup later.
@ -106,7 +106,7 @@ class BrcdFCSanLookupService(FCSanLookupService):
"param - fc_fabric_names"))
fabrics = [x.strip() for x in fabric_names.split(',')]
LOG.debug(_("FC Fabric List: %s"), fabrics)
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
formatted_target_list.append(self.get_formatted_wwn(t))
@ -132,8 +132,8 @@ class BrcdFCSanLookupService(FCSanLookupService):
# logged in
nsinfo = ''
try:
LOG.debug(_("Getting name server data for "
"fabric %s"), fabric_ip)
LOG.debug("Getting name server data for "
"fabric %s", fabric_ip)
self.client.connect(
fabric_ip, fabric_port, fabric_user, fabric_pwd)
nsinfo = self.get_nameserver_info()
@ -149,24 +149,24 @@ class BrcdFCSanLookupService(FCSanLookupService):
raise exception.FCSanLookupServiceException(message=msg)
finally:
self.close_connection()
LOG.debug(_("Lookup service:nsinfo-%s"), nsinfo)
LOG.debug(_("Lookup service:initiator list from "
"caller-%s"), formatted_initiator_list)
LOG.debug(_("Lookup service:target list from "
"caller-%s"), formatted_target_list)
LOG.debug("Lookup service:nsinfo-%s", nsinfo)
LOG.debug("Lookup service:initiator list from "
"caller-%s", formatted_initiator_list)
LOG.debug("Lookup service:target list from "
"caller-%s", formatted_target_list)
visible_targets = filter(lambda x: x in formatted_target_list,
nsinfo)
visible_initiators = filter(lambda x: x in
formatted_initiator_list, nsinfo)
if visible_targets:
LOG.debug(_("Filtered targets is: %s"), visible_targets)
LOG.debug("Filtered targets is: %s", visible_targets)
# getting rid of the : before returning
for idx, elem in enumerate(visible_targets):
elem = str(elem).replace(':', '')
visible_targets[idx] = elem
else:
LOG.debug(_("No targets are in the nameserver for SAN %s"),
LOG.debug("No targets are in the nameserver for SAN %s",
fabric_name)
if visible_initiators:
@ -175,15 +175,15 @@ class BrcdFCSanLookupService(FCSanLookupService):
elem = str(elem).replace(':', '')
visible_initiators[idx] = elem
else:
LOG.debug(_("No initiators are in the nameserver "
"for SAN %s"), fabric_name)
LOG.debug("No initiators are in the nameserver "
"for SAN %s", fabric_name)
fabric_map = {
'initiator_port_wwn_list': visible_initiators,
'target_port_wwn_list': visible_targets
}
device_map[fabric_principal_wwn] = fabric_map
LOG.debug(_("Device map for SAN context: %s"), device_map)
LOG.debug("Device map for SAN context: %s", device_map)
return device_map
def get_nameserver_info(self):
@ -208,7 +208,7 @@ class BrcdFCSanLookupService(FCSanLookupService):
LOG.error(_("Failed collecting nscamshow"))
if cli_output:
nsinfo_list.extend(self._parse_ns_output(cli_output))
LOG.debug(_("Connector returning nsinfo-%s"), nsinfo_list)
LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
return nsinfo_list
def close_connection(self):

@ -127,14 +127,14 @@ class BrcdFCZoneClientCLI(object):
}
activate - True/False
"""
LOG.debug(_("Add Zones - Zones passed: %s"), zones)
LOG.debug("Add Zones - Zones passed: %s", zones)
cfg_name = None
iterator_count = 0
zone_with_sep = ''
active_zone_set = self.get_active_zone_set()
LOG.debug(_("Active zone set:%s"), active_zone_set)
LOG.debug("Active zone set:%s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug(_("zone list:%s"), zone_list)
LOG.debug("zone list:%s", zone_list)
for zone in zones.keys():
# if zone exists, its an update. Delete & insert
# TODO(skolathur): This can be optimized to an update call later
@ -145,16 +145,16 @@ class BrcdFCZoneClientCLI(object):
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_("Deleting zone failed %s"), zone)
LOG.debug(_("Deleted Zone before insert : %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_members_with_sep = ';'.join(str(member) for
member in zones[zone])
LOG.debug(_("Forming command for add zone"))
LOG.debug("Forming command for add zone")
cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % {
'zone': zone,
'zone_members_with_sep': zone_members_with_sep}
LOG.debug(_("Adding zone, cmd to run %s"), cmd)
LOG.debug("Adding zone, cmd to run %s", cmd)
self.apply_zone_change(cmd.split())
LOG.debug(_("Created zones on the switch"))
LOG.debug("Created zones on the switch")
if(iterator_count > 0):
zone_with_sep += ';'
iterator_count += 1
@ -169,7 +169,7 @@ class BrcdFCZoneClientCLI(object):
else:
cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \
% {'zoneset': cfg_name, 'zones': zone_with_sep}
LOG.debug(_("New zone %s"), cmd)
LOG.debug("New zone %s", cmd)
self.apply_zone_change(cmd.split())
self._cfg_save()
if activate:
@ -220,7 +220,7 @@ class BrcdFCZoneClientCLI(object):
% {'active_zoneset_name': active_zoneset_name,
'zone_names': zone_names
}
LOG.debug(_("Delete zones: Config cmd to run:%s"), cmd)
LOG.debug("Delete zones: Config cmd to run:%s", cmd)
self.apply_zone_change(cmd.split())
for zone in zones:
self._zone_delete(zone)
@ -297,7 +297,7 @@ class BrcdFCZoneClientCLI(object):
not expected.
"""
stdout, stderr = None, None
LOG.debug(_("Executing command via ssh: %s"), cmd_list)
LOG.debug("Executing command via ssh: %s", cmd_list)
stdout, stderr = self._run_ssh(cmd_list, True, 1)
# no output expected, so output means there is an error
if stdout:
@ -320,7 +320,7 @@ class BrcdFCZoneClientCLI(object):
if (stdout):
for line in stdout:
if 'Fabric OS: v' in line:
LOG.debug(_("Firmware version string:%s"), line)
LOG.debug("Firmware version string:%s", line)
ver = line.split('Fabric OS: v')[1].split('.')
if (ver):
firmware = int(ver[0] + ver[1])
@ -430,7 +430,7 @@ class BrcdFCZoneClientCLI(object):
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug(_("Executing command via ssh: %s") % command)
LOG.debug("Executing command via ssh: %s" % command)
last_exception = None
try:
with self.sshpool.item() as ssh:
@ -442,10 +442,10 @@ class BrcdFCZoneClientCLI(object):
stdin.write("%s\n" % ZoneConstant.YES)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug(_("Exit Status from ssh:%s"), exit_status)
LOG.debug("Exit Status from ssh:%s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
@ -460,8 +460,8 @@ class BrcdFCZoneClientCLI(object):
LOG.error(e)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug(_("Handling error case after "
"SSH:%s"), last_exception)
LOG.debug("Handling error case after "
"SSH:%s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,

@ -130,7 +130,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
:param fabric: Fabric name from cinder.conf file
:param initiator_target_map: Mapping of initiator to list of targets
"""
LOG.debug(_("Add connection for Fabric:%s"), fabric)
LOG.debug("Add connection for Fabric:%s", fabric)
LOG.info(_("BrcdFCZoneDriver - Add connection "
"for I-T map: %s"), initiator_target_map)
fabric_ip = self.fabric_configs[fabric].safe_get('fc_fabric_address')
@ -226,7 +226,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
LOG.error(e)
msg = _("Failed to add zoning configuration %s") % e
raise exception.FCZoneDriverException(msg)
LOG.debug(_("Zones added successfully: %s"), zone_map)
LOG.debug("Zones added successfully: %s", zone_map)
@lockutils.synchronized('brcd', 'fcfabric-', True)
def delete_connection(self, fabric, initiator_target_map):
@ -239,7 +239,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
:param fabric: Fabric name from cinder.conf file
:param initiator_target_map: Mapping of initiator to list of targets
"""
LOG.debug(_("Delete connection for fabric:%s"), fabric)
LOG.debug("Delete connection for fabric:%s", fabric)
LOG.info(_("BrcdFCZoneDriver - Delete connection for I-T map: %s"),
initiator_target_map)
fabric_ip = self.fabric_configs[fabric].safe_get('fc_fabric_address')
@ -283,7 +283,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
# Based on zoning policy, get zone member list and push changes to
# fabric. This operation could result in an update for zone config
# with new member list or deleting zones from active cfg.
LOG.debug(_("zone config from Fabric: %s"), cfgmap_from_fabric)
LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric)
for initiator_key in initiator_target_map.keys():
initiator = initiator_key.lower()
formatted_initiator = self.get_formatted_wwn(initiator)
@ -298,7 +298,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
self.configuration.zone_name_prefix
+ initiator.replace(':', '')
+ target.replace(':', ''))
LOG.debug(_("Zone name to del: %s"), zone_name)
LOG.debug("Zone name to del: %s", zone_name)
if len(zone_names) > 0 and (zone_name in zone_names):
# delete zone.
LOG.debug(("Added zone to delete to "
@ -324,22 +324,22 @@ class BrcdFCZoneDriver(FCZoneDriver):
# filtered list and if it is non-empty, add initiator
# to it and update zone if filtered list is empty, we
# remove that zone.
LOG.debug(_("Zone delete - I mode: "
"filtered targets:%s"), filtered_members)
LOG.debug("Zone delete - I mode: "
"filtered targets:%s", filtered_members)
if filtered_members:
filtered_members.append(formatted_initiator)
LOG.debug(_("Filtered zone members to "
"update: %s"), filtered_members)
LOG.debug("Filtered zone members to "
"update: %s", filtered_members)
zone_map[zone_name] = filtered_members
LOG.debug(_("Filtered zone Map to "
"update: %s"), zone_map)
LOG.debug("Filtered zone Map to "
"update: %s", zone_map)
else:
zones_to_delete.append(zone_name)
else:
LOG.info(_("Zoning Policy: %s, not "
"recognized"), zoning_policy)
LOG.debug(_("Final Zone map to update: %s"), zone_map)
LOG.debug(_("Final Zone list to delete: %s"), zones_to_delete)
LOG.debug("Final Zone map to update: %s", zone_map)
LOG.debug("Final Zone list to delete: %s", zones_to_delete)
try:
# Update zone membership.
if zone_map:
@ -377,13 +377,13 @@ class BrcdFCZoneDriver(FCZoneDriver):
fabric_map = {}
fc_fabric_names = self.configuration.fc_fabric_names
fabrics = [x.strip() for x in fc_fabric_names.split(',')]
LOG.debug(_("Fabric List: %s"), fabrics)
LOG.debug(_("Target wwn List: %s"), target_wwn_list)
LOG.debug("Fabric List: %s", fabrics)
LOG.debug("Target wwn List: %s", target_wwn_list)
if len(fabrics) > 0:
for t in target_wwn_list:
formatted_target_list.append(self.get_formatted_wwn(t.lower()))
LOG.debug(_("Formatted Target wwn List:"
" %s"), formatted_target_list)
LOG.debug("Formatted Target wwn List:"
" %s", formatted_target_list)
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_address')
@ -419,7 +419,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
nsinfo = None
try:
nsinfo = conn.get_nameserver_info()
LOG.debug(_("name server info from fabric:%s"), nsinfo)
LOG.debug("name server info from fabric:%s", nsinfo)
conn.cleanup()
except exception.BrocadeZoningCliException as ex:
with excutils.save_and_reraise_exception():
@ -442,9 +442,9 @@ class BrcdFCZoneDriver(FCZoneDriver):
visible_targets[idx]).replace(':', '')
fabric_map[fabric_name] = visible_targets
else:
LOG.debug(_("No targets are in the nameserver for SAN %s"),
LOG.debug("No targets are in the nameserver for SAN %s",
fabric_name)
LOG.debug(_("Return SAN context output:%s"), fabric_map)
LOG.debug("Return SAN context output:%s", fabric_map)
return fabric_map
def get_active_zone_set(self, fabric_ip,
@ -453,8 +453,8 @@ class BrcdFCZoneDriver(FCZoneDriver):
cfgmap = {}
conn = None
try:
LOG.debug(_("Southbound connector:"
" %s"), self.configuration.brcd_sb_connector)
LOG.debug("Southbound connector:"
" %s", self.configuration.brcd_sb_connector)
conn = importutils.import_object(
self.configuration.brcd_sb_connector,
ipaddress=fabric_ip, username=fabric_user,
@ -473,5 +473,5 @@ class BrcdFCZoneDriver(FCZoneDriver):
msg = (_("Failed to access active zoning configuration:%s") % e)
LOG.error(msg)
raise exception.FCZoneDriverException(msg)
LOG.debug(_("Active zone set from fabric: %s"), cfgmap)
LOG.debug("Active zone set from fabric: %s", cfgmap)
return cfgmap

@ -41,7 +41,7 @@ class FCZoneDriver(fc_common.FCCommon):
def __init__(self, **kwargs):
super(FCZoneDriver, self).__init__(**kwargs)
LOG.debug(_("Initializing FCZoneDriver"))
LOG.debug("Initializing FCZoneDriver")
def add_connection(self, fabric, initiator_target_map):
"""Add connection control.

@ -74,8 +74,8 @@ class FCSanLookupService(fc_common.FCCommon):
# Initialize vendor specific implementation of FCZoneDriver
if (self.configuration.fc_san_lookup_service):
lookup_service = self.configuration.fc_san_lookup_service
LOG.debug(_("Lookup service to invoke: "
"%s"), lookup_service)
LOG.debug("Lookup service to invoke: "
"%s", lookup_service)
self.lookup_service = importutils.import_object(
lookup_service, configuration=self.configuration)
else:

@ -80,7 +80,7 @@ class ZoneManager(fc_common.FCCommon):
self.configuration.append_config_values(zone_manager_opts)
zone_driver = self.configuration.zone_driver
LOG.debug(_("Zone Driver from config: {%s}"), zone_driver)
LOG.debug("Zone Driver from config: {%s}", zone_driver)
zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager')
# Initialize vendor specific implementation of FCZoneDriver
@ -114,11 +114,11 @@ class ZoneManager(fc_common.FCCommon):
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.debug(_("Target List :%s"), {initiator: target_list})
LOG.debug("Target List :%s", {initiator: target_list})
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug(_("Fabric Map after context lookup:%s"), fabric_map)
LOG.debug("Fabric Map after context lookup:%s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
@ -162,8 +162,8 @@ class ZoneManager(fc_common.FCCommon):
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug(_("Delete connection Fabric Map from SAN "
"context: %s"), fabric_map)
LOG.debug("Delete connection Fabric Map from SAN "
"context: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
@ -180,8 +180,8 @@ class ZoneManager(fc_common.FCCommon):
if len(valid_i_t_map) > 0:
self.driver.delete_connection(fabric, valid_i_t_map)
LOG.debug(_("Delete Connection - Finished iterating over all"
" target list"))
LOG.debug("Delete Connection - Finished iterating over all"
" target list")
except Exception as e:
msg = _("Failed removing connection for fabric=%(fabric)s: "
"Error:%(err)s") % {'fabric': connected_fabric,
@ -196,7 +196,7 @@ class ZoneManager(fc_common.FCCommon):
to list of target WWNs visible to the fabric.
"""
fabric_map = self.driver.get_san_context(target_wwn_list)
LOG.debug(_("Got SAN context:%s"), fabric_map)
LOG.debug("Got SAN context:%s", fabric_map)
return fabric_map
def get_valid_initiator_target_map(self, initiator_target_map,

@ -46,3 +46,6 @@ commands = {posargs}
ignore = E711,E712,F403,H302,H803
builtins = _
exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build
[hacking]
local-check-factory = cinder.hacking.checks.factory
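
The tox.ini stanza above points flake8's hacking plugin at a local check factory; the check module itself is not shown in this diff. A minimal sketch of what an N319 check and its factory could look like, assuming the module lives at cinder/hacking/checks.py (names and matching logic are illustrative, not copied from the change):

# cinder/hacking/checks.py (assumed path; sketch only)


def no_translate_debug_logs(logical_line):
    """N319 - debug level logs must not be translated.

    Flags logical lines such as: LOG.debug(_("message"))
    """
    if logical_line.startswith("LOG.debug(_("):
        yield 0, "N319 Don't translate debug level logs"


def factory(register):
    # hacking invokes this factory with a register callback; every
    # registered function then runs as a local flake8 check.
    register(no_translate_debug_logs)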