Browse Source

Add missing whitespace separator between words

This change adds a missing whitespace separator between words,
usually in log messages.

Change-Id: I3d0ffb5a32397998de1e66b823141cb0cc7d039c
changes/88/618488/3
zhufl 3 years ago
parent
commit
13c59aebd6
  1. 4
      cinder/volume/driver.py
  2. 2
      cinder/volume/drivers/dell_emc/ps.py
  3. 4
      cinder/volume/drivers/dell_emc/sc/storagecenter_api.py
  4. 4
      cinder/volume/drivers/dell_emc/vmax/common.py
  5. 2
      cinder/volume/drivers/dell_emc/vmax/rest.py
  6. 4
      cinder/volume/drivers/dell_emc/vnx/replication.py
  7. 2
      cinder/volume/drivers/dell_emc/vnx/taskflows.py
  8. 4
      cinder/volume/drivers/dell_emc/vnx/utils.py
  9. 2
      cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
  10. 2
      cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py
  11. 12
      cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
  12. 12
      cinder/volume/drivers/inspur/instorage/instorage_common.py
  13. 2
      cinder/volume/drivers/nexenta/options.py
  14. 6
      cinder/volume/drivers/nimble.py
  15. 2
      cinder/volume/drivers/zfssa/zfssarest.py
  16. 2
      cinder/volume/manager.py
  17. 2
      cinder/volume/targets/scst.py

4
cinder/volume/driver.py

@ -82,7 +82,7 @@ volume_opts = [
cfg.IntOpt('volume_clear_size',
default=0,
max=1024,
help='Size in MiB to wipe at start of old volumes. 1024 MiB'
help='Size in MiB to wipe at start of old volumes. 1024 MiB '
'at max. 0 => all'),
cfg.StrOpt('volume_clear_ionice',
help='The flag to pass to ionice to alter the i/o priority '
@ -284,7 +284,7 @@ volume_opts = [
iser_opts = [
cfg.IntOpt('num_iser_scan_tries',
default=3,
help='The maximum number of times to rescan iSER target'
help='The maximum number of times to rescan iSER target '
'to find volume'),
cfg.StrOpt('iser_target_prefix',
default='iqn.2010-10.org.openstack:',

2
cinder/volume/drivers/dell_emc/ps.py

@ -412,7 +412,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
def do_setup(self, context):
"""Disable cli confirmation and tune output format."""
try:
msg = _("The Dell PS driver is moving to maintenance mode"
msg = _("The Dell PS driver is moving to maintenance mode "
"in the S release and will be removed in T release.")
versionutils.report_deprecated_feature(LOG, msg)

4
cinder/volume/drivers/dell_emc/sc/storagecenter_api.py

@ -3578,11 +3578,11 @@ class SCApi(object):
r = self.client.post('StorageCenter/ScLiveVolume', payload, True)
if self._check_result(r):
LOG.info('create_live_volume: Live Volume created from'
LOG.info('create_live_volume: Live Volume created from '
'%(svol)s to %(ssn)s',
{'svol': self._get_id(scvolume), 'ssn': remotessn})
return self._get_json(r)
LOG.error('create_live_volume: Failed to create Live Volume from'
LOG.error('create_live_volume: Failed to create Live Volume from '
'%(svol)s to %(ssn)s',
{'svol': self._get_id(scvolume), 'ssn': remotessn})
return None

4
cinder/volume/drivers/dell_emc/vmax/common.py

@ -3493,7 +3493,7 @@ class VMAXCommon(object):
extra_specs[utils.WORKLOAD], extra_specs,
do_disable_compression, is_re=True, rep_mode=rep_mode))
except Exception as e:
exception_message = (_("Failed to get or create replication"
exception_message = (_("Failed to get or create replication "
"group. Exception received: %(e)s")
% {'e': six.text_type(e)})
LOG.exception(exception_message)
@ -4579,7 +4579,7 @@ class VMAXCommon(object):
LOG.info("Reverted the volume to snapshot successfully")
except Exception as e:
exception_message = (_(
"Failed to revert the volume to the snapshot"
"Failed to revert the volume to the snapshot. "
"Exception received was %(e)s") % {'e': six.text_type(e)})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(

2
cinder/volume/drivers/dell_emc/vmax/rest.py

@ -1143,7 +1143,7 @@ class VMAXRest(object):
host_lun_id = int(host_lun_id, 16)
except Exception as e:
LOG.error("Unable to retrieve connection information "
"for volume %(vol)s in masking view %(mv)s"
"for volume %(vol)s in masking view %(mv)s. "
"Exception received: %(e)s.",
{'vol': device_id, 'mv': maskingview,
'e': e})

4
cinder/volume/drivers/dell_emc/vnx/replication.py

@ -245,7 +245,7 @@ class ReplicationAdapter(object):
group.replication_status ==
fields.ReplicationStatus.ENABLED):
# Group is on the primary VNX, failover is needed.
LOG.info('%(group_id)s will be failed over to secondary'
LOG.info('%(group_id)s will be failed over to secondary '
'%(secondary_backend_id)s.',
{'group_id': group.id,
'secondary_backend_id': secondary_backend_id})
@ -254,7 +254,7 @@ class ReplicationAdapter(object):
group.replication_status ==
fields.ReplicationStatus.FAILED_OVER):
# Group is on the secondary VNX, failover is needed.
LOG.info('%(group_id)s will be failed over to primary'
LOG.info('%(group_id)s will be failed over to primary '
'%(secondary_backend_id)s.',
{'group_id': group.id,
'secondary_backend_id': secondary_backend_id})

2
cinder/volume/drivers/dell_emc/vnx/taskflows.py

@ -332,7 +332,7 @@ class ExtendSMPTask(task.Task):
LOG.warning('Not extending the SMP: %s, because its base lun '
'is not thin.', smp_name)
else:
LOG.info('Not extending the SMP: %(smp)s, size: %(size)s, because'
LOG.info('Not extending the SMP: %(smp)s, size: %(size)s, because '
'the new size: %(new_size)s is smaller.',
{'smp': smp_name, 'size': smp.total_capacity_gb,
'new_size': lun_size})

4
cinder/volume/drivers/dell_emc/vnx/utils.py

@ -136,7 +136,7 @@ def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,
test_value = False
with excutils.save_and_reraise_exception(
reraise=reraise_arbiter(ex)):
LOG.debug('Exception raised when executing %(condition_name)s'
LOG.debug('Exception raised when executing %(condition_name)s '
'in wait_until. Message: %(msg)s',
{'condition_name': condition.__name__,
'msg': ex.message})
@ -169,7 +169,7 @@ def validate_storage_migration(volume, target_host, src_serial, src_protocol):
return False
if serial_number != src_serial:
LOG.debug('Skip storage-assisted migration because '
'target and source backend are not managing'
'target and source backend are not managing '
'the same array.')
return False
if (target_host['capabilities']['storage_protocol'] != src_protocol

2
cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py

@ -682,7 +682,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
if lss_pair[0] not in source_lss_used]
self._assert(available_lss_pairs,
"All LSSs reserved for CG have been used out, "
"please reserve more LSS for CG if there are still"
"please reserve more LSS for CG if there are still "
"some empty LSSs left.")
else:
available_lss_pairs = set()

2
cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py

@ -1668,7 +1668,7 @@ class XIVProxy(proxy.IBMStorageProxy):
src_vref_size = float(src_vref['size'])
volume_size = float(volume['size'])
if volume_size < src_vref_size:
error = (_("New volume size (%(vol_size)s GB) cannot be less"
error = (_("New volume size (%(vol_size)s GB) cannot be less "
"than the source volume size (%(src_size)s GB)..")
% {'vol_size': volume_size, 'src_size': src_vref_size})
LOG.error(error)

12
cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py

@ -1864,7 +1864,7 @@ class StorwizeHelpers(object):
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcconsistgrp(fc_consistgrp)
elif mapping_attrs['status'] != 'preparing':
msg = (_('Unexpected mapping status %(status)s for mapping'
msg = (_('Unexpected mapping status %(status)s for mapping '
'%(id)s. Attributes: %(attr)s.') %
{'status': mapping_attrs['status'],
'id': fc_consistgrp,
@ -1976,7 +1976,7 @@ class StorwizeHelpers(object):
pool=None):
"""Create a FlashCopy mapping and add to consistent group."""
LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
' from source %(source)s to target %(target)s'
' from source %(source)s to target %(target)s. '
'Then add the flashcopy to %(cg)s.',
{'source': source, 'target': target, 'cg': consistgrp})
@ -2469,7 +2469,7 @@ class StorwizeHelpers(object):
topology is hyperswap.
"""
if state['code_level'] < (7, 6, 0, 0):
LOG.debug('Hyperswap failure as the storage'
LOG.debug('Hyperswap failure as the storage '
'code_level is %(code_level)s, below '
'the required 7.6.0.0.',
{'code_level': state['code_level']})
@ -3511,7 +3511,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
{'master': volume['name'], 'aux': tgt_volume})
continue
LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s'
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
'primary=%(primary)s',
{'vol': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
@ -3657,7 +3657,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
def _replica_vol_ready():
rep_info = self._helpers.get_relationship_info(volume)
if not rep_info:
msg = (_('_wait_replica_vol_ready: no rc-releationship'
msg = (_('_wait_replica_vol_ready: no rc-releationship '
'is established for volume:%(volume)s. Please '
're-establish the rc-relationship and '
'synchronize the volumes on backend storage.'),
@ -4235,7 +4235,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
rccg = self._helpers.get_rccg(rccg_name)
if not rccg:
msg = (_('_replica_grp_ready: no group %(rccg)s exists on the '
'backend. Please re-create the rccg and synchronize'
'backend. Please re-create the rccg and synchronize '
'the volumes on backend storage.'),
{'rccg': rccg_name})
LOG.error(msg)

12
cinder/volume/drivers/inspur/instorage/instorage_common.py

@ -951,7 +951,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver):
self._state, opts)):
msg = (_("Failed to manage existing volume due to "
"I/O group mismatch. The I/O group of the "
"volume to be managed is %(vdisk_iogrp)s. I/O group"
"volume to be managed is %(vdisk_iogrp)s. I/O group "
"of the chosen type is %(opt_iogrp)s.") %
{'vdisk_iogrp': vdisk['IO_group_name'],
'opt_iogrp': opts['iogrp']})
@ -1360,7 +1360,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver):
{'master': volume.name, 'aux': tgt_volume})
continue
LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s'
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
'primary=%(primary)s',
{'vol': volume.name,
'master_vol': rep_info['master_vdisk_name'],
@ -1455,7 +1455,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver):
def _replica_vol_ready():
rep_info = self._assistant.get_relationship_info(volume)
if not rep_info:
msg = (_('_wait_replica_vol_ready: no rc-releationship'
msg = (_('_wait_replica_vol_ready: no rc-releationship '
'is established for volume:%(volume)s. Please '
're-establish the rc-relationship and '
'synchronize the volumes on backend storage.'),
@ -2525,7 +2525,7 @@ class InStorageAssistant(object):
greenthread.sleep(self.WAIT_TIME)
if not mapping_ready:
msg = (_('Mapping %(id)s prepare failed to complete within the'
msg = (_('Mapping %(id)s prepare failed to complete within the '
'allotted %(to)d seconds timeout. Terminating.')
% {'id': lc_map_id,
'to': timeout})
@ -2752,8 +2752,8 @@ class InStorageAssistant(object):
config, opts, full_copy=False,
pool=None):
"""Create a LocalCopy mapping and add to consistent group."""
LOG.debug('Enter: create_localcopy_to_consistgrp: create LocalCopy'
' from source %(source)s to target %(target)s'
LOG.debug('Enter: create_localcopy_to_consistgrp: create LocalCopy '
'from source %(source)s to target %(target)s. '
'Then add the localcopy to %(cg)s.',
{'source': source, 'target': target, 'cg': consistgrp})

2
cinder/volume/drivers/nexenta/options.py

@ -114,7 +114,7 @@ NEXENTA_ISCSI_OPTS = [
help='Nexenta target portal groups'),
cfg.StrOpt('nexenta_iscsi_target_portals',
default='',
help='Comma separated list of portals for NexentaStor5, in'
help='Comma separated list of portals for NexentaStor5, in '
'format of IP1:port1,IP2:port2. Port is optional, '
'default=3260. Example: 10.10.10.1:3267,10.10.1.2'),
cfg.StrOpt('nexenta_iscsi_target_host_group',

6
cinder/volume/drivers/nimble.py

@ -359,7 +359,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
if refresh:
group_info = self.APIExecutor.get_group_info()
if 'usage_valid' not in group_info:
raise NimbleDriverException(_('SpaceInfo returned by'
raise NimbleDriverException(_('SpaceInfo returned by '
'array is invalid'))
total_capacity = (group_info['usable_capacity_bytes'] /
float(units.Gi))
@ -1467,7 +1467,7 @@ class NimbleRestAPIExecutor(object):
filter = {"name": initiator_group_name}
r = self.get_query(api, filter)
if not r.json()['data']:
raise NimbleAPIException(_("Unable to retrieve information for"
raise NimbleAPIException(_("Unable to retrieve information for "
"initiator group : %s") %
initiator_group_name)
return r.json()['data'][0]['id']
@ -1544,7 +1544,7 @@ class NimbleRestAPIExecutor(object):
self.remove_acl(volume, acl_record['initiator_group_name'])
def remove_acl(self, volume, initiator_group_name):
LOG.info("removing ACL from volume=%(vol)s"
LOG.info("removing ACL from volume=%(vol)s "
"and %(igroup)s",
{"vol": volume['name'],
"igroup": initiator_group_name})

2
cinder/volume/drivers/zfssa/zfssarest.py

@ -234,7 +234,7 @@ class ZFSSAApi(object):
'pool: %(pool)s '
'Project: %(proj)s '
'volume: %(vol)s '
'for target: %(tgt)s and pool: %(tgt_pool)s'
'for target: %(tgt)s and pool: %(tgt_pool)s. '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'pool': host_pool,

2
cinder/volume/manager.py

@ -1437,7 +1437,7 @@ class VolumeManager(manager.CleanableManager,
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception('Create clone_image_volume: %(volume_id)s'
LOG.exception('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)',
{'volume_id': volume.id,

2
cinder/volume/targets/scst.py

@ -310,7 +310,7 @@ class SCSTAdm(iscsi.ISCSITarget):
self.show_target(iscsi_target, iqn)
except Exception:
LOG.error("Skipping remove_export. No iscsi_target is"
LOG.error("Skipping remove_export. No iscsi_target is "
"presently exported for volume: %s", volume['id'])
return
vol = self.db.volume_get(context, volume['id'])

Loading…
Cancel
Save