Add missing whitespace separator between words
This is to add a missing whitespace separator between words, usually in log messages. Change-Id: I3d0ffb5a32397998de1e66b823141cb0cc7d039c
This commit is contained in:
parent
18d0576bb5
commit
13c59aebd6
|
@ -82,7 +82,7 @@ volume_opts = [
|
||||||
cfg.IntOpt('volume_clear_size',
|
cfg.IntOpt('volume_clear_size',
|
||||||
default=0,
|
default=0,
|
||||||
max=1024,
|
max=1024,
|
||||||
help='Size in MiB to wipe at start of old volumes. 1024 MiB'
|
help='Size in MiB to wipe at start of old volumes. 1024 MiB '
|
||||||
'at max. 0 => all'),
|
'at max. 0 => all'),
|
||||||
cfg.StrOpt('volume_clear_ionice',
|
cfg.StrOpt('volume_clear_ionice',
|
||||||
help='The flag to pass to ionice to alter the i/o priority '
|
help='The flag to pass to ionice to alter the i/o priority '
|
||||||
|
@ -284,7 +284,7 @@ volume_opts = [
|
||||||
iser_opts = [
|
iser_opts = [
|
||||||
cfg.IntOpt('num_iser_scan_tries',
|
cfg.IntOpt('num_iser_scan_tries',
|
||||||
default=3,
|
default=3,
|
||||||
help='The maximum number of times to rescan iSER target'
|
help='The maximum number of times to rescan iSER target '
|
||||||
'to find volume'),
|
'to find volume'),
|
||||||
cfg.StrOpt('iser_target_prefix',
|
cfg.StrOpt('iser_target_prefix',
|
||||||
default='iqn.2010-10.org.openstack:',
|
default='iqn.2010-10.org.openstack:',
|
||||||
|
|
|
@ -412,7 +412,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
|
||||||
def do_setup(self, context):
|
def do_setup(self, context):
|
||||||
"""Disable cli confirmation and tune output format."""
|
"""Disable cli confirmation and tune output format."""
|
||||||
try:
|
try:
|
||||||
msg = _("The Dell PS driver is moving to maintenance mode"
|
msg = _("The Dell PS driver is moving to maintenance mode "
|
||||||
"in the S release and will be removed in T release.")
|
"in the S release and will be removed in T release.")
|
||||||
versionutils.report_deprecated_feature(LOG, msg)
|
versionutils.report_deprecated_feature(LOG, msg)
|
||||||
|
|
||||||
|
|
|
@ -3578,11 +3578,11 @@ class SCApi(object):
|
||||||
|
|
||||||
r = self.client.post('StorageCenter/ScLiveVolume', payload, True)
|
r = self.client.post('StorageCenter/ScLiveVolume', payload, True)
|
||||||
if self._check_result(r):
|
if self._check_result(r):
|
||||||
LOG.info('create_live_volume: Live Volume created from'
|
LOG.info('create_live_volume: Live Volume created from '
|
||||||
'%(svol)s to %(ssn)s',
|
'%(svol)s to %(ssn)s',
|
||||||
{'svol': self._get_id(scvolume), 'ssn': remotessn})
|
{'svol': self._get_id(scvolume), 'ssn': remotessn})
|
||||||
return self._get_json(r)
|
return self._get_json(r)
|
||||||
LOG.error('create_live_volume: Failed to create Live Volume from'
|
LOG.error('create_live_volume: Failed to create Live Volume from '
|
||||||
'%(svol)s to %(ssn)s',
|
'%(svol)s to %(ssn)s',
|
||||||
{'svol': self._get_id(scvolume), 'ssn': remotessn})
|
{'svol': self._get_id(scvolume), 'ssn': remotessn})
|
||||||
return None
|
return None
|
||||||
|
|
|
@ -3493,7 +3493,7 @@ class VMAXCommon(object):
|
||||||
extra_specs[utils.WORKLOAD], extra_specs,
|
extra_specs[utils.WORKLOAD], extra_specs,
|
||||||
do_disable_compression, is_re=True, rep_mode=rep_mode))
|
do_disable_compression, is_re=True, rep_mode=rep_mode))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
exception_message = (_("Failed to get or create replication"
|
exception_message = (_("Failed to get or create replication "
|
||||||
"group. Exception received: %(e)s")
|
"group. Exception received: %(e)s")
|
||||||
% {'e': six.text_type(e)})
|
% {'e': six.text_type(e)})
|
||||||
LOG.exception(exception_message)
|
LOG.exception(exception_message)
|
||||||
|
@ -4579,7 +4579,7 @@ class VMAXCommon(object):
|
||||||
LOG.info("Reverted the volume to snapshot successfully")
|
LOG.info("Reverted the volume to snapshot successfully")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
exception_message = (_(
|
exception_message = (_(
|
||||||
"Failed to revert the volume to the snapshot"
|
"Failed to revert the volume to the snapshot. "
|
||||||
"Exception received was %(e)s") % {'e': six.text_type(e)})
|
"Exception received was %(e)s") % {'e': six.text_type(e)})
|
||||||
LOG.error(exception_message)
|
LOG.error(exception_message)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
|
|
|
@ -1143,7 +1143,7 @@ class VMAXRest(object):
|
||||||
host_lun_id = int(host_lun_id, 16)
|
host_lun_id = int(host_lun_id, 16)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error("Unable to retrieve connection information "
|
LOG.error("Unable to retrieve connection information "
|
||||||
"for volume %(vol)s in masking view %(mv)s"
|
"for volume %(vol)s in masking view %(mv)s. "
|
||||||
"Exception received: %(e)s.",
|
"Exception received: %(e)s.",
|
||||||
{'vol': device_id, 'mv': maskingview,
|
{'vol': device_id, 'mv': maskingview,
|
||||||
'e': e})
|
'e': e})
|
||||||
|
|
|
@ -245,7 +245,7 @@ class ReplicationAdapter(object):
|
||||||
group.replication_status ==
|
group.replication_status ==
|
||||||
fields.ReplicationStatus.ENABLED):
|
fields.ReplicationStatus.ENABLED):
|
||||||
# Group is on the primary VNX, failover is needed.
|
# Group is on the primary VNX, failover is needed.
|
||||||
LOG.info('%(group_id)s will be failed over to secondary'
|
LOG.info('%(group_id)s will be failed over to secondary '
|
||||||
'%(secondary_backend_id)s.',
|
'%(secondary_backend_id)s.',
|
||||||
{'group_id': group.id,
|
{'group_id': group.id,
|
||||||
'secondary_backend_id': secondary_backend_id})
|
'secondary_backend_id': secondary_backend_id})
|
||||||
|
@ -254,7 +254,7 @@ class ReplicationAdapter(object):
|
||||||
group.replication_status ==
|
group.replication_status ==
|
||||||
fields.ReplicationStatus.FAILED_OVER):
|
fields.ReplicationStatus.FAILED_OVER):
|
||||||
# Group is on the secondary VNX, failover is needed.
|
# Group is on the secondary VNX, failover is needed.
|
||||||
LOG.info('%(group_id)s will be failed over to primary'
|
LOG.info('%(group_id)s will be failed over to primary '
|
||||||
'%(secondary_backend_id)s.',
|
'%(secondary_backend_id)s.',
|
||||||
{'group_id': group.id,
|
{'group_id': group.id,
|
||||||
'secondary_backend_id': secondary_backend_id})
|
'secondary_backend_id': secondary_backend_id})
|
||||||
|
|
|
@ -332,7 +332,7 @@ class ExtendSMPTask(task.Task):
|
||||||
LOG.warning('Not extending the SMP: %s, because its base lun '
|
LOG.warning('Not extending the SMP: %s, because its base lun '
|
||||||
'is not thin.', smp_name)
|
'is not thin.', smp_name)
|
||||||
else:
|
else:
|
||||||
LOG.info('Not extending the SMP: %(smp)s, size: %(size)s, because'
|
LOG.info('Not extending the SMP: %(smp)s, size: %(size)s, because '
|
||||||
'the new size: %(new_size)s is smaller.',
|
'the new size: %(new_size)s is smaller.',
|
||||||
{'smp': smp_name, 'size': smp.total_capacity_gb,
|
{'smp': smp_name, 'size': smp.total_capacity_gb,
|
||||||
'new_size': lun_size})
|
'new_size': lun_size})
|
||||||
|
|
|
@ -136,7 +136,7 @@ def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,
|
||||||
test_value = False
|
test_value = False
|
||||||
with excutils.save_and_reraise_exception(
|
with excutils.save_and_reraise_exception(
|
||||||
reraise=reraise_arbiter(ex)):
|
reraise=reraise_arbiter(ex)):
|
||||||
LOG.debug('Exception raised when executing %(condition_name)s'
|
LOG.debug('Exception raised when executing %(condition_name)s '
|
||||||
'in wait_until. Message: %(msg)s',
|
'in wait_until. Message: %(msg)s',
|
||||||
{'condition_name': condition.__name__,
|
{'condition_name': condition.__name__,
|
||||||
'msg': ex.message})
|
'msg': ex.message})
|
||||||
|
@ -169,7 +169,7 @@ def validate_storage_migration(volume, target_host, src_serial, src_protocol):
|
||||||
return False
|
return False
|
||||||
if serial_number != src_serial:
|
if serial_number != src_serial:
|
||||||
LOG.debug('Skip storage-assisted migration because '
|
LOG.debug('Skip storage-assisted migration because '
|
||||||
'target and source backend are not managing'
|
'target and source backend are not managing '
|
||||||
'the same array.')
|
'the same array.')
|
||||||
return False
|
return False
|
||||||
if (target_host['capabilities']['storage_protocol'] != src_protocol
|
if (target_host['capabilities']['storage_protocol'] != src_protocol
|
||||||
|
|
|
@ -682,7 +682,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
|
||||||
if lss_pair[0] not in source_lss_used]
|
if lss_pair[0] not in source_lss_used]
|
||||||
self._assert(available_lss_pairs,
|
self._assert(available_lss_pairs,
|
||||||
"All LSSs reserved for CG have been used out, "
|
"All LSSs reserved for CG have been used out, "
|
||||||
"please reserve more LSS for CG if there are still"
|
"please reserve more LSS for CG if there are still "
|
||||||
"some empty LSSs left.")
|
"some empty LSSs left.")
|
||||||
else:
|
else:
|
||||||
available_lss_pairs = set()
|
available_lss_pairs = set()
|
||||||
|
|
|
@ -1668,7 +1668,7 @@ class XIVProxy(proxy.IBMStorageProxy):
|
||||||
src_vref_size = float(src_vref['size'])
|
src_vref_size = float(src_vref['size'])
|
||||||
volume_size = float(volume['size'])
|
volume_size = float(volume['size'])
|
||||||
if volume_size < src_vref_size:
|
if volume_size < src_vref_size:
|
||||||
error = (_("New volume size (%(vol_size)s GB) cannot be less"
|
error = (_("New volume size (%(vol_size)s GB) cannot be less "
|
||||||
"than the source volume size (%(src_size)s GB)..")
|
"than the source volume size (%(src_size)s GB)..")
|
||||||
% {'vol_size': volume_size, 'src_size': src_vref_size})
|
% {'vol_size': volume_size, 'src_size': src_vref_size})
|
||||||
LOG.error(error)
|
LOG.error(error)
|
||||||
|
|
|
@ -1864,7 +1864,7 @@ class StorwizeHelpers(object):
|
||||||
elif mapping_attrs['status'] == 'stopped':
|
elif mapping_attrs['status'] == 'stopped':
|
||||||
self.ssh.prestartfcconsistgrp(fc_consistgrp)
|
self.ssh.prestartfcconsistgrp(fc_consistgrp)
|
||||||
elif mapping_attrs['status'] != 'preparing':
|
elif mapping_attrs['status'] != 'preparing':
|
||||||
msg = (_('Unexpected mapping status %(status)s for mapping'
|
msg = (_('Unexpected mapping status %(status)s for mapping '
|
||||||
'%(id)s. Attributes: %(attr)s.') %
|
'%(id)s. Attributes: %(attr)s.') %
|
||||||
{'status': mapping_attrs['status'],
|
{'status': mapping_attrs['status'],
|
||||||
'id': fc_consistgrp,
|
'id': fc_consistgrp,
|
||||||
|
@ -1976,7 +1976,7 @@ class StorwizeHelpers(object):
|
||||||
pool=None):
|
pool=None):
|
||||||
"""Create a FlashCopy mapping and add to consistent group."""
|
"""Create a FlashCopy mapping and add to consistent group."""
|
||||||
LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
|
LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
|
||||||
' from source %(source)s to target %(target)s'
|
' from source %(source)s to target %(target)s. '
|
||||||
'Then add the flashcopy to %(cg)s.',
|
'Then add the flashcopy to %(cg)s.',
|
||||||
{'source': source, 'target': target, 'cg': consistgrp})
|
{'source': source, 'target': target, 'cg': consistgrp})
|
||||||
|
|
||||||
|
@ -2469,7 +2469,7 @@ class StorwizeHelpers(object):
|
||||||
topology is hyperswap.
|
topology is hyperswap.
|
||||||
"""
|
"""
|
||||||
if state['code_level'] < (7, 6, 0, 0):
|
if state['code_level'] < (7, 6, 0, 0):
|
||||||
LOG.debug('Hyperswap failure as the storage'
|
LOG.debug('Hyperswap failure as the storage '
|
||||||
'code_level is %(code_level)s, below '
|
'code_level is %(code_level)s, below '
|
||||||
'the required 7.6.0.0.',
|
'the required 7.6.0.0.',
|
||||||
{'code_level': state['code_level']})
|
{'code_level': state['code_level']})
|
||||||
|
@ -3511,7 +3511,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
|
||||||
{'master': volume['name'], 'aux': tgt_volume})
|
{'master': volume['name'], 'aux': tgt_volume})
|
||||||
continue
|
continue
|
||||||
LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
|
LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
|
||||||
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s'
|
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
|
||||||
'primary=%(primary)s',
|
'primary=%(primary)s',
|
||||||
{'vol': volume['name'],
|
{'vol': volume['name'],
|
||||||
'master_vol': rep_info['master_vdisk_name'],
|
'master_vol': rep_info['master_vdisk_name'],
|
||||||
|
@ -3657,7 +3657,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
|
||||||
def _replica_vol_ready():
|
def _replica_vol_ready():
|
||||||
rep_info = self._helpers.get_relationship_info(volume)
|
rep_info = self._helpers.get_relationship_info(volume)
|
||||||
if not rep_info:
|
if not rep_info:
|
||||||
msg = (_('_wait_replica_vol_ready: no rc-releationship'
|
msg = (_('_wait_replica_vol_ready: no rc-releationship '
|
||||||
'is established for volume:%(volume)s. Please '
|
'is established for volume:%(volume)s. Please '
|
||||||
're-establish the rc-relationship and '
|
're-establish the rc-relationship and '
|
||||||
'synchronize the volumes on backend storage.'),
|
'synchronize the volumes on backend storage.'),
|
||||||
|
@ -4235,7 +4235,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
|
||||||
rccg = self._helpers.get_rccg(rccg_name)
|
rccg = self._helpers.get_rccg(rccg_name)
|
||||||
if not rccg:
|
if not rccg:
|
||||||
msg = (_('_replica_grp_ready: no group %(rccg)s exists on the '
|
msg = (_('_replica_grp_ready: no group %(rccg)s exists on the '
|
||||||
'backend. Please re-create the rccg and synchronize'
|
'backend. Please re-create the rccg and synchronize '
|
||||||
'the volumes on backend storage.'),
|
'the volumes on backend storage.'),
|
||||||
{'rccg': rccg_name})
|
{'rccg': rccg_name})
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
|
|
|
@ -951,7 +951,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver):
|
||||||
self._state, opts)):
|
self._state, opts)):
|
||||||
msg = (_("Failed to manage existing volume due to "
|
msg = (_("Failed to manage existing volume due to "
|
||||||
"I/O group mismatch. The I/O group of the "
|
"I/O group mismatch. The I/O group of the "
|
||||||
"volume to be managed is %(vdisk_iogrp)s. I/O group"
|
"volume to be managed is %(vdisk_iogrp)s. I/O group "
|
||||||
"of the chosen type is %(opt_iogrp)s.") %
|
"of the chosen type is %(opt_iogrp)s.") %
|
||||||
{'vdisk_iogrp': vdisk['IO_group_name'],
|
{'vdisk_iogrp': vdisk['IO_group_name'],
|
||||||
'opt_iogrp': opts['iogrp']})
|
'opt_iogrp': opts['iogrp']})
|
||||||
|
@ -1360,7 +1360,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver):
|
||||||
{'master': volume.name, 'aux': tgt_volume})
|
{'master': volume.name, 'aux': tgt_volume})
|
||||||
continue
|
continue
|
||||||
LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
|
LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol='
|
||||||
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s'
|
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
|
||||||
'primary=%(primary)s',
|
'primary=%(primary)s',
|
||||||
{'vol': volume.name,
|
{'vol': volume.name,
|
||||||
'master_vol': rep_info['master_vdisk_name'],
|
'master_vol': rep_info['master_vdisk_name'],
|
||||||
|
@ -1455,7 +1455,7 @@ class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver):
|
||||||
def _replica_vol_ready():
|
def _replica_vol_ready():
|
||||||
rep_info = self._assistant.get_relationship_info(volume)
|
rep_info = self._assistant.get_relationship_info(volume)
|
||||||
if not rep_info:
|
if not rep_info:
|
||||||
msg = (_('_wait_replica_vol_ready: no rc-releationship'
|
msg = (_('_wait_replica_vol_ready: no rc-releationship '
|
||||||
'is established for volume:%(volume)s. Please '
|
'is established for volume:%(volume)s. Please '
|
||||||
're-establish the rc-relationship and '
|
're-establish the rc-relationship and '
|
||||||
'synchronize the volumes on backend storage.'),
|
'synchronize the volumes on backend storage.'),
|
||||||
|
@ -2525,7 +2525,7 @@ class InStorageAssistant(object):
|
||||||
greenthread.sleep(self.WAIT_TIME)
|
greenthread.sleep(self.WAIT_TIME)
|
||||||
|
|
||||||
if not mapping_ready:
|
if not mapping_ready:
|
||||||
msg = (_('Mapping %(id)s prepare failed to complete within the'
|
msg = (_('Mapping %(id)s prepare failed to complete within the '
|
||||||
'allotted %(to)d seconds timeout. Terminating.')
|
'allotted %(to)d seconds timeout. Terminating.')
|
||||||
% {'id': lc_map_id,
|
% {'id': lc_map_id,
|
||||||
'to': timeout})
|
'to': timeout})
|
||||||
|
@ -2752,8 +2752,8 @@ class InStorageAssistant(object):
|
||||||
config, opts, full_copy=False,
|
config, opts, full_copy=False,
|
||||||
pool=None):
|
pool=None):
|
||||||
"""Create a LocalCopy mapping and add to consistent group."""
|
"""Create a LocalCopy mapping and add to consistent group."""
|
||||||
LOG.debug('Enter: create_localcopy_to_consistgrp: create LocalCopy'
|
LOG.debug('Enter: create_localcopy_to_consistgrp: create LocalCopy '
|
||||||
' from source %(source)s to target %(target)s'
|
'from source %(source)s to target %(target)s. '
|
||||||
'Then add the localcopy to %(cg)s.',
|
'Then add the localcopy to %(cg)s.',
|
||||||
{'source': source, 'target': target, 'cg': consistgrp})
|
{'source': source, 'target': target, 'cg': consistgrp})
|
||||||
|
|
||||||
|
|
|
@ -114,7 +114,7 @@ NEXENTA_ISCSI_OPTS = [
|
||||||
help='Nexenta target portal groups'),
|
help='Nexenta target portal groups'),
|
||||||
cfg.StrOpt('nexenta_iscsi_target_portals',
|
cfg.StrOpt('nexenta_iscsi_target_portals',
|
||||||
default='',
|
default='',
|
||||||
help='Comma separated list of portals for NexentaStor5, in'
|
help='Comma separated list of portals for NexentaStor5, in '
|
||||||
'format of IP1:port1,IP2:port2. Port is optional, '
|
'format of IP1:port1,IP2:port2. Port is optional, '
|
||||||
'default=3260. Example: 10.10.10.1:3267,10.10.1.2'),
|
'default=3260. Example: 10.10.10.1:3267,10.10.1.2'),
|
||||||
cfg.StrOpt('nexenta_iscsi_target_host_group',
|
cfg.StrOpt('nexenta_iscsi_target_host_group',
|
||||||
|
|
|
@ -359,7 +359,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
|
||||||
if refresh:
|
if refresh:
|
||||||
group_info = self.APIExecutor.get_group_info()
|
group_info = self.APIExecutor.get_group_info()
|
||||||
if 'usage_valid' not in group_info:
|
if 'usage_valid' not in group_info:
|
||||||
raise NimbleDriverException(_('SpaceInfo returned by'
|
raise NimbleDriverException(_('SpaceInfo returned by '
|
||||||
'array is invalid'))
|
'array is invalid'))
|
||||||
total_capacity = (group_info['usable_capacity_bytes'] /
|
total_capacity = (group_info['usable_capacity_bytes'] /
|
||||||
float(units.Gi))
|
float(units.Gi))
|
||||||
|
@ -1467,7 +1467,7 @@ class NimbleRestAPIExecutor(object):
|
||||||
filter = {"name": initiator_group_name}
|
filter = {"name": initiator_group_name}
|
||||||
r = self.get_query(api, filter)
|
r = self.get_query(api, filter)
|
||||||
if not r.json()['data']:
|
if not r.json()['data']:
|
||||||
raise NimbleAPIException(_("Unable to retrieve information for"
|
raise NimbleAPIException(_("Unable to retrieve information for "
|
||||||
"initiator group : %s") %
|
"initiator group : %s") %
|
||||||
initiator_group_name)
|
initiator_group_name)
|
||||||
return r.json()['data'][0]['id']
|
return r.json()['data'][0]['id']
|
||||||
|
@ -1544,7 +1544,7 @@ class NimbleRestAPIExecutor(object):
|
||||||
self.remove_acl(volume, acl_record['initiator_group_name'])
|
self.remove_acl(volume, acl_record['initiator_group_name'])
|
||||||
|
|
||||||
def remove_acl(self, volume, initiator_group_name):
|
def remove_acl(self, volume, initiator_group_name):
|
||||||
LOG.info("removing ACL from volume=%(vol)s"
|
LOG.info("removing ACL from volume=%(vol)s "
|
||||||
"and %(igroup)s",
|
"and %(igroup)s",
|
||||||
{"vol": volume['name'],
|
{"vol": volume['name'],
|
||||||
"igroup": initiator_group_name})
|
"igroup": initiator_group_name})
|
||||||
|
|
|
@ -234,7 +234,7 @@ class ZFSSAApi(object):
|
||||||
'pool: %(pool)s '
|
'pool: %(pool)s '
|
||||||
'Project: %(proj)s '
|
'Project: %(proj)s '
|
||||||
'volume: %(vol)s '
|
'volume: %(vol)s '
|
||||||
'for target: %(tgt)s and pool: %(tgt_pool)s'
|
'for target: %(tgt)s and pool: %(tgt_pool)s. '
|
||||||
'Return code: %(ret.status)d '
|
'Return code: %(ret.status)d '
|
||||||
'Message: %(ret.data)s .')
|
'Message: %(ret.data)s .')
|
||||||
% {'pool': host_pool,
|
% {'pool': host_pool,
|
||||||
|
|
|
@ -1437,7 +1437,7 @@ class VolumeManager(manager.CleanableManager,
|
||||||
image_volume = objects.Volume(context=ctx, **new_vol_values)
|
image_volume = objects.Volume(context=ctx, **new_vol_values)
|
||||||
image_volume.create()
|
image_volume.create()
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.exception('Create clone_image_volume: %(volume_id)s'
|
LOG.exception('Create clone_image_volume: %(volume_id)s '
|
||||||
'for image %(image_id)s, '
|
'for image %(image_id)s, '
|
||||||
'failed (Exception: %(except)s)',
|
'failed (Exception: %(except)s)',
|
||||||
{'volume_id': volume.id,
|
{'volume_id': volume.id,
|
||||||
|
|
|
@ -310,7 +310,7 @@ class SCSTAdm(iscsi.ISCSITarget):
|
||||||
self.show_target(iscsi_target, iqn)
|
self.show_target(iscsi_target, iqn)
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.error("Skipping remove_export. No iscsi_target is"
|
LOG.error("Skipping remove_export. No iscsi_target is "
|
||||||
"presently exported for volume: %s", volume['id'])
|
"presently exported for volume: %s", volume['id'])
|
||||||
return
|
return
|
||||||
vol = self.db.volume_get(context, volume['id'])
|
vol = self.db.volume_get(context, volume['id'])
|
||||||
|
|
Loading…
Reference in New Issue