Fix spelling errors

Spelling errors fixed in comments and log messages.

Change-Id: I8ce4899fbb22136ce6d03e1796fc01d929f35562

parent e58f1f31c5
commit 9c7f5c9c64

@@ -175,7 +175,7 @@ class ShellCommands(object):

     @args('--path', required=True, help='Script path')
     def script(self, path):
-        """Runs the script from the specifed path with flags set properly.
+        """Runs the script from the specified path with flags set properly.
         arguments: path
         """
         exec(compile(open(path).read(), path, 'exec'), locals(), globals())

@@ -124,7 +124,7 @@ class API(base.Base):

         #TODO(DuncanT): In future, when we have a generic local attach,
         #               this can go via the scheduler, which enables
-        #               better load ballancing and isolation of services
+        #               better load balancing and isolation of services
         self.backup_rpcapi.create_backup(context,
                                          backup['host'],
                                          backup['id'],

@@ -182,7 +182,7 @@ class TSMBackupDriver(BackupDriver):
         """Get the real path for the volume block device.

         If the volume is not a block device then issue an
-        InvalidBackup exsception.
+        InvalidBackup exception.

         :param volume_file: file object representing the volume
         :param volume_id: Volume id for backup or as restore target

@@ -46,7 +46,7 @@ nova_opts = [
                help='region name of this node'),
     cfg.StrOpt('nova_ca_certificates_file',
                default=None,
-               help='Location of ca certicates file to use for nova client '
+               help='Location of ca certificates file to use for nova client '
                     'requests.'),
     cfg.BoolOpt('nova_api_insecure',
                 default=False,

@@ -185,7 +185,7 @@ class ProcessLauncher(object):
         signal.signal(signal.SIGTERM, _sigterm)
         # Block SIGINT and let the parent send us a SIGTERM
         # signal.signal(signal.SIGINT, signal.SIG_IGN)
-        # This differs from the behavior in nova in that we dont ignore this
+        # This differs from the behavior in nova in that we don't ignore this
         # It allows the non-wsgi services to be terminated properly
         signal.signal(signal.SIGINT, _sigterm)

@@ -86,7 +86,7 @@ class BlockDeviceDriver(driver.ISCSIDriver):
         model_update = {}

         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class
         if not isinstance(self.tgtadm, iscsi.TgtAdm):
             lun = 0
             self._ensure_iscsi_targets(context, volume['host'])

@@ -120,7 +120,7 @@ class BlockDeviceDriver(driver.ISCSIDriver):
         """Removes an export for a logical volume."""
         # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class

         if isinstance(self.tgtadm, iscsi.LioAdm):
             try:

@@ -167,7 +167,7 @@ class BlockDeviceDriver(driver.ISCSIDriver):
         """
         # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class

         if isinstance(self.tgtadm, iscsi.LioAdm):
             try:

@@ -231,7 +231,7 @@ class BlockDeviceDriver(driver.ISCSIDriver):
         """Ensure that target ids have been created in datastore."""
         # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class
         if not isinstance(self.tgtadm, iscsi.TgtAdm):
             host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
                                                                     host)

@@ -1292,7 +1292,7 @@ class EMCSMISCommon():
         return foundCtrl

     # Find out how many volumes are mapped to a host
-    # assoociated to the LunMaskingSCSIProtocolController
+    # associated to the LunMaskingSCSIProtocolController
     def get_num_volumes_mapped(self, volume, connector):
         numVolumesMapped = 0
         volumename = volume['name']

@@ -171,7 +171,7 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
         sp = device_info['owningsp']
         endpoints = []
         if sp:
-            # endpointss example:
+            # endpoints example:
             # [iqn.1992-04.com.emc:cx.apm00123907237.a8,
             #  iqn.1992-04.com.emc:cx.apm00123907237.a9]
             endpoints = self.common._find_iscsi_protocol_endpoints(

@@ -44,7 +44,7 @@ eqlx_opts = [
                 help='Maximum retry count for reconnection'),
     cfg.BoolOpt('eqlx_use_chap',
                 default=False,
-                help='Use CHAP authentificaion for targets?'),
+                help='Use CHAP authentication for targets?'),
     cfg.StrOpt('eqlx_chap_login',
                default='admin',
                help='Existing CHAP account name'),

@@ -102,7 +102,7 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
         san_ip=<ip_address>
         san_login=<user name>
         san_password=<user password>
-        san_private_key=<file containig SSH prvate key>
+        san_private_key=<file containing SSH private key>

     Thin provision of volumes is enabled by default, to disable it use:
         san_thin_provision=false

@@ -406,7 +406,7 @@ class DellEQLSanISCSIDriver(SanISCSIDriver):
                               volume['name'])

     def terminate_connection(self, volume, connector, force=False, **kwargs):
-        """Remove access restictions from a volume."""
+        """Remove access restrictions from a volume."""
         try:
             self._eql_execute('volume', 'select', volume['name'],
                               'access', 'delete', '1')

@@ -81,7 +81,7 @@ def _do_lu_range_check(start, end, maxlun):
         raise exception.InvalidInput(reason=msg)
     if int(end) > int(maxlun):
         end = maxlun
-        LOG.debug(_("setting LU uppper (end) limit to %s") % maxlun)
+        LOG.debug(_("setting LU upper (end) limit to %s") % maxlun)
     return (start, end)


@@ -219,7 +219,7 @@ class HVSCommon():
         """Calculate the volume size.

         We should divide the given volume size by 512 for the HVS system
-        caculates volume size with sectors, which is 512 bytes.
+        calculates volume size with sectors, which is 512 bytes.
         """

         volume_size = units.GiB / 512  # 1G

@@ -245,7 +245,7 @@ class HVSCommon():
             self._delete_lungroup(lungroup_id)
             self._delete_lun(lun_id)
         else:
-            LOG.warn(_("Can't find lun or lun goup in array"))
+            LOG.warn(_("Can't find lun or lun group in array"))

     def _delete_lun_from_qos_policy(self, volume, lun_id):
         """Remove lun from qos policy."""

@@ -517,12 +517,12 @@ class HVSCommon():

         (iscsi_iqn, target_ip) = self._get_iscsi_params(connector)

-        #create host_goup if not exist
+        #create host_group if not exist
         hostid, hostgroup_id = self._add_host_into_hostgroup(connector['host'],
                                                              connector['ip'])
         self._ensure_initiator_added(initiator_name, hostid)

-        # Mapping lungooup and hostgoup to view
+        # Mapping lungroup and hostgroup to view
         lun_id = self._mapping_hostgroup_and_lungroup(volume_name,
                                                       hostgroup_id, hostid)
         hostlunid = self._find_host_lun_id(hostid, lun_id)

@@ -894,7 +894,7 @@ class HVSCommon():
         self._assert_rest_result(result, 'Log out of session error.')

     def _start_luncopy(self, luncopyid):
-        """Starte a LUNcopy."""
+        """Start a LUNcopy."""
         url = self.url + "/LUNCOPY/start"
         data = json.dumps({"TYPE": "219", "ID": luncopyid})
         result = self.call(url, data, "PUT")

@@ -1156,7 +1156,7 @@ class HVSCommon():
                 params[key] = value.strip()
             else:
                 conf = self.configuration.cinder_huawei_conf_file
-                LOG.warn(_('_parse_volume_type: Unacceptable paramater '
+                LOG.warn(_('_parse_volume_type: Unacceptable parameter '
                            '%(key)s. Please check this key in extra_specs '
                            'and make it consistent with the configuration '
                            'file %(conf)s.') % {'key': key, 'conf': conf})

@@ -320,7 +320,7 @@ class TseriesCommon():
         # If constant prefetch, we should specify prefetch value.
         if params['PrefetchType'] == '1':
             prefetch_value_or_times = '-value %s' % params['PrefetchValue']
-        # If variable prefetch, we should specify prefetch mutiple.
+        # If variable prefetch, we should specify prefetch multiple.
         elif params['PrefetchType'] == '2':
             prefetch_value_or_times = '-times %s' % params['PrefetchTimes']

@@ -384,7 +384,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
         self._stats = data

     def extend_volume(self, volume, new_size):
-        """Extend an existing voumes size."""
+        """Extend an existing volume's size."""
         self.vg.extend_volume(volume['name'],
                               self._sizestr(new_size))

@@ -454,7 +454,7 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
         """Synchronously recreates an export for a logical volume."""
         # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class

         if isinstance(self.tgtadm, iscsi.LioAdm):
             try:

@@ -573,7 +573,7 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
         """Ensure that target ids have been created in datastore."""
         # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class
         if not isinstance(self.tgtadm, iscsi.TgtAdm):
             host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
                                                                     host)

@@ -600,7 +600,7 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
         model_update = {}

         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class
         if not isinstance(self.tgtadm, iscsi.TgtAdm):
             lun = 0
             self._ensure_iscsi_targets(context, volume['host'])

@@ -630,7 +630,7 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
         """Removes an export for a logical volume."""
         # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
         # TODO(jdg): In the future move all of the dependent stuff into the
-        #            cooresponding target admin class
+        #            corresponding target admin class

         if isinstance(self.tgtadm, iscsi.LioAdm):
             try:

@@ -203,7 +203,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
         return {'provider_location': handle}

     def remove_export(self, context, volume):
-        """Driver exntry point to remove an export for a volume.
+        """Driver entry point to remove an export for a volume.

         Since exporting is idempotent in this driver, we have nothing
         to do for unexporting.

@@ -315,7 +315,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
     def terminate_connection(self, volume, connector, **kwargs):
         """Driver entry point to unattach a volume from an instance.

-        Unmask the LUN on the storage system so the given intiator can no
+        Unmask the LUN on the storage system so the given initiator can no
         longer access it.
         """

@@ -481,7 +481,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
         return False

     def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
-        """Creates igoup with specified args."""
+        """Creates igroup with specified args."""
         igroup_create = NaElement.create_node_with_children(
             'igroup-create',
             **{'initiator-group-name': igroup,

@@ -533,7 +533,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
         raise NotImplementedError()

     def _get_lun_by_args(self, **args):
-        """Retrives luns with specified args."""
+        """Retrieves luns with specified args."""
         raise NotImplementedError()

     def _get_lun_attr(self, name, attr):

@@ -997,7 +997,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
             volume=ssc_utils.NetAppVolume(volume, self.vserver))

     def _get_lun_by_args(self, **args):
-        """Retrives lun with specified args."""
+        """Retrieves lun with specified args."""
         lun_iter = NaElement('lun-get-iter')
         lun_iter.add_new_child('max-records', '100')
         query = NaElement('query')

@@ -1399,7 +1399,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
                 clone_ops_info.get_child_content('reason'))

     def _get_lun_by_args(self, **args):
-        """Retrives luns with specified args."""
+        """Retrieves luns with specified args."""
         lun_info = NaElement.create_node_with_children('lun-list-info', **args)
         result = self.client.invoke_successfully(lun_info, True)
         luns = result.get_child_by_name('luns')

@@ -289,7 +289,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver):  # pylint: disable=R0921
     def create_snapshot(self, snapshot):
         """Create snapshot of existing zvol on appliance.

-        :param snapshot: shapshot reference
+        :param snapshot: snapshot reference
         """
         self.nms.zvol.create_snapshot(
             self._get_zvol_name(snapshot['volume_name']),

@@ -649,7 +649,7 @@ exit
                                        self.config.hp3par_cpg)
             if cpg is not self.config.hp3par_cpg:
                 # The cpg was specified in a volume type extra spec so it
-                # needs to be validiated that it's in the correct domain.
+                # needs to be validated that it's in the correct domain.
                 self.validate_cpg(cpg)
                 # Also, look to see if the snap_cpg was specified in volume
                 # type extra spec, if not use the extra spec cpg as the

@@ -101,7 +101,7 @@ class SolidFireDriver(SanISCSIDriver):
         """All API requests to SolidFire device go through this method.

         Simple json-rpc web based API calls.
-        each call takes a set of paramaters (dict)
+        each call takes a set of parameters (dict)
         and returns results in a dict as well.

         """

@@ -529,7 +529,7 @@ class SolidFireDriver(SanISCSIDriver):
     def delete_volume(self, volume):
         """Delete SolidFire Volume from device.

-        SolidFire allows multipe volumes with same name,
+        SolidFire allows multiple volumes with same name,
         volumeID is what's guaranteed unique.

         """

@@ -541,7 +541,7 @@ class SolidFireDriver(SanISCSIDriver):
             LOG.error(_("Account for Volume ID %s was not found on "
                         "the SolidFire Cluster!") % volume['id'])
             LOG.error(_("This usually means the volume was never "
-                        "succesfully created."))
+                        "successfully created."))
             return

         params = {'accountID': sfaccount['accountID']}

@@ -1460,7 +1460,7 @@ class StorwizeSVCDriver(san.SanDriver):
         LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])

     def migrate_volume(self, ctxt, volume, host):
-        """Migrate direclty if source and dest are managed by same storage.
+        """Migrate directly if source and dest are managed by same storage.

         The method uses the migratevdisk method, which returns almost
         immediately, if the source and target pools have the same extent_size.

@@ -257,7 +257,7 @@ def _get_token(retrieve_result):


 def cancel_retrieval(vim, retrieve_result):
-    """Cancels the retrive operation if necessary.
+    """Cancels the retrieve operation if necessary.

     :param vim: Vim object
     :param retrieve_result: Result from the RetrievePropertiesEx API

@@ -257,7 +257,7 @@ class VMwareVolumeOps(object):
                   {'child_folder_name': child_folder_name,
                    'parent_folder': parent_folder})

-        # Get list of child entites for the parent folder
+        # Get list of child entities for the parent folder
         prop_val = self._session.invoke_api(vim_util, 'get_object_property',
                                             self._session.vim, parent_folder,
                                             'childEntity')

@@ -290,7 +290,7 @@ class WindowsUtils(object):
             wt_disk.Extend(additional_size)
         except wmi.x_wmi as exc:
             err_msg = (_(
-                'extend: error when extending the volumne: %(vol_name)s '
+                'extend: error when extending the volume: %(vol_name)s '
                 '.WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
                                                   'wmi_exc': exc})
             LOG.error(err_msg)

@@ -357,7 +357,7 @@ class ExtractVolumeRequestTask(base.CinderTask):
         if CONF.default_availability_zone:
             availability_zone = CONF.default_availability_zone
         else:
-            # For backwards compatibility use the storge_availability_zone
+            # For backwards compatibility use the storage_availability_zone
             availability_zone = CONF.storage_availability_zone
         if not self.az_check_functor(availability_zone):
             msg = _("Availability zone '%s' is invalid") % (availability_zone)

|
||||
Accesses the database and creates a new entry for the to be created
|
||||
volume using the given volume properties which are extracted from the
|
||||
input kwargs (and associated requirements this task needs). These
|
||||
requirements should be previously satisifed and validated by a
|
||||
requirements should be previously satisfied and validated by a
|
||||
pre-cursor task.
|
||||
"""
|
||||
|
||||
@ -543,7 +543,7 @@ class EntryCreateTask(base.CinderTask):
|
||||
'volume_properties': volume_properties,
|
||||
# NOTE(harlowja): it appears like further usage of this volume
|
||||
# result actually depend on it being a sqlalchemy object and not
|
||||
# just a plain dictionary so thats why we are storing this here.
|
||||
# just a plain dictionary so that's why we are storing this here.
|
||||
#
|
||||
# In the future where this task results can be serialized and
|
||||
# restored automatically for continued running we will need to
|
||||
@ -1558,7 +1558,7 @@ def get_scheduler_flow(context, db, driver, request_spec=None,
|
||||
1. Inject keys & values for dependent tasks.
|
||||
2. Extracts a scheduler specification from the provided inputs.
|
||||
3. Attaches 2 activated only on *failure* tasks (one to update the db
|
||||
status and one to notify on the MQ of the failure that occured).
|
||||
status and one to notify on the MQ of the failure that occurred).
|
||||
4. Uses provided driver to to then select and continue processing of
|
||||
volume request.
|
||||
"""
|
||||
|
@@ -432,7 +432,7 @@
 # region name of this node (string value)
 #os_region_name=<None>

-# Location of ca certicates file to use for nova client
+# Location of ca certificates file to use for nova client
 # requests. (string value)
 #nova_ca_certificates_file=<None>

@@ -1155,7 +1155,7 @@
 # Maximum retry count for reconnection (integer value)
 #eqlx_cli_max_retries=5

-# Use CHAP authentificaion for targets? (boolean value)
+# Use CHAP authentication for targets? (boolean value)
 #eqlx_use_chap=false

 # Existing CHAP account name (string value)