Fix typo/misspelled words
Fix typos/misspelled words found by this handy tool: https://github.com/lyda/misspell-check

Some 'typos' were intentional since they are part of function names, so they are left alone.

Change-Id: I4badda869521659b541d018727a6f71f518e2ee0
parent f573b7e3b6
commit 2e0d1f41ec
@@ -418,7 +418,7 @@ class ISCSIConnector(InitiatorConnector):
                                check_exit_code=[0, 255])
         except putils.ProcessExecutionError as err:
             #as this might be one of many paths,
-            #only set successfull logins to startup automatically
+            #only set successful logins to startup automatically
             if err.exit_code in [15]:
                 self._iscsiadm_update(connection_properties,
                                       "node.startup",
@@ -263,7 +263,7 @@ class Runner(object):


 class TransitionNotifier(object):
     """A utility helper class that can be used to subscribe to
-    notifications of events occuring as well as allow a entity to post said
+    notifications of events occurring as well as allow a entity to post said
     notifications to subscribers.
     """
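The docstring above describes a small publish/subscribe helper. A minimal sketch of that pattern, assuming illustrative register()/notify() names rather than the real taskflow API, could look like this:

# Hedged sketch of a transition notifier; names are illustrative, not taskflow's.
import collections


class SimpleTransitionNotifier(object):
    def __init__(self):
        # state name -> list of callbacks interested in that state
        self._listeners = collections.defaultdict(list)

    def register(self, state, callback):
        # Subscribe a callback to notifications posted for 'state'.
        self._listeners[state].append(callback)

    def notify(self, state, details):
        # Post a notification to every subscriber of 'state'.
        for callback in self._listeners[state]:
            callback(state, details)


def log_transition(state, details):
    print("transitioned to %s: %s" % (state, details))


notifier = SimpleTransitionNotifier()
notifier.register('RUNNING', log_transition)
notifier.notify('RUNNING', {'task': 'create_volume'})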
@@ -62,7 +62,7 @@ class FakeRequest(object):


 # NOTE(uni): deprecating service request key, binary takes precedence
-# Still keeping service key here for API compability sake.
+# Still keeping service key here for API compatibility sake.
 class FakeRequestWithService(object):
     environ = {"cinder.context": context.get_admin_context()}
     GET = {"service": "cinder-volume"}
@@ -79,7 +79,7 @@ class FakeRequestWithHost(object):


 # NOTE(uni): deprecating service request key, binary takes precedence
-# Still keeping service key here for API compability sake.
+# Still keeping service key here for API compatibility sake.
 class FakeRequestWithHostService(object):
     environ = {"cinder.context": context.get_admin_context()}
     GET = {"host": "host1", "service": "cinder-volume"}
@@ -63,15 +63,15 @@ class VolumeRouterTestCase(test.TestCase):
         self.assertEqual(200, response.status_int)

     def test_versions_action_args_index(self):
-        request_enviroment = {'PATH_INFO': '/'}
+        request_environment = {'PATH_INFO': '/'}
         resource = versions.Versions()
-        result = resource.get_action_args(request_enviroment)
+        result = resource.get_action_args(request_environment)
         self.assertEqual(result['action'], 'index')

     def test_versions_action_args_multi(self):
-        request_enviroment = {'PATH_INFO': '/fake/path'}
+        request_environment = {'PATH_INFO': '/fake/path'}
         resource = versions.Versions()
-        result = resource.get_action_args(request_enviroment)
+        result = resource.get_action_args(request_environment)
         self.assertEqual(result['action'], 'multi')

     def test_versions_get_most_recent_update(self):
@@ -568,7 +568,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
         self.app = limits.WsgiLimiter(TEST_LIMITS)

     def _request_data(self, verb, path):
-        """Get data decribing a limit request verb/path."""
+        """Get data describing a limit request verb/path."""
         return jsonutils.dumps({"verb": verb, "path": path})

     def _request(self, verb, url, username=None):
@@ -574,7 +574,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
         self.app = limits.WsgiLimiter(TEST_LIMITS)

     def _request_data(self, verb, path):
-        """Get data decribing a limit request verb/path."""
+        """Get data describing a limit request verb/path."""
        return jsonutils.dumps({"verb": verb, "path": path})

     def _request(self, verb, url, username=None):
@@ -636,10 +636,10 @@ class HVSRESTiSCSIDriverTestCase(test.TestCase):
         product.appendChild(product_text)
         storage.appendChild(product)

-        protocal = doc.createElement('Protocol')
-        protocal_text = doc.createTextNode('iSCSI')
-        protocal.appendChild(protocal_text)
-        storage.appendChild(protocal)
+        protocol = doc.createElement('Protocol')
+        protocol_text = doc.createTextNode('iSCSI')
+        protocol.appendChild(protocol_text)
+        storage.appendChild(protocol)

         username = doc.createElement('UserName')
         username_text = doc.createTextNode('admin')
@@ -819,10 +819,10 @@ class HVSRESTFCDriverTestCase(test.TestCase):
         product.appendChild(product_text)
         storage.appendChild(product)

-        protocal = doc.createElement('Protocol')
-        protocal_text = doc.createTextNode('FC')
-        protocal.appendChild(protocal_text)
-        storage.appendChild(protocal)
+        protocol = doc.createElement('Protocol')
+        protocol_text = doc.createTextNode('FC')
+        protocol.appendChild(protocol_text)
+        storage.appendChild(protocol)

         username = doc.createElement('UserName')
         username_text = doc.createTextNode('admin')
@@ -73,9 +73,9 @@ class FakeSnapshot(object):
         return self.__dict__[key]


-class FakeResponce(object):
+class FakeResponse(object):
     def __init__(self, status):
-        """Initialize FakeResponce.
+        """Initialize FakeResponse.

         :param status: Either 'failed' or 'passed'
         """
@@ -984,7 +984,7 @@ class VolumeTestCase(BaseVolumeTestCase):
             self.context,
             snapshot_id)

-    def test_cant_delete_volume_in_use(self):
+    def test_cannot_delete_volume_in_use(self):
         """Test volume can't be deleted in invalid stats."""
         # create a volume and assign to host
         volume = tests_utils.create_volume(self.context, **self.volume_params)
@@ -1029,7 +1029,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         # clean up
         self.volume.delete_volume(self.context, volume['id'])

-    def test_cant_force_delete_attached_volume(self):
+    def test_cannot_force_delete_attached_volume(self):
         """Test volume can't be force delete in attached state"""
         volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
@@ -1047,7 +1047,7 @@ class VolumeTestCase(BaseVolumeTestCase):

         self.volume.delete_volume(self.context, volume['id'])

-    def test_cant_delete_volume_with_snapshots(self):
+    def test_cannot_delete_volume_with_snapshots(self):
         """Test volume can't be deleted with dependent snapshots."""
         volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
@@ -64,7 +64,7 @@ class VolumeTypeTestCase(test.TestCase):

         for k, v in self.vol_type1_specs.iteritems():
             self.assertEqual(v, new['extra_specs'][k],
-                             'one of fields doesnt match')
+                             'one of fields does not match')

         new_all_vtypes = volume_types.get_all_types(self.ctxt)
         self.assertEqual(len(prev_all_vtypes) + 1,
@@ -221,7 +221,7 @@ class SSHPool(pools.Pool):
                 raise exception.CinderException(msg)

             # Paramiko by default sets the socket timeout to 0.1 seconds,
-            # ignoring what we set thru the sshclient. This doesn't help for
+            # ignoring what we set through the sshclient. This doesn't help for
             # keeping long lived connections. Hence we have to bypass it, by
             # overriding it after the transport is initialized. We are setting
             # the sockettimeout to None and setting a keepalive packet so that,
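For context, a hedged sketch of the workaround this comment describes: connect with paramiko, then override the transport's socket timeout and enable keepalives. The host, credentials and the 30-second interval below are illustrative only, not values from the driver.

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('array.example.com', username='admin', password='secret', timeout=30)

transport = ssh.get_transport()
transport.sock.settimeout(None)  # undo paramiko's default 0.1 second socket timeout
transport.set_keepalive(30)      # send a keepalive packet every 30 seconds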
@@ -141,7 +141,7 @@ class VolumeDriver(object):
         self.set_execute(execute)
         self._stats = {}

-        # set True by manager after succesful check_for_setup
+        # set True by manager after successful check_for_setup
         self._initialized = False

     def set_execute(self, execute):
@@ -74,7 +74,7 @@ class HVSCommon():
             res = urllib2.urlopen(req).read().decode("utf-8")
             LOG.debug(_('HVS Response Data: %(res)s') % {'res': res})
         except Exception as err:
-            err_msg = _('Bad reponse from server: %s') % err
+            err_msg = _('Bad response from server: %s') % err
             LOG.error(err_msg)
             raise err
@@ -218,7 +218,7 @@ class HVSCommon():
     def _get_volume_size(self, poolinfo, volume):
         """Calculate the volume size.

-        We should devide the given volume size by 512 for the HVS system
+        We should divide the given volume size by 512 for the HVS system
         caculates volume size with sectors, which is 512 bytes.
         """
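A small worked example of the sector conversion that docstring describes (the helper name below is illustrative, not the driver's own method):

GiB = 1024 ** 3  # 1073741824 bytes


def size_in_sectors(size_gb):
    # The HVS array counts capacity in 512-byte sectors.
    return size_gb * GiB // 512


print(size_in_sectors(1))  # 2097152 sectors for a 1 GiB volume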
@@ -1054,7 +1054,7 @@ class TseriesCommon():
         return None

     def _get_host_map_info(self, hostid):
-        """Get map infomation of the given host."""
+        """Get map information of the given host."""

         cli_cmd = 'showhostmap -host %(hostid)s' % {'hostid': hostid}
         out = self._execute_cli(cli_cmd)
@@ -1259,7 +1259,7 @@ class DoradoCommon(TseriesCommon):
     def _check_conf_file(self):
         """Check the config file, make sure the key elements are set."""
         root = huawei_utils.parse_xml_file(self.xml_conf)
-        # Check login infomation
+        # Check login information
         check_list = ['Storage/ControllerIP0', 'Storage/ControllerIP1',
                       'Storage/UserName', 'Storage/UserPassword']
         for item in check_list:
@@ -416,7 +416,7 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
     def _create_tgtadm_target(self, iscsi_name, iscsi_target,
                               volume_path, chap_auth, lun=0,
                               check_exit_code=False, old_name=None):
-        # NOTE(jdg): tgt driver has an issue where with alot of activity
+        # NOTE(jdg): tgt driver has an issue where with a lot of activity
         # (or sometimes just randomly) it will get *confused* and attempt
         # to reuse a target ID, resulting in a target already exists error
         # Typically a simple retry will address this
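A hedged sketch of the "simple retry" the note refers to; create_target and the retry parameters are placeholders, not the driver's actual helpers.

import time


def create_target_with_retry(create_target, attempts=3, delay=1):
    for attempt in range(attempts):
        try:
            return create_target()
        except Exception as exc:
            if 'already exists' not in str(exc) or attempt == attempts - 1:
                raise
            # tgt picked a stale target ID; back off briefly and try again.
            time.sleep(delay)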
@@ -1050,7 +1050,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
         client.set_api_version(major, minor)

     def check_for_setup_error(self):
-        """Checks if setup occured properly."""
+        """Checks if setup occurred properly."""
         api_version = self._client.get_api_version()
         if api_version:
             major, minor = api_version
@@ -371,7 +371,7 @@ class RBDDriver(driver.VolumeDriver):
         if not parent:
             return depth

-        # If clone depth was reached, flatten should have occured so if it has
+        # If clone depth was reached, flatten should have occurred so if it has
         # been exceeded then something has gone wrong.
         if depth > CONF.rbd_max_clone_depth:
             raise Exception(_("clone depth exceeds limit of %s") %
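A hedged sketch of the invariant this comment documents: walk the chain of parent images, counting clones, and fail loudly once the configured limit has been exceeded (get_parent and the limit of 5 are illustrative placeholders, not the driver's API).

MAX_CLONE_DEPTH = 5


def clone_depth(get_parent, image, depth=0):
    parent = get_parent(image)
    if not parent:
        return depth
    if depth > MAX_CLONE_DEPTH:
        # A flatten should already have capped the chain before this point.
        raise Exception("clone depth exceeds limit of %s" % MAX_CLONE_DEPTH)
    return clone_depth(get_parent, parent, depth + 1)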
@@ -232,7 +232,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
         return self.volumeops.get_vmfolder(datacenter)

     def _select_datastore_summary(self, size_bytes, datastores):
-        """Get best summary from datastore list that can accomodate volume.
+        """Get best summary from datastore list that can accommodate volume.

         The implementation selects datastore based on maximum relative
         free space, which is (free_space/total_space) and has free space to
@@ -254,7 +254,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
                 best_summary = summary

         if not best_summary:
-            msg = _("Unable to pick datastore to accomodate %(size)s bytes "
+            msg = _("Unable to pick datastore to accommodate %(size)s bytes "
                     "from the datastores: %(dss)s.")
             LOG.error(msg % {'size': size_bytes, 'dss': datastores})
             raise error_util.VimException(msg %
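A hedged sketch of the selection rule these two hunks describe: among datastores that can hold the volume, pick the one with the highest free_space/total_space ratio (the dict layout below is illustrative, not the real vCenter summary object).

def select_datastore(size_bytes, datastores):
    best = None
    best_ratio = -1.0
    for ds in datastores:
        if ds['free_space'] < size_bytes:
            continue  # cannot accommodate the volume at all
        ratio = float(ds['free_space']) / ds['total_space']
        if ratio > best_ratio:
            best, best_ratio = ds, ratio
    if best is None:
        raise ValueError("Unable to pick datastore to accommodate %d bytes"
                         % size_bytes)
    return best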
@@ -212,7 +212,7 @@ class ExtractVolumeRequestTask(base.CinderTask):
                 # TODO(harlowja): what happens if the status changes after this
                 # initial snapshot status check occurs??? Seems like someone
                 # could delete the snapshot after this check passes but before
-                # the volume is offically created?
+                # the volume is officially created?
                 raise exception.InvalidSnapshot(reason=msg)
             snapshot_id = snapshot['id']
         return snapshot_id
@@ -235,7 +235,7 @@ class ExtractVolumeRequestTask(base.CinderTask):
                 # TODO(harlowja): what happens if the status changes after this
                 # initial volume status check occurs??? Seems like someone
                 # could delete the volume after this check passes but before
-                # the volume is offically created?
+                # the volume is officially created?
                 raise exception.InvalidVolume(reason=msg)
             source_volid = source_volume['id']
         return source_volid
@@ -475,7 +475,7 @@ class ExtractVolumeRequestTask(base.CinderTask):
         # TODO(joel-coffman): This special handling of snapshots to ensure that
         # their volume type matches the source volume is too convoluted. We
         # should copy encryption metadata from the encrypted volume type to the
-        # volume upon creation and propogate that information to each snapshot.
+        # volume upon creation and propagate that information to each snapshot.
         # This strategy avoid any dependency upon the encrypted volume type.
         if not volume_type and not source_volume and not snapshot:
             volume_type = volume_types.get_default_volume_type()
@@ -595,7 +595,7 @@ class QuotaReserveTask(base.CinderTask):
     Reversion strategy: rollback the quota reservation.

     Warning Warning: if the process that is running this reserve and commit
-    process fails (or is killed before the quota is rolled back or commited
+    process fails (or is killed before the quota is rolled back or committed
     it does appear like the quota will never be rolled back). This makes
     software upgrades hard (inflight operations will need to be stopped or
     allowed to complete before the upgrade can occur). *In the future* when
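A hedged sketch of the reserve/commit/rollback pattern this warning is about; QUOTAS stands in for the real quota engine and the call names mirror the usual reserve()/commit()/rollback() trio rather than the task code itself.

def reserve_then_commit(QUOTAS, context, **deltas):
    reservations = QUOTAS.reserve(context, **deltas)
    try:
        # ... create the database entry and do the rest of the work ...
        QUOTAS.commit(context, reservations)
    except Exception:
        # If the process dies between reserve and commit/rollback, the
        # reservation is stranded until it expires -- the failure mode
        # described in the warning above.
        QUOTAS.rollback(context, reservations)
        raise
    return reservations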
@@ -659,7 +659,7 @@ class QuotaReserveTask(base.CinderTask):
         if not result:
             return
         if context.quota_committed:
-            # The reservations have already been commited and can not be
+            # The reservations have already been committed and can not be
             # rolled back at this point.
             return
         # We actually produced an output that we can revert so lets attempt
@@ -681,7 +681,7 @@ class QuotaCommitTask(base.CinderTask):
     the initial reservation (see: QuotaReserveTask).

     Warning Warning: if the process that is running this reserve and commit
-    process fails (or is killed before the quota is rolled back or commited
+    process fails (or is killed before the quota is rolled back or committed
     it does appear like the quota will never be rolled back). This makes
     software upgrades hard (inflight operations will need to be stopped or
     allowed to complete before the upgrade can occur). *In the future* when
@@ -1615,7 +1615,7 @@ def get_api_flow(scheduler_rpcapi, volume_rpcapi, db,
     v_uuid = api_flow.add(EntryCreateTask(db))
     api_flow.add(QuotaCommitTask())

-    # If after commiting something fails, ensure we set the db to failure
+    # If after committing something fails, ensure we set the db to failure
     # before reverting any prior tasks.
     api_flow.add(OnFailureChangeStatusTask(db))
@@ -30,7 +30,7 @@ def attach_debug_listeners(flow):

     These listeners will log when tasks/flows are transitioning from state to
     state so that said states can be seen in the debug log output which is very
-    useful for figuring out where problems are occuring.
+    useful for figuring out where problems are occurring.
     """

     def flow_log_change(state, details):
@@ -176,7 +176,7 @@
                 x="4" dy="1.2em" class="st3">Network<v:newlineChar/></tspan><tspan x="4" dy="1.2em" class="st3">VPN</tspan></text> </g>
         <g id="shape16-56" v:mID="16" v:groupContext="shape" transform="translate(14.4132,-30.9923)">
             <title>Sheet.16</title>
-            <desc>VM instance Security group Volume Snapshot VM image IP addres...</desc>
+            <desc>VM instance Security group Volume Snapshot VM image IP address...</desc>
             <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
             <v:textRect cx="42.5197" cy="340.209" width="85.04" height="34.0157"/>
             <rect x="0" y="323.201" width="85.0394" height="34.0157" class="st9"/>