Fixed many typos.

These were found using: https://github.com/intgr/topy

Change-Id: Ia4e14508c285d95ab4eaeabbde032ecc5e7c9e4b
This commit is contained in:
Alex Gaynor 2014-04-25 21:47:41 -07:00
parent 8604b7c0d7
commit b055e16846
32 changed files with 41 additions and 41 deletions

View File

@@ -111,7 +111,7 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
* |DifferentHostFilter| - allows to put the instance on a different host from a
set of instances.
* |SameHostFilter| - puts the instance on the same host as another instance in
a set of of instances.
a set of instances.
* |RetryFilter| - filters hosts that have been attempted for scheduling.
Only passes hosts that have not been previously attempted.
* |TrustedFilter| - filters hosts based on their trust. Only passes hosts
@@ -310,7 +310,7 @@ easily. Therefore the final weight for the object will be::
A weigher should be a subclass of ``weights.BaseHostWeigher`` and they must
implement the ``weight_multiplier`` and ``weight_object`` methods. If the
``weight_objects`` method is overriden it just return a list of weights, and not
``weight_objects`` method is overridden it just return a list of weights, and not
modify the weight of the object directly, since final weights are normalized and
computed by ``weight.BaseWeightHandler``.

View File

@@ -180,7 +180,7 @@ class InstanceMetadata():
'content_path': "/%s/%s" % (CONTENT_DIR, key)}
# 'content' is passed in from the configdrive code in
# nova/virt/libvirt/driver.py. Thats how we get the injected files
# nova/virt/libvirt/driver.py. That's how we get the injected files
# (personalities) in. AFAIK they're not stored in the db at all,
# so are not available later (web service metadata time).
for (path, contents) in content:

View File

@@ -397,7 +397,7 @@ class ComputeCellsAPI(compute_api.API):
@check_instance_cell
def get_console_output(self, context, instance, *args, **kwargs):
"""Get console output for an an instance."""
"""Get console output for an instance."""
# NOTE(comstud): Calling super() just to get policy check
super(ComputeCellsAPI, self).get_console_output(context, instance,
*args, **kwargs)

View File

@@ -90,7 +90,7 @@ class Claim(NopClaim):
self.overhead = overhead
# Check claim at constuctor to avoid mess code
# Check claim at constructor to avoid mess code
# Raise exception ComputeResourcesUnavailable if claim failed
self._claim_test(resources, limits)

View File

@@ -196,6 +196,6 @@ def all_monitors():
"""Return a list of monitor classes found in this directory.
This method is used as the default for available monitors
and should return a list of all monitor classes avaiable.
and should return a list of all monitor classes available.
"""
return ResourceMonitorHandler().get_all_classes()

View File

@@ -182,7 +182,7 @@ class XVPConsoleProxy(object):
- password: the password to encode, max 8 char for vm passwords,
and 16 chars for pool passwords. passwords will
be trimmed to max len before encoding.
- is_pool_password: True if this this is the XenServer api password
- is_pool_password: True if this is the XenServer api password
False if it's a VM console password
(xvp uses different keys and max lengths for pool passwords)

View File

@@ -609,7 +609,7 @@ class IptablesManager(object):
return True
# We filter duplicates, letting the *last* occurrence take
# precendence. We also filter out anything in the "remove"
# precedence. We also filter out anything in the "remove"
# lists.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
@@ -1388,7 +1388,7 @@ def get_dev(network):
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API
for for all Linux interface drivers.
for all Linux interface drivers.
"""
def plug(self, network, mac_address):

View File

@@ -386,7 +386,7 @@ class NetworkManager(manager.Manager):
try:
# NOTE(vish): We need to make sure the instance info cache has been
# updated with new ip info before we trigger the
# security group refresh. This is somewhat ineffecient
# security group refresh. This is somewhat inefficient
# but avoids doing some dangerous refactoring for a
# bug fix.
nw_info = self.get_instance_nw_info(admin_context, instance_id,
@@ -1484,7 +1484,7 @@ class FlatManager(NetworkManager):
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
networks for for one manager is currently not supported, but could be
networks for one manager is currently not supported, but could be
added by modifying allocate_fixed_ip and get_network to get the network
with new logic. Arbitrary lists of addresses in a single network can
be accomplished with manual db editing.

View File

@@ -190,7 +190,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding muliple
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both. Multiple rules are
installed to a security group in neutron using bulk support.
@@ -504,7 +504,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
self.raise_not_found(msg)
def populate_security_groups(self, instance, security_groups):
# Setting to emply list since we do not want to populate this field
# Setting to empty list since we do not want to populate this field
# in the nova database if using the neutron driver
instance['security_groups'] = security_group.SecurityGroupList()
instance['security_groups'].objects = []

View File

@@ -54,7 +54,7 @@ class DifferentHostFilter(AffinityFilter):
class SameHostFilter(AffinityFilter):
'''Schedule the instance on the same host as another instance in a set of
of instances.
instances.
'''
# The hosts the instances are running on doesn't change within a request

View File

@@ -19,7 +19,7 @@ from nova.scheduler import filters
class TypeAffinityFilter(filters.BaseHostFilter):
"""TypeAffinityFilter doesn't allow more then one VM type per host.
"""TypeAffinityFilter doesn't allow more than one VM type per host.
Note: this works best with ram_weight_multiplier
(spread) set to 1 (default).

View File

@@ -142,7 +142,7 @@ class ServiceGroupDriver(object):
"""Base class for ServiceGroup drivers."""
def join(self, member_id, group_id, service=None):
"""Join the given service with it's group."""
"""Join the given service with its group."""
raise NotImplementedError()
def is_up(self, member):

View File

@@ -38,7 +38,7 @@ class DbDriver(api.ServiceGroupDriver):
self.service_down_time = CONF.service_down_time
def join(self, member_id, group_id, service=None):
"""Join the given service with it's group."""
"""Join the given service with its group."""
msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to '
'the %(group_id)s group, service = %(service)s')

View File

@@ -34,7 +34,7 @@ verify and clean up during the tearDown step.
If using test.TestCase, calling the super class setUp is required and
calling the super class tearDown is required to be last if tearDown
is overriden.
is overridden.
Writing Functional Tests
------------------------

View File

@@ -438,7 +438,7 @@ class TestCollectionLinks(test.NoDBTestCase):
{"uuid": "123"}
]
req = mock.MagicMock()
# Given limit is greater then default max, only return default max
# Given limit is greater than default max, only return default max
params = mock.PropertyMock(return_value=dict(limit=2))
type(req).params = params
self.flags(osapi_max_limit=1)

View File

@@ -464,7 +464,7 @@ class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
self.assertEqual('fake-response', result)
def test_get_host_uptime_service_down(self):
# The corresponing Compute test case depends on the
# The corresponding Compute test case depends on the
# _assert_host_exists which is a no-op in the cells api
pass

View File

@@ -3798,7 +3798,7 @@ class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
def create_ips(i, j):
return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
# NOTE(boris-42): Create more then 256 ip to check that
# NOTE(boris-42): Create more than 256 ip to check that
# _ip_range_splitter works properly.
for i in range(1, 3):
ips_for_delete.extend(create_ips(i, 255))
@@ -4315,7 +4315,7 @@ class VolumeUsageDBApiTestCase(test.TestCase):
user_id='fake-user-uuid1')
# Instance rebooted or crashed. block device stats were reset and are
# less then the previous values
# less than the previous values
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
@@ -4369,7 +4369,7 @@ class VolumeUsageDBApiTestCase(test.TestCase):
user_id='fake-user-uuid1')
# Instance rebooted or crashed. block device stats were reset and are
# less then the previous values
# less than the previous values
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,

View File

@@ -56,7 +56,7 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
# processes running the same tests (and possibly forking more
# processes that end up in the same situation). So we need
# to catch all exceptions and make sure nothing leaks out, in
# particlar SystemExit, which is raised by sys.exit(). We use
# particular SystemExit, which is raised by sys.exit(). We use
# os._exit() which doesn't have this problem.
status = 0
try:

View File

@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for common notifcations."""
"""Tests for common notifications."""
import copy
@@ -110,7 +110,7 @@ class NotificationsTestCase(test.TestCase):
def test_notif_disabled(self):
# test config disable of the notifcations
# test config disable of the notifications
self.flags(notify_on_state_change=None)
old = copy.copy(self.instance)
@@ -148,7 +148,7 @@ class NotificationsTestCase(test.TestCase):
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
# ok now enable task state notifcations and re-try
# ok now enable task state notifications and re-try
self.flags(notify_on_state_change="vm_and_task_state")
notifications.send_update(self.context, old, self.instance)

View File

@@ -435,7 +435,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
console = conn.get_vnc_console(self.context, instance)
# Note(sulo): We dont care about session id in test
# Note(sulo): We don't care about session id in test
# they will always differ so strip that out
actual_path = console['internal_access_path'].split('&')[0]
expected_path = "/console?ref=%s" % str(vm_ref)
@@ -453,7 +453,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
console = conn.get_vnc_console(self.context, instance)
# Note(sulo): We dont care about session id in test
# Note(sulo): We don't care about session id in test
# they will always differ so strip that out
actual_path = console['internal_access_path'].split('&')[0]
expected_path = "/console?ref=%s" % str(rescue_vm)

View File

@@ -188,7 +188,7 @@ def bm_node_update(context, bm_node_id, values):
def bm_node_associate_and_update(context, node_uuid, values):
"""Associate an instance to a node safely
Associate an instance to a node only if that node is not yet assocated.
Associate an instance to a node only if that node is not yet associated.
Allow the caller to set any other fields they require in the same
operation. For example, this is used to set the node's task_state to
BUILDING at the beginning of driver.spawn().

View File

@@ -310,7 +310,7 @@ class PXE(base.NodeDriver):
if injected_files is None:
injected_files = []
else:
# NOTE(deva): copy so we dont modify the original
# NOTE(deva): copy so we don't modify the original
injected_files = list(injected_files)
net_config = build_network_config(network_info)

View File

@@ -196,7 +196,7 @@ class VirtualPowerManager(base.PowerManager):
err_msg = _('Node "%(name)s" with MAC address %(mac)s not found.')
LOG.error(err_msg, {'name': self._node_name,
'mac': self._mac_addresses})
# in our case the _node_name is the the node_id
# in our case the _node_name is the node_id
raise exception.NodeNotFound(node_id=self._node_name)
cmd = self._vp_cmd.list_running_cmd

View File

@@ -125,7 +125,7 @@ class DriverBlockDevice(dict):
def attach(self, **kwargs):
"""Make the device available to be used by VMs.
To be overriden in subclasses with the connecting logic for
To be overridden in subclasses with the connecting logic for
the type of device the subclass represents.
"""
raise NotImplementedError()

View File

@@ -109,7 +109,7 @@ class VFSGuestFS(vfs.VFS):
except TypeError as e:
if 'close_on_exit' in str(e):
# NOTE(russellb) In case we're not using a version of
# libguestfs new enough to support the close_on_exit paramater,
# libguestfs new enough to support the close_on_exit parameter,
# which was added in libguestfs 1.20.
self.handle = tpool.Proxy(guestfs.GuestFS())
else:

View File

@@ -118,7 +118,7 @@ class ImageCacheManager(object):
'originals': []}
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
"""Ages and verfies cached images."""
"""Ages and verifies cached images."""
raise NotImplementedError()

View File

@@ -544,7 +544,7 @@ def get_disk_mapping(virt_type, instance,
# NOTE (ndipanov): This implicitly relies on image->local BDMs not
# being considered in the driver layer - so missing
# bdm with boot_index 0 means - use image, unless it was
# overriden. This can happen when using legacy syntax and
# overridden. This can happen when using legacy syntax and
# no root_device_name is set on the instance.
if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
block_device_info):

View File

@@ -332,7 +332,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
Yields the name of the base file, a boolean which is True if the image
is "small", and a boolean which indicates if this is a resized image.
Note that is is possible for more than one yield to result from this
Note that it is possible for more than one yield to result from this
check.
If no base file is found, then nothing is yielded.

View File

@@ -26,7 +26,7 @@ At each aging iteration we check if the image can be aged.
This is done by comparing the current nova compute time to the time embedded
in the timestamp. If the time exceeds the configured aging time then
the parent folder, that is the image ID folder, will be deleted.
That effectivly ages the cached image.
That effectively ages the cached image.
If an image is used then the timestamps will be deleted.
When accessing a timestamp we make use of locking. This ensure that aging

View File

@@ -1758,7 +1758,7 @@ def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
# As mounting the image VDI is expensive, we only want do do it once,
# As mounting the image VDI is expensive, we only want do it once,
# if at all, so determine whether it's required first, and then do
# everything
mount_required = False

View File

@@ -193,7 +193,7 @@ def apply_ovs_ipv6_flows(ovs, bridge, params):
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")
# do not allow sending specifc ICMPv6 types
# do not allow sending specific ICMPv6 types
# Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
# Redirect Gateway

View File

@@ -221,7 +221,7 @@ def _validate_vhd(vdi_path):
may pick up other errors also.
This check ensures that the timestamps listed in the VHD footer aren't in
the future. This can occur during a migration if the clocks on the the two
the future. This can occur during a migration if the clocks on the two
Dom0's are out-of-sync. This would corrupt the SR if it were imported, so
generate an exception to bail.
"""