Merge "Fix typos"

commit d869163608
Zuul, 2022-06-01 10:50:15 +00:00, committed by Gerrit Code Review
64 changed files with 113 additions and 112 deletions

View File

@@ -612,7 +612,7 @@ def get_bdm_local_disk_num(block_device_mappings):
def get_bdm_image_metadata(context, image_api, volume_api,
block_device_mapping, legacy_bdm=True):
-"""Attempt to retrive image metadata from a given block_device_mapping.
+"""Attempt to retrieve image metadata from a given block_device_mapping.
If we are booting from a volume, we need to get the volume details from
Cinder and make sure we pass the metadata back accordingly.

View File

@@ -8782,7 +8782,7 @@ class ComputeManager(manager.Manager):
# must be deleted for preparing next block migration
# must be deleted for preparing next live migration w/o shared
# storage
-# vpmem must be cleanped
+# vpmem must be cleaned
do_cleanup = not migrate_data.is_shared_instance_path or has_vpmem
destroy_disks = not migrate_data.is_shared_block_storage
elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):

View File

@@ -1163,7 +1163,7 @@ class RevertResizeTask(base.TaskBase):
setattr(source_obj, field, getattr(target_obj, field))
def _update_bdms_in_source_cell(self, source_cell_context):
-"""Update BlockDeviceMapppings in the source cell database.
+"""Update BlockDeviceMappings in the source cell database.
It is possible to attach/detach volumes to/from a resized instance,
which would create/delete BDM records in the target cell, so we have

View File

@@ -305,7 +305,7 @@ class NovaWebSocketProxy(websockify.WebSocketProxy):
# ssl_options unset to default to the underlying system defaults.
# We do this to avoid using websockify's behaviour for 'default'
# in select_ssl_version(), which hardcodes the versions to be
-# quite relaxed and prevents us from using sytem crypto policies.
+# quite relaxed and prevents us from using system crypto policies.
ssl_min_version = kwargs.pop('ssl_minimum_version', None)
if ssl_min_version and ssl_min_version != 'default':
kwargs['ssl_options'] = websockify.websocketproxy. \

View File

@@ -36,7 +36,7 @@ SERVICE_VERSION = 61
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
-# along with any pertinent data. For things that we can programatically
+# along with any pertinent data. For things that we can programmatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service

View File

@@ -1455,7 +1455,7 @@ class AvailabilityZoneFixture(fixtures.Fixture):
``get_availability_zones``.
``get_instance_availability_zone`` will return the availability_zone
-requested when creating a server otherwise the instance.availabilty_zone
+requested when creating a server otherwise the instance.availability_zone
or default_availability_zone is returned.
"""

View File

@@ -65,7 +65,7 @@ class RealPolicyFixture(fixtures.Fixture):
def add_missing_default_rules(self, rules):
"""Adds default rules and their values to the given rules dict.
-The given rulen dict may have an incomplete set of policy rules.
+The given rules dict may have an incomplete set of policy rules.
This method will add the default policy rules and their values to
the dict. It will not override the existing rules.
"""
@@ -141,7 +141,7 @@ class OverridePolicyFixture(RealPolicyFixture):
not used. One example is when policy rules are deprecated. In that case
tests can use this fixture and verify if deprecated rules are overridden
then does nova code enforce the overridden rules not only defaults.
-As per oslo.policy deprecattion feature, if deprecated rule is overridden
+As per oslo.policy deprecation feature, if deprecated rule is overridden
in policy file then, overridden check is used to verify the policy.
Example of usage:

View File

@@ -267,7 +267,7 @@ class ComputeNodeTestCase(test.TestCase):
self.assertEqual(res, (1, 1))
# the ratio is refreshed to CONF.initial_xxx_allocation_ratio
-# beacause CONF.xxx_allocation_ratio is None
+# because CONF.xxx_allocation_ratio is None
cns = db.compute_node_get_all(self.context)
# the ratio is refreshed to CONF.xxx_allocation_ratio
for cn in cns:

View File

@@ -731,7 +731,7 @@ class NUMAServersTest(NUMAServersTestBase):
for host, compute_rp_uuid in self.compute_rp_uuids.items():
if host == original_host:
# the host that had the instance should no longer have
-# alocations since the resize has been confirmed
+# allocations since the resize has been confirmed
expected_usage = {'VCPU': 0, 'PCPU': 0, 'DISK_GB': 0,
'MEMORY_MB': 0}
else:

View File

@@ -994,7 +994,7 @@ class VDPAServersTest(_PCIServersTestBase):
def test_suspend(self):
self._test_common(self._suspend_server)
-def test_evacute(self):
+def test_evacuate(self):
self._test_common(self._evacuate_server)
def test_resize(self):
@@ -1303,7 +1303,7 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
num_pci=1, numa_node=pci_numa_node)
self.start_compute(pci_info=pci_info)
-# request cpu pinning to create a numa toplogy and allow the test to
+# request cpu pinning to create a numa topology and allow the test to
# force which numa node the vm would have to be pinned too.
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1514,7 +1514,7 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
num_pfs=1, num_vfs=2, numa_node=pci_numa_node)
self.start_compute(pci_info=pci_info)
-# request cpu pinning to create a numa toplogy and allow the test to
+# request cpu pinning to create a numa topology and allow the test to
# force which numa node the vm would have to be pinned too.
extra_spec = {
'hw:cpu_policy': 'dedicated',

View File

@@ -193,7 +193,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.delete_migration(server['id'], migrations[0]['id'])
self._wait_for_notification('instance.live_migration_abort.start')
self._wait_for_state_change(server, 'ACTIVE')
-# NOTE(gibi): the intance.live_migration_rollback notification emitted
+# NOTE(gibi): the instance.live_migration_rollback notification emitted
# after the instance.live_migration_abort notification so we have to
# wait for the rollback to ensure we can assert both notifications
# below
@@ -1500,8 +1500,8 @@ class TestInstanceNotificationSample(
self.api.delete_server_volume(server['id'], volume_id)
self._wait_for_notification('instance.volume_detach.end')
-def _volume_swap_server(self, server, attachement_id, volume_id):
-self.api.put_server_volume(server['id'], attachement_id, volume_id)
+def _volume_swap_server(self, server, attachment_id, volume_id):
+self.api.put_server_volume(server['id'], attachment_id, volume_id)
def test_volume_swap_server(self):
server = self._boot_a_server(

View File

@@ -25,7 +25,7 @@ class TestServersPerUserQuota(test.TestCase,
tracking usages in a separate database table. As part of that change,
per-user quota functionality was broken for server creates.
-When mulitple users in the same project have per-user quota, they are meant
+When multiple users in the same project have per-user quota, they are meant
to be allowed to create resources such that may not exceed their
per-user quota nor their project quota.

View File

@@ -60,7 +60,7 @@ class HostStatusPolicyTestCase(test.TestCase,
overwrite=False)
# Create a server as a normal non-admin user.
# In microversion 2.36 the /images proxy API was deprecated, so
-# specifiy the image_uuid directly.
+# specify the image_uuid directly.
kwargs = {'image_uuid': self.image_uuid}
if networks:
# Starting with microversion 2.37 the networks field is required.

View File

@@ -764,7 +764,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
LOG.info('Attaching volume %s to server %s', volume_id, server_id)
# The fake driver doesn't implement get_device_name_for_instance, so
-# we'll just raise the exception directly here, instead of simuluating
+# we'll just raise the exception directly here, instead of simulating
# an instance with 26 disk devices already attached.
with mock.patch.object(self.compute.driver,
'get_device_name_for_instance') as mock_get:

View File

@@ -190,7 +190,7 @@ class CyborgTestCase(test.NoDBTestCase):
},
"attach_handle_type": "PCI",
"state": "Bound",
-# Devic eprofile name is common to all bound ARQs
+# Device profile name is common to all bound ARQs
"device_profile_name": arqs[0]["device_profile_name"],
**common
}
@@ -367,7 +367,7 @@ class CyborgTestCase(test.NoDBTestCase):
# If only some ARQs are resolved, return just the resolved ones
unbound_arqs, _ = self._get_arqs_and_request_groups()
_, bound_arqs = self._get_bound_arqs()
-# Create a amixture of unbound and bound ARQs
+# Create a mixture of unbound and bound ARQs
arqs = [unbound_arqs[0], bound_arqs[0]]
instance_uuid = bound_arqs[0]['instance_uuid']
@@ -487,7 +487,7 @@ class CyborgTestCase(test.NoDBTestCase):
self.assertEqual(bound_arqs, ret_arqs)
def test_get_arq_pci_device_profile(self):
-"""Test extractin arq pci device info"""
+"""Test extracting arq pci device info"""
arq = {'uuid': uuids.arq_uuid,
'device_profile_name': "smart_nic",
'device_profile_group_id': '5',

View File

@@ -254,7 +254,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
req_get = {'tenant_id': -1}
self._test_admin_can_fetch_used_limits_for_own_project(req_get)
-def test_admin_can_fetch_used_limits_with_unkown_param(self):
+def test_admin_can_fetch_used_limits_with_unknown_param(self):
req_get = {'tenant_id': '123', 'unknown': 'unknown'}
self._test_admin_can_fetch_used_limits_for_own_project(req_get)
@@ -462,7 +462,7 @@ class LimitsControllerTestV275(BaseLimitTestSuite):
absolute_limits = {
"metadata_items": 1,
}
-req = fakes.HTTPRequest.blank("/?unkown=fake",
+req = fakes.HTTPRequest.blank("/?unknown=fake",
version='2.74')
def _get_project_quotas(context, project_id, usages=True):
@@ -475,7 +475,7 @@ class LimitsControllerTestV275(BaseLimitTestSuite):
self.controller.index(req)
def test_index_additional_query_param(self):
-req = fakes.HTTPRequest.blank("/?unkown=fake",
+req = fakes.HTTPRequest.blank("/?unknown=fake",
version='2.75')
self.assertRaises(
exception.ValidationError,

View File

@@ -114,7 +114,7 @@ class LockServerTestsV273(LockServerTestsV21):
self.controller._lock, self.req, instance.uuid, body=body)
self.assertIn("256 is not of type 'string'", str(exp))
-def test_lock_with_invalid_paramater(self):
+def test_lock_with_invalid_parameter(self):
# This will fail from 2.73 since we have a schema check that allows
# only locked_reason
instance = fake_instance.fake_instance_obj(

View File

@@ -26,7 +26,7 @@ from nova import test
from nova.tests.unit.api.openstack import fakes
-# NOTE(stephenfin): obviously these aren't complete reponses, but this is all
+# NOTE(stephenfin): obviously these aren't complete responses, but this is all
# we care about
FAKE_NETWORKS = [
{

View File

@@ -446,7 +446,7 @@ class ConsolesExtensionTestV26(test.NoDBTestCase):
self.req, fakes.FAKE_UUID, body=body)
self.assertTrue(mock_handler.called)
-def test_create_console_not_found(self,):
+def test_create_console_not_found(self):
mock_handler = mock.MagicMock()
mock_handler.side_effect = exception.InstanceNotFound(
instance_id='xxx')

View File

@@ -192,7 +192,7 @@ class ServerExternalEventsTestV21(test.NoDBTestCase):
self.api.create, self.req, body=body)
def test_create_unknown_events(self):
-self.event_1['name'] = 'unkown_event'
+self.event_1['name'] = 'unknown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)

View File

@@ -8023,7 +8023,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
version=self.microversion)
def test_get_server_list_detail_with_down_cells(self):
-# Fake out 1 partially constructued instance and one full instance.
+# Fake out 1 partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
@@ -8151,7 +8151,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_list_with_down_cells(self):
-# Fake out 1 partially constructued instance and one full instance.
+# Fake out 1 partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
@@ -8203,7 +8203,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_with_down_cells(self):
-# Fake out 1 partially constructued instance.
+# Fake out 1 partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
@@ -8266,7 +8266,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_without_image_avz_user_id_set_from_down_cells(self):
-# Fake out 1 partially constructued instance.
+# Fake out 1 partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,

View File

@@ -18,7 +18,7 @@ Unit tests for the nova-status CLI interfaces.
# NOTE(cdent): Additional tests of nova-status may be found in
# nova/tests/functional/test_nova_status.py. Those tests use the external
-# PlacementFixture, which is only available in functioanl tests.
+# PlacementFixture, which is only available in functional tests.
from io import StringIO

View File

@@ -1354,7 +1354,7 @@ class _ComputeAPIUnitTestMixIn(object):
hard_delete=False)
def _fake_do_delete(context, instance, bdms,
-rservations=None, local=False):
+reservations=None, local=False):
pass
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@@ -3417,7 +3417,7 @@ class _ComputeAPIUnitTestMixIn(object):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance['uuid'], reason='unsupported')
if quiesce_fails:
-raise oslo_exceptions.MessagingTimeout('quiece timeout')
+raise oslo_exceptions.MessagingTimeout('quiesce timeout')
quiesced[0] = True
def fake_unquiesce_instance(context, instance, mapping=None):
@@ -3579,7 +3579,7 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_snapshot_volume_backed(quiesce_required=True,
quiesce_unsupported=False)
-def test_snaphost_volume_backed_with_quiesce_failure(self):
+def test_snapshot_volume_backed_with_quiesce_failure(self):
self.assertRaises(oslo_exceptions.MessagingTimeout,
self._test_snapshot_volume_backed,
quiesce_required=True,
@@ -4815,7 +4815,7 @@ class _ComputeAPIUnitTestMixIn(object):
def test_validate_vol_az_for_create_vol_az_matches_default_cpu_az(self):
"""Tests the scenario that the instance is not being created in a
specific zone and the volume's zone matches
-CONF.default_availabilty_zone so None is returned indicating the
+CONF.default_availability_zone so None is returned indicating the
RequestSpec.availability_zone does not need to be updated.
"""
self.flags(cross_az_attach=False, group='cinder')
@@ -7155,7 +7155,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
# be no conflict.
self.compute_api._validate_numa_rebuild(instance, image, flavor)
-def test__validate_numa_rebuild_add_numa_toplogy(self):
+def test__validate_numa_rebuild_add_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that requests a NUMA topology when the original instance did not
have a NUMA topology is invalid.
@@ -7178,7 +7178,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
exception.ImageNUMATopologyRebuildConflict,
self.compute_api._validate_numa_rebuild, instance, image, flavor)
-def test__validate_numa_rebuild_remove_numa_toplogy(self):
+def test__validate_numa_rebuild_remove_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that does not request a NUMA topology when the original image did
is invalid if it would alter the instances topology as a result.
@@ -7209,7 +7209,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.compute_api._validate_numa_rebuild, instance,
image, flavor)
-def test__validate_numa_rebuild_alter_numa_toplogy(self):
+def test__validate_numa_rebuild_alter_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that requests a different NUMA topology than the original image
is invalid.
@@ -8031,7 +8031,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=54)
def test_block_accelerators_until_service(self, mock_get_min):
-"""Support operating server with acclerators until compute service
+"""Support operating server with accelerators until compute service
more than the version of 53.
"""
extra_specs = {'accel:device_profile': 'mydp'}

View File

@@ -7379,7 +7379,7 @@ class ComputeTestCase(BaseTestCase,
fake_instance.fake_db_instance(uuid=uuids.migration_instance_5,
vm_state=vm_states.ACTIVE,
task_state=None),
-# The expceted migration result will be None instead of error
+# The expected migration result will be None instead of error
# since _poll_unconfirmed_resizes will not change it
# when the instance vm state is RESIZED and task state
# is deleting, see bug 1301696 for more detail
@@ -10493,7 +10493,7 @@ class ComputeAPITestCase(BaseTestCase):
pci_reqs = mock_claim_pci.mock_calls[0][1][1]
self.assertEqual([pci_req], pci_reqs.requests)
-# after the pci claim we also need to allocate that pci to the instace
+# after the pci claim we also need to allocate that pci to the instance
mock_allocate_pci.assert_called_once_with(self.context, instance)
# and as this changes the instance we have to save it.
mock_save.assert_called_once_with()

View File

@@ -5000,8 +5000,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute.reportclient,
'remove_provider_tree_from_instance_allocation'),
mock.patch('nova.objects.Instance.get_by_uuid')
-) as (_get_intances_on_driver, destroy, migration_list, migration_save,
-get_resources, remove_allocation, instance_get_by_uuid):
+) as (_get_instances_on_driver, destroy, migration_list,
+migration_save, get_resources, remove_allocation,
+instance_get_by_uuid):
migration_list.return_value = [migration_1]
instance_get_by_uuid.return_value = instance_1
get_resources.return_value = mock.sentinel.resources
@@ -5354,7 +5355,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self, mock_rebuild_claim, mock_set_migration_status,
mock_validate_policy, mock_image_meta, mock_notify_exists,
mock_notify_legacy, mock_notify, mock_instance_save,
-mock_setup_networks, mock_setup_intance_network, mock_get_bdms,
+mock_setup_networks, mock_setup_instance_network, mock_get_bdms,
mock_mutate_migration, mock_appy_migration, mock_drop_migration,
mock_context_elevated):
self.flags(api_servers=['http://localhost/image/v2'], group='glance')

View File

@@ -196,7 +196,7 @@ class TestCreateFlavor(test.TestCase):
def test_rxtx_factor_must_be_within_sql_float_range(self):
# We do * 10 since this is an approximation and we need to make sure
-# the difference is noticeble.
+# the difference is noticeable.
over_rxtx_factor = db_const.SQL_SP_FLOAT_MAX * 10
self.assertInvalidInput('flavor1', 64, 1, 120,

View File

@@ -4059,7 +4059,7 @@ class ProviderConfigTestCases(BaseTestCase):
# add the same trait in p_tree and provider config
# for testing ignoring CUSTOM trait code logic.
-# If a programmer accidently forgets to ignore (substract)
+# If a programmer accidentally forgets to ignore (subtract)
# existing custom traits, this test case will fail as we only expect
# "EXCEPTION_TRAIT" showed in ValueError exception rather than
# "EXCEPTION_TRAIT,CUSTOM_IGNORE_TRAIT"

View File

@@ -1237,7 +1237,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_version_cap_all_cells_no_access(self, mock_allcells, mock_minver,
mock_log_error):
"""Tests a scenario where nova-compute is configured with a connection
-to the API database and fails trying to get the minium nova-compute
+to the API database and fails trying to get the minimum nova-compute
service version across all cells because nova-compute is configured to
not allow direct database access.
"""

View File

@@ -2402,7 +2402,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'1', None, None, dp_name)
arq_uuid = arq_in_list[0]['uuid']
-# muliti device request
+# multi device request
mock_create.return_value = [arq_in_list[0], arq_in_list[0]]
rp_map = {"request_group_0" + str(port_id): rp_uuid}
request_tuples = [('123', '1.2.3.4', port_id,

View File

@@ -3359,7 +3359,7 @@ class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_are_in_order(self):
-"""Ensure retrived actions are in order."""
+"""Ensure retrieved actions are in order."""
uuid1 = uuidsentinel.uuid1
extra = {
@@ -3618,7 +3618,7 @@ class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertActionEventSaved(event, action['id'])
def test_instance_action_events_get_are_in_order(self):
-"""Ensure retrived action events are in order."""
+"""Ensure retrieved action events are in order."""
uuid1 = uuidsentinel.uuid1
action = db.action_start(self.ctxt,
@@ -5690,7 +5690,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Ensure shadow tables are empty
This method ensures that all the shadow tables in the schema,
-except for specificially named exceptions, are empty. This
+except for specifically named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content.
"""
metadata = sa.MetaData()

View File

@@ -56,7 +56,7 @@ class TestDBURL(test.NoDBTestCase):
class TestDBSync(test.NoDBTestCase):
-def test_db_sync_invalid_databse(self):
+def test_db_sync_invalid_database(self):
"""We only have two databases."""
self.assertRaises(
exception.Invalid, migration.db_sync, database='invalid')
@@ -152,7 +152,7 @@ class TestDBSync(test.NoDBTestCase):
@mock.patch.object(migration, '_find_migrate_repo')
class TestDBVersion(test.NoDBTestCase):
-def test_db_version_invalid_databse(
+def test_db_version_invalid_database(
self, mock_find_repo, mock_get_engine, mock_is_migrate,
mock_is_alembic, mock_migrate_version, mock_m_context_configure,
):

View File

@@ -5136,7 +5136,7 @@ class TestAPI(TestAPIBase):
self.api.has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {
constants.MIGRATING_ATTR: 'new-host'}
-# Pass a port with an migration porfile attribute.
+# Pass a port with an migration profile attribute.
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
@@ -5168,7 +5168,7 @@ class TestAPI(TestAPIBase):
self.api.has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {
constants.MIGRATING_ATTR: 'new-host'}
-# Pass a port with an migration porfile attribute.
+# Pass a port with an migration profile attribute.
get_ports = {
'ports': [
{'id': uuids.port1,
@@ -5208,7 +5208,7 @@ class TestAPI(TestAPIBase):
instance = fake_instance.fake_instance_obj(self.context)
self.api.has_port_binding_extension = mock.Mock(return_value=True)
-# Pass a port without any migration porfile attribute.
+# Pass a port without any migration profile attribute.
get_ports = {'ports': [
{'id': uuids.port_id,
constants.BINDING_HOST_ID: instance.host}]}
@@ -8118,7 +8118,7 @@ class TestAPIPortbinding(TestAPIBase):
self.assertEqual(1, mocked_client.create_port_binding.call_count)
self.assertDictEqual({uuids.port: binding['binding']}, result)
-# assert that that if vnic_type and profile are set in VIF object
+# assert that if vnic_type and profile are set in VIF object
# the provided vnic_type and profile take precedence.
nwinfo = model.NetworkInfo([model.VIF(id=uuids.port,

View File

@@ -317,7 +317,7 @@ class _TestInstanceGroupObject(object):
obj_primitive = obj.obj_to_primitive()
self.assertIn('policy', data(obj_primitive))
self.assertIn('policies', data(obj_primitive))
-# Before 1.10, only has polices which is the list of policy name
+# Before 1.10, only has policies which is the list of policy name
obj_primitive = obj.obj_to_primitive('1.10')
self.assertNotIn('policy', data(obj_primitive))
self.assertIn('policies', data(obj_primitive))

View File

@@ -886,7 +886,7 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
'dev_type': 'type-PF'}])]
self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
-def test_consume_VF_and_PF_same_prodict_id_failed(self):
+def test_consume_VF_and_PF_same_product_id_failed(self):
self._create_pci_devices(pf_product_id=1515)
pci_requests = [objects.InstancePCIRequest(count=9,
spec=[{'product_id': '1515'}])]

View File

@@ -44,7 +44,7 @@ class PciDeviceMatchTestCase(test.NoDBTestCase):
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
-def test_spec_dismatch(self):
+def test_spec_mismatch(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},

View File

@@ -79,7 +79,7 @@ class AdminActionsNoLegacyNoScopePolicyTest(AdminActionsPolicyTest):
def setUp(self):
super(AdminActionsNoLegacyNoScopePolicyTest, self).setUp()
-# With no legacy rule and scope diable, only project admin
+# With no legacy rule and scope disable, only project admin
# is able to perform server admin actions.
self.project_action_authorized_contexts = [self.project_admin_context]

View File

@@ -189,12 +189,12 @@ class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@mock.patch('nova.network.neutron.API.list_ports')
def test_deprecated_policy_overridden_rule_is_checked(self, mock_port,
mock_get):
-# Test to verify if deprecatd overridden policy is working.
+# Test to verify if deprecated overridden policy is working.
# check for success as admin role. Deprecated rule
# has been overridden with admin checks in policy.yaml
# If admin role pass it means overridden rule is enforced by
-# olso.policy because new default is system or project reader and the
+# oslo.policy because new default is system or project reader and the
# old default is admin.
self.controller.index(self.admin_req, uuids.fake_id)

View File

@@ -191,7 +191,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
@mock.patch('nova.api.openstack.common.get_instance')
def test_deprecated_policy_overridden_rule_is_checked(
self, mock_instance_get, mock_actions_get):
-# Test to verify if deprecatd overridden policy is working.
+# Test to verify if deprecated overridden policy is working.
instance = fake_instance.fake_instance_obj(
self.admin_or_owner_req.environ['nova.context'])
@@ -199,7 +199,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
# Check for success as admin_or_owner role. Deprecated rule
# has been overridden with admin checks in policy.yaml
# If admin role pass it means overridden rule is enforced by
-# olso.policy because new default is system reader and the old
+# oslo.policy because new default is system reader and the old
# default is admin.
self.controller.index(self.admin_or_owner_req, instance['uuid'])

View File

@@ -119,7 +119,7 @@ class LockServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.lock')
-def test_lock_sevrer_overridden_policy_pass_with_same_user(
+def test_lock_server_overridden_policy_pass_with_same_user(
self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})

View File

@@ -134,7 +134,7 @@ class MigrateServerOverridePolicyTest(
rule_migrate = ms_policies.POLICY_ROOT % 'migrate'
rule_live_migrate = ms_policies.POLICY_ROOT % 'migrate_live'
# NOTE(gmann): override the rule to project member and verify it
-# work as policy is system and projct scoped.
+# work as policy is system and project scoped.
self.policy.set_rules({
rule_migrate: base_policy.PROJECT_MEMBER,
rule_live_migrate: base_policy.PROJECT_MEMBER},

View File

@@ -89,7 +89,7 @@ class RescueServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.rescue')
-def test_rescue_sevrer_overridden_policy_pass_with_same_user(
+def test_rescue_server_overridden_policy_pass_with_same_user(
self, mock_rescue):
rule_name = rs_policies.BASE_POLICY_NAME
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})

View File

@@ -116,7 +116,7 @@ class ServerDiagnosticsOverridePolicyTest(
super(ServerDiagnosticsOverridePolicyTest, self).setUp()
rule = policies.BASE_POLICY_NAME
# NOTE(gmann): override the rule to project member and verify it
-# work as policy is projct scoped.
+# work as policy is project scoped.
self.policy.set_rules({
rule: base_policy.PROJECT_MEMBER},
overwrite=False)

View File

@@ -83,7 +83,7 @@ class ServerIpsNoLegacyNoScopePolicyTest(ServerIpsPolicyTest):
def setUp(self):
super(ServerIpsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, only project admin, member, and reader will be able
-# to get their server IP adderesses.
+# to get their server IP addresses.
self.project_reader_authorized_contexts = [
self.project_admin_context, self.project_member_context,
self.project_reader_context,
@@ -104,7 +104,7 @@ class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
super(ServerIpsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users will not be able
-# to get the server IP adderesses.
+# to get the server IP addresses.
self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
@@ -121,7 +121,7 @@ class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
def setUp(self):
super(ServerIpsScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
-# and reader will be able to get their server IP adderesses.
+# and reader will be able to get their server IP addresses.
self.project_reader_authorized_contexts = [
self.project_admin_context, self.project_member_context,
self.project_reader_context

View File

@@ -100,7 +100,7 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.shelve')
-def test_shelve_sevrer_overridden_policy_pass_with_same_user(
+def test_shelve_server_overridden_policy_pass_with_same_user(
self, mock_shelve):
rule_name = policies.POLICY_ROOT % 'shelve'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})

View File

@@ -48,7 +48,7 @@ class TestComputeCapabilitiesFilter(test.NoDBTestCase):
flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
self.assertFalse(self.filt_cls.host_passes(None, spec_obj))
-def test_compute_filter_fails_without_capabilites(self):
+def test_compute_filter_fails_without_capabilities(self):
cpu_info = """ { } """
cpu_info = str(cpu_info)

View File

@@ -114,7 +114,7 @@ class MetricsWeigherTestCase(test.NoDBTestCase):
setting = [idle + '=-2', idle + '=1']
self._do_test(setting, 1.0, 'host1')
-def test_single_resourcenegtive_ratio(self):
+def test_single_resourcenegative_ratio(self):
# host1: idle=512
# host2: idle=1024
# host3: idle=3072

View File

@@ -127,7 +127,7 @@ class PCIWeigherTestCase(test.NoDBTestCase):
"""Test weigher with a PCI device instance and huge hosts.
Ensure that the weigher gracefully degrades when the number of PCI
-devices on the host exceeeds MAX_DEVS.
+devices on the host exceeds MAX_DEVS.
"""
hosts = [
('host1', 'node1', [500]), # 500 devs

View File

@@ -29,7 +29,7 @@ class IdentityValidationTest(test.NoDBTestCase):
There are times when Nova stores keystone project_id and user_id
in our database as strings. Until the Pike release none of this
-data was validated, so it was very easy for adminstrators to think
+data was validated, so it was very easy for administrators to think
they were adjusting quota for a project (by name) when instead
they were just inserting keys in a database that would not get used.

View File

@@ -1200,7 +1200,7 @@ class MetadataHandlerTestCase(test.TestCase):
def _fake_x_get_metadata(self, self_app, instance_id, remote_address):
if remote_address is None:
-raise Exception('Expected X-Forwared-For header')
+raise Exception('Expected X-Forwarded-For header')
if encodeutils.to_utf8(instance_id) == self.expected_instance_id:
return self.mdinst

View File

@@ -112,7 +112,7 @@ class NotificationsTestCase(test.TestCase):
# test config disable of just the task state notifications
self.flags(notify_on_state_change="vm_state", group='notifications')
-# we should not get a notification on task stgate chagne now
+# we should not get a notification on task state change now
old = copy.copy(self.instance)
self.instance.task_state = task_states.SPAWNING

View File

@@ -202,7 +202,7 @@ class MountTestCase(test.NoDBTestCase):
device)
self.assertIsInstance(inst, block.BlockMount)
-def test_instance_for_device_block_partiton(self,):
+def test_instance_for_device_block_partiton(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1

View File

@@ -1830,7 +1830,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self.assertEqual(fake_local_disks, ret_val)
def test_get_scoped_flavor_extra_specs(self):
-# The flavor extra spect dict contains only string values.
+# The flavor extra specs dict contains only string values.
fake_total_bytes_sec = '8'
mock_instance = fake_instance.fake_instance_obj(self.context)

View File

@@ -2931,7 +2931,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# NOTE(artom) This is a
# (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
-# tuple. See _get_guest_numa_config() docstring for full documenation.
+# tuple. See _get_guest_numa_config() docstring for full documentation.
# _get_live_migrate_numa_info() only cares about guest_cpu_tune for CPU
# pinning and emulator thread pinning, and guest_numa_tune for cell
# pinning; so only include those 2 in the tuple.
@@ -11429,7 +11429,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'_check_shared_storage_test_file')
def _test_check_can_live_migrate_source_block_migration_none(
self, block_migrate, is_shared_instance_path, is_share_block,
-mock_check, mock_shared_block, mock_enough, mock_verson):
+mock_check, mock_shared_block, mock_enough, mock_version):
mock_check.return_value = is_shared_instance_path
mock_shared_block.return_value = is_share_block
@@ -14766,7 +14766,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('os.path.getsize')
def test_get_instance_disk_info_no_bdinfo_passed(self, mock_get_size,
mock_stat):
-# NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
+# NOTE(ndipanov): _get_disk_overcommitted_size_total calls this method
# without access to Nova's block device information. We want to make
# sure that we guess volumes mostly correctly in that case as well
instance = objects.Instance(**self.test_instance)
@@ -24154,7 +24154,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_retry_succeeds(
self, state, mock_event_wait
):
-"""Test that that a live detach times out while waiting for the libvirt
+"""Test that a live detach times out while waiting for the libvirt
event but then the retry succeeds.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -24209,7 +24209,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_retry_unplug_in_progress(
self, mock_event_wait
):
-"""Test that that a live detach times out while waiting for the libvirt
+"""Test that a live detach times out while waiting for the libvirt
event but then the retry gets a unplug already in progress error from
libvirt, which it ignores, then the detach finishes and the event is
received.
@@ -24289,10 +24289,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_run_out_of_retries(
self, state, mock_event_wait
):
-"""Test that that a live detach times out while waiting for the libvirt
+"""Test that a live detach times out while waiting for the libvirt
event at every attempt so the driver runs out of retry attempts.
"""
-# decreased the number to simplyfy the test
+# decreased the number to simplify the test
self.flags(group='libvirt', device_detach_attempts=2)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

View File

@@ -899,7 +899,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config',
wraps=vif.designer.set_vif_guest_frontend_config)
-def _test_model_sriov(self, vinc_type, mock_set):
+def _test_model_sriov(self, vnic_type, mock_set):
"""Direct attach vNICs shouldn't retrieve info from image_meta."""
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
@@ -911,7 +911,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio'}})
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
-None, 'kvm', vinc_type)
+None, 'kvm', vnic_type)
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
None, None, None, None)
self.assertIsNone(conf.vhost_queues)

View File

@@ -2249,7 +2249,7 @@ class NUMATopologyTest(test.NoDBTestCase):
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
-def test_host_usage_culmulative_with_free(self):
+def test_host_usage_cumulative_with_free(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
@@ -2657,7 +2657,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
self.host, self.instance2, self.limits)
self.assertIsNone(fitted_instance)
-def test_get_fitting_culmulative_fails_limits(self):
+def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host, self.instance1, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
@@ -2668,7 +2668,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
self.host, self.instance2, self.limits)
self.assertIsNone(fitted_instance2)
-def test_get_fitting_culmulative_success_limits(self):
+def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host, self.instance1, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)

View File

@@ -64,7 +64,7 @@ ephemeral_bdm = [block_device.BlockDeviceDict(
class ImageCacheManagerTests(test.NoDBTestCase):
-def test_configurationi_defaults(self):
+def test_configuration_defaults(self):
self.assertEqual(2400, CONF.image_cache.manager_interval)
self.assertEqual('_base', CONF.image_cache.subdirectory_name)
self.assertTrue(CONF.image_cache.remove_unused_base_images)

View File

@@ -168,7 +168,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
self.fail("Couldn't load driver %s - %s" % (cls, e))
self.assertEqual(cm.driver.__class__.__name__, driver,
-"Could't load driver %s" % cls)
+"Couldn't load driver %s" % cls)
@mock.patch.object(sys, 'exit', side_effect=test.TestingException())
def test_fail_to_load_new_drivers(self, mock_exit):
@@ -746,13 +746,13 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(cpu_allocation_ratio=16.1)
self.flags(ram_allocation_ratio=1.6)
self.flags(disk_allocation_ratio=1.1)
-expeced_ratios = {
+expected_ratios = {
orc.VCPU: CONF.cpu_allocation_ratio,
orc.MEMORY_MB: CONF.ram_allocation_ratio,
orc.DISK_GB: CONF.disk_allocation_ratio
}
# If conf is set, return conf
-self.assertEqual(expeced_ratios,
+self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
self.flags(cpu_allocation_ratio=None)
@@ -761,25 +761,25 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(initial_cpu_allocation_ratio=15.9)
self.flags(initial_ram_allocation_ratio=1.4)
self.flags(initial_disk_allocation_ratio=0.9)
-expeced_ratios = {
+expected_ratios = {
orc.VCPU: CONF.initial_cpu_allocation_ratio,
orc.MEMORY_MB: CONF.initial_ram_allocation_ratio,
orc.DISK_GB: CONF.initial_disk_allocation_ratio
}
# if conf is unset and inv doesn't exists, return init conf
-self.assertEqual(expeced_ratios,
+self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
inv = {orc.VCPU: {'allocation_ratio': 3.0},
orc.MEMORY_MB: {'allocation_ratio': 3.1},
orc.DISK_GB: {'allocation_ratio': 3.2}}
-expeced_ratios = {
+expected_ratios = {
orc.VCPU: inv[orc.VCPU]['allocation_ratio'],
orc.MEMORY_MB: inv[orc.MEMORY_MB]['allocation_ratio'],
orc.DISK_GB: inv[orc.DISK_GB]['allocation_ratio']
}
# if conf is unset and inv exists, return inv
-self.assertEqual(expeced_ratios,
+self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))

View File

@@ -580,7 +580,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
vmware_tools_status="toolsOk",
succeeds=False)
-def test_clean_shutdown_no_vwaretools(self):
+def test_clean_shutdown_no_vmwaretools(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=1,

View File

@@ -131,7 +131,7 @@ CAPABILITY_TRAITS_MAP = {
def _check_image_type_exclude_list(capability, supported):
-"""Enforce the exclusion list on image_type capabilites.
+"""Enforce the exclusion list on image_type capabilities.
:param capability: The supports_image_type_foo capability being checked
:param supported: The flag indicating whether the virt driver *can*

View File

@@ -742,7 +742,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# baremetal nodes. Depending on the version of Ironic,
# this can be as long as 2-10 seconds per every thousand
# nodes, and this call may retrieve all nodes in a deployment,
-# depending on if any filter paramters are applied.
+# depending on if any filter parameters are applied.
return self._get_node_list(fields=_NODE_FIELDS, **kwargs)
# NOTE(jroll) if partition_key is set, we need to limit nodes that
@@ -2057,7 +2057,7 @@ class IronicDriver(virt_driver.ComputeDriver):
return None
def _can_send_version(self, min_version=None, max_version=None):
-"""Validate if the suppplied version is available in the API."""
+"""Validate if the supplied version is available in the API."""
# NOTE(TheJulia): This will effectively just be a pass if no
# version negotiation has occured, since there is no way for
# us to know without explicitly otherwise requesting that

View File

@@ -9799,7 +9799,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# 'dest' will be substituted into 'migration_uri' so ensure
-# it does't contain any characters that could be used to
+# it doesn't contain any characters that could be used to
# exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)

View File

@@ -533,7 +533,7 @@ class Guest(object):
:param no_metadata: Make snapshot without remembering it
:param disk_only: Disk snapshot, no system checkpoint
:param reuse_ext: Reuse any existing external files
-:param quiesce: Use QGA to quiece all mounted file systems
+:param quiesce: Use QGA to quiesce all mounted file systems
"""
flags = no_metadata and (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)

View File

@@ -623,7 +623,7 @@ class LibvirtGenericVIFDriver(object):
# 2. libvirt driver does not change mac address for macvtap VNICs
# or Alternatively does not rely on recreating libvirt's nodev
# name from the current mac address set on the netdevice.
-# See: virt.libvrit.driver.LibvirtDriver._get_pcinet_info
+# See: virt.libvirt.driver.LibvirtDriver._get_pcinet_info
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
set_vf_interface_vlan(
vif['profile']['pci_slot'],

View File

@@ -131,7 +131,7 @@ class Hypervisor(object):
def guest_get_console_output(self, name):
"""get console out put of the given instance
-:returns: The output of the console of the instace, in string format.
+:returns: The output of the console of the instance, in string format.
"""
return self._reqh.call('guest_get_console_output', name)