[codespell] fix typos
This change fixes typos in tempest code, docs, Zuul YAML and the tox.ini file, which were found by codespell.

Change-Id: I8051d979e28f7dbf532181f339cbef9cd7220c76
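For readers unfamiliar with the tool, codespell is normally run from the repository root and can apply its suggestions in place. A minimal sketch follows (written in Python only for consistency with the rest of this page; the paths and flags are illustrative, not the exact invocation used for this change):

    # Hypothetical helper: run codespell over the directories touched by this
    # change and let it rewrite the files it flags. codespell exits non-zero
    # when it finds anything, so check=False keeps the sketch from raising.
    import subprocess

    subprocess.run(
        ["codespell", "--write-changes",
         "tempest", "doc", "zuul.d", "tox.ini"],
        check=False)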
commit a85bdb47ef (parent 8794025588)
@@ -23,7 +23,7 @@ branches. When this happens the expectation for those branches is to either
 switch to running Tempest from a tag with support for the branch, or exclude
 a newly introduced test (if that is the cause of the issue). Tempest will not
 be creating stable branches to support *Extended Maintenance* phase branches, as
-the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
+the burden is on the *Extended Maintenance* phase branch maintainers, not the Tempest
 project, to support that branch.
 
 .. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
@@ -80,7 +80,7 @@
 
 - name: Tempest 26.1.0 workaround to fallback exclude-list to blacklist
   # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with
-  # stestr 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1
+  # stestr 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1
   # in stable/train|ussuri|victoria) which does not have new args exclude-list
   # so let's fallback to old arg if new arg is passed.
   set_fact:
@@ -109,7 +109,7 @@
 
 - name: Tempest 26.1.0 workaround to fallback exclude-regex to black-regex
   # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with stestr
-  # 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1 in
+  # 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1 in
   # stable/train|ussuri|victoria) which does not have new args exclude-list so
   # let's fallback to old arg if new arg is passed.
   set_fact:
@@ -24,7 +24,7 @@ CONF = config.CONF
 
 
 class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
-    """Test creating servers on mutiple nodes with scheduler_hints."""
+    """Test creating servers on multiple nodes with scheduler_hints."""
     @classmethod
     def resource_setup(cls):
         super(ServersOnMultiNodesTest, cls).resource_setup()
@@ -410,7 +410,7 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
         :param validatable: whether to the server needs to be
             validatable. When True, validation resources are acquired via
             the `get_class_validation_resources` helper.
-        :param kwargs: extra paramaters are passed through to the
+        :param kwargs: extra parameters are passed through to the
             `create_test_server` call.
         :return: the UUID of the created server.
         """
@@ -71,7 +71,7 @@ class ImagesTestJSON(base.BaseV2ComputeTest):
             self.assertEqual(snapshot_name, image['name'])
         except lib_exceptions.TimeoutException as ex:
             # If timeout is reached, we don't need to check state,
-            # since, it wouldn't be a 'SAVING' state atleast and apart from
+            # since, it wouldn't be a 'SAVING' state at least and apart from
             # it, this testcase doesn't have scope for other state transition
             # Hence, skip the test.
             raise self.skipException("This test is skipped because " + str(ex))
@@ -130,7 +130,7 @@ class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
         except lib_exc.TimeoutException as ex:
             # Test cannot capture the image saving state.
             # If timeout is reached, we don't need to check state,
-            # since, it wouldn't be a 'SAVING' state atleast and apart from
+            # since, it wouldn't be a 'SAVING' state at least and apart from
             # it, this testcase doesn't have scope for other state transition
             # Hence, skip the test.
             raise self.skipException("This test is skipped because " + str(ex))
@@ -185,7 +185,7 @@ class ServersTestBootFromVolume(ServersTestJSON):
 
 
 class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
-    """Test creating server with FQDN hostname and verifying atrributes
+    """Test creating server with FQDN hostname and verifying attributes
 
     Starting Wallaby release, Nova sanitizes freeform characters in
     server hostname with dashes. This test verifies the same.
@@ -40,7 +40,7 @@ class MultipleCreateNegativeTestJSON(base.BaseV2ComputeTest):
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
     def test_max_count_less_than_one(self):
-        """Test creating server with max_count < 1 shoudld fail"""
+        """Test creating server with max_count < 1 should fail"""
         invalid_max_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
@@ -234,7 +234,7 @@ class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
         and virtio as the rescue disk.
         """
         # This test just check detach fail and does not
-        # perfom the detach operation but in cleanup from
+        # perform the detach operation but in cleanup from
         # self.attach_volume() it will try to detach the server
         # after unrescue the server. Due to that we need to make
         # server SSHable before it try to detach, more details are
@@ -139,7 +139,7 @@ class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
         """Test detaching volume from a rescued server should fail"""
         volume = self.create_volume()
         # This test just check detach fail and does not
-        # perfom the detach operation but in cleanup from
+        # perform the detach operation but in cleanup from
         # self.attach_volume() it will try to detach the server
         # after unrescue the server. Due to that we need to make
         # server SSHable before it try to detach, more details are
@@ -108,7 +108,7 @@ class AllowedAddressPairTestJSON(base.BaseNetworkTest):
         # both cases, with and without that "active" attribute, we need to
         # removes that field from the allowed_address_pairs which are returned
         # by the Neutron server.
-        # We could make expected results of those tests to be dependend on the
+        # We could make expected results of those tests to be dependent on the
         # available Neutron's API extensions but in that case existing tests
         # may fail randomly as all tests are always using same IP addresses
         # thus allowed_address_pair may be active=True or active=False.
@@ -129,7 +129,7 @@ class FloatingIPTestJSON(base.BaseNetworkTest):
         self.assertIsNone(updated_floating_ip['fixed_ip_address'])
         self.assertIsNone(updated_floating_ip['router_id'])
 
-        # Explicity test deletion of floating IP
+        # Explicitly test deletion of floating IP
         self.floating_ips_client.delete_floatingip(created_floating_ip['id'])
 
     @decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
@@ -118,7 +118,7 @@ class TagsExtTest(base.BaseNetworkTest):
     @classmethod
     def skip_checks(cls):
         super(TagsExtTest, cls).skip_checks()
-        # Added condition to support backward compatiblity since
+        # Added condition to support backward compatibility since
         # tag-ext has been renamed to standard-attr-tag
         if not (utils.is_extension_enabled('tag-ext', 'network') or
                 utils.is_extension_enabled('standard-attr-tag', 'network')):
@@ -142,7 +142,7 @@ class ContainerSyncTest(base.BaseObjectTest):
         """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
-            # use rsplit with a maxsplit of 1 to ensure ipv6 adresses are
+            # use rsplit with a maxsplit of 1 to ensure ipv6 addresses are
             # handled properly as well
             client_proxy_ip = urlparse.urlparse(
                 cont_client.base_url).netloc.rsplit(':', 1)[0]
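The maxsplit detail in the hunk above matters because an IPv6 netloc contains colons of its own; splitting from the right exactly once strips the port without mangling the address. A small standalone sketch (example URLs are made up, using the Python 3 stdlib directly):

    # rsplit(':', 1) peels off the port from the right, so bracketed IPv6
    # netlocs survive; a plain split(':') would cut them apart.
    from urllib.parse import urlparse

    for url in ("http://203.0.113.10:8080/v1",
                "http://[2001:db8::1]:8080/v1"):
        netloc = urlparse(url).netloc
        print(netloc, "->", netloc.rsplit(':', 1)[0])
    # 203.0.113.10:8080 -> 203.0.113.10
    # [2001:db8::1]:8080 -> [2001:db8::1]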
@@ -424,7 +424,7 @@ def create_websocket(url):
 
 class _WebSocket(object):
     def __init__(self, client_socket, url):
-        """Contructor for the WebSocket wrapper to the socket."""
+        """Constructor for the WebSocket wrapper to the socket."""
         self._socket = client_socket
         # cached stream for early frames.
         self.cached_stream = b''
@@ -53,7 +53,7 @@ class ExistsAllResponseHeaders(object):
         # Check common headers for all HTTP methods.
         #
         # Please note that for 1xx and 204 responses Content-Length presence
-        # is not checked intensionally. According to RFC 7230 a server MUST
+        # is not checked intentionally. According to RFC 7230 a server MUST
         # NOT send the header in such responses. Thus, clients should not
         # depend on this header. However, the standard does not require them
         # to validate the server's behavior. We leverage that to not refuse
@@ -606,7 +606,7 @@ def wait_for_server_floating_ip(servers_client, server, floating_ip,
        floating IPs.
    :param server: The server JSON dict on which to wait.
    :param floating_ip: The floating IP JSON dict on which to wait.
-   :param wait_for_disassociate: Boolean indiating whether to wait for
+   :param wait_for_disassociate: Boolean indicating whether to wait for
        disassociation instead of association.
    """
 
@@ -1177,7 +1177,7 @@ ScenarioGroup = [
     cfg.StrOpt('dhcp_client',
                default='udhcpc',
                choices=["udhcpc", "dhclient", "dhcpcd", ""],
-               help='DHCP client used by images to renew DCHP lease. '
+               help='DHCP client used by images to renew DHCP lease. '
                     'If left empty, update operation will be skipped. '
                     'Supported clients: "udhcpc", "dhclient", "dhcpcd"'),
     cfg.StrOpt('protocol',
@@ -236,7 +236,7 @@ show_volume_summary = {
     }
 }
 
-# TODO(zhufl): This is under discussion, so will be merged in a seperate patch.
+# TODO(zhufl): This is under discussion, so will be merged in a separate patch.
 # https://bugs.launchpad.net/cinder/+bug/1880566
 # upload_volume = {
 #     'status_code': [202],
@@ -266,7 +266,7 @@ class TestChecker(object):
                 "groups! This is not valid according to the PEP8 "
                 "style guide. " % source_path)
 
-        # Divide grouped_imports into groupes based on PEP8 style guide
+        # Divide grouped_imports into groups based on PEP8 style guide
         pep8_groups = {}
         package_name = self.package.__name__.split(".")[0]
         for key in grouped_imports:
@@ -51,7 +51,7 @@ class DynamicCredentialProvider(cred_provider.CredentialProvider):
         :param str identity_admin_role: The role name to use for admin
         :param list extra_roles: A list of strings for extra roles that should
             be assigned to all created users
-        :param bool neutron_available: Whether we are running in an environemnt
+        :param bool neutron_available: Whether we are running in an environment
             with neutron
         :param bool create_networks: Whether dynamic project networks should be
             created or not
@@ -453,7 +453,7 @@ class DynamicCredentialProvider(cred_provider.CredentialProvider):
         # NOTE(gmann): For 'domain' and 'system' scoped token, there is no
         # project_id so we are skipping the network creation for both
         # scope.
-        # We need to create nework resource once per project.
+        # We need to create network resource once per project.
         if (not project_id and (not scope or scope == 'project')):
             if (self.neutron_available and self.create_networks):
                 network, subnet, router = self._create_network_resources(
@@ -198,7 +198,7 @@ class cleanup_order:
     There are functions created as classmethod and the cleanup
     was managed by the class with addClassResourceCleanup,
     In case the function called from a class level (resource_setup) its ok
-    But when it is called from testcase level there is no reson to delete the
+    But when it is called from testcase level there is no reason to delete the
     resource when class tears down.
 
     The testcase results will not reflect the resources cleanup because test
@@ -159,7 +159,7 @@ class ImagesClient(rest_client.RestClient):
         """
         url = 'images/%s/file' % image_id
 
-        # We are going to do chunked transfert, so split the input data
+        # We are going to do chunked transfer, so split the input data
         # info fixed-sized chunks.
         headers = {'Content-Type': 'application/octet-stream'}
         data = iter(functools.partial(data.read, CHUNKSIZE), b'')
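The two-argument iter() idiom on the last line above is compact enough to deserve a gloss; a self-contained sketch (chunk size and payload are made up for the demo):

    # iter(callable, sentinel) keeps calling data.read(CHUNKSIZE) and stops
    # when it returns b'', so the payload is streamed in fixed-size chunks
    # instead of being read into memory at once.
    import functools
    import io

    CHUNKSIZE = 4
    data = io.BytesIO(b"0123456789")
    print(list(iter(functools.partial(data.read, CHUNKSIZE), b'')))
    # [b'0123', b'4567', b'89']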
@@ -64,7 +64,7 @@ class ContainerClient(rest_client.RestClient):
                                  delete_metadata=None,
                                  create_update_metadata_prefix='X-Container-Meta-',
                                  delete_metadata_prefix='X-Remove-Container-Meta-'):
-        """Creates, Updates or deletes an containter metadata entry.
+        """Creates, Updates or deletes an container metadata entry.
 
         Container Metadata can be created, updated or deleted based on
         metadata header or value. For detailed info, please refer to the
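The two header prefixes in this hunk follow Swift's container-metadata convention: X-Container-Meta-<key> sets or updates a key, and X-Remove-Container-Meta-<key> deletes it (the value on a removal header is ignored, as far as I know). A small illustrative sketch of how such headers could be assembled, not an exact Tempest call:

    # Build Swift container-metadata headers from two plain dicts;
    # the keys and values here are examples only.
    create_update_metadata = {'colour': 'blue'}
    delete_metadata = {'flavour': ''}

    headers = {}
    for key, value in create_update_metadata.items():
        headers['X-Container-Meta-' + key] = value
    for key in delete_metadata:
        headers['X-Remove-Container-Meta-' + key] = 'x'
    print(headers)
    # {'X-Container-Meta-colour': 'blue', 'X-Remove-Container-Meta-flavour': 'x'}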
@@ -86,7 +86,7 @@ class VolumesClient(base_client.BaseClient):
     def migrate_volume(self, volume_id, **kwargs):
         """Migrate a volume to a new backend
 
-        For a full list of available parameters please refer to the offical
+        For a full list of available parameters please refer to the official
         API reference:
 
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#migrate-a-volume
@@ -173,7 +173,7 @@ class VolumesClient(base_client.BaseClient):
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         # TODO(zhufl): This is under discussion, so will be merged
-        # in a seperate patch.
+        # in a separate patch.
         # https://bugs.launchpad.net/cinder/+bug/1880566
         # self.validate_response(schema.upload_volume, resp, body)
         self.expected_success(202, resp.status)
@@ -179,8 +179,7 @@ class TestNetworkBasicOps(manager.NetworkScenarioTest):
     def _check_public_network_connectivity(
             self, should_connect=True, msg=None,
             should_check_floating_ip_status=True, mtu=None):
-        """Verifies connectivty to a VM via public network and floating IP
-
+        """Verifies connectivity to a VM via public network and floating IP
         and verifies floating IP has resource status is correct.
 
         :param should_connect: bool. determines if connectivity check is
@@ -137,7 +137,7 @@ class TestStampPattern(manager.ScenarioTest):
 
         # Make sure the machine ssh-able before attaching the volume
         # Just a live machine is responding
-        # for device attache/detach as expected
+        # for device attach/detach as expected
         linux_client = self.get_remote_client(
             ip_for_snapshot, private_key=keypair['private_key'],
             server=server_from_snapshot)
@@ -646,7 +646,7 @@ class BaseTestCase(testtools.testcase.WithAttributes,
         then be run.
 
         Cleanup functions are always called during the test class tearDown
-        fixture, even if an exception occured during setUp or tearDown.
+        fixture, even if an exception occurred during setUp or tearDown.
         """
         cls._class_cleanups.append((fn, arguments, keywordArguments))
 
@@ -58,7 +58,7 @@ class TempestPlugin(object, metaclass=abc.ABCMeta):
                     help="Whether or not my service is available")
 
         # Note: as long as the group is listed in get_opt_lists,
-        # it will be possible to access its optins in the plugin code
+        # it will be possible to access its options in the plugin code
         # via ("-" in the group name are replaces with "_"):
         # CONF.my_service.<option_name>
         my_service_group = cfg.OptGroup(name="my-service",
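To make the note about get_opt_lists concrete, here is a partial, hypothetical plugin sketch (the class and option names are invented; a real plugin subclasses tempest's TempestPlugin and implements more methods). Registering the group and listing it in get_opt_lists is what makes CONF.my_service.<option_name> resolvable, with "-" in the group name read back as "_":

    from oslo_config import cfg

    my_service_group = cfg.OptGroup(name="my-service",
                                    title="My service options")
    MyServiceOpts = [
        cfg.BoolOpt("my_service_available",
                    default=True,
                    help="Whether or not my service is available"),
    ]


    class MyServicePluginSketch(object):
        """Illustrative fragment only, not a complete plugin."""

        def register_opts(self, conf):
            conf.register_group(my_service_group)
            conf.register_opts(MyServiceOpts, group=my_service_group)

        def get_opt_lists(self):
            # Groups returned here become accessible as CONF.<group>.<opt>.
            return [(my_service_group.name, MyServiceOpts)]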
@@ -252,7 +252,7 @@ class TestCredentialsFactory(base.TestCase):
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_no_domain(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         expected_domain = 'my_domain'
         mock_auth_get_credentials.return_value = expected_result
@@ -272,7 +272,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_domain(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         expected_domain = 'my_domain'
         mock_auth_get_credentials.return_value = expected_result
@@ -291,7 +291,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_system(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         mock_auth_get_credentials.return_value = expected_result
         cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
@@ -79,7 +79,7 @@ class TestDataUtils(base.TestCase):
         self.assertEqual(len(actual), 3)
         self.assertRegex(actual, "[A-Za-z0-9~!@#%^&*_=+]{3}")
         actual2 = data_utils.rand_password(2)
-        # NOTE(masayukig): Originally, we checked that the acutal and actual2
+        # NOTE(masayukig): Originally, we checked that the actual and actual2
         # are different each other. But only 3 letters can be the same value
         # in a very rare case. So, we just check the length here, too,
         # just in case.
@@ -54,7 +54,7 @@ class BaseServiceTest(base.TestCase):
           ``assert_called_once_with(foo='bar')`` is called.
         * If mock_args='foo' then ``assert_called_once_with('foo')``
           is called.
-        :param resp_as_string: Whether response body is retruned as string.
+        :param resp_as_string: Whether response body is returned as string.
            This is for service client methods which return ResponseBodyData
            object.
         :param kwargs: kwargs that are passed to function.
@@ -162,7 +162,7 @@ class TestSshClient(base.TestCase):
 
         client = ssh.Client('localhost', 'root', timeout=timeout)
         # We need to mock LOG here because LOG.info() calls time.time()
-        # in order to preprend a timestamp.
+        # in order to prepend a timestamp.
         with mock.patch.object(ssh, 'LOG'):
             self.assertRaises(exceptions.SSHTimeout,
                               client._get_ssh_connection)
tox.ini
@@ -154,7 +154,7 @@ envdir = .tox/tempest
 sitepackages = {[tempestenv]sitepackages}
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
-# But exlcude the extra tests mentioned in tools/tempest-extra-tests-list.txt
+# But exclude the extra tests mentioned in tools/tempest-extra-tests-list.txt
 regex = '(^tempest\.scenario.*)|(^tempest\.serial_tests)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)'
 commands =
     find . -type f -name "*.pyc" -delete
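The selection regex in this tox environment is dense; a minimal sketch of what it matches, assuming (as I read it) that stestr applies the pattern with Python's re.search against each test id:

    import re

    # Verbatim from the hunk above: pick scenario and serial_tests suites,
    # plus api tests that are not tagged with a [... slow ...] marker.
    pattern = re.compile(
        r'(^tempest\.scenario.*)|(^tempest\.serial_tests)'
        r'|(?!.*\[.*\bslow\b.*\])(^tempest\.api)')

    assert pattern.search('tempest.scenario.test_x.TestX.test_y[slow]')
    assert pattern.search('tempest.api.compute.test_x.TestX.test_y')
    assert not pattern.search('tempest.api.compute.test_x.TestX.test_y[id-1,slow]')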
@@ -24,7 +24,7 @@
     description: |
       Integration test of IPv6-only deployments. This job runs
       smoke and IPv6 relates tests only. Basic idea is to test
-      whether OpenStack Services listen on IPv6 addrress or not.
+      whether OpenStack Services listen on IPv6 address or not.
     timeout: 10800
     vars:
       tox_envlist: ipv6-only
@@ -73,7 +73,7 @@
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
-        # Enbale horizon so that we can run horizon test.
+        # Enable horizon so that we can run horizon test.
         horizon: true
 
 - job:
@@ -206,7 +206,7 @@
       tox_envlist: integrated-object-storage
       devstack_localrc:
         # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # install it on python2.7 only. But settting the USE_PYTHON3
+        # install it on python2.7 only. But setting the USE_PYTHON3
         # for future once swift is ready on py3.
         USE_PYTHON3: true
 
@@ -374,7 +374,7 @@
     This job runs the Tempest tests with scope and new defaults enabled.
     vars:
       devstack_localrc:
-        # Enabeling the scope and new defaults for services.
+        # Enabling the scope and new defaults for services.
        # NOTE: (gmann) We need to keep keystone scope check disable as
         # services (except ironic) does not support the system scope and
         # they need keystone to continue working with project scope. Until
@@ -475,7 +475,7 @@
         # (on SLURP as well as non SLURP release) so we are adding grenade-skip-level-always
         # job in integrated gate and we do not need to update skip level job
         # here until Nova change the decision.
-        # This is added from 2023.2 relese cycle onwards so we need to use branch variant
+        # This is added from 2023.2 release cycle onwards so we need to use branch variant
         # to make sure we do not run this job on older than 2023.2 gate.
         - grenade-skip-level-always:
             branches:
@@ -103,7 +103,7 @@
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
-        # Enbale horizon so that we can run horizon test.
+        # Enable horizon so that we can run horizon test.
         horizon: true
 
 - job: