extend pre-commit with basic python linting

This change adds a number of basic linting checks,
including codespell, and fixes the relevant issues in
the tempest plugin and playbooks.

Conflicts were due to the removal of playbooks/templates/compute_nodes.yaml.j2;
change I1f22131dc04a2d7a5f010da2dfa3f4e9524656a2 removes compute_nodes.yaml.j2.

Change-Id: Ife8704538555cb48954ad703b1354e3c69ee5884
Sean Mooney 2024-04-19 18:43:23 +01:00 committed by Amit Uniyal
parent d78d93803e
commit a12607754f
8 changed files with 49 additions and 12 deletions

@@ -20,4 +20,40 @@ repos:
args: [--enable=default-role]
files: ^README.rst
types: [rst]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: mixed-line-ending
args: ['--fix', 'lf']
exclude: '.*\.(svg)$'
- id: check-added-large-files
- id: fix-byte-order-marker
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-symlinks
- id: destroyed-symlinks
- id: check-merge-conflict
- id: debug-statements
- id: detect-private-key
- id: end-of-file-fixer
- id: no-commit-to-branch
- id: check-json
files: .*\.json$
- id: check-yaml
files: .*\.(yaml|yml)$
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.5
hooks:
- id: remove-tabs
exclude: '.*\.(svg)$'
- repo: https://github.com/pre-commit/mirrors-autopep8
rev: v2.0.4
hooks:
- id: autopep8
files: '^.*\.py$'
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
args: ['--ignore-words=dictionary.txt']

dictionary.txt (new empty file, used as the codespell --ignore-words list)

@@ -165,7 +165,7 @@ class CPUPolicyTest(BasePinningTest):
# Get the configured shared CPUs of each compute host and confirm
# that every host has the minimum number of shared CPUs necessary
# to preform test
# to perform test
shared_cpus_per_host = self._get_shared_set_size()
if any(len(cpus) < self.minimum_shared_cpus for cpus in
shared_cpus_per_host):
@@ -178,7 +178,7 @@ class CPUPolicyTest(BasePinningTest):
# Get the configured dedicated CPUs of each compute host and confirm
# that every host has the minimum number of shared CPUs necessary
# to preform test
# to perform test
dedicated_cpus_per_host = self._get_dedicated_set_size()
if any(len(cpus) < self.minimum_dedicated_cpus for cpus in
dedicated_cpus_per_host):

@@ -49,7 +49,8 @@ class NVDIMMTests(base.BaseWhiteboxComputeTest):
root = self.get_server_xml(server['id'])
pmem_device = root.find("./devices/memory[@model='nvdimm']")
self.assertIsNotNone(
pmem_device, "NVDIMM memory device was not found in instace %s XML"
pmem_device,
"NVDIMM memory device was not found in instance %s XML"
% server['id']
)
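
A minimal, self-contained sketch of the lookup this test performs, for readers unfamiliar with ElementTree's limited XPath support; the trimmed domain XML below is illustrative only and not taken from the plugin:

import xml.etree.ElementTree as ET

sample_domain_xml = """
<domain type='kvm'>
  <devices>
    <memory model='nvdimm'>
      <target>
        <size unit='KiB'>1048576</size>
        <node>0</node>
      </target>
    </memory>
  </devices>
</domain>
"""

root = ET.fromstring(sample_domain_xml)
# Same XPath as the test: a <memory> device whose model attribute is 'nvdimm'.
pmem_device = root.find("./devices/memory[@model='nvdimm']")
assert pmem_device is not None, "NVDIMM memory device was not found in XML"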

@@ -192,7 +192,7 @@ class SRIOVNumaAffinity(SRIOVBase, numa_helper.NUMAHelperMixin):
# Find the pinned pCPUs used by server B. They are not expected to have
# affinity so just confirm they are a subset of the host's
# cpu_dedicated_set. Also confirm pCPUs are not resued between guest A
# cpu_dedicated_set. Also confirm pCPUs are not rescued between guest A
# and B
cpu_pins_b = self.get_pinning_as_set(server_b['id'])
self.assertTrue(
@@ -1019,7 +1019,7 @@ class SRIOVAttachAndDetach(SRIOVBase):
"""Verify sriov direct-physical port attached/detached from guest
1. Create and sr-iov port based on the provided vnic_type
2. Launch two guests accessable by the UC via SSH. Test creates two
2. Launch two guests accessible by the UC via SSH. Test creates two
guests to validate the same port can be attached/removed from multiple
guests
3. Iterate over both guests doing the following steps:
@@ -1060,7 +1060,7 @@ class SRIOVAttachAndDetach(SRIOVBase):
self._validate_pf_pci_address_in_xml(
port['port']['id'], host_dev_xml)
# Verify the the interface's vendor ID and the phsyical device ID
# Verify the the interface's vendor ID and the physical device ID
# are present in the guest
self._check_device_in_guest(
linux_client,

@@ -231,7 +231,7 @@ class VGPUTest(base.BaseWhiteboxComputeTest):
"./devices/hostdev[@type='mdev']"
)
# Validate the numer of mdev host devices is equal to the expected
# Validate the number of mdev host devices is equal to the expected
# count provided to the method
self.assertEqual(
expected_device_count, len(vgpu_devices), "Expected %d "
@@ -438,7 +438,7 @@ class VGPUColdMigration(VGPUTest):
'but instead found %d' %
(src_host, pre_src_usage, current_dest_usage))
# Do a final sanity check of the guest after the rever to confirm the
# Do a final sanity check of the guest after the revert to confirm the
# vgpu device is present in the XML and vendor id is present in sysfs
self._validate_vgpu_instance(
server,
@@ -595,7 +595,7 @@ class VGPUMultiTypes(VGPUTest):
host. From there check the uuid of the vgpu device assigned to the
guest. Using the uuid, check mdev type of the host via
/sys/bus/mdev/devices/<uuid> and confirm the mdev_type there matches
the exepected mdev_type provided to the guest.
the expected mdev_type provided to the guest.
"""
custom_traits = CONF.whitebox_hardware.vgpu_type_mapping
for trait, expected_mdev_type in custom_traits.items():
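
As background for the sysfs check described in the docstring above, a rough sketch of resolving an mdev device's type, assuming the standard mdev sysfs layout; the helper name, example UUID, and 'nvidia-63' value are illustrative, not from the plugin:

import os

def mdev_type_of(device_uuid):
    # Under the usual mdev layout, <device>/mdev_type is a symlink to the
    # mdev_supported_types entry the device was created from, e.g.
    # .../mdev_supported_types/nvidia-63, so the basename of the resolved
    # link is the mdev type name.
    link = '/sys/bus/mdev/devices/%s/mdev_type' % device_uuid
    return os.path.basename(os.path.realpath(link))

# e.g. mdev_type_of('4b20d080-1b54-4048-85b3-a6a62d165c01') -> 'nvidia-63'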

@@ -27,7 +27,7 @@ class VTPMTest(base.BaseWhiteboxComputeTest):
Creating instance with a variety of device versions and module types are
tested. Tests require creating instance flavor with extra specs about the
tpm version and model to be specified and Barbican Key manager must enabled
in the environement to manage the instance secrets.
in the environment to manage the instance secrets.
"""
@classmethod
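
The flavor extra specs mentioned in the docstring above are the standard Nova vTPM keys; an illustrative example of the values such a flavor would carry (the plugin's flavor-creation helper may accept them differently):

# Illustrative only: standard Nova extra spec keys for an emulated vTPM.
vtpm_extra_specs = {
    'hw:tpm_version': '2.0',    # '1.2' is the other supported version
    'hw:tpm_model': 'tpm-crb',  # 'tpm-tis' is the other supported model
}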

@@ -170,7 +170,7 @@ database_opts = [
help='Address of the database host. This is normally a controller.'),
cfg.StrOpt(
'internal_ip',
help='If the databse service is listening on separate internal '
help='If the database service is listening on separate internal '
'network, this option specifies its IP on that network. It will '
'be used to set up an SSH tunnel through the database host.'),
cfg.StrOpt(