trivial: Enable E275 harder

Change I4657d46d27ecfc45299d398cd2f3848fbc64b5b5 skipped this.
Change Ib9f9a8bc6720ca8e1b916b2fa1583982859b08ae unskipped it.
But some things fell out while we were shaking the code.

Change-Id: I0164582af81daf49b7391944940d2f8c187a9d42
Was-Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Rebased-by: Pete Zaitcev <zaitcev@redhat.com>
This commit is contained in:
Stephen Finucane
2024-04-23 12:41:20 +01:00
parent a14312cc34
commit 4c0d0f8e98
22 changed files with 33 additions and 36 deletions

View File

@ -180,7 +180,7 @@ class API(base.Base):
idx = 0
while idx < len(services):
srv = services[idx]
if (self._az_matched(srv, availability_zone) and srv.is_up):
if self._az_matched(srv, availability_zone) and srv.is_up:
return srv.host
idx = idx + 1
return None

View File

@ -53,7 +53,7 @@ class ImageAccel(object):
self.src = src
self.dest = dest
self.compression_format = CONF.compression_format
if (self.compression_format == 'gzip'):
if self.compression_format == 'gzip':
self._accel_engine_path = _ACCEL_PATH_PREFERENCE_ORDER_LIST
else:
self._accel_engine_path = None

View File

@ -115,11 +115,11 @@ class SSHPool(pools.Pool):
self.resize(1)
# release all but the last connection using
# get and put to allow any get waiters to complete.
while (self.waiting() or self.current_size > 1):
while self.waiting() or self.current_size > 1:
conn = self.get()
self.put(conn)
        # Now free everything that is left
while (self.free_items):
while self.free_items:
self.free_items.popleft().close()
self.current_size -= 1

View File

@ -73,8 +73,7 @@ class BrickLvmTestCase(test.TestCase):
data = " fake-vg\n"
elif _lvm_prefix + 'lvm version' in cmd_string:
data = " LVM version: 2.03.07(2) (2019-11-30)\n"
elif (_lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in
cmd_string):
elif _lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in cmd_string:
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif (_lvm_prefix + 'vgs --noheadings --unit=g '
'-o name,size,free,lv_count,uuid '

View File

@ -2470,7 +2470,7 @@ class TestVmdkImageChecks(test.TestCase):
def test_check_vmdk_image_handles_missing_info(self):
expected = 'Unable to determine VMDK createType'
# remove create-type
del (self.qdata_data['create-type'])
del self.qdata_data['create-type']
iue = self.assertRaises(exception.ImageUnacceptable,
image_utils.check_vmdk_image,
fake.IMAGE_ID,
@ -2478,7 +2478,7 @@ class TestVmdkImageChecks(test.TestCase):
self.assertIn(expected, str(iue))
# remove entire data section
del (self.qdata_data)
del self.qdata_data
iue = self.assertRaises(exception.ImageUnacceptable,
image_utils.check_vmdk_image,
fake.IMAGE_ID,

View File

@ -5360,7 +5360,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
conf = self._set_unique_fqdn_override(False, in_shared)
my_connector = self.connector.copy()
del (my_connector['initiator'])
del my_connector['initiator']
my_connector['host'] = "abc123abc123abc123abc123abc123abc123"
safe_host = common._safe_hostname(my_connector, conf)
self.assertEqual(fixed_hostname, safe_host)

View File

@ -800,7 +800,7 @@ port_speed!N/A
value1 = filter1.split('=')[1]
value2 = filter2.split('=')[1]
for v in ports:
if (str(v[5]) == value1 and str(v[7]) == value2):
if str(v[5]) == value1 and str(v[7]) == value2:
rows.append(v)
else:
value = kwargs['filtervalue'].split('=')[1]

View File

@ -723,7 +723,7 @@ class NetAppDriverUtilsTestCase(test.TestCase):
def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self):
volume_type = copy.deepcopy(fake.VOLUME_TYPE)
del (volume_type['qos_specs_id'])
del volume_type['qos_specs_id']
mock_get_context = self.mock_object(context, 'get_admin_context')
result = na_utils.get_backend_qos_spec_from_volume_type(volume_type)

View File

@ -1355,7 +1355,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
def test_get_flow(self, is_migration_target, use_quota, flow_mock,
extract_ref_mock, onfailure_mock, extract_spec_mock,
notify_mock, create_mock, onfinish_mock, load_mock):
assert (isinstance(is_migration_target, bool))
self.assertIsInstance(is_migration_target, bool)
filter_properties = {'retry': mock.sentinel.retry}
tasks = [mock.call(extract_ref_mock.return_value),
mock.call(onfailure_mock.return_value),

View File

@ -411,7 +411,7 @@ class DataCoreVolumeDriver(driver.VolumeDriver):
kwargs = {'existing_ref': vd_alias,
'reason': 'Specified Virtual disk does not exist.'}
raise cinder_exception.ManageExistingInvalidReference(**kwargs)
return (self._get_size_in_gigabytes(virtual_disk.Size.Value))
return self._get_size_in_gigabytes(virtual_disk.Size.Value)
def _update_volume_stats(self):
performance_data = self._api.get_performance_by_type(

View File

@ -382,7 +382,7 @@ class RestClient(object):
if obj['status'] != 'ok':
msg = "is not mapped to the specified controller"
if (msg not in obj['message']):
if msg not in obj['message']:
errmsg = _('REST call status - %s') % obj['status']
raise exception.VolumeDriverException(errmsg)

View File

@ -1579,7 +1579,7 @@ class HPE3PARCommon(object):
"""We have to use a safe hostname length for 3PAR host names."""
hostname = connector['host']
unique_fqdn_network = configuration.unique_fqdn_network
if (not unique_fqdn_network and connector.get('initiator')):
if not unique_fqdn_network and connector.get('initiator'):
iqn = connector.get('initiator')
iqn = iqn.replace(":", "-")
return iqn[::-1][:31]

View File

@ -316,7 +316,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
self._group_target_enabled,
self._storage_protocol,
pool_name)
if (volume['size'] > snapshot['volume_size']):
if volume['size'] > snapshot['volume_size']:
vol_size = volume['size'] * units.Ki
reserve_size = 100 if reserve else 0
data = {"data": {'size': vol_size,

View File

@ -4439,7 +4439,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
if 'IOThrottle_rate' in model_update['metadata']:
del model_update['metadata']['IOThrottle_rate']
model_update['host'] = volume['host']
return (model_update)
return model_update
def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False):
return self._helpers.add_vdisk_copy(volume, dest_pool,

View File

@ -177,11 +177,11 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
target_names = self.targets.keys()
if provider_location:
target_name = provider_location.split(',1 ')[1].split(' ')[0]
if not (self.targets.get(target_name)):
if not self.targets.get(target_name):
self.targets[target_name] = []
if not (volume['name'] in self.targets[target_name]):
if not volume['name'] in self.targets[target_name]:
self.targets[target_name].append(volume['name'])
elif not (target_names):
elif not target_names:
# create first target and target group
target_name = self._create_target(0)
self.targets[target_name].append(volume['name'])
@ -193,7 +193,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
if len(self.targets[target_name]) >= 20:
# create new target and target group
target_name = self._create_target(len(target_names))
if not (volume['name'] in self.targets[target_name]):
if volume['name'] not in self.targets[target_name]:
self.targets[target_name].append(volume['name'])
return target_name

View File

@ -187,7 +187,7 @@ class NfsDriver(remotefs.RemoteFSSnapDriverDistributed):
# If both nas_host and nas_share_path are set we are not
# going to use the nfs_shares_config file. So, only check
# for its existence if it is going to be used.
if ((not nas_host) or (not nas_share_path)):
if (not nas_host) or (not nas_share_path):
config = self.configuration.nfs_shares_config
if not config:
msg = (_("There's no NFS config file configured (%s)") %

View File

@ -1258,7 +1258,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
LOG.info('Deleting stale snapshot: %s', snapshot.id)
self._delete(snapshot_path)
del (snap_info[snapshot.id])
del snap_info[snapshot.id]
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot: objects.Snapshot) -> None:
@ -1435,7 +1435,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del (snap_info[snapshot.id])
del snap_info[snapshot.id]
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self,
@ -1836,7 +1836,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
'type': 'qcow2',
'volume_id': snapshot.volume.id}
del (snap_info[snapshot.id])
del snap_info[snapshot.id]
update_format = True
else:
# blockCommit snapshot into base
@ -1849,7 +1849,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
'type': 'qcow2',
'volume_id': snapshot.volume.id}
del (snap_info[snapshot.id])
del snap_info[snapshot.id]
self._nova_assisted_vol_snap_delete(context, snapshot, delete_info)

View File

@ -486,7 +486,7 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
self._delete(merged_img_path)
# TODO(lpetrut): drop snapshot info file usage.
del (snap_info[snapshot.id])
del snap_info[snapshot.id]
self._write_info_file(info_path, snap_info)
if not isinstance(snapshot, objects.Snapshot):

View File

@ -162,7 +162,7 @@ class BrcdFCZoneClientCLI(object):
'zone_members_with_sep': zone_members_with_sep}
LOG.debug("Creating zone, cmd to run %s", cmd)
self.apply_zone_change(cmd.split())
if (iterator_count > 0):
if iterator_count > 0:
zone_with_sep += ';'
iterator_count += 1
zone_with_sep += zone
@ -243,7 +243,7 @@ class BrcdFCZoneClientCLI(object):
'zone_members_with_sep': zone_members_with_sep}
LOG.debug("Updating zone, cmd to run %s", cmd)
self.apply_zone_change(cmd.split())
if (iterator_count > 0):
if iterator_count > 0:
zone_with_sep += ';'
iterator_count += 1
zone_with_sep += zone
@ -348,19 +348,17 @@ class BrcdFCZoneClientCLI(object):
self.apply_zone_change(cmd.split())
def _cfg_trans_abort(self):
is_abortable = self._is_trans_abortable()
if (is_abortable):
if self._is_trans_abortable():
self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT])
def _is_trans_abortable(self):
is_abortable = False
stdout, stderr = None, None
stdout, stderr = self._run_ssh(
[zone_constant.CFG_SHOW_TRANS], True, 1)
output = stdout.splitlines()
is_abortable = False
for line in output:
if (zone_constant.TRANS_ABORTABLE in line):
if zone_constant.TRANS_ABORTABLE in line:
is_abortable = True
break
if stderr:

View File

@ -895,7 +895,7 @@ class BrcdHTTPFCZoneClient(object):
timeout = 360
sleep_time = 3
time_elapsed = 0
while (status != "done"):
while status != "done":
txn_response = self.connect(
zone_constant.GET_METHOD, transURL, "", headers)
parsed_data_txn = self.get_parsed_data(txn_response,

View File

@ -218,7 +218,7 @@ class CiscoFCSanLookupService(fc_service.FCSanLookupService):
"""
nsinfo_list = []
for line in switch_data:
if not (" N " in line):
if " N " not in line:
continue
linesplit = line.split()
if len(linesplit) > 2:

View File

@ -406,7 +406,7 @@ class CiscoFCZoneClientCLI(object):
"""
return_list = []
for line in switch_data:
if not (" N " in line):
if " N " not in line:
continue
linesplit = line.split()
if len(linesplit) > 2: