diff --git a/cinder/backup/api.py b/cinder/backup/api.py index ef9c385b687..8406e7e2285 100644 --- a/cinder/backup/api.py +++ b/cinder/backup/api.py @@ -180,7 +180,7 @@ class API(base.Base): idx = 0 while idx < len(services): srv = services[idx] - if (self._az_matched(srv, availability_zone) and srv.is_up): + if self._az_matched(srv, availability_zone) and srv.is_up: return srv.host idx = idx + 1 return None diff --git a/cinder/image/accelerator.py b/cinder/image/accelerator.py index a6539162df6..c4f5c3e48e9 100644 --- a/cinder/image/accelerator.py +++ b/cinder/image/accelerator.py @@ -53,7 +53,7 @@ class ImageAccel(object): self.src = src self.dest = dest self.compression_format = CONF.compression_format - if (self.compression_format == 'gzip'): + if self.compression_format == 'gzip': self._accel_engine_path = _ACCEL_PATH_PREFERENCE_ORDER_LIST else: self._accel_engine_path = None diff --git a/cinder/ssh_utils.py b/cinder/ssh_utils.py index 341440cdead..b40154b6df9 100644 --- a/cinder/ssh_utils.py +++ b/cinder/ssh_utils.py @@ -115,11 +115,11 @@ class SSHPool(pools.Pool): self.resize(1) # release all but the last connection using # get and put to allow any get waiters to complete. 
- while (self.waiting() or self.current_size > 1): + while self.waiting() or self.current_size > 1: conn = self.get() self.put(conn) # Now free everthing that is left - while (self.free_items): + while self.free_items: self.free_items.popleft().close() self.current_size -= 1 diff --git a/cinder/tests/unit/brick/test_brick_lvm.py b/cinder/tests/unit/brick/test_brick_lvm.py index 2b89b84ba6d..82c6703546c 100644 --- a/cinder/tests/unit/brick/test_brick_lvm.py +++ b/cinder/tests/unit/brick/test_brick_lvm.py @@ -73,8 +73,7 @@ class BrickLvmTestCase(test.TestCase): data = " fake-vg\n" elif _lvm_prefix + 'lvm version' in cmd_string: data = " LVM version: 2.03.07(2) (2019-11-30)\n" - elif (_lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in - cmd_string): + elif _lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in cmd_string: data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" elif (_lvm_prefix + 'vgs --noheadings --unit=g ' '-o name,size,free,lv_count,uuid ' diff --git a/cinder/tests/unit/test_image_utils.py b/cinder/tests/unit/test_image_utils.py index ce9b84b7c49..4905f66ac4c 100644 --- a/cinder/tests/unit/test_image_utils.py +++ b/cinder/tests/unit/test_image_utils.py @@ -2470,7 +2470,7 @@ class TestVmdkImageChecks(test.TestCase): def test_check_vmdk_image_handles_missing_info(self): expected = 'Unable to determine VMDK createType' # remove create-type - del (self.qdata_data['create-type']) + del self.qdata_data['create-type'] iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, @@ -2478,7 +2478,7 @@ class TestVmdkImageChecks(test.TestCase): self.assertIn(expected, str(iue)) # remove entire data section - del (self.qdata_data) + del self.qdata_data iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py index 3079dea5cc0..f06cb987dee 100644 --- 
a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py +++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py @@ -5360,7 +5360,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver): conf = self._set_unique_fqdn_override(False, in_shared) my_connector = self.connector.copy() - del (my_connector['initiator']) + del my_connector['initiator'] my_connector['host'] = "abc123abc123abc123abc123abc123abc123" safe_host = common._safe_hostname(my_connector, conf) self.assertEqual(fixed_hostname, safe_host) diff --git a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py index 3af22c8aa95..2d7c48d5e86 100644 --- a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py @@ -800,7 +800,7 @@ port_speed!N/A value1 = filter1.split('=')[1] value2 = filter2.split('=')[1] for v in ports: - if (str(v[5]) == value1 and str(v[7]) == value2): + if str(v[5]) == value1 and str(v[7]) == value2: rows.append(v) else: value = kwargs['filtervalue'].split('=')[1] diff --git a/cinder/tests/unit/volume/drivers/netapp/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/test_utils.py index 3bd316c2847..0759ec7cb32 100644 --- a/cinder/tests/unit/volume/drivers/netapp/test_utils.py +++ b/cinder/tests/unit/volume/drivers/netapp/test_utils.py @@ -723,7 +723,7 @@ class NetAppDriverUtilsTestCase(test.TestCase): def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self): volume_type = copy.deepcopy(fake.VOLUME_TYPE) - del (volume_type['qos_specs_id']) + del volume_type['qos_specs_id'] mock_get_context = self.mock_object(context, 'get_admin_context') result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py index 05fc0579fd4..eb677ccdbcf 100644 --- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py +++ 
b/cinder/tests/unit/volume/flows/test_create_volume_flow.py @@ -1355,7 +1355,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase): def test_get_flow(self, is_migration_target, use_quota, flow_mock, extract_ref_mock, onfailure_mock, extract_spec_mock, notify_mock, create_mock, onfinish_mock, load_mock): - assert (isinstance(is_migration_target, bool)) + self.assertIsInstance(is_migration_target, bool) filter_properties = {'retry': mock.sentinel.retry} tasks = [mock.call(extract_ref_mock.return_value), mock.call(onfailure_mock.return_value), diff --git a/cinder/volume/drivers/datacore/driver.py b/cinder/volume/drivers/datacore/driver.py index ca77d808872..6f6422b82b7 100644 --- a/cinder/volume/drivers/datacore/driver.py +++ b/cinder/volume/drivers/datacore/driver.py @@ -411,7 +411,7 @@ class DataCoreVolumeDriver(driver.VolumeDriver): kwargs = {'existing_ref': vd_alias, 'reason': 'Specified Virtual disk does not exist.'} raise cinder_exception.ManageExistingInvalidReference(**kwargs) - return (self._get_size_in_gigabytes(virtual_disk.Size.Value)) + return self._get_size_in_gigabytes(virtual_disk.Size.Value) def _update_volume_stats(self): performance_data = self._api.get_performance_by_type( diff --git a/cinder/volume/drivers/hedvig/rest_client.py b/cinder/volume/drivers/hedvig/rest_client.py index dfa421c6d46..c56754156b3 100644 --- a/cinder/volume/drivers/hedvig/rest_client.py +++ b/cinder/volume/drivers/hedvig/rest_client.py @@ -382,7 +382,7 @@ class RestClient(object): if obj['status'] != 'ok': msg = "is not mapped to the specified controller" - if (msg not in obj['message']): + if msg not in obj['message']: errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py index 799da394de6..c19cfb63771 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_common.py +++ b/cinder/volume/drivers/hpe/hpe_3par_common.py @@ -1579,7 
+1579,7 @@ class HPE3PARCommon(object): """We have to use a safe hostname length for 3PAR host names.""" hostname = connector['host'] unique_fqdn_network = configuration.unique_fqdn_network - if (not unique_fqdn_network and connector.get('initiator')): + if not unique_fqdn_network and connector.get('initiator'): iqn = connector.get('initiator') iqn = iqn.replace(":", "-") return iqn[::-1][:31] diff --git a/cinder/volume/drivers/hpe/nimble.py b/cinder/volume/drivers/hpe/nimble.py index e3e7eb4c621..f24cc026e8b 100644 --- a/cinder/volume/drivers/hpe/nimble.py +++ b/cinder/volume/drivers/hpe/nimble.py @@ -316,7 +316,7 @@ class NimbleBaseVolumeDriver(san.SanDriver): self._group_target_enabled, self._storage_protocol, pool_name) - if (volume['size'] > snapshot['volume_size']): + if volume['size'] > snapshot['volume_size']: vol_size = volume['size'] * units.Ki reserve_size = 100 if reserve else 0 data = {"data": {'size': vol_size, diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py index c5c6ef792de..ba8b5989eab 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py @@ -4439,7 +4439,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, if 'IOThrottle_rate' in model_update['metadata']: del model_update['metadata']['IOThrottle_rate'] model_update['host'] = volume['host'] - return (model_update) + return model_update def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False): return self._helpers.add_vdisk_copy(volume, dest_pool, diff --git a/cinder/volume/drivers/nexenta/iscsi.py b/cinder/volume/drivers/nexenta/iscsi.py index c0e630c976a..682ef68cb9d 100644 --- a/cinder/volume/drivers/nexenta/iscsi.py +++ b/cinder/volume/drivers/nexenta/iscsi.py @@ -177,11 +177,11 @@ class NexentaISCSIDriver(driver.ISCSIDriver): target_names = self.targets.keys() if provider_location: target_name = 
provider_location.split(',1 ')[1].split(' ')[0] - if not (self.targets.get(target_name)): + if not self.targets.get(target_name): self.targets[target_name] = [] - if not (volume['name'] in self.targets[target_name]): + if volume['name'] not in self.targets[target_name]: self.targets[target_name].append(volume['name']) - elif not (target_names): + elif not target_names: # create first target and target group target_name = self._create_target(0) self.targets[target_name].append(volume['name']) @@ -193,7 +193,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): if len(self.targets[target_name]) >= 20: # create new target and target group target_name = self._create_target(len(target_names)) - if not (volume['name'] in self.targets[target_name]): + if volume['name'] not in self.targets[target_name]: self.targets[target_name].append(volume['name']) return target_name diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py index 95c33e0b756..b2942a9ee22 100644 --- a/cinder/volume/drivers/nfs.py +++ b/cinder/volume/drivers/nfs.py @@ -187,7 +187,7 @@ class NfsDriver(remotefs.RemoteFSSnapDriverDistributed): # If both nas_host and nas_share_path are set we are not # going to use the nfs_shares_config file. So, only check # for its existence if it is going to be used. 
- if ((not nas_host) or (not nas_share_path)): + if (not nas_host) or (not nas_share_path): config = self.configuration.nfs_shares_config if not config: msg = (_("There's no NFS config file configured (%s)") % diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py index 40d14b8faac..7d97e672b6a 100644 --- a/cinder/volume/drivers/remotefs.py +++ b/cinder/volume/drivers/remotefs.py @@ -1258,7 +1258,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): LOG.info('Deleting stale snapshot: %s', snapshot.id) self._delete(snapshot_path) - del (snap_info[snapshot.id]) + del snap_info[snapshot.id] self._write_info_file(info_path, snap_info) def _delete_snapshot(self, snapshot: objects.Snapshot) -> None: @@ -1435,7 +1435,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): self._rebase_img(higher_file_path, base_file, base_file_fmt) # Remove snapshot_file from info - del (snap_info[snapshot.id]) + del snap_info[snapshot.id] self._write_info_file(info_path, snap_info) def _create_volume_from_snapshot(self, @@ -1836,7 +1836,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): 'type': 'qcow2', 'volume_id': snapshot.volume.id} - del (snap_info[snapshot.id]) + del snap_info[snapshot.id] update_format = True else: # blockCommit snapshot into base @@ -1849,7 +1849,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): 'type': 'qcow2', 'volume_id': snapshot.volume.id} - del (snap_info[snapshot.id]) + del snap_info[snapshot.id] self._nova_assisted_vol_snap_delete(context, snapshot, delete_info) diff --git a/cinder/volume/drivers/windows/smbfs.py b/cinder/volume/drivers/windows/smbfs.py index cd0ee0bafab..0be1e7b9468 100644 --- a/cinder/volume/drivers/windows/smbfs.py +++ b/cinder/volume/drivers/windows/smbfs.py @@ -486,7 +486,7 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin, self._delete(merged_img_path) # TODO(lpetrut): drop snapshot info file usage. 
- del (snap_info[snapshot.id]) + del snap_info[snapshot.id] self._write_info_file(info_path, snap_info) if not isinstance(snapshot, objects.Snapshot): diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py index 9805f48b3ae..d96d5689fd9 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py @@ -162,7 +162,7 @@ class BrcdFCZoneClientCLI(object): 'zone_members_with_sep': zone_members_with_sep} LOG.debug("Creating zone, cmd to run %s", cmd) self.apply_zone_change(cmd.split()) - if (iterator_count > 0): + if iterator_count > 0: zone_with_sep += ';' iterator_count += 1 zone_with_sep += zone @@ -243,7 +243,7 @@ class BrcdFCZoneClientCLI(object): 'zone_members_with_sep': zone_members_with_sep} LOG.debug("Updating zone, cmd to run %s", cmd) self.apply_zone_change(cmd.split()) - if (iterator_count > 0): + if iterator_count > 0: zone_with_sep += ';' iterator_count += 1 zone_with_sep += zone @@ -348,19 +348,17 @@ class BrcdFCZoneClientCLI(object): self.apply_zone_change(cmd.split()) def _cfg_trans_abort(self): - is_abortable = self._is_trans_abortable() - if (is_abortable): + if self._is_trans_abortable(): self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT]) def _is_trans_abortable(self): - is_abortable = False stdout, stderr = None, None stdout, stderr = self._run_ssh( [zone_constant.CFG_SHOW_TRANS], True, 1) output = stdout.splitlines() is_abortable = False for line in output: - if (zone_constant.TRANS_ABORTABLE in line): + if zone_constant.TRANS_ABORTABLE in line: is_abortable = True break if stderr: diff --git a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py index eeb8b97e957..c2be409b939 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py +++ 
b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py @@ -895,7 +895,7 @@ class BrcdHTTPFCZoneClient(object): timeout = 360 sleep_time = 3 time_elapsed = 0 - while (status != "done"): + while status != "done": txn_response = self.connect( zone_constant.GET_METHOD, transURL, "", headers) parsed_data_txn = self.get_parsed_data(txn_response, diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py index 1c7e429e29f..d672af391b8 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py @@ -218,7 +218,7 @@ class CiscoFCSanLookupService(fc_service.FCSanLookupService): """ nsinfo_list = [] for line in switch_data: - if not (" N " in line): + if " N " not in line: continue linesplit = line.split() if len(linesplit) > 2: diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py index f3c38e13851..026ed701ee2 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py @@ -406,7 +406,7 @@ class CiscoFCZoneClientCLI(object): """ return_list = [] for line in switch_data: - if not (" N " in line): + if " N " not in line: continue linesplit = line.split() if len(linesplit) > 2: