Merge "Re-enable new pycodestyle errors"
This commit is contained in:
commit
6077b33389
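For reference, a minimal sketch of the two re-enabled checks (the function
names below are illustrative, not taken from this commit):

# E117: the body of the "if" is over-indented by one level.
def bad_indent(value):
    if value:
            return value  # pycodestyle flags this line as E117
    return None


def helper():
    return 42
CONSTANT = helper()  # E305: expected 2 blank lines after function definition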
@@ -168,6 +168,7 @@ class Limit(object):
             "resetTime": int(self.next_request or self._get_time()),
         }

+
 # "Limit" format is a dictionary with the HTTP verb, human-readable URI,
 # a regular-expression to match, value and unit of measure (PER_DAY, etc.)
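As a purely hypothetical illustration of the format described in the
comment above (the key names are assumed, not shown in this diff):

EXAMPLE_LIMIT = {
    "verb": "POST",         # HTTP verb
    "uri": "/volumes",      # human-readable URI
    "regex": "^/volumes",   # regular expression to match
    "value": 10,            # allowed number of requests
    "unit": "PER_MINUTE",   # unit of measure (PER_DAY, etc.)
}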
@@ -171,11 +171,11 @@ def _validate_status(param_value):
         msg = _("The 'status' can not be empty.")
         raise exception.InvalidGroupSnapshotStatus(reason=msg)
     elif param_value.lower() not in c_fields.GroupSnapshotStatus.ALL:
-            msg = _("Group snapshot status: %(status)s is invalid, "
-                    "valid statuses are: "
-                    "%(valid)s.") % {'status': param_value,
-                                     'valid': c_fields.GroupSnapshotStatus.ALL}
-            raise exception.InvalidGroupSnapshotStatus(reason=msg)
+        msg = _("Group snapshot status: %(status)s is invalid, "
+                "valid statuses are: "
+                "%(valid)s.") % {'status': param_value,
+                                 'valid': c_fields.GroupSnapshotStatus.ALL}
+        raise exception.InvalidGroupSnapshotStatus(reason=msg)
     return True
@@ -103,6 +103,7 @@ def get_session(use_slave=False, **kwargs):
 def dispose_engine():
     get_engine().dispose()

+
 _DEFAULT_QUOTA_NAME = 'default'
@@ -338,9 +338,9 @@ def verify_glance_image_signature(context, image_service, image_id, path):
                 img_sig_cert_uuid,
                 img_sig_hash_method,
                 img_sig_key_type]):
-            LOG.error('Image signature metadata for image %s is '
-                      'incomplete.', image_id)
-            raise exception.InvalidSignatureImage(image_id=image_id)
+        LOG.error('Image signature metadata for image %s is '
+                  'incomplete.', image_id)
+        raise exception.InvalidSignatureImage(image_id=image_id)

     try:
         verifier = signature_utils.get_verifier(
@@ -221,6 +221,7 @@ class EvalBoolOrOp(object):
         right = self.value[2].eval()
         return left or right

+
 _parser = None
 _vars = {}
@@ -489,7 +489,7 @@ class ApiSampleTestBase(functional_helpers._FunctionalTestBase):
         body = self._read_template(name) % self.subs
         sample = self._get_sample(name)
         if self.generate_samples and not os.path.exists(sample):
-                self._write_sample(name, body)
+            self._write_sample(name, body)
         return self._get_response(url, method, body, headers=headers)

     def _do_put(self, url, name=None, subs=None, headers=None):
@@ -1046,11 +1046,11 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
         mock_usage.side_effect = self._fake_quota_usage_get_all_by_project

         class FakeUsage(object):
-                def __init__(self, in_use, reserved):
-                    self.in_use = in_use
-                    self.reserved = reserved
-                    self.until_refresh = None
-                    self.total = self.reserved + self.in_use
+            def __init__(self, in_use, reserved):
+                self.in_use = in_use
+                self.reserved = reserved
+                self.until_refresh = None
+                self.total = self.reserved + self.in_use

         def _fake__get_quota_usages(context, session, project_id,
                                     resources=None):
@@ -69,6 +69,7 @@ def service_get(context, service_id, backend_match_level=None, host=None,
             'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}
     raise exception.ServiceNotFound(service_id=host)

+
 # Some of the tests check that volume types are correctly validated during a
 # volume manage operation. This data structure represents an existing volume
 # type.
@@ -38,6 +38,7 @@ def generate_type(type_id, is_public):
         'is_public': bool(is_public)
     }

+
 VOLUME_TYPES = {
     fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True),
     fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True),
@@ -426,22 +426,22 @@ class PSSeriesISCSIDriverTestCase(test.TestCase):
         self.assert_volume_stats(stats)

     def assert_volume_stats(self, stats):
-            thin_enabled = self.configuration.san_thin_provision
-            self.assertEqual(float('111.0'), stats['total_capacity_gb'])
-            self.assertEqual(float('11.0'), stats['free_capacity_gb'])
-            self.assertEqual(100, stats['total_volumes'])
+        thin_enabled = self.configuration.san_thin_provision
+        self.assertEqual(float('111.0'), stats['total_capacity_gb'])
+        self.assertEqual(float('11.0'), stats['free_capacity_gb'])
+        self.assertEqual(100, stats['total_volumes'])

-            if thin_enabled:
-                self.assertEqual(80.0, stats['provisioned_capacity_gb'])
-            else:
-                space = stats['total_capacity_gb'] - stats['free_capacity_gb']
-                self.assertEqual(space, stats['provisioned_capacity_gb'])
+        if thin_enabled:
+            self.assertEqual(80.0, stats['provisioned_capacity_gb'])
+        else:
+            space = stats['total_capacity_gb'] - stats['free_capacity_gb']
+            self.assertEqual(space, stats['provisioned_capacity_gb'])

-            self.assertEqual(thin_enabled, stats['thin_provisioning_support'])
-            self.assertEqual(not thin_enabled,
-                             stats['thick_provisioning_support'])
-            self.assertEqual('Dell EMC', stats['vendor_name'])
-            self.assertFalse(stats['multiattach'])
+        self.assertEqual(thin_enabled, stats['thin_provisioning_support'])
+        self.assertEqual(not thin_enabled,
+                         stats['thick_provisioning_support'])
+        self.assertEqual('Dell EMC', stats['vendor_name'])
+        self.assertFalse(stats['multiattach'])

     def test_get_space_in_gb(self):
         self.assertEqual(123.0, self.driver._get_space_in_gb('123.0GB'))
@@ -10167,6 +10167,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
                             expected +
                             self.standard_logout)

+
 VLUNS5_RET = ({'members':
                [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                  'active': True},
@@ -760,29 +760,29 @@ class XIVProxyTest(test.TestCase):
                 "xiv_replication.VolumeReplication.create_replication",
                 mock.MagicMock())
     def test_create_volume_with_consistency_group_diff_state(self):
-            """Test Create volume with consistency_group but diff state"""
-            driver = mock.MagicMock()
-            driver.VERSION = "VERSION"
+        """Test Create volume with consistency_group but diff state"""
+        driver = mock.MagicMock()
+        driver.VERSION = "VERSION"

-            p = self.proxy(
-                self.default_storage_info,
-                mock.MagicMock(),
-                test_mock.cinder.exception,
-                driver)
+        p = self.proxy(
+            self.default_storage_info,
+            mock.MagicMock(),
+            test_mock.cinder.exception,
+            driver)

-            p.ibm_storage_cli = mock.MagicMock()
-            p._cg_name_from_volume = mock.MagicMock(return_value="cg")
+        p.ibm_storage_cli = mock.MagicMock()
+        p._cg_name_from_volume = mock.MagicMock(return_value="cg")

-            vol_type = testutils.create_volume_type(self.ctxt, name='WTF')
-            volume = testutils.create_volume(
-                self.ctxt, size=16, volume_type_id=vol_type.id,
-                host=self._get_test_host()['name'])
+        vol_type = testutils.create_volume_type(self.ctxt, name='WTF')
+        volume = testutils.create_volume(
+            self.ctxt, size=16, volume_type_id=vol_type.id,
+            host=self._get_test_host()['name'])

-            grp = self._create_test_group('WTF')
-            grp['replication_status'] = 'enabled'
-            volume.group = grp
-            ex = getattr(p, "_get_exception")()
-            self.assertRaises(ex, p.create_volume, volume)
+        grp = self._create_test_group('WTF')
+        grp['replication_status'] = 'enabled'
+        volume.group = grp
+        ex = getattr(p, "_get_exception")()
+        self.assertRaises(ex, p.create_volume, volume)

     @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                 "xiv_replication.VolumeReplication.create_replication",
@@ -502,6 +502,7 @@ class test_volume(object):
     def __getitem__(self, key):
         return getattr(self, key)

+
 test_volume = test_volume()
 test_volume.id = {'vserver': 'openstack', 'name': 'vola'}
 test_volume.aggr = {
@@ -529,6 +530,7 @@ class test_snapshot(object):
     def __getitem__(self, key):
         return getattr(self, key)

+
 test_snapshot = test_snapshot()
 test_snapshot.id = 'fake_snap_id'
 test_snapshot.name = 'snapshot-%s' % test_snapshot.id
@@ -114,11 +114,11 @@ class TestNexentaEdgeISCSIDriver(test.TestCase):
         ]

         def my_side_effect(*args, **kwargs):
-                return {'data': {
-                    'X-ISCSI-TargetName': ISCSI_TARGET_NAME,
-                    'X-ISCSI-TargetID': 1,
-                    'X-VIPS': json.dumps(vips)}
-                }
+            return {'data': {
+                'X-ISCSI-TargetName': ISCSI_TARGET_NAME,
+                'X-ISCSI-TargetID': 1,
+                'X-VIPS': json.dumps(vips)}
+            }

         self.mock_api.side_effect = my_side_effect
         self.driver.do_setup(self.context)
@@ -132,11 +132,11 @@ class TestNexentaEdgeISCSIDriver(test.TestCase):
         ]

         def my_side_effect(*args, **kwargs):
-                return {'data': {
-                    'X-ISCSI-TargetName': ISCSI_TARGET_NAME,
-                    'X-ISCSI-TargetID': 1,
-                    'X-VIPS': json.dumps(vips)}
-                }
+            return {'data': {
+                'X-ISCSI-TargetName': ISCSI_TARGET_NAME,
+                'X-ISCSI-TargetID': 1,
+                'X-VIPS': json.dumps(vips)}
+            }

         self.mock_api.side_effect = my_side_effect
         self.assertRaises(utils.NexentaException,
@@ -56,6 +56,7 @@ class mock_vref(object):
     def get(self, item, arg2 = None):
         return self.__dict__[item]

+
 f_uuid = ['262b9ce2-a71a-4fbe-830c-c20c5596caea',
           '362b9ce2-a71a-4fbe-830c-c20c5596caea']
@@ -144,12 +144,12 @@ FAKE_PATH = 'fake.cgi'


 class MockResponse(object):
-        def __init__(self, json_data, status_code):
-            self.json_data = json_data
-            self.status_code = status_code
+    def __init__(self, json_data, status_code):
+        self.json_data = json_data
+        self.status_code = status_code

-        def json(self):
-            return self.json_data
+    def json(self):
+        return self.json_data


 class SynoSessionTestCase(test.TestCase):
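A stub like MockResponse typically stands in for a requests response object
in a patched HTTP call; an illustrative sketch, not part of this diff:

def fake_post(url, **kwargs):
    # Mimic a successful Synology API reply.
    return MockResponse({'success': True}, 200)

# used inside a test case, e.g.: mock.patch('requests.post', fake_post)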
@@ -617,6 +617,7 @@ class DateraVolumeTestCasev21(DateraVolumeTestCasev2):
         self._apiv = '2.1'
         self._tenant = self.cfg.datera_tenant_id

+
 stub_acl = {
     'initiator_groups': [
         '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'],
@@ -610,11 +610,11 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
                 'rename_volume') as mock_rename_volume, \
                 mock.patch.object(self.volume.driver.vg, 'get_volume',
                                   self._get_manage_existing_lvs):
-                mock_rename_volume.return_value = _rename_volume
-                size = self.volume.driver.manage_existing_get_size(vol, ref)
-                self.assertEqual(2, size)
-                model_update = self.volume.driver.manage_existing(vol, ref)
-                self.assertIsNone(model_update)
+            mock_rename_volume.return_value = _rename_volume
+            size = self.volume.driver.manage_existing_get_size(vol, ref)
+            self.assertEqual(2, size)
+            model_update = self.volume.driver.manage_existing(vol, ref)
+            self.assertIsNone(model_update)

     def test_lvm_manage_existing_bad_size(self):
         """Make sure correct exception on bad size returned from LVM.
@@ -673,13 +673,13 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
                 'rename_volume') as mock_rename_volume, \
                 mock.patch.object(self.volume.driver.vg, 'get_volume',
                                   self._get_manage_existing_lvs):
-                mock_rename_volume.return_value = _rename_volume
-                size = self.volume.driver.manage_existing_snapshot_get_size(
-                    snp, ref)
-                self.assertEqual(2, size)
-                model_update = self.volume.driver.manage_existing_snapshot(
-                    snp, ref)
-                self.assertIsNone(model_update)
+            mock_rename_volume.return_value = _rename_volume
+            size = self.volume.driver.manage_existing_snapshot_get_size(
+                snp, ref)
+            self.assertEqual(2, size)
+            model_update = self.volume.driver.manage_existing_snapshot(
+                snp, ref)
+            self.assertIsNone(model_update)

     def test_lvm_manage_existing_snapshot_bad_ref(self):
         """Error case where specified LV snapshot doesn't exist.
@@ -299,6 +299,7 @@ class RemoteFsDriverTestCase(test.TestCase):
         ret_flag = drv.secure_file_operations_enabled()
         self.assertFalse(ret_flag)

+
 # NFS configuration scenarios
 NFS_CONFIG1 = {'max_over_subscription_ratio': 1.0,
                'reserved_percentage': 0,
@@ -38,6 +38,7 @@ def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2):
         return f
     return _decorator

+
 patch_retry = mock.patch('cinder.utils.retry', fake_retry)
 patch_retry.start()
 sys.modules['purestorage'] = mock.Mock()
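A note on why the hunk above works: decorators are applied at import time,
so cinder.utils.retry has to be replaced before any module that uses
@utils.retry is imported. A minimal self-contained sketch of the same idea
(the ordering of the steps is the point):

import sys
from unittest import mock


def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2):
    # Pass-through replacement: return the wrapped function unchanged.
    def _decorator(f):
        return f
    return _decorator


# 1. Install the pass-through before the driver module is imported.
mock.patch('cinder.utils.retry', fake_retry).start()

# 2. Stub the optional third-party dependency so the import cannot fail.
sys.modules['purestorage'] = mock.Mock()

# 3. Only then import the module under test; its @utils.retry-decorated
#    functions now run without sleeps between attempts.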
@@ -2003,14 +2003,14 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                 self.volume.driver, 'delete_volume') as mock_driver_delete,\
                 mock.patch.object(
                     self.volume, '_copy_volume_data') as mock_copy:
-                temp_volume = tests_utils.create_volume(self.context,
-                                                        status='available')
-                mock_temp.return_value = temp_volume
-                self.volume._revert_to_snapshot_generic(
-                    self.context, fake_volume, fake_snapshot)
-                mock_copy.assert_called_once_with(
-                    self.context, temp_volume, fake_volume)
-                mock_driver_delete.assert_called_once_with(temp_volume)
+            temp_volume = tests_utils.create_volume(self.context,
+                                                    status='available')
+            mock_temp.return_value = temp_volume
+            self.volume._revert_to_snapshot_generic(
+                self.context, fake_volume, fake_snapshot)
+            mock_copy.assert_called_once_with(
+                self.context, temp_volume, fake_volume)
+            mock_driver_delete.assert_called_once_with(temp_volume)

     @ddt.data({'driver_error': True},
               {'driver_error': False})
@@ -585,9 +585,9 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):

     def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name,
                            mountpoint, mode):
-            tests_utils.attach_volume(ctxt, volume.id,
-                                      instance_uuid, host_name,
-                                      '/dev/vda')
+        tests_utils.attach_volume(ctxt, volume.id,
+                                  instance_uuid, host_name,
+                                  '/dev/vda')

     def _test_migrate_volume_completion(self, status='available',
                                         instance_uuid=None, attached_host=None,
@@ -479,9 +479,9 @@ class PowerMaxUtils(object):
         """
         if volume is not None:
             if volume.get('replication_status') and (
-                volume.replication_status ==
-                fields.ReplicationStatus.FAILED_OVER):
-                    return True
+                    volume.replication_status ==
+                    fields.ReplicationStatus.FAILED_OVER):
+                return True
         return False

     @staticmethod
@@ -1219,8 +1219,8 @@ class SCApi(object):
             # at least give failback a shot.
             if lv and (self.is_swapped(provider_id, lv) and not self.failed_over
                        and self._autofailback(lv)):
-                    lv = self.get_live_volume(provider_id)
-                    LOG.info('After failback %s', lv)
+                lv = self.get_live_volume(provider_id)
+                LOG.info('After failback %s', lv)
             # Make sure we still have a LV.
             if lv:
                 # At this point if the secondaryRole is Active we have
@@ -3523,9 +3523,9 @@ class HPE3PARCommon(object):
         # Check to see if the user requested to failback.
         if (secondary_backend_id and
                 secondary_backend_id == self.FAILBACK_VALUE):
-                failover = False
-                target_id = None
-                group_target_id = self.FAILBACK_VALUE
+            failover = False
+            target_id = None
+            group_target_id = self.FAILBACK_VALUE
         else:
             # Find the failover target.
             failover_target = None
@@ -4136,11 +4136,11 @@ class HPE3PARCommon(object):
             if not retype:
                 self.client.deleteVolume(vol_name)
         except hpeexceptions.HTTPConflict as ex:
-                if ex.get_code() == 34:
-                    # This is a special case which means the
-                    # volume is part of a volume set.
-                    self._delete_vvset(volume)
-                    self.client.deleteVolume(vol_name)
+            if ex.get_code() == 34:
+                # This is a special case which means the
+                # volume is part of a volume set.
+                self._delete_vvset(volume)
+                self.client.deleteVolume(vol_name)
         except Exception:
             pass
@@ -146,6 +146,7 @@ class DS8KConnectionPool(connectionpool.HTTPSConnectionPool):
             url = 'https://' + url[12:]
         return super(DS8KConnectionPool, self).urlopen(method, url, **kwargs)

+
 if hasattr(poolmanager, 'key_fn_by_scheme'):
     poolmanager.key_fn_by_scheme["httpsds8k"] = (
         poolmanager.key_fn_by_scheme["https"])
@@ -572,10 +572,10 @@ class DS8KProxy(proxy.IBMStorageProxy):
                 excluded_lss.add(lun.pool_lss_pair['source'][1])
                 if lun.group and (lun.group.consisgroup_snapshot_enabled or
                                   lun.group.consisgroup_replication_enabled):
-                        msg = _("The reserve LSS for CG is full. "
-                                "Volume can not be created on it.")
-                        LOG.error(msg)
-                        raise exception.VolumeDriverException(message=msg)
+                    msg = _("The reserve LSS for CG is full. "
+                            "Volume can not be created on it.")
+                    LOG.error(msg)
+                    raise exception.VolumeDriverException(message=msg)
                 else:
                     LOG.warning("LSS %s is full, find another one.",
                                 lun.pool_lss_pair['source'][1])
@@ -802,8 +802,8 @@ class XIVProxy(proxy.IBMStorageProxy):
                 volume, not failback)

             if result:
-                    status = goal_status
-                    group_updated['replication_status'] = status
+                status = goal_status
+                group_updated['replication_status'] = status
             else:
                 status = 'error'
         updates = {'status': vol_goal_status}
@@ -511,26 +511,26 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
         return None, None

     def _create_consistent_group_snapshot(self, group_snapshot, snapshots):
-            flexvols = set()
-            for snapshot in snapshots:
-                flexvols.add(volume_utils.extract_host(
-                    snapshot['volume']['host'], level='pool'))
+        flexvols = set()
+        for snapshot in snapshots:
+            flexvols.add(volume_utils.extract_host(
+                snapshot['volume']['host'], level='pool'))

-            self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id'])
+        self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id'])

-            for snapshot in snapshots:
-                self._clone_lun(snapshot['volume']['name'], snapshot['name'],
-                                source_snapshot=group_snapshot['id'])
+        for snapshot in snapshots:
+            self._clone_lun(snapshot['volume']['name'], snapshot['name'],
+                            source_snapshot=group_snapshot['id'])

-            for flexvol in flexvols:
-                try:
-                    self.zapi_client.wait_for_busy_snapshot(
-                        flexvol, group_snapshot['id'])
-                    self.zapi_client.delete_snapshot(
-                        flexvol, group_snapshot['id'])
-                except exception.SnapshotIsBusy:
-                    self.zapi_client.mark_snapshot_for_deletion(
-                        flexvol, group_snapshot['id'])
+        for flexvol in flexvols:
+            try:
+                self.zapi_client.wait_for_busy_snapshot(
+                    flexvol, group_snapshot['id'])
+                self.zapi_client.delete_snapshot(
+                    flexvol, group_snapshot['id'])
+            except exception.SnapshotIsBusy:
+                self.zapi_client.mark_snapshot_for_deletion(
+                    flexvol, group_snapshot['id'])

     def delete_group_snapshot(self, group_snapshot, snapshots):
         """Delete LUNs backing each snapshot in the group snapshot.
@@ -1527,12 +1527,12 @@ class PureBaseVolumeDriver(san.SanDriver):
         if volume_type and volume_type.is_replicated():
             specs = volume_type.get("extra_specs")
             if specs and EXTRA_SPECS_REPL_TYPE in specs:
-                    replication_type_spec = specs[EXTRA_SPECS_REPL_TYPE]
-                    # Do not validate settings, ignore invalid.
-                    if replication_type_spec == "<in> async":
-                        return REPLICATION_TYPE_ASYNC
-                    elif replication_type_spec == "<in> sync":
-                        return REPLICATION_TYPE_SYNC
+                replication_type_spec = specs[EXTRA_SPECS_REPL_TYPE]
+                # Do not validate settings, ignore invalid.
+                if replication_type_spec == "<in> async":
+                    return REPLICATION_TYPE_ASYNC
+                elif replication_type_spec == "<in> sync":
+                    return REPLICATION_TYPE_SYNC
             else:
                 # if no type was specified but replication is enabled assume
                 # that async replication is enabled
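For context, the extra spec examined here is set on a volume type by the
operator; assuming EXTRA_SPECS_REPL_TYPE maps to the replication_type key
(which this hunk does not show), that would look like:

cinder type-key my-replicated-type set replication_type='<in> async'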
@@ -2147,9 +2147,9 @@ class PureBaseVolumeDriver(san.SanDriver):

     def _find_async_failover_target(self):
         if not self._replication_target_arrays:
-                raise PureDriverException(
-                    reason=_("Unable to find failover target, no "
-                             "secondary targets configured."))
+            raise PureDriverException(
+                reason=_("Unable to find failover target, no "
+                         "secondary targets configured."))
         secondary_array = None
         pg_snap = None
         for array in self._replication_target_arrays:
@@ -261,8 +261,8 @@ def get_configuration(persona):
     except (exception.ErrorInSendingMsg,
             exception.UnableToExecuteHyperScaleCmd,
             exception.UnableToProcessHyperScaleCmdOutput):
-            LOG.exception("Failed to get configuration from controller")
-            raise exception.ErrorInFetchingConfiguration(persona=persona)
+        LOG.exception("Failed to get configuration from controller")
+        raise exception.ErrorInFetchingConfiguration(persona=persona)

     return configuration
@@ -752,8 +752,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):

         if should_create_cache_entry:
             if virtual_size and virtual_size != original_size:
-                    volume.size = virtual_size
-                    volume.save()
+                volume.size = virtual_size
+                volume.save()
         model_update = self._create_from_image_download(
             context,
             volume,
@@ -4132,8 +4132,8 @@ class VolumeManager(manager.CleanableManager,
         new_volume.update(model_update_new)
         new_volume.save()
         with volume.obj_as_admin():
-                volume.update(model_update_default)
-                volume.save()
+            volume.update(model_update_default)
+            volume.save()

     # Replication V2.1 and a/a method
     def failover(self, context, secondary_backend_id=None):
tox.ini
@@ -193,8 +193,7 @@ usedevelop = False
 # reason: no agreement on this being universally
 # preferable for our code. Disabled to keep checking
 # tools from getting in our way with regards to this.
-# E117/E305 - new rules, just disabled here temporarily.
-ignore = E251,E402,W503,W504,E117,E305
+ignore = E251,E402,W503,W504
 # H904 Delay string interpolations at logging calls.
 enable-extensions = H106,H203,H904
 exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build
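To reproduce these checks locally, flake8's --select flag can limit a run
to just the two re-enabled rules, and the usual OpenStack pep8 tox
environment runs the full style gate (environment name assumed, not shown
in this hunk):

flake8 --select=E117,E305 cinder/
tox -e pep8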