Hitachi: Update retype and support storage assisted migration
This patch updates the retype operation to support retyping to a different pool and adds support for storage assisted migration. Storage assisted migration is also used when retyping a volume that has no snapshots to a different pool.

Implements: blueprint hitachi-vsp-update-retype
Change-Id: I1f992f7986652098656662bf129b1dd8427ac694
parent b651965b24
commit d148f41664
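As a reading aid, here is a minimal, self-contained sketch of the decision the commit message describes (illustrative only; retype_decision and _specs_unchanged are not names from the driver): keep the volume in place when nothing relevant changes, use storage assisted migration when the target pool differs and the volume has no snapshots, and decline otherwise so Cinder can fall back to a host-based copy.

# Illustrative sketch, not driver code.
def _specs_unchanged(diff):
    """Return True if every entry in the volume-type diff has equal old/new values."""
    for specs_val in (diff or {}).values():
        for old_val, new_val in specs_val.values():
            if old_val != new_val:
                return False
    return True


def retype_decision(diff, old_pool_id, new_pool_id, has_snapshots):
    """Simplified mirror of the retype policy added by this patch."""
    if _specs_unchanged(diff) and old_pool_id == new_pool_id:
        return 'retype in place'               # driver handles the retype itself
    if not has_snapshots:
        return 'storage assisted migration'    # data is moved on the array
    return 'decline'                           # Cinder falls back to a host copy


# A snapshot-less volume moving from pool 30 to pool 40 is migrated on the array.
print(retype_decision({'extra_specs': {}}, 30, 40, has_snapshots=False))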
@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -204,6 +204,7 @@ GET_LDEV_RESULT = {
"blockCapacity": 2097152,
"attributes": ["CVS", "HDP"],
"status": "NML",
"poolId": 30,
}

GET_LDEV_RESULT_MAPPED = {
@@ -1043,14 +1044,69 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])

def test_retype(self):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'hbsd:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
host = {
'capabilities': {
'location_info': {
'pool_id': 30,
},
},
}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)

@mock.patch.object(requests.Session, "request")
def test_migrate_volume(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
host = {
'capabilities': {
'location_info': {
'storage_id': CONFIG_MAP['serial'],
'pool_id': 30,
},
},
}
ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
self.assertEqual(2, request.call_count)
actual = (True, None)
self.assertTupleEqual(actual, ret)

@mock.patch.object(requests.Session, "request")
def test_migrate_volume_diff_pool(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
host = {
'capabilities': {
'location_info': {
'storage_id': CONFIG_MAP['serial'],
'pool_id': 40,
},
},
}
ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
self.assertEqual(15, request.call_count)
actual = (True, {'provider_location': '1'})
self.assertTupleEqual(actual, ret)

def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())

@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -190,6 +190,7 @@ GET_LDEV_RESULT = {
"blockCapacity": 2097152,
"attributes": ["CVS", "HDP"],
"status": "NML",
"poolId": 30,
}

GET_LDEV_RESULT_MAPPED = {
@@ -783,14 +784,39 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])

def test_retype(self):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'hbsd:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
host = {
'capabilities': {
'location_info': {
'pool_id': 30,
},
},
}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)

@mock.patch.object(requests.Session, "request")
def test_migrate_volume(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
host = {
'capabilities': {
'location_info': {
'storage_id': CONFIG_MAP['serial'],
'pool_id': 30,
},
},
}
ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
self.assertEqual(2, request.call_count)
actual = (True, None)
self.assertTupleEqual(actual, ret)

def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())

@@ -1,4 +1,4 @@
# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd.
# Copyright (C) 2022, 2023, Hewlett Packard Enterprise, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -186,6 +186,7 @@ GET_LDEV_RESULT = {
"blockCapacity": 2097152,
"attributes": ["CVS", "THP"],
"status": "NML",
"poolId": 30,
}

GET_LDEV_RESULT_MAPPED = {
@@ -959,14 +960,23 @@ class HPEXPRESTFCDriverTest(test.TestCase):
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])

def test_retype(self):
new_specs = {'hpe_xp:test': 'test'}
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'hbsd:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
host = {
'capabilities': {
'location_info': {
'pool_id': 30,
},
},
}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)

def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())

@@ -1,4 +1,4 @@
# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd.
# Copyright (C) 2022, 2023. Hewlett Packard Enterprise, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -189,6 +189,7 @@ GET_LDEV_RESULT = {
"blockCapacity": 2097152,
"attributes": ["CVS", "THP"],
"status": "NML",
"poolId": 30,
}

GET_LDEV_RESULT_MAPPED = {
@@ -772,14 +773,23 @@ class HPEXPRESTISCSIDriverTest(test.TestCase):
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])

def test_retype(self):
new_specs = {'hpe_xp:test': 'test'}
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'hbsd:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
host = {
'capabilities': {
'location_info': {
'pool_id': 30,
},
},
}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)

def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())

@@ -1,4 +1,4 @@
# Copyright (C) 2021 NEC corporation
# Copyright (C) 2021, 2023, NEC corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -186,6 +186,7 @@ GET_LDEV_RESULT = {
"blockCapacity": 2097152,
"attributes": ["CVS", "DP"],
"status": "NML",
"poolId": 30,
}

GET_LDEV_RESULT_MAPPED = {
@@ -953,14 +954,23 @@ class VStorageRESTFCDriverTest(test.TestCase):
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])

def test_retype(self):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'nec:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
host = {
'capabilities': {
'location_info': {
'pool_id': 30,
},
},
}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)

def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())

@@ -1,4 +1,4 @@
# Copyright (C) 2021 NEC corporation
# Copyright (C) 2021, 2023, NEC corporation
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -190,6 +190,7 @@ GET_LDEV_RESULT = {
"blockCapacity": 2097152,
"attributes": ["CVS", "DP"],
"status": "NML",
"poolId": 30,
}

GET_LDEV_RESULT_MAPPED = {
@@ -811,14 +812,23 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
self.driver.unmanage_snapshot,
TEST_SNAPSHOT[0])

def test_retype(self):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'nec:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
diff = {}
host = {}
host = {
'capabilities': {
'location_info': {
'pool_id': 30,
},
},
}
ret = self.driver.retype(
self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
self.assertFalse(ret)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)

def test_backup_use_temp_snapshot(self):
self.assertTrue(self.driver.backup_use_temp_snapshot())

@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -175,7 +175,7 @@ class HBSDCommon():
return pool['location_info']['pool_id']
return None

def create_ldev(self, size, pool_id):
def create_ldev(self, size, pool_id, ldev_range):
"""Create an LDEV and return its LDEV number."""
raise NotImplementedError()

@@ -186,8 +186,9 @@ class HBSDCommon():
def create_volume(self, volume):
"""Create a volume and return its properties."""
pool_id = self.get_pool_id_of_volume(volume)
ldev_range = self.storage_info['ldev_range']
try:
ldev = self.create_ldev(volume['size'], pool_id)
ldev = self.create_ldev(volume['size'], pool_id, ldev_range)
except Exception:
with excutils.save_and_reraise_exception():
utils.output_log(MSG.CREATE_LDEV_FAILED)
@@ -200,20 +201,29 @@ class HBSDCommon():
"""Return a dictionary of LDEV-related items."""
raise NotImplementedError()

def create_pair_on_storage(self, pvol, svol, is_snapshot=False):
def create_pair_on_storage(
self, pvol, svol, snap_pool_id, is_snapshot=False):
"""Create a copy pair on the storage."""
raise NotImplementedError()

def _copy_on_storage(
self, pvol, size, pool_id, is_snapshot=False):
def wait_copy_completion(self, pvol, svol):
"""Wait until copy is completed."""
raise NotImplementedError()

def copy_on_storage(
self, pvol, size, pool_id, snap_pool_id, ldev_range,
is_snapshot=False, sync=False):
"""Create a copy of the specified LDEV on the storage."""
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
if ldev_info['status'] != 'NML':
msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
self.raise_error(msg)
svol = self.create_ldev(size, pool_id)
svol = self.create_ldev(size, pool_id, ldev_range)
try:
self.create_pair_on_storage(pvol, svol, is_snapshot=is_snapshot)
self.create_pair_on_storage(
pvol, svol, snap_pool_id, is_snapshot=is_snapshot)
if sync:
self.wait_copy_completion(pvol, svol)
except Exception:
with excutils.save_and_reraise_exception():
try:
@@ -234,7 +244,10 @@ class HBSDCommon():

size = volume['size']
pool_id = self.get_pool_id_of_volume(volume)
new_ldev = self._copy_on_storage(ldev, size, pool_id)
snap_pool_id = self.storage_info['snap_pool_id']
ldev_range = self.storage_info['ldev_range']
new_ldev = self.copy_on_storage(
ldev, size, pool_id, snap_pool_id, ldev_range)
self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))

return {
@@ -323,8 +336,10 @@ class HBSDCommon():
self.raise_error(msg)
size = snapshot['volume_size']
pool_id = self.get_pool_id_of_volume(snapshot['volume'])
new_ldev = self._copy_on_storage(
ldev, size, pool_id, is_snapshot=True)
snap_pool_id = self.storage_info['snap_pool_id']
ldev_range = self.storage_info['ldev_range']
new_ldev = self.copy_on_storage(
ldev, size, pool_id, snap_pool_id, ldev_range, is_snapshot=True)
return {
'provider_location': str(new_ldev),
}
@@ -947,7 +962,12 @@ class HBSDCommon():
MSG.SNAPSHOT_UNMANAGE_FAILED, snapshot_id=snapshot['id'])
raise NotImplementedError()

def retype(self):
def migrate_volume(self, volume, host):
"""Migrate the specified volume."""
return False

def retype(self, ctxt, volume, new_type, diff, host):
"""Retype the specified volume."""
return False

def has_snap_pair(self, pvol, svol):

@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -72,6 +72,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
2.2.2 - Add Target Port Assignment.
2.2.3 - Add port scheduler.
2.3.0 - Support multi pool.
2.3.1 - Update retype and support storage assisted migration.

"""

@@ -246,7 +247,12 @@ class HBSDFCDriver(driver.FibreChannelDriver):
@volume_utils.trace
def retype(self, ctxt, volume, new_type, diff, host):
"""Retype the specified volume."""
return self.common.retype()
return self.common.retype(ctxt, volume, new_type, diff, host)

@volume_utils.trace
def migrate_volume(self, ctxt, volume, host):
"""Migrate the specified volume."""
return self.common.migrate_volume(volume, host)

def backup_use_temp_snapshot(self):
return True

@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -72,6 +72,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
2.2.2 - Add Target Port Assignment.
2.2.3 - Add port scheduler.
2.3.0 - Support multi pool.
2.3.1 - Update retype and support storage assisted migration.

"""

@@ -242,7 +243,12 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
@volume_utils.trace
def retype(self, ctxt, volume, new_type, diff, host):
"""Retype the specified volume."""
return self.common.retype()
return self.common.retype(ctxt, volume, new_type, diff, host)

@volume_utils.trace
def migrate_volume(self, ctxt, volume, host):
"""Migrate the specified volume."""
return self.common.migrate_volume(volume, host)

def backup_use_temp_snapshot(self):
return True

@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -26,6 +26,7 @@ from oslo_utils import units

from cinder import exception
from cinder.objects import fields
from cinder.objects import SnapshotList
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api
@@ -274,7 +275,7 @@ class HBSDREST(common.HBSDCommon):
if self.client is not None:
self.client.enter_keep_session()

def _create_ldev_on_storage(self, size, pool_id):
def _create_ldev_on_storage(self, size, pool_id, ldev_range):
"""Create an LDEV on the storage system."""
body = {
'byteFormatCapacity': '%sG' % size,
@@ -287,9 +288,9 @@ class HBSDREST(common.HBSDCommon):
body['endLdevId'] = max_ldev
return self.client.add_ldev(body, no_log=True)

def create_ldev(self, size, pool_id):
def create_ldev(self, size, pool_id, ldev_range):
"""Create an LDEV of the specified size and the specified type."""
ldev = self._create_ldev_on_storage(size, pool_id=pool_id)
ldev = self._create_ldev_on_storage(size, pool_id, ldev_range)
LOG.debug('Created logical device. (LDEV: %s)', ldev)
return ldev

@@ -386,7 +387,7 @@ class HBSDREST(common.HBSDCommon):
utils.output_log(
MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)

def _create_clone_pair(self, pvol, svol):
def _create_clone_pair(self, pvol, svol, snap_pool_id):
"""Create a clone copy pair on the storage."""
snapshot_name = '%(prefix)s%(svol)s' % {
'prefix': self.driver_info['driver_prefix'] + '-clone',
@@ -428,12 +429,13 @@ class HBSDREST(common.HBSDCommon):
utils.output_log(
MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)

def create_pair_on_storage(self, pvol, svol, is_snapshot=False):
def create_pair_on_storage(
self, pvol, svol, snap_pool_id, is_snapshot=False):
"""Create a copy pair on the storage."""
if is_snapshot:
self._create_snap_pair(pvol, svol)
else:
self._create_clone_pair(pvol, svol)
self._create_clone_pair(pvol, svol, snap_pool_id)

def get_ldev_info(self, keys, ldev, **kwargs):
"""Return a dictionary of LDEV-related items."""
@@ -1136,7 +1138,8 @@ class HBSDREST(common.HBSDCommon):
self.raise_error(msg)
size = snapshot.volume_size
pool_id = self.get_pool_id_of_volume(snapshot.volume)
pair['svol'] = self.create_ldev(size, pool_id)
ldev_range = self.storage_info['ldev_range']
pair['svol'] = self.create_ldev(size, pool_id, ldev_range)
except Exception as exc:
pair['msg'] = utils.get_exception_msg(exc)
raise loopingcall.LoopingCallDone(pair)
@@ -1191,3 +1194,121 @@ class HBSDREST(common.HBSDCommon):
return self._create_cgsnapshot(context, group_snapshot, snapshots)
else:
return self._create_non_cgsnapshot(group_snapshot, snapshots)

def migrate_volume(self, volume, host, new_type=None):
"""Migrate the specified volume."""
attachments = volume.volume_attachment
if attachments:
return False, None

pvol = utils.get_ldev(volume)
if pvol is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume.id)
self.raise_error(msg)

pair_info = self.get_pair_info(pvol)
if pair_info:
if pair_info['pvol'] == pvol:
svols = []
copy_methods = []
svol_statuses = []
for svol_info in pair_info['svol_info']:
svols.append(str(svol_info['ldev']))
copy_methods.append(utils.THIN)
svol_statuses.append(svol_info['status'])
if svols:
pair_info = ['(%s, %s, %s, %s)' %
(pvol, svol, copy_method, status)
for svol, copy_method, status in
zip(svols, copy_methods, svol_statuses)]
msg = utils.output_log(
MSG.MIGRATE_VOLUME_FAILED,
volume=volume.id, ldev=pvol,
pair_info=', '.join(pair_info))
self.raise_error(msg)
else:
svol_info = pair_info['svol_info'][0]
if svol_info['is_psus'] and svol_info['status'] != 'PSUP':
return False, None
else:
pair_info = '(%s, %s, %s, %s)' % (
pair_info['pvol'], svol_info['ldev'],
utils.THIN, svol_info['status'])
msg = utils.output_log(
MSG.MIGRATE_VOLUME_FAILED,
volume=volume.id, ldev=svol_info['ldev'],
pair_info=pair_info)
self.raise_error(msg)

old_storage_id = self.conf.hitachi_storage_id
new_storage_id = (
host['capabilities']['location_info'].get('storage_id'))
if new_type is None:
old_pool_id = self.get_ldev_info(['poolId'], pvol)['poolId']
new_pool_id = host['capabilities']['location_info'].get('pool_id')

if old_storage_id != new_storage_id:
return False, None

ldev_range = host['capabilities']['location_info'].get('ldev_range')
if (new_type or old_pool_id != new_pool_id or
(ldev_range and
(pvol < ldev_range[0] or ldev_range[1] < pvol))):

snap_pool_id = host['capabilities']['location_info'].get(
'snap_pool_id')
ldev_range = host['capabilities']['location_info'].get(
'ldev_range')
svol = self.copy_on_storage(
pvol, volume.size, new_pool_id, snap_pool_id, ldev_range,
is_snapshot=False, sync=True)
self.modify_ldev_name(svol, volume['id'].replace("-", ""))

try:
self.delete_ldev(pvol)
except exception.VolumeDriverException:
utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=pvol)

return True, {
'provider_location': str(svol),
}

return True, None

def retype(self, ctxt, volume, new_type, diff, host):
"""Retype the specified volume."""

def _check_specs_diff(diff):
for specs_key, specs_val in diff.items():
for diff_key, diff_val in specs_val.items():
if diff_val[0] != diff_val[1]:
return False
return True

ldev = utils.get_ldev(volume)
if ldev is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume',
id=volume['id'])
self.raise_error(msg)
ldev_info = self.get_ldev_info(
['poolId'], ldev)
old_pool_id = ldev_info['poolId']
new_pool_id = host['capabilities']['location_info'].get('pool_id')
if not _check_specs_diff(diff) or new_pool_id != old_pool_id:
snaps = SnapshotList.get_all_for_volume(ctxt, volume.id)
if not snaps:
return self.migrate_volume(volume, host, new_type)
return False

return True

def wait_copy_completion(self, pvol, svol):
"""Wait until copy is completed."""
self._wait_copy_pair_status(svol, set([SMPL, PSUE]))
status = self._get_copy_pair_status(svol)
if status == PSUE:
msg = utils.output_log(
MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol)
self.raise_error(msg)

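For reference, a sketch of the destination host dictionary that the new migrate_volume() and retype() paths above consult; the concrete values below are examples only, and keys that are absent simply yield None through .get():

# Illustrative destination host argument (values are examples, not real data).
host = {
    'capabilities': {
        'location_info': {
            'storage_id': '886000123456',  # serial of the target array (example)
            'pool_id': 40,                 # target pool for the volume's data
            'snap_pool_id': 40,            # pool used for the copy pair (example)
            'ldev_range': [0, 65535],      # allowed LDEV ID range (example)
        },
    },
}

migrate_volume() declines when storage_id differs from the backend's own storage ID, and copies the data with copy_on_storage() when a new volume type is requested, the pool changes, or the current LDEV lies outside ldev_range.
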
@@ -1,4 +1,4 @@
# Copyright (C) 2020, 2022, Hitachi, Ltd.
# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -25,7 +25,7 @@ from oslo_utils import units

from cinder import exception

VERSION = '2.3.0'
VERSION = '2.3.1'
CI_WIKI_NAME = 'Hitachi_VSP_CI'
PARAM_PREFIX = 'hitachi'
VENDOR_NAME = 'Hitachi'
@@ -43,6 +43,9 @@ GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512

NORMAL_LDEV_TYPE = 'Normal'

FULL = 'Full copy'
THIN = 'Thin copy'

INFO_SUFFIX = 'I'
WARNING_SUFFIX = 'W'
ERROR_SUFFIX = 'E'
@@ -479,6 +482,14 @@ class HBSDMsg(enum.Enum):
'resource of host group or wwn was found. (ports: %(ports)s)',
'suffix': ERROR_SUFFIX,
}
MIGRATE_VOLUME_FAILED = {
'msg_id': 760,
'loglevel': base_logging.ERROR,
'msg': 'Failed to migrate a volume. The volume is in a copy pair that '
'cannot be deleted. (volume: %(volume)s, LDEV: %(ldev)s, '
'(P-VOL, S-VOL, copy method, status): %(pair_info)s)',
'suffix': ERROR_SUFFIX,
}

def __init__(self, error_info):
"""Initialize Enum attributes."""

@@ -0,0 +1,7 @@
---
features:
- |
Hitachi driver: Update retype to different pool and support storage
assisted migration.
Storage assisted migration feature is also used when retype a volume,
which doesn't have any snapshots, to different pool.