Merge "NetApp SolidFire: Add storage assisted migration support"

Zuul 2020-09-12 03:17:22 +00:00 committed by Gerrit Code Review
commit 4b5704645e
5 changed files with 1145 additions and 51 deletions


@@ -41,6 +41,7 @@ from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import test
from cinder.tests.unit import utils as test_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume import throttling
from cinder.volume import volume_types
from cinder.volume import volume_utils
@@ -1197,3 +1198,35 @@ class VolumeUtilsTestCase(test.TestCase):
db,
volume,
mock.sentinel.context)
@mock.patch('cinder.volume.volume_utils.CONF.list_all_sections')
def test_get_backend_configuration_backend_stanza_not_found(self,
mock_conf):
mock_conf.return_value = []
self.assertRaises(exception.ConfigNotFound,
volume_utils.get_backend_configuration,
'backendA')
mock_conf.return_value = ['backendB']
self.assertRaises(exception.ConfigNotFound,
volume_utils.get_backend_configuration,
'backendA')
@mock.patch('cinder.volume.volume_utils.CONF.list_all_sections')
@mock.patch('cinder.volume.configuration.Configuration')
def test_get_backend_configuration_backend_opts(self, mock_configuration,
mock_conf):
mock_conf.return_value = ['backendA']
volume_utils.get_backend_configuration('backendA', ['someFakeOpt'])
mock_configuration.assert_called_with(driver.volume_opts,
config_group='backendA')
mock_configuration.return_value.\
append_config_values.assert_called_with(['someFakeOpt'])
@mock.patch('cinder.volume.volume_utils.CONF.list_all_sections')
@mock.patch('cinder.volume.configuration.Configuration')
def test_get_backend_configuration(self, mock_configuration, mock_conf):
mock_conf.return_value = ['backendA']
volume_utils.get_backend_configuration('backendA')
mock_configuration.assert_called_with(driver.volume_opts,
config_group='backendA')


@@ -14,14 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import datetime
import re
from unittest import mock
from unittest.mock import call
from unittest.mock import MagicMock
from ddt import data
from ddt import ddt
from ddt import file_data
from ddt import unpack
from oslo_service import loopingcall
from oslo_utils import timeutils
from oslo_utils import units
import six
@@ -151,13 +155,11 @@ class SolidFireVolumeTestCase(test.TestCase):
'iqn': 'super_fake_iqn'}
self.fake_primary_cluster = (
{'endpoint': {'passwd': 'admin', 'port': 443,
'url': 'https://192.168.139.11:443',
'svip': '10.10.8.11',
'mvip': '10.10.8.12',
'login': 'admin'},
'name': 'volume-f0632d53-d836-474c-a5bc-478ef18daa32',
'clusterPairID': 33,
'uuid': 'f0632d53-d836-474c-a5bc-478ef18daa32',
@@ -170,6 +172,25 @@
'ensemble': ['10.10.5.130'],
'svipNodeID': 1})
self.fake_secondary_cluster = (
{'endpoint': {'passwd': 'admin', 'port': 443,
'url': 'https://192.168.139.102:443',
'svip': '10.10.8.134',
'mvip': '192.168.139.102',
'login': 'admin'},
'name': 'AutoTest2-6AjG-FOR-TEST-ONLY',
'clusterPairID': 331,
'clusterAPIVersion': '9.4',
'uuid': '9c499d4b-8fff-48b4-b875-27601d5d9889',
'svip': '10.10.23.2',
'mvipNodeID': 1,
'repCount': 1,
'encryptionAtRestState': 'disabled',
'attributes': {},
'mvip': '192.168.139.102',
'ensemble': ['10.10.5.130'],
'svipNodeID': 1})
self.cluster_pairs = (
[{'uniqueID': 'lu9f',
'endpoint': {'passwd': 'admin', 'port': 443,
@@ -3455,3 +3476,698 @@
expected_tgt_params,
endpoint=self.cluster_pairs[0]['endpoint']
)
@mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_get_cluster_pair')
@mock.patch.object(solidfire.SolidFireDriver, '_create_remote_pairing')
def test_get_or_create_cluster_pairing(
self, mock_create_remote_pairing,
mock_get_cluster_pair,
mock_create_cluster_reference,
mock_issue_api_request,
mock_looping_call):
fake_remote_pair_connected = {'status': 'Connected'}
mock_get_cluster_pair.side_effect = [None, fake_remote_pair_connected]
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv._get_or_create_cluster_pairing(
self.fake_secondary_cluster, check_connected=True)
mock_get_cluster_pair.assert_has_calls(
[call(self.fake_secondary_cluster),
call(self.fake_secondary_cluster)])
mock_create_remote_pairing.assert_called_with(
self.fake_secondary_cluster)
mock_looping_call.assert_not_called()
self.assertEqual(fake_remote_pair_connected, result)
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_get_cluster_pair')
@mock.patch.object(solidfire.SolidFireDriver, '_create_remote_pairing')
def test_get_or_create_cluster_pairing_check_connected_true(
self, mock_create_remote_pairing,
mock_get_cluster_pair,
mock_create_cluster_reference,
mock_issue_api_request):
fake_remote_pair_misconfigured = {'status': 'Misconfigured'}
fake_remote_pair_connected = {'status': 'Connected'}
mock_get_cluster_pair.side_effect = [None,
fake_remote_pair_misconfigured,
fake_remote_pair_connected]
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv._get_or_create_cluster_pairing(
self.fake_secondary_cluster, check_connected=True)
mock_get_cluster_pair.assert_has_calls(
[call(self.fake_secondary_cluster),
call(self.fake_secondary_cluster),
call(self.fake_secondary_cluster)])
mock_create_remote_pairing.assert_called_with(
self.fake_secondary_cluster)
self.assertEqual(fake_remote_pair_connected, result)
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
def test_get_cluster_pair(self, mock_create_cluster_reference,
mock_update_cluster_status,
mock_issue_api_request):
fake_cluster_pair = {
'result': {
'clusterPairs': [{
'mvip': self.fake_secondary_cluster['mvip']
}]
}
}
mock_issue_api_request.return_value = fake_cluster_pair
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv._get_cluster_pair(self.fake_secondary_cluster)
mock_issue_api_request.assert_called_with('ListClusterPairs', {},
version='8.0')
self.assertEqual(
fake_cluster_pair['result']['clusterPairs'][0], result)
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
def test_get_cluster_pair_remote_not_found(self,
mock_create_cluster_reference,
mock_update_cluster_status,
mock_issue_api_request):
fake_cluster_pair = {
'result': {
'clusterPairs': []
}
}
mock_issue_api_request.return_value = fake_cluster_pair
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv._get_cluster_pair(self.fake_secondary_cluster)
mock_issue_api_request.assert_called_with('ListClusterPairs', {},
version='8.0')
self.assertIsNone(result)
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
def test_create_volume_pairing(self, mock_create_cluster_reference,
                               mock_update_cluster_status,
                               mock_issue_api_request):
ctx = context.get_admin_context()
type_fields = {'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
fake_src_sf_volid = 1111
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type,
'host': 'fakeHost@fakeBackend#fakePool',
'status': 'in-use',
'provider_id': "%s %s %s" % (fake_src_sf_volid,
fakes.get_fake_uuid(),
self.fake_primary_cluster['uuid'])
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_dst_sf_volid = 9999
fake_dst_volume = {
'provider_id': "%s %s %s" % (fake_dst_sf_volid,
fakes.get_fake_uuid(),
fake_dst_cluster_ref['uuid'])
}
fake_start_volume_pairing = (
    {'result': {'volumePairingKey': 'CAFE'}})
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
mock_issue_api_request.reset_mock()
mock_issue_api_request.side_effect = [MagicMock(),
                                      fake_start_volume_pairing,
                                      MagicMock()]
sfv._create_volume_pairing(vol, fake_dst_volume, fake_dst_cluster_ref)
src_params = {'volumeID': fake_src_sf_volid, 'mode': "Sync"}
dst_params = {'volumeID': fake_dst_sf_volid,
'volumePairingKey': 'CAFE'}
mock_issue_api_request.assert_has_calls([
call('RemoveVolumePair', src_params, '8.0'),
call('StartVolumePairing', src_params, '8.0'),
call('CompleteVolumePairing', dst_params, '8.0',
endpoint=fake_dst_cluster_ref['endpoint'])])
@mock.patch('cinder.volume.drivers.solidfire.retry')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
def test_create_volume_pairing_timeout(self, mock_create_cluster_reference,
                                       mock_update_cluster_status,
                                       mock_issue_api_request,
                                       mock_retry):
ctx = context.get_admin_context()
fake_src_sf_volid = 1111
vol_fields = {
'provider_id': "%s %s %s" % (fake_src_sf_volid,
fakes.get_fake_uuid(),
self.fake_primary_cluster['uuid'])
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_dst_sf_volid = 9999
fake_dst_volume = {
'provider_id': "%s %s %s" % (fake_dst_sf_volid,
fakes.get_fake_uuid(),
fake_dst_cluster_ref['uuid'])
}
mock_retry.side_effect = solidfire.SolidFireReplicationPairingError()
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
mock_issue_api_request.reset_mock()
self.assertRaises(solidfire.SolidFireReplicationPairingError,
sfv._create_volume_pairing, vol, fake_dst_volume,
fake_dst_cluster_ref)
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
def test_migrate_volume_volume_is_not_available(
self, mock_do_intercluster_volume_migration):
ctx = context.get_admin_context()
vol_fields = {
'status': 'in-use'
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
host = {'host': 'fakeHost@anotherFakeBackend#fakePool'}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.InvalidVolume,
sfv.migrate_volume, ctx, vol, host)
mock_do_intercluster_volume_migration.assert_not_called()
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
def test_migrate_volume_volume_is_replicated(
self, mock_do_intercluster_volume_migration):
ctx = context.get_admin_context()
type_fields = {'extra_specs': {'replication_enabled': '<is> True'},
'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
vol.volume_type = src_vol_type
host = {'host': 'fakeHost@fakeBackend#fakePool'}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.InvalidVolume,
sfv.migrate_volume, ctx, vol, host)
mock_do_intercluster_volume_migration.assert_not_called()
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
def test_migrate_volume_same_host_and_backend(
self, mock_do_intercluster_volume_migration):
ctx = context.get_admin_context()
type_fields = {'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type,
'host': 'fakeHost@fakeBackend#fakePool'
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
vol.volume_type = src_vol_type
host = {'host': 'fakeHost@fakeBackend#fakePool'}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv.migrate_volume(ctx, vol, host)
mock_do_intercluster_volume_migration.assert_not_called()
self.assertEqual((True, {}), result)
@mock.patch('cinder.volume.volume_utils.get_backend_configuration')
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
def test_migrate_volume_different_host_same_backend(
self, mock_do_intercluster_volume_migration,
mock_get_backend_configuration):
ctx = context.get_admin_context()
type_fields = {'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type,
'host': 'fakeHost@fakeBackend#fakePool'
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
vol.volume_type = src_vol_type
host = {'host': 'anotherFakeHost@fakeBackend#fakePool'}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv.migrate_volume(ctx, vol, host)
mock_get_backend_configuration.assert_not_called()
mock_do_intercluster_volume_migration.assert_not_called()
self.assertEqual((True, {}), result)
@mock.patch('cinder.volume.volume_utils.get_backend_configuration')
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
def test_migrate_volume_config_stanza_not_found(
self, mock_do_intercluster_volume_migration,
mock_get_backend_configuration):
ctx = context.get_admin_context()
type_fields = {'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type,
'host': 'fakeHost@fakeBackend#fakePool'
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
vol.volume_type = src_vol_type
host = {'host': 'fakeHost@anotherFakeBackend#fakePool'}
mock_get_backend_configuration.side_effect = \
exception.ConfigNotFound('error')
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.VolumeMigrationFailed,
sfv.migrate_volume, ctx, vol, host)
mock_get_backend_configuration.assert_called_with(
'anotherFakeBackend', sfv.get_driver_options())
mock_do_intercluster_volume_migration.assert_not_called()
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
@mock.patch('cinder.volume.volume_utils.get_backend_configuration')
def test_migrate_volume_different_backend_same_cluster(
self, mock_get_backend_configuration,
mock_do_intercluster_volume_migration):
ctx = context.get_admin_context()
type_fields = {'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type,
'host': 'fakeHost@fakeBackend#fakePool'
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
vol.volume_type = src_vol_type
host = {'host': 'fakeHost@anotherFakeBackend#fakePool'}
dst_config = conf.BackendGroupConfiguration(
[], conf.SHARED_CONF_GROUP)
dst_config.san_ip = '10.10.10.10'
mock_get_backend_configuration.return_value = dst_config
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.active_cluster['mvip'] = '10.10.10.10'
result = sfv.migrate_volume(ctx, vol, host)
mock_get_backend_configuration.assert_called_with(
'anotherFakeBackend', sfv.get_driver_options())
mock_do_intercluster_volume_migration.assert_not_called()
self.assertEqual((True, {}), result)
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration')
@mock.patch('cinder.volume.volume_utils.get_backend_configuration')
def test_migrate_volume_different_cluster(
self, mock_get_backend_configuration,
mock_do_intercluster_volume_migration):
ctx = context.get_admin_context()
type_fields = {'id': fakes.get_fake_uuid()}
src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
vol_fields = {
'id': fakes.get_fake_uuid(),
'volume_type': src_vol_type,
'host': 'fakeHost@fakeBackend#fakePool'
}
vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
vol.volume_type = src_vol_type
host = {'host': 'fakeHost@anotherFakeBackend#fakePool'}
dst_config = conf.BackendGroupConfiguration(
[], conf.SHARED_CONF_GROUP)
dst_config.san_ip = '10.10.10.10'
mock_get_backend_configuration.return_value = dst_config
mock_do_intercluster_volume_migration.return_value = {}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.active_cluster['mvip'] = '20.20.20.20'
result = sfv.migrate_volume(ctx, vol, host)
mock_do_intercluster_volume_migration.assert_called()
self.assertEqual((True, {}), result)
@mock.patch.object(solidfire.SolidFireDriver, '_build_endpoint_info')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver,
'_setup_intercluster_volume_migration')
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration_data_sync')
@mock.patch.object(solidfire.SolidFireDriver,
'_cleanup_intercluster_volume_migration')
def test_do_intercluster_volume_migration(
self, mock_cleanup_intercluster_volume_migration,
mock_do_intercluster_volume_migration_data_sync,
mock_setup_intercluster_volume_migration,
mock_create_cluster_reference,
mock_build_endpoint_info):
vol_fields = {
'id': fakes.get_fake_uuid()
}
vol = fake_volume.fake_volume_obj(context.get_admin_context(),
**vol_fields)
host = {'host': 'fakeHost@anotherFakeBackend#fakePool'}
dst_config = conf.BackendGroupConfiguration(
[], conf.SHARED_CONF_GROUP)
fake_dst_endpoint = deepcopy(self.fake_secondary_cluster['endpoint'])
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
mock_build_endpoint_info.return_value = fake_dst_endpoint
mock_create_cluster_reference.return_value = fake_dst_cluster_ref
fake_dst_volume = {
'provider_id': "%s %s %s" % (9999,
fakes.get_fake_uuid(),
fake_dst_cluster_ref['uuid'])
}
mock_setup_intercluster_volume_migration.return_value = \
fake_dst_volume
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
result = sfv._do_intercluster_volume_migration(vol, host, dst_config)
mock_build_endpoint_info.assert_called_once_with(
backend_conf=dst_config)
mock_create_cluster_reference.assert_called_with(fake_dst_endpoint)
mock_setup_intercluster_volume_migration.assert_called_with(
vol, fake_dst_cluster_ref)
mock_do_intercluster_volume_migration_data_sync.assert_called_with(
vol, None, 9999, fake_dst_cluster_ref)
mock_cleanup_intercluster_volume_migration.assert_called_with(
vol, 9999, fake_dst_cluster_ref)
self.assertEqual(fake_dst_volume, result)
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
@mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params')
@mock.patch.object(solidfire.SolidFireDriver, '_do_volume_create')
@mock.patch.object(solidfire.SolidFireDriver, '_create_volume_pairing')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver,
'_get_or_create_cluster_pairing')
def test_setup_intercluster_volume_migration(
self, mock_get_or_create_cluster_pairing,
mock_issue_api_request,
mock_create_volume_pairing,
mock_do_volume_create,
mock_get_default_volume_params,
mock_get_create_account,
mock_create_cluster_reference):
fake_project_id = fakes.get_fake_uuid()
vol_fields = {
'id': fakes.get_fake_uuid(),
'project_id': fake_project_id
}
vol = fake_volume.fake_volume_obj(context.get_admin_context(),
**vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_sfaccount = {'username': 'fakeAccount'}
mock_get_create_account.return_value = fake_sfaccount
fake_vol_default_params = {'name': 'someFakeVolumeName'}
mock_get_default_volume_params.return_value = fake_vol_default_params
fake_dst_volume = {'volumeID': 9999}
mock_do_volume_create.return_value = fake_dst_volume
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
mock_issue_api_request.reset_mock()
result = sfv._setup_intercluster_volume_migration(
vol, fake_dst_cluster_ref)
mock_get_or_create_cluster_pairing.assert_called_with(
fake_dst_cluster_ref, check_connected=True)
mock_get_create_account.assert_called_with(
fake_project_id, endpoint=fake_dst_cluster_ref['endpoint'])
mock_get_default_volume_params.assert_called_with(vol, fake_sfaccount)
mock_do_volume_create.assert_called_with(
fake_sfaccount,
fake_vol_default_params,
endpoint=fake_dst_cluster_ref['endpoint'])
mock_issue_api_request.assert_not_called()
mock_create_volume_pairing.assert_called_with(
vol, fake_dst_volume, fake_dst_cluster_ref)
self.assertEqual(fake_dst_volume, result)
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
@mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params')
@mock.patch.object(solidfire.SolidFireDriver, '_do_volume_create')
@mock.patch.object(solidfire.SolidFireDriver, '_create_volume_pairing')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver,
'_get_or_create_cluster_pairing')
def test_setup_intercluster_volume_migration_rollback(
self, mock_get_or_create_cluster_pairing,
mock_issue_api_request,
mock_create_volume_pairing,
mock_do_volume_create,
mock_get_default_volume_params,
mock_get_create_account,
mock_create_cluster_reference):
fake_project_id = fakes.get_fake_uuid()
fake_src_sf_volid = 1111
vol_fields = {
'id': fakes.get_fake_uuid(),
'project_id': fake_project_id,
'provider_id': "%s %s %s" % (fake_src_sf_volid,
fakes.get_fake_uuid(),
self.fake_primary_cluster['uuid'])
}
vol = fake_volume.fake_volume_obj(context.get_admin_context(),
**vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_dst_sf_volid = 9999
fake_dst_volume = {
'provider_id': "%s %s %s" % (fake_dst_sf_volid,
fakes.get_fake_uuid(),
fake_dst_cluster_ref['uuid'])
}
mock_do_volume_create.return_value = fake_dst_volume
mock_create_volume_pairing.side_effect = \
solidfire.SolidFireReplicationPairingError()
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(solidfire.SolidFireReplicationPairingError,
sfv._setup_intercluster_volume_migration, vol,
fake_dst_cluster_ref)
src_params = {'volumeID': fake_src_sf_volid}
dst_params = {'volumeID': fake_dst_sf_volid}
mock_issue_api_request.assert_has_calls([
call('RemoveVolumePair', src_params, '8.0'),
call('RemoveVolumePair', dst_params, '8.0',
endpoint=fake_dst_cluster_ref["endpoint"]),
call('DeleteVolume', dst_params,
endpoint=fake_dst_cluster_ref["endpoint"]),
call('PurgeDeletedVolume', dst_params,
endpoint=fake_dst_cluster_ref["endpoint"])])
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration_complete_data_sync')
@mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_do_intercluster_volume_migration_data_sync(
self, mock_issue_api_request,
mock_create_cluster_reference,
mock_get_sf_volume,
mock_do_intercluster_volume_migration_complete_data_sync):
fake_src_sf_volid = 1111
vol_fields = {
'id': fakes.get_fake_uuid(),
'provider_id': "%s %s %s" % (fake_src_sf_volid,
fakes.get_fake_uuid(),
self.fake_primary_cluster['uuid'])
}
vol = fake_volume.fake_volume_obj(context.get_admin_context(),
**vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_dst_sf_volid = 9999
fake_sfaccount = {'accountID': 'fakeAccountID'}
mock_get_sf_volume.return_value = {
'volumePairs': [{'remoteReplication': {'state': 'Active'}}]
}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._do_intercluster_volume_migration_data_sync(vol, fake_sfaccount,
fake_dst_sf_volid,
fake_dst_cluster_ref)
params = {'volumeID': fake_dst_sf_volid, 'access': 'replicationTarget'}
mock_issue_api_request.assert_called_with(
'ModifyVolume', params, '8.0',
endpoint=fake_dst_cluster_ref['endpoint'])
vol_params = {'accountID': fake_sfaccount['accountID']}
mock_get_sf_volume.assert_called_with(vol.id, vol_params)
mock_do_intercluster_volume_migration_complete_data_sync\
    .assert_called_with(fake_dst_sf_volid, fake_dst_cluster_ref)
@mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall')
@mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
@mock.patch.object(solidfire.SolidFireDriver,
'_do_intercluster_volume_migration_complete_data_sync')
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_do_intercluster_volume_migration_data_sync_timeout(
self, mock_issue_api_request, mock_create_cluster_reference,
mock_do_intercluster_volume_migration_complete_data_sync,
mock_get_sf_volume,
mock_looping_call):
fake_src_sf_volid = 1111
vol_fields = {
'id': fakes.get_fake_uuid(),
'provider_id': "%s %s %s" % (fake_src_sf_volid,
fakes.get_fake_uuid(),
self.fake_primary_cluster['uuid'])
}
vol = fake_volume.fake_volume_obj(context.get_admin_context(),
**vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_dst_sf_volid = 9999
fake_sfaccount = {'accountID': 'fakeAccountID'}
mock_looping_call.return_value.start.return_value.wait.side_effect = (
loopingcall.LoopingCallTimeOut())
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(solidfire.SolidFireDataSyncTimeoutError,
sfv._do_intercluster_volume_migration_data_sync,
vol,
fake_sfaccount,
fake_dst_sf_volid,
fake_dst_cluster_ref)
mock_get_sf_volume.assert_not_called()
mock_do_intercluster_volume_migration_complete_data_sync\
.assert_not_called()
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_do_intercluster_volume_migration_complete_data_sync(
self, mock_issue_api_request, mock_create_cluster_reference):
fake_src_sf_volid = 1111
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._do_intercluster_volume_migration_complete_data_sync(
fake_src_sf_volid, fake_dst_cluster_ref)
params = {'volumeID': fake_src_sf_volid, 'access': 'readWrite'}
mock_issue_api_request.assert_called_with(
'ModifyVolume', params, '8.0',
endpoint=fake_dst_cluster_ref['endpoint'])
@mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_cleanup_intercluster_volume_migration(
self, mock_issue_api_request, mock_create_cluster_reference):
fake_src_sf_volid = 1111
vol_fields = {
'id': fakes.get_fake_uuid(),
'provider_id': "%s %s %s" % (fake_src_sf_volid,
fakes.get_fake_uuid(),
self.fake_primary_cluster['uuid'])
}
vol = fake_volume.fake_volume_obj(context.get_admin_context(),
**vol_fields)
fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster)
fake_dst_sf_volid = 9999
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._cleanup_intercluster_volume_migration(vol, fake_dst_sf_volid,
fake_dst_cluster_ref)
src_params = {'volumeID': fake_src_sf_volid}
dst_params = {'volumeID': fake_dst_sf_volid}
mock_issue_api_request.assert_has_calls([
call('RemoveVolumePair', dst_params, '8.0',
endpoint=fake_dst_cluster_ref["endpoint"]),
call('RemoveVolumePair', src_params, '8.0'),
call('DeleteVolume', src_params),
call('PurgeDeletedVolume', src_params)])
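
The tests above repeatedly fabricate provider_id values as three space separated fields. A minimal sketch of the convention they exercise, with a hypothetical helper name (the driver itself performs this parsing inline via int(provider_id.split()[0])):

    # Hypothetical helper mirroring the driver's inline parsing. A SolidFire
    # provider_id is "<sf_volume_id> <uuid> <cluster_uuid>"; only the first
    # field is needed to address the volume through the SolidFire API.
    def sf_volume_id_from_provider_id(provider_id):
        return int(provider_id.split()[0])

    assert sf_volume_id_from_provider_id(
        '1111 someFakeUuid f0632d53-d836-474c-a5bc-478ef18daa32') == 1111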


@@ -24,6 +24,7 @@ import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
@@ -79,6 +80,7 @@ sf_opts = [
cfg.BoolOpt('sf_enable_vag',
default=False,
help='Utilize volume access groups on a per-tenant basis.'),
cfg.StrOpt('sf_provisioning_calc',
default='maxProvisionedSpace',
choices=['maxProvisionedSpace', 'usedSpace'],
@@ -86,7 +88,19 @@ sf_opts = [
'provisioning calculations. If this parameter is set to '
'\'usedSpace\', the driver will report correct '
'values as expected by Cinder '
'thin provisioning.'),
cfg.IntOpt('sf_cluster_pairing_timeout',
default=60,
min=3,
help='Sets time in seconds to wait for clusters to complete '
'pairing.'),
cfg.IntOpt('sf_volume_pairing_timeout',
default=3600,
min=30,
help='Sets time in seconds to wait for a migrating volume to '
'complete pairing and sync.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP)
@@ -115,8 +129,13 @@ class SolidFireAPIDataException(SolidFireAPIException):
class SolidFireAccountNotFound(SolidFireDriverException):
message = _("Unable to locate account %(account_name)s on "
"Solidfire device")
message = _("Unable to locate account %(account_name)s in "
"SolidFire cluster")
class SolidFireVolumeNotFound(SolidFireDriverException):
message = _("Unable to locate volume id %(volume_id)s in "
"SolidFire cluster")
class SolidFireRetryableException(exception.VolumeBackendAPIException):
@@ -127,6 +146,10 @@ class SolidFireReplicationPairingError(exception.VolumeBackendAPIException):
message = _("Error on SF Keys")
class SolidFireDataSyncTimeoutError(exception.VolumeBackendAPIException):
message = _("Data sync volumes timed out")
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
@@ -227,9 +250,10 @@ class SolidFireDriver(san.SanISCSIDriver):
2.1.0 - Add Cinder Active/Active support
- Enable Active/Active support flag
- Implement Active/Active replication support
2.2.0 - Add storage assisted volume migration support
"""
VERSION = '2.2.0'
SUPPORTS_ACTIVE_ACTIVE = True
@@ -308,6 +332,7 @@ class SolidFireDriver(san.SanISCSIDriver):
self.failed_over = True
self.replication_enabled = True
else:
self.active_cluster = self._create_cluster_reference()
if self.configuration.replication_device:
@@ -380,7 +405,7 @@
with excutils.save_and_reraise_exception():
LOG.error('Cluster pairing failed: %s', ex.msg)
LOG.debug('Initialized Cluster pair with ID: %s', pair_id)
return pair_id
def _get_cluster_info(self, remote_endpoint):
@@ -417,39 +442,75 @@
raise SolidFireDriverException(msg)
def _set_cluster_pairs(self):
repl_configs = self.configuration.replication_device[0]
remote_endpoint = self._build_repl_endpoint_info(**repl_configs)
remote_cluster = self._create_cluster_reference(remote_endpoint)
remote_cluster['backend_id'] = repl_configs['backend_id']
cluster_pair = self._get_or_create_cluster_pairing(
remote_cluster, check_connected=True)
remote_cluster['clusterPairID'] = cluster_pair['clusterPairID']
if self.cluster_pairs:
self.cluster_pairs.clear()
self.cluster_pairs.append(remote_cluster)
LOG.debug("Available cluster pairs: %s", self.cluster_pairs)
def _get_cluster_pair(self, remote_cluster):
existing_pairs = self._issue_api_request(
'ListClusterPairs', {}, version='8.0')['result']['clusterPairs']
LOG.debug("Existing cluster pairs: %s", existing_pairs)
remote_pair = None
for ep in existing_pairs:
if remote_cluster['mvip'] == ep['mvip']:
remote_pair = ep
LOG.debug("Found remote pair: %s", remote_pair)
break
return remote_pair
def _get_or_create_cluster_pairing(self, remote_cluster,
check_connected=False):
# FIXME(sfernand): We check for pairs only in the remote cluster.
# This is an issue if a pair exists only in destination cluster.
remote_pair = self._get_cluster_pair(remote_cluster)
if not remote_pair:
LOG.debug("Setting up new cluster pairs.")
self._create_remote_pairing(remote_cluster)
remote_pair = self._get_cluster_pair(remote_cluster)
if check_connected:
if not remote_pair:
msg = _("Cluster pair not found for cluster [%s]",
remote_cluster['mvip'])
raise SolidFireReplicationPairingError(message=msg)
if remote_pair['status'] == 'Connected':
return remote_pair
def _wait_cluster_pairing_connected():
pair = self._get_cluster_pair(remote_cluster)
if pair and pair['status'] == 'Connected':
raise loopingcall.LoopingCallDone(pair)
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_cluster_pairing_connected)
remote_pair = timer.start(
interval=3,
timeout=self.configuration.sf_cluster_pairing_timeout) \
.wait()
except loopingcall.LoopingCallTimeOut:
msg = _("Cluster pair not found or in an invalid state.")
raise SolidFireReplicationPairingError(message=msg)
return remote_pair
def _create_cluster_reference(self, endpoint=None):
cluster_ref = {}
@@ -569,23 +630,26 @@
}
return endpoint
def _build_endpoint_info(self, backend_conf=None, **kwargs):
endpoint = {}
if not backend_conf:
backend_conf = self.configuration
# NOTE(jdg): We default to the primary cluster config settings
# but always check to see if desired settings were passed in
# to handle things like replication targets with unique settings
endpoint['mvip'] = (
kwargs.get('mvip', backend_conf.san_ip))
endpoint['login'] = (
kwargs.get('login', backend_conf.san_login))
endpoint['passwd'] = (
kwargs.get('password', backend_conf.san_password))
endpoint['port'] = (
kwargs.get('port', backend_conf.sf_api_port))
endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
endpoint['port'])
endpoint['svip'] = kwargs.get('svip', backend_conf.sf_svip)
if not endpoint.get('mvip', None) and kwargs.get('backend_id', None):
endpoint['mvip'] = kwargs.get('backend_id')
return endpoint
@@ -621,7 +685,7 @@
if (('error' in response) and
response['error']['name'] == 'xInvalidPairingKey'):
LOG.debug("Error on volume pairing!")
LOG.debug("Error on volume pairing")
raise SolidFireReplicationPairingError
if 'error' in response:
@@ -1081,8 +1145,9 @@
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in accounts
if self._get_sf_account_name(cinder_project_id) in
acc['username']],
key=lambda k: k['accountID'])
def _get_all_active_volumes(self, cinder_uuid=None):
@@ -1344,9 +1409,12 @@
> 0 else volume.get('size'))
return qos
def _get_default_volume_params(self, volume, sf_account=None,
is_clone=False):
if not sf_account:
sf_account = self._get_create_account(volume.project_id)
qos = self._retrieve_qos_setting(volume)
create_time = volume.created_at.isoformat()
@@ -1390,7 +1458,7 @@
"""
sf_account = self._get_create_account(volume['project_id'])
params = self._get_default_volume_params(volume, sf_account)
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
@@ -1450,6 +1518,50 @@
return rep_opts
def _create_volume_pairing(self, volume, dst_volume, tgt_cluster):
src_sf_volid = int(volume['provider_id'].split()[0])
dst_sf_volid = int(dst_volume['provider_id'].split()[0])
@retry(SolidFireReplicationPairingError, tries=6)
def _pair_volumes():
rep_type = "Sync"
# Enable volume pairing
LOG.debug("Starting pairing source volume ID: %s",
src_sf_volid)
# Make sure we split any pair the volume has
params = {
'volumeID': src_sf_volid,
'mode': rep_type
}
self._issue_api_request('RemoveVolumePair', params, '8.0')
rep_key = self._issue_api_request(
'StartVolumePairing', params,
'8.0')['result']['volumePairingKey']
LOG.debug("Volume pairing started on source: "
"%(endpoint)s",
{'endpoint': tgt_cluster['endpoint']['url']})
params = {
'volumeID': dst_sf_volid,
'volumePairingKey': rep_key
}
self._issue_api_request('CompleteVolumePairing',
params,
'8.0',
endpoint=tgt_cluster['endpoint'])
LOG.debug("Volume pairing completed on destination: "
"%(endpoint)s",
{'endpoint': tgt_cluster['endpoint']['url']})
_pair_volumes()
def _replicate_volume(self, volume, params,
parent_sfaccount, rep_info):
@@ -2164,6 +2276,205 @@
volume['user_id'] = new_user
return self.target_driver.ensure_export(context, volume, None)
def _setup_intercluster_volume_migration(self, src_volume,
dst_cluster_ref):
LOG.info("Setting up cluster migration for volume [%s]",
src_volume.name)
# We should be able to rollback in case something went wrong
def _do_migrate_setup_rollback(src_sf_volume_id, dst_sf_volume_id):
# Removing volume pair in source cluster
params = {'volumeID': src_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0')
# Removing volume pair in destination cluster
params = {'volumeID': dst_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0',
endpoint=dst_cluster_ref["endpoint"])
# Destination volume should also be removed.
self._issue_api_request('DeleteVolume', params,
endpoint=dst_cluster_ref["endpoint"])
self._issue_api_request('PurgeDeletedVolume', params,
endpoint=dst_cluster_ref["endpoint"])
self._get_or_create_cluster_pairing(
dst_cluster_ref, check_connected=True)
dst_sf_account = self._get_create_account(
src_volume['project_id'], endpoint=dst_cluster_ref['endpoint'])
LOG.debug("Destination account is [%s]", dst_sf_account["username"])
params = self._get_default_volume_params(src_volume, dst_sf_account)
dst_volume = self._do_volume_create(
dst_sf_account, params, endpoint=dst_cluster_ref['endpoint'])
try:
self._create_volume_pairing(
src_volume, dst_volume, dst_cluster_ref)
except SolidFireReplicationPairingError:
with excutils.save_and_reraise_exception():
dst_sf_volid = int(dst_volume['provider_id'].split()[0])
src_sf_volid = int(src_volume['provider_id'].split()[0])
LOG.debug("Error pairing volume on remote cluster. Rolling "
"back and deleting volume %(vol)s at cluster "
"%(cluster)s.",
{'vol': dst_sf_volid,
'cluster': dst_cluster_ref['mvip']})
_do_migrate_setup_rollback(src_sf_volid, dst_sf_volid)
return dst_volume
def _do_intercluster_volume_migration_data_sync(self, src_volume,
src_sf_account,
dst_sf_volume_id,
dst_cluster_ref):
params = {'volumeID': dst_sf_volume_id, 'access': 'replicationTarget'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=dst_cluster_ref['endpoint'])
def _wait_sync_completed():
vol_params = None
if src_sf_account:
vol_params = {'accountID': src_sf_account['accountID']}
sf_vol = self._get_sf_volume(src_volume.id, vol_params)
state = sf_vol['volumePairs'][0]['remoteReplication']['state']
if state == 'Active':
raise loopingcall.LoopingCallDone(sf_vol)
LOG.debug("Waiting volume data to sync. "
"Replication state is [%s]", state)
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_sync_completed)
timer.start(
interval=30,
timeout=self.configuration.sf_volume_pairing_timeout).wait()
except loopingcall.LoopingCallTimeOut:
msg = _("Timeout waiting volumes to sync.")
raise SolidFireDataSyncTimeoutError(reason=msg)
self._do_intercluster_volume_migration_complete_data_sync(
dst_sf_volume_id, dst_cluster_ref)
def _do_intercluster_volume_migration_complete_data_sync(self,
sf_volume_id,
cluster_ref):
params = {'volumeID': sf_volume_id, 'access': 'readWrite'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=cluster_ref['endpoint'])
def _cleanup_intercluster_volume_migration(self, src_volume,
dst_sf_volume_id,
dst_cluster_ref):
src_sf_volume_id = int(src_volume['provider_id'].split()[0])
# Removing volume pair in destination cluster
params = {'volumeID': dst_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0',
endpoint=dst_cluster_ref["endpoint"])
# Removing volume pair in source cluster
params = {'volumeID': src_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0')
# The source volume can now be safely deleted.
self._issue_api_request('DeleteVolume', params)
self._issue_api_request('PurgeDeletedVolume', params)
def _do_intercluster_volume_migration(self, volume, host, dst_config):
LOG.debug("Start migrating volume [%(name)s] to cluster [%(cluster)s]",
{"name": volume.name, "cluster": host["host"]})
dst_endpoint = self._build_endpoint_info(backend_conf=dst_config)
LOG.debug("Destination cluster mvip is [%s]", dst_endpoint["mvip"])
dst_cluster_ref = self._create_cluster_reference(dst_endpoint)
LOG.debug("Destination cluster reference created. API version is [%s]",
dst_cluster_ref["clusterAPIVersion"])
dst_volume = self._setup_intercluster_volume_migration(
volume, dst_cluster_ref)
dst_sf_volume_id = int(dst_volume["provider_id"].split()[0])
# FIXME(sfernand): should pass src account to improve performance
self._do_intercluster_volume_migration_data_sync(
volume, None, dst_sf_volume_id, dst_cluster_ref)
self._cleanup_intercluster_volume_migration(
volume, dst_sf_volume_id, dst_cluster_ref)
return dst_volume
def migrate_volume(self, ctxt, volume, host):
"""Migrate a SolidFire volume to the specified host/backend"""
LOG.info("Migrate volume %(vol_id)s to %(host)s.",
{"vol_id": volume.id, "host": host["host"]})
if volume.status != fields.VolumeStatus.AVAILABLE:
msg = _("Volume status must be 'available' to execute "
"storage assisted migration.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume.is_replicated():
msg = _("Migration of replicated volumes is not allowed.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
src_backend = volume_utils.extract_host(
volume.host, "backend").split("@")[1]
dst_backend = volume_utils.extract_host(
host["host"], "backend").split("@")[1]
if src_backend == dst_backend:
LOG.info("Same backend, nothing to do.")
return True, {}
try:
dst_config = volume_utils.get_backend_configuration(
dst_backend, self.get_driver_options())
except exception.ConfigNotFound:
msg = _("Destination backend config not found. Check if "
"destination backend stanza is properly configured in "
"cinder.conf, or add parameter --force-host-copy True "
"to perform host-assisted migration.")
raise exception.VolumeMigrationFailed(reason=msg)
if self.active_cluster['mvip'] == dst_config.san_ip:
LOG.info("Same cluster, nothing to do.")
return True, {}
else:
LOG.info("Source and destination clusters are different. "
"A cluster migration will be performed.")
LOG.debug("Active cluster: [%(active)s], "
"Destination: [%(dst)s]",
{"active": self.active_cluster['mvip'],
"dst": dst_config.san_ip})
updates = self._do_intercluster_volume_migration(volume, host,
dst_config)
LOG.info("Successfully migrated volume %(vol_id)s to %(host)s.",
{"vol_id": volume.id, "host": host["host"]})
return True, updates
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
@@ -2525,9 +2836,13 @@
LOG.info("Failback completed. "
"Switching active cluster back to default.")
self.active_cluster = self._create_cluster_reference()
self.failed_over = False
# Recreating cluster pairs after a successful failback
if self.configuration.replication_device:
self._set_cluster_pairs()
self.replication_enabled = True
else:
LOG.info("Failover completed. "
"Switching active cluster to %s.", active_backend_id)

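The two pairing timeouts introduced above are operator tunable per backend stanza. A minimal cinder.conf sketch, assuming a backend named solidfire-1 (the values shown are illustrative; the defaults are 60 and 3600 seconds):

    [solidfire-1]
    volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver
    sf_cluster_pairing_timeout = 120
    sf_volume_pairing_timeout = 7200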
View File

@@ -56,6 +56,8 @@ from cinder import objects
from cinder.objects import fields
from cinder import rpc
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
@@ -1277,3 +1279,25 @@ def upload_volume(context, image_service, image_meta, volume_path,
run_as_root=run_as_root,
compress=compress, store_id=store_id,
base_image_ref=base_image_ref)
def get_backend_configuration(backend_name, backend_opts=None):
"""Get a configuration object for a specific backend."""
config_stanzas = CONF.list_all_sections()
if backend_name not in config_stanzas:
msg = _("Could not find backend stanza %(backend_name)s in "
"configuration. Available stanzas are %(stanzas)s")
params = {
"stanzas": config_stanzas,
"backend_name": backend_name,
}
raise exception.ConfigNotFound(message=msg % params)
config = configuration.Configuration(driver.volume_opts,
config_group=backend_name)
if backend_opts:
config.append_config_values(backend_opts)
return config
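
A minimal usage sketch for the helper above (the backend name solidfire-2 is an assumption; the SolidFire driver invokes it as volume_utils.get_backend_configuration(dst_backend, self.get_driver_options())):

    from cinder import exception
    from cinder.volume import volume_utils

    try:
        # Resolve options against the [solidfire-2] stanza of cinder.conf;
        # raises ConfigNotFound when no such stanza exists.
        dst_config = volume_utils.get_backend_configuration('solidfire-2')
        dst_san_ip = dst_config.san_ip
    except exception.ConfigNotFound:
        dst_san_ip = None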


@@ -0,0 +1,6 @@
---
features:
  - |
    NetApp SolidFire driver: Added inter-cluster volume migration
    (storage assisted) support. This allows users to efficiently
    migrate volumes between different SolidFire backends.
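
As a usage sketch (the volume ID and destination host strings are placeholders), the feature is exercised through the regular migrate flow; the volume must be available and not replicated for the storage assisted path to be taken:

    cinder migrate <volume-id> otherHost@solidfire-2#pool

If the destination backend stanza is not present in the source service's cinder.conf, host-assisted migration can be forced instead:

    cinder migrate --force-host-copy True <volume-id> otherHost@solidfire-2#pool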